diff --git a/META b/META
index 007d559df0674ab2918a90b7f4ab2c1a3cc69976..04e43caa0b8bc724af77abf56f88721ac6a364cc 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
  Api_revision: 0
  Major: 2
  Meta: 1
- Micro: 4
+ Micro: 5
  Minor: 0
  Name: slurm
  Release: 1
  Release_tags: dist
- Version: 2.0.4
+ Version: 2.0.5
diff --git a/NEWS b/NEWS
index c9ed38141c1ee796dcefe282933ef83a2d87777c..55e1b1b0d5264347db3d4a40346d77f53182a8cf 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,83 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 2.0.5
+========================
+ -- BLUEGENE - Added support for emulating systems with an X-dimension of 4.
+ -- BLUEGENE - When a nodecard goes down on a non-Dynamic system, SLURM now
+    drains only blocks smaller than one midplane; if no such block exists,
+    SLURM drains the entire midplane and does not mark any block in error
+    state. Previously SLURM would drain every block overlapping the nodecard,
+    which made it possible for a large block to render other blocks unusable
+    since they overlap some other part of that block which isn't actually bad.
+ -- BLUEGENE - Handle L3 errors on boot better.
+ -- Don't revoke a pending batch launch request from the slurmctld if the
+    job is immediately suspended (a normal event with gang scheduling).
+ -- BLUEGENE - Fixed issue where a restart of slurmctld would allow nodes in
+    error blocks to be considered for building new blocks when testing if a
+    job would run. This was a visual bug only; jobs would never run on the
+    new block, but the block would appear in SLURM tools.
+ -- Better responsiveness when starting new allocations while running with
+    the slurmdbd.
+ -- Fixed race condition when reconfiguring the slurmctld while using the
+    consumable resources plugin, which would cause the controller to core
+    dump.
+ -- Fixed race condition that sometimes caused jobs to stay in completing
+    state longer than necessary after being terminated.
+ -- Fixed issue where, if a parent account had a QOS added and a child
+    account then had that QOS removed, the users still got the QOS.
+ -- BLUEGENE - New blocks in dynamic mode will only be made in the system
+    when the block is actually needed for a job, not when testing.
+ -- BLUEGENE - Don't remove the larger block used for a small block until
+    the job starts.
+ -- Add new squeue output format and sort option of "%L" to print a job's time
+    left (time limit minus time used).
+ -- BLUEGENE - Fixed draining state count for sinfo/sview.
+ -- Fix for sview to not core dump when viewing nodes allocated to a partition
+    and all the jobs finish.
+ -- Fix cons_res to not core dump when finishing a job running on a
+    defunct partition.
+ -- Don't require a node to have --ntasks-per-node CPUs for use when the
+    --overcommit option is also used.
+ -- Increase the maximum number of tasks which can be launched by a job step
+    per node from 64 to 128.
+ -- sview - make right-click on a popup window title show a sorted list.
+ -- scontrol now displays correct units for job min memory and min tmp disk.
+ -- Better support for salloc/sbatch arbitrary layout when setting the
+    correct SLURM_TASKS_PER_NODE.
+ -- Env var SLURM_CPUS_ON_NODE is now set correctly depending on the
+    FastSchedule configuration parameter.
+ -- Correction to topology/3d_torus plugin calculation when coordinate value
+    exceeds "9" (i.e. a hex value).
+ -- In sched/wiki2 - Strip single and double quotes out of a node's reason
+    string to avoid confusing Moab's parser.
+ -- Modified scancel to cancel any pending jobs before cancelling any others.
+ -- Updated sview config info.
+ -- Fix a couple of bugs with respect to scheduling with overlapping
+    reservations (one with a flag of "Maintenance").
+ -- Fix bug when updating a pending job's nice value after explicitly setting
+    its priority.
+ -- We no longer add blank QOS values.
+ -- Fix task affinity for systems running with FastSchedule!=0 that have
+    fewer resources configured than physically exist.
+ -- Slurm.pm now loads without warnings on AIX systems.
+ -- Modified PMI code to use the correct length in strncpy calls.
+ -- Fix for filling in a QOS structure to return SLURM_SUCCESS on success.
+ -- BLUEGENE - Added SLURM_BG_NUM_NODES with the cnode count of the
+    allocation; SLURM_JOB_NUM_NODES represents midplane counts until 2.1.
+ -- BLUEGENE - Added fix for when a block is in error state and the midplane
+    containing the block is also set to drain/down. This previously
+    prevented dynamic creation of new blocks when this state was present.
+ -- Fixed bug where a user's association limits were not enforced; only
+    parent limits were being enforced.
+ -- For OpenMPI use of SLURM reserved ports, reserve a count of ports equal to
+    the maximum task count on any node plus one (the plus one is a correction).
+ -- Do not reset SLURM_TASKS_PER_NODE when srun --preserve-env option is used
+    (needed by OpenMPI).
+ -- Fix possible assert failure in task/affinity if a node is configured with
+    more resources than physically exist.
+ -- Sview can now resize columns.
+ -- Avoid clearing a drained node's reason field when state is changed from
+    down (i.e. returned to service). Note that the drain state flag stays set.
+
 * Changes in SLURM 2.0.4
 ========================
  -- Permit node suspend/resume logic to be enabled through "scontrol reconfig"
@@ -10,7 +87,8 @@ documents those changes that are of interest to users and admins.
  -- Better logging for when job's request bad output file locations.
  -- Fix issue where if user specified non-existant file to write to slurmstepd
     will regain privileges before sending batch script ended to the controller.
- -- Fix bug when using the priority_multifactor plugin with no associations yet.
+ -- Fix bug when using the priority_multifactor plugin with no associations
+    yet.
  -- BLUEGENE - we no longer check for the images to sync state. This was
     needed long ago when rebooting blocks wasn't a possibility and should had
     been removed when that functionality was available.
@@ -28,8 +106,6 @@ documents those changes that are of interest to users and admins.
     association.
  -- Do not set a job's virtual memory limit based upon the job's specified
     memory limit (which should be a real memory limit, not virtual).
- -- In sched/wiki2 - Strip single and double quotes out of a node's reason
-    string to avoid confusing Moab's parser.
  -- BLUEGENE - fix for sinfo/sview for displaying proper node count for nodes
     in draining state.
  -- Fix for sview when viewing a certain part of a group (like 1 job) so it
@@ -45,7 +121,7 @@ documents those changes that are of interest to users and admins.
     to properly handle user names that contain all digits. Return error code
     from uid_from_string() and gid_from_string() functions rather than a uid
     of -1, which might be a valid uid or gid on some systems.
- -- Fix in re-calcuation of job priorities do to DOWN or DRAINED nodes.
+ -- Fix in re-calculation of job priorities due to DOWN or DRAINED nodes.
* Changes in SLURM 2.0.3 ======================== @@ -4239,4 +4315,4 @@ documents those changes that are of interest to users and admins. -- Change directory to /tmp in slurmd if daemonizing. -- Logfiles are reopened on reconfigure. -$Id: NEWS 18108 2009-07-10 17:08:17Z jette $ +$Id: NEWS 18691 2009-09-11 22:41:00Z jette $ diff --git a/auxdir/libtool.m4 b/auxdir/libtool.m4 new file mode 100644 index 0000000000000000000000000000000000000000..2ca1c1f584490065201c64b7146ebe7bd919bfdc --- /dev/null +++ b/auxdir/libtool.m4 @@ -0,0 +1,7373 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) + +# serial 56 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. 
+m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. 
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl +_LT_PROG_ECHO_BACKSLASH + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). 
+libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. 
+m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to `config.status' so that its +# declaration there will have the same value as in `configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. 
In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# <var>='`$ECHO "X$<var>" | $Xsed -e "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags="_LT_TAGS"dnl +]) + + +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. +# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) + + +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. +m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) + + +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into `config.status', and then the shell code to quote escape them in +# for loops in `config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. +m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Fix-up fallback echo if it was mangled by the above quoting rules. +case \$lt_ECHO in +*'\\\[$]0 --fallback-echo"')dnl " + lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\` + ;; +esac + +_LT_OUTPUT_LIBTOOL_INIT +]) + + +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. +AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +cat >"$CONFIG_LT" <<_LTEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate a libtool stub with the current configuration. + +lt_cl_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AS_SHELL_SANITIZE +_AS_PREPARE + +exec AS_MESSAGE_FD>&1 +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +\`$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to <bug-libtool@gnu.org>." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. + +Copyright (C) 2008 Free Software Foundation, Inc. +This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." 
+ +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +if test "$no_create" != yes; then + lt_cl_success=: + test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" + exec AS_MESSAGE_LOG_FD>/dev/null + $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false + exec AS_MESSAGE_LOG_FD>>config.log + $lt_cl_success || AS_EXIT(1) +fi +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + _LT_PROG_XSI_SHELLFNS + + sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). +# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. 
You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES +# -------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=echo + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags 
-install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX +# ----------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +AC_LINK_IFELSE(AC_LANG_PROGRAM,[ +lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\(.*\)$/\1/ + p + } + }' +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +fi],[]) +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[ifdef([AC_DIVERSION_NOTICE], + [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], + [AC_DIVERT_PUSH(NOTICE)]) +$1 +AC_DIVERT_POP +])# _LT_SHELL_INIT + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Add some code to the start of the generated configure script which +# will find an echo command which doesn't interpret backslashes. +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[_LT_SHELL_INIT([ +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$lt_ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` + ;; +esac + +ECHO=${lt_ECHO-echo} +if test "X[$]1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X[$]1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then + # Yippee, $ECHO works! + : +else + # Restart under the correct shell. + exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} +fi + +if test "X[$]1" = X--fallback-echo; then + # used as fallback echo + shift + cat <<_LT_EOF +[$]* +_LT_EOF + exit 0 +fi + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +if test -z "$lt_ECHO"; then + if test "X${echo_test_string+set}" != Xset; then + # find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if { echo_test_string=`eval $cmd`; } 2>/dev/null && + { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null + then + break + fi + done + fi + + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : + else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$ECHO" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + ECHO='print -r' + elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} + else + # Try using printf. + ECHO='printf %s\n' + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + ECHO="$CONFIG_SHELL [$]0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$CONFIG_SHELL [$]0 --fallback-echo" + else + # maybe with a smaller string... 
+ prev=: + + for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do + if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "[$]0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} + else + # Oops. We lost completely, so just stick with echo. + ECHO=echo + fi + fi + fi + fi + fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +lt_ECHO=$ECHO +if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then + lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" +fi + +AC_SUBST(lt_ECHO) +]) +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], + [An echo program that does not interpret backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
+ SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[AC_CHECK_TOOL(AR, ar, false) +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1]) + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. 
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. 
+ if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ + = "XX$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +max_cmd_len=$lt_cv_sys_max_cmd_len +_LT_DECL([], [max_cmd_len], [0], + [What is the maximum length of a command?]) +])# LT_CMD_MAX_LEN + +# Old name: +AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) + + +# _LT_HEADER_DLFCN +# ---------------- +m4_defun([_LT_HEADER_DLFCN], +[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl +])# _LT_HEADER_DLFCN + + +# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# ---------------------------------------------------------------- +m4_defun([_LT_TRY_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +[#line __oline__ "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include <dlfcn.h> +#endif + +#include <stdio.h> + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. 
+m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. 
+ lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || + test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e 
"s,=/,/,g"` + if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` + else + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. 
+ if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib<name>.so + # instead of lib<name>.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[123]]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix[[3-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux* | k*bsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # Some binutils ld are patched to set DT_RUNPATH + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. 
Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + 
sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. + The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program which can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. 
+ ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX + +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) + + +# _LT_PATH_MAGIC +# -------------- +# find a file program which can recognize a shared library +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# _LT_PATH_MAGIC + + +# LT_PATH_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. 
+ ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in + *GNU* | *'with BFD'*) + test "$with_gnu_ld" != no && break + ;; + *) + test "$with_gnu_ld" != yes && break + ;; + esac + fi + done + IFS="$lt_save_ifs" +else + lt_cv_path_LD="$LD" # Let the user override the test with a path. +fi]) +LD="$lt_cv_path_LD" +if test -n "$LD"; then + AC_MSG_RESULT($LD) +else + AC_MSG_RESULT(no) +fi +test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) +_LT_PATH_LD_GNU +AC_SUBST([LD]) + +_LT_TAGDECL([], [LD], [1], [The linker used to build libraries]) +])# LT_PATH_LD + +# Old names: +AU_ALIAS([AM_PROG_LD], [LT_PATH_LD]) +AU_ALIAS([AC_PROG_LD], [LT_PATH_LD]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_LD], []) +dnl AC_DEFUN([AC_PROG_LD], []) + + +# _LT_PATH_LD_GNU +#- -------------- +m4_defun([_LT_PATH_LD_GNU], +[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld, +[# I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 </dev/null` in +*GNU* | *'with BFD'*) + lt_cv_prog_gnu_ld=yes + ;; +*) + lt_cv_prog_gnu_ld=no + ;; +esac]) +with_gnu_ld=$lt_cv_prog_gnu_ld +])# _LT_PATH_LD_GNU + + +# _LT_CMD_RELOAD +# -------------- +# find reload flag for linker +# -- PORTME Some linkers may need a different reload flag. +m4_defun([_LT_CMD_RELOAD], +[AC_CACHE_CHECK([for $LD option to reload object files], + lt_cv_ld_reload_flag, + [lt_cv_ld_reload_flag='-r']) +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac +_LT_DECL([], [reload_flag], [1], [How to create reloadable object files])dnl +_LT_DECL([], [reload_cmds], [2])dnl +])# _LT_CMD_RELOAD + + +# _LT_CHECK_MAGIC_METHOD +# ---------------------- +# how to check for library dependencies +# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_MAGIC_METHOD], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +AC_CACHE_CHECK([how to recognize dependent libraries], +lt_cv_deplibs_check_method, +[lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. 
+# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[[4-9]]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[[45]]*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. 
+linux* | k*bsd*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method == "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. 
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi]) +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :) + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + + +# LT_LIB_M +# -------- +# check for math library +AC_DEFUN([LT_LIB_M], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +AC_SUBST([LIBM]) +])# LT_LIB_M + +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) + + +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl + +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI + + +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_NM])dnl +AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl 
+m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + + # Check to see that the pipe works correctly. 
+ pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +const struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. 
+ if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +AC_MSG_CHECKING([for $compiler option to produce PIC]) +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC*) + # IBM XL 8.0 on PPC + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. 
+ lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl*) + # IBM XL C 8.0/Fortran 10.1 on PPC + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Sun\ F*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac +AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) + +# +# Check to 
make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. +m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + ;; + linux* | k*bsd*-gnu) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. 
+ extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach <jrb3@best.com> says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag= + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed 
-e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. 
If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + _LT_TAGVAR(link_all_deplibs, $1)=no + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + freebsd1*) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. 
+ freebsd2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. 
+ save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE(int foo(void) {}, + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + ) + LDFLAGS="$save_LDFLAGS" + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> 
$output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs 
$linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_MSG_CHECKING([whether -lc should be explicitly linked in]) + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)]) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + [Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. + This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], + [[If ld is used when linking, flag to hardcode $libdir into a binary + during linking. 
This must work even if $libdir does not exist]]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting ${shlibpath_var} if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [fix_srcfile_path], [1], + [Fix the shell variable $srcfile for the compiler]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) +_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS + + +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG + + +# _LT_PROG_CXX +# ------------ +# Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++ +# compiler, we have our own version here. +m4_defun([_LT_PROG_CXX], +[ +pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes]) +AC_PROG_CXX +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi +popdef([AC_MSG_ERROR]) +])# _LT_PROG_CXX + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_CXX], []) + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_CXX_CONFIG], +[AC_REQUIRE([_LT_PROG_CXX])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. 
Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. 
+ _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach <jrb3@best.com> says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + gnu*) + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 will use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file 
$wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + xl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=echo + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. +m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +# Dependencies to place before and after the object being linked: +_LT_TAGVAR(predep_objects, $1)= +_LT_TAGVAR(postdep_objects, $1)= +_LT_TAGVAR(predeps, $1)= +_LT_TAGVAR(postdeps, $1)= +_LT_TAGVAR(compiler_lib_search_path, $1)= + +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... 
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +int a; +void foo (void) { a = 0; } +_LT_EOF +], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF +], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer*4 a + a=0 + return + end +_LT_EOF +], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer a + a=0 + return + end +_LT_EOF +], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +public class foo { + private int a; + public void bar (void) { + a = 0; + } +}; +_LT_EOF +]) +dnl Parse the compiler output and extract the necessary +dnl objects, libraries and library flags. +if AC_TRY_EVAL(ac_compile); then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + else + prev= + fi + + if test "$pre_test_object_deps_done" = no; then + case $p in + -L* | -R*) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)="${prev}${p}" + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi + ;; + + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)="$p" + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)="$p" + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f confest.$objext + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. 
+ case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_PROG_F77 +# ------------ +# Since AC_PROG_F77 is broken, in that it returns the empty string +# if there is no fortran compiler, we have our own version here. +m4_defun([_LT_PROG_F77], +[ +pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes]) +AC_PROG_F77 +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi +popdef([AC_MSG_ERROR]) +])# _LT_PROG_F77 + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_F77], []) + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_F77_CONFIG], +[AC_REQUIRE([_LT_PROG_F77])dnl +AC_LANG_PUSH(Fortran 77) + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. 
+objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + CC=${F77-"f77"} + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_PROG_FC +# ----------- +# Since AC_PROG_FC is broken, in that it returns the empty string +# if there is no fortran compiler, we have our own version here. +m4_defun([_LT_PROG_FC], +[ +pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes]) +AC_PROG_FC +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi +popdef([AC_MSG_ERROR]) +])# _LT_PROG_FC + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_FC], []) + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_FC_CONFIG], +[AC_REQUIRE([_LT_PROG_FC])dnl +AC_LANG_PUSH(Fortran) + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + CC=${FC-"f95"} + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... 
+ _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" +fi # test "$_lt_disable_FC" != yes + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC="$lt_save_CC" +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC="$lt_save_CC" +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! 
-f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. + if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_XSI_SHELLFNS +# --------------------- +# Bourne and XSI compatible variants of some useful shell functions. +m4_defun([_LT_PROG_XSI_SHELLFNS], +[case $xsi_shell in + yes) + cat << \_LT_EOF >> "$cfgfile" + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac +} + +# func_basename file +func_basename () +{ + func_basename_result="${1##*/}" +} + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. 
+# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}" +} + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +func_stripname () +{ + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. + func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"} +} + +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=${1%%=*} + func_opt_split_arg=${1#*=} +} + +# func_lo2o object +func_lo2o () +{ + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac +} + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=${1%.*}.lo +} + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=$(( $[*] )) +} + +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=${#1} +} + +_LT_EOF + ;; + *) # Bourne compatible functions. + cat << \_LT_EOF >> "$cfgfile" + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} + +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` +} + +dnl func_dirname_and_basename +dnl A portable version of this function is already defined in general.m4sh +dnl so there is no need for it here. + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; + esac +} + +# sed scripts: +my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' +my_sed_long_arg='1s/^-[[^=]]*=//' + +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` + func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` +} + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` +} + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'` +} + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "$[@]"` +} + +# func_len string +# STRING may not start with a hyphen. 
+func_len () +{ + func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` +} + +_LT_EOF +esac + +case $lt_shell_append in + yes) + cat << \_LT_EOF >> "$cfgfile" + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "$[1]+=\$[2]" +} +_LT_EOF + ;; + *) + cat << \_LT_EOF >> "$cfgfile" + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "$[1]=\$$[1]\$[2]" +} + +_LT_EOF + ;; + esac +]) diff --git a/auxdir/ltoptions.m4 b/auxdir/ltoptions.m4 new file mode 100644 index 0000000000000000000000000000000000000000..34151a3ba625f326e6645d6afc79586f10746a3e --- /dev/null +++ b/auxdir/ltoptions.m4 @@ -0,0 +1,368 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. +m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. 
For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. ## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. +LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [0], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) + +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/auxdir/ltsugar.m4 b/auxdir/ltsugar.m4 new file mode 100644 index 0000000000000000000000000000000000000000..9000a057d31ddf75cb85ccda8757de4493bcdbe7 --- /dev/null +++ b/auxdir/ltsugar.m4 @@ -0,0 +1,123 @@ +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). +# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
+m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/auxdir/ltversion.m4 b/auxdir/ltversion.m4 new file mode 100644 index 0000000000000000000000000000000000000000..b8e154fe6e10fb20e2cf7d9e3a59116fb27cb79b --- /dev/null +++ b/auxdir/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# Generated from ltversion.in. + +# serial 3012 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.2.6]) +m4_define([LT_PACKAGE_REVISION], [1.3012]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.2.6' +macro_revision='1.3012' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/auxdir/lt~obsolete.m4 b/auxdir/lt~obsolete.m4 new file mode 100644 index 0000000000000000000000000000000000000000..637bb2066c425f79faecd6cc9e4e6b5074c6b55c --- /dev/null +++ b/auxdir/lt~obsolete.m4 @@ -0,0 +1,92 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 4 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) 
+# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], 
[AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) diff --git a/contribs/perlapi/libslurm-perl/Slurm.pm b/contribs/perlapi/libslurm-perl/Slurm.pm index 3a7416a7fdb26f6e7eafcc4790e27c08aca3aa26..a4d55b6e7c8adbf655977008d699d8434e19137d 100644 --- a/contribs/perlapi/libslurm-perl/Slurm.pm +++ b/contribs/perlapi/libslurm-perl/Slurm.pm @@ -222,7 +222,7 @@ require DynaLoader; push @ISA, 'DynaLoader'; bootstrap Slurm $VERSION; -sub dl_load_flags {0x01} +sub dl_load_flags { if($^O eq 'aix') { 0x00 } else { 0x01 }} ############################################################ # Preloaded methods go here. diff --git a/contribs/torque/qstat.pl b/contribs/torque/qstat.pl index 9a35c0ddc1cff21f3da41e18bf778232194a79d6..6a5a8e50485107c7770f7e41f1a1e6fc6e04f0be 100755 --- a/contribs/torque/qstat.pl +++ b/contribs/torque/qstat.pl @@ -103,7 +103,11 @@ if ($man) # Use sole remaining argument as jobIds -$hostname = `hostname -f`; +if($^O eq 'aix') { + $hostname = `hostname`; +} else { + $hostname = `hostname -f`; +} chomp $hostname; # Handle unsupported arguments diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in index 1c7c823e4a277f90cebcc5ab2f79b451a699a522..c1a22288f71e320d9ff4b9d7e0e114fbde34e0c5 100644 --- a/doc/html/configurator.html.in +++ b/doc/html/configurator.html.in @@ -316,7 +316,7 @@ function displayfile() </HEAD> <BODY> <FORM name=config> -<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configration Tool</H1> +<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configuration Tool</H1> <P>This form can be used to create a SLURM configuration file with you controlling many of the important configuration parameters.</P> @@ -325,7 +325,7 @@ Configuration files for other versions of SLURM should be built using the tool distributed with it in <i>doc/html/configurator.html</i>. Some parameters will be set to default values, but you can manually edit the resulting <I>slurm.conf</I> as desired -for greater flexibiilty. See <I>man slurm.conf</I> for more +for greater flexibility. See <I>man slurm.conf</I> for more details about the configuration parameters.</P> <P>Note the while SLURM daemons create log files and other files as needed, @@ -375,7 +375,7 @@ You can also specify addresses of these computers if desired Only a few of the possible parameters associated with the nodes will be set by this tool, but many others are available. 
 All of the nodes will be placed into a single partition (or queue)
-with global access. Many options are availble to group nodes into
+with global access. Many options are available to group nodes into
 partitions with a wide variety of configuration parameters.
 Manually edit the <i>slurm.conf</i> produced to exercise these options.
 Node names and addresses may be specified using a numeric range specification.
@@ -453,10 +453,10 @@ after making any changes to system password or group databases.
 <P>
 <H2>SLURM Port Numbers</H2>
-The SLURM controller (slurmctld) requires a unique port for communcations
-as do the SLURM compute node deamonds (slurmd). If not set, slurm ports
+The SLURM controller (slurmctld) requires a unique port for communications
+as do the SLURM compute node daemons (slurmd). If not set, slurm ports
 are set by checking for an entry in <I>/etc/services</I> and if that
-fails by using an interal default set at SLURM build time.
+fails by using an internal default set at SLURM build time.
 <P>
 <input type="text" name="slurmctld_port" value="6817"> <B>SlurmctldPort</B>
 <P>
@@ -528,15 +528,19 @@ to Maui (configuration parameter <B>SchedulerPort</B> must specified)<BR>
 to Moab (configuration parameter <B>SchedulerPort</B> must specified)<BR>
 <P>
 <input type="text" name="scheduler_port" value="7321"> <B>SchedulerPort</B>: scheduler
-communcations port (used by Wiki only)
+communications port (used by Wiki only)
 <P>
-Define what node configuration should be used.
+Define what node configuration (sockets, cores, memory, etc.) should be used.
 Using values defined in the configuration file will provide faster scheduling.<BR>
 Select one value for <B>FastSchedule</B>:<BR>
 <input type="radio" name="fast_schedule" value="1" checked> <B>1</B>: Use node
 configuration values defined in configuration file<BR>
 <input type="radio" name="fast_schedule" value="0">
-<B>0</B>: Use node configuration values actually found on each node
+<B>0</B>: Use node configuration values actually found on each node
+(if configured with gang scheduling or allocation of individual
+processors to jobs rather than only whole node allocations, the processor
+count on the node should match the configured value to avoid having extra
+processors left idle)
 <P>
 <H2>Interconnect</H2>
@@ -631,7 +635,7 @@ Select one value for <B>SelectType</B>:<BR>
 </DL>
 <input type="radio" name="select_type" value="linear" checked> <B>Linear</B>: Node-base
-resource allocation, does not manage indivual processor allocation<BR>
+resource allocation, does not manage individual processor allocation<BR>
 <input type="radio" name="select_type" value="bluegene">
 <B>BlueGene</B>: For IBM Blue Gene systems only<BR>
 <P>
@@ -716,7 +720,7 @@ Write completion status to a MySQL database<BR>
 <input type="radio" name="job_comp_type" value="pgsql"> <B>PGSQL</B>:
 Write completion status to a PostreSQL database<BR>
 <input type="radio" name="job_comp_type" value="slurmdbd"> <B>SlurmDBD</B>:
-Write completion status to Slurm adatabase daemon (serving multiple Slurm clusters)
+Write completion status to the Slurm database daemon (serving multiple Slurm clusters)
 which will write to some database<BR>
 <P>
 <input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>:
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 34ebc87bb5d87d25a740815332525af7efbfb4dd..a1821d37b9cf94fd5589e1570ad26df85c627531 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -235,7 +235,14 @@ Display jobs that ran on any of these nodes.
.TP \f3\-o \fP\f3,\fP \f3\-\-format\fP -Comma seperated list of fields. (use "\-\-helpformat" for a list of available fields). +Comma separated list of fields. (use "\-\-helpformat" for a list of +available fields). + +NOTE: When using the format option for listing various fields you can put a +%NUMBER afterwards to specify how many characters should be printed. + +e.g. format=name%30 will print 30 characters of field name right +justified. A -30 will print 30 characters left justified. .IP .TP diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1 index 51088bf9dd3c0b0efa74442719819c364164cd9e..aa7c8f7d792761078a16fdb94a605dd4f3d578fb 100644 --- a/doc/man/man1/salloc.1 +++ b/doc/man/man1/salloc.1 @@ -87,9 +87,24 @@ For example: \-\-begin=16:00 \-\-begin=now+1hour \-\-begin=now+60 (seconds by default) - \-\-begin=2010-01-20T12:34:00 + \-\-begin=2010\-01\-20T12:34:00 .fi +.RS +.PP +Notes on date/time specifications: + \- Although the 'seconds' field of the HH:MM:SS time specification is +allowed by the code, note that the poll time of the SLURM scheduler +is not precise enough to guarantee dispatch of the job on the exact +second. The job will be eligible to start on the next poll +following the specified time. The exact poll interval depends on the +SLURM scheduler (e.g., 60 seconds with the default sched/builtin). + \- If no time (HH:MM:SS) is specified, the default is (00:00:00). + \- If a date is specified without a year (e.g., MM/DD) then the current +year is assumed, unless the combination of MM/DD and HH:MM:SS has +already passed for that year, in which case the next year is used. +.RE + .TP \fB\-\-bell\fR Force salloc to ring the terminal bell when the job allocation is granted diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1 index cd6850da998f3a02bdbe95eff033e1cda8561d27..5d28eb45a4fc01ddb312b6915114441694ee69c1 100644 --- a/doc/man/man1/sbatch.1 +++ b/doc/man/man1/sbatch.1 @@ -86,9 +86,24 @@ For example: \-\-begin=16:00 \-\-begin=now+1hour \-\-begin=now+60 (seconds by default) - \-\-begin=2010-01-20T12:34:00 + \-\-begin=2010\-01\-20T12:34:00 .fi +.RS +.PP +Notes on date/time specifications: + \- Although the 'seconds' field of the HH:MM:SS time specification is +allowed by the code, note that the poll time of the SLURM scheduler +is not precise enough to guarantee dispatch of the job on the exact +second. The job will be eligible to start on the next poll +following the specified time. The exact poll interval depends on the +SLURM scheduler (e.g., 60 seconds with the default sched/builtin). + \- If no time (HH:MM:SS) is specified, the default is (00:00:00). + \- If a date is specified without a year (e.g., MM/DD) then the current +year is assumed, unless the combination of MM/DD and HH:MM:SS has +already passed for that year, in which case the next year is used. +.RE + .TP \fB\-\-checkpoint\fR=<\fItime\fR> Specifies the interval between creating checkpoints of the job step. diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1 index d8029e791fbc7e38e1a27278139a36484ece23ca..b3decb0f4e32cbcbac9ea434f93b4c860cee4708 100644 --- a/doc/man/man1/scontrol.1 +++ b/doc/man/man1/scontrol.1 @@ -64,7 +64,7 @@ are unavailable to user's group. Instruct the Slurm controller to terminate immediately and generate a core file. .TP -\fBcheckpoint\fP \fICKPT_OP\fP \fIID\fP \fIOPTIONS\fP +\fBcheckpoint\fP \fICKPT_OP\fP \fIID\fP Perform a checkpoint activity on the job step(s) with the specified identification. \fIID\fP can be used to identify a specific job (e.g.
"<job_id>", which applies to all of its existing steps) @@ -357,8 +357,14 @@ Identify the job to be updated. This specification is required. \fIMinCores\fP=<count> Set the job's minimum number of cores per socket to the specified value. .TP -\fIMinMemory\fP=<megabytes> +\fIMinMemoryCPU\fP=<megabytes> +Set the job's minimum real memory required per allocated CPU to the specified +value. +Either \fIMinMemoryCPU\fP or \fIMinMemoryNode\fP may be set, but not both. +.TP +\fIMinMemoryNode\fP=<megabytes> Set the job's minimum real memory required per node to the specified value. +Either \fIMinMemoryCPU\fP or \fIMinMemoryNode\fP may be set, but not both. .TP \fIMinProcs\fP=<count> Set the job's minimum number of processors per node to the specified value. @@ -377,6 +383,11 @@ Set the job's name to the specified value. .TP \fINice\fP[=delta] Adjust job's priority by the specified value. Default value is 100. +The adjustment range is from \-10000 (highest priority) +to 10000 (lowest priority). +Nice value changes are not additive, but overwrite any prior nice +value and are applied to the job's base priority. +Only privileged users can specify a negative adjustment. .TP \fIPartition\fP=<name> Set the job's partition to the specified value. @@ -386,6 +397,7 @@ Set the job's priority to the specified value. Note that a job priority of zero prevents the job from ever being scheduled. By setting a job's priority to zero it is held. Set the priority to a non\-zero value to permit it to run. +Explicitly setting a job's priority clears any previously set nice value. .TP \fIReqCores\fP=<count> Set the job's count of required cores to the specified value. @@ -725,7 +737,7 @@ JobId=65539 UserId=1500 JobState=PENDING TimeLimit=0:20:00 .br StartTime=0 EndTime=0 Shared=0 ReqProcs=1000 .br - ReqNodes=400 Contiguous=1 MinProcs=4 MinMemory=1024 + ReqNodes=400 Contiguous=1 MinProcs=4 MinMemoryNode=1024 .br MinTmpDisk=2034 ReqNodeList=lx[3000-3003] .br diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1 index 349da10d5f93cf913d933557d80c52d861601681..48a3d359a6367da57e02884a95b2f642db417a8d 100644 --- a/doc/man/man1/squeue.1 +++ b/doc/man/man1/squeue.1 @@ -163,6 +163,12 @@ This reports the value of the \fBsrun \-\-minthreads\fR option. Time limit of the job in days\-hours:minutes:seconds. The value may be "NOT_SET" if not yet established or "UNLIMITED" for no limit. .TP +\fB%L\fR +Time left for the job to execute in days\-hours:minutes:seconds. +This value is calculated by subtracting the job's time used from its time +limit. +The value may be "NOT_SET" if not yet established or "UNLIMITED" for no limit. +.TP \fB%m\fR Minimum size of memory (in MB) requested by the job .TP diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1 index 5a740b5c5d45a3ef3208640c4f828f27be324c7a..2b5051649cf5b08466cf6116620d6ab5b3e1aa27 100644 --- a/doc/man/man1/srun.1 +++ b/doc/man/man1/srun.1 @@ -74,9 +74,24 @@ For example: \-\-begin=16:00 \-\-begin=now+1hour \-\-begin=now+60 (seconds by default) - \-\-begin=2010-01-20T12:34:00 + \-\-begin=2010\-01\-20T12:34:00 .fi +.RS +.PP +Notes on date/time specifications: + \- Although the 'seconds' field of the HH:MM:SS time specification is +allowed by the code, note that the poll time of the SLURM scheduler +is not precise enough to guarantee dispatch of the job on the exact +second. The job will be eligible to start on the next poll +following the specified time. The exact poll interval depends on the +SLURM scheduler (e.g., 60 seconds with the default sched/builtin). 
+ \- If no time (HH:MM:SS) is specified, the default is (00:00:00). + \- If a date is specified without a year (e.g., MM/DD) then the current +year is assumed, unless the combination of MM/DD and HH:MM:SS has +already passed for that year, in which case the next year is used. +.RE + .TP \fB\-\-checkpoint\fR=<\fItime\fR> Specifies the interval between creating checkpoints of the job step. @@ -1010,9 +1025,9 @@ slurm.conf this value is ignored. \fB\-X\fR, \fB\-\-disable\-status\fR Disable the display of task status when srun receives a single SIGINT (Ctrl\-C). Instead immediately forward the SIGINT to the running job. -A second Ctrl\-C in one second will forcibly terminate the job and -\fBsrun\fR will immediately exit. May also be set via the environment -variable SLURM_DISABLE_STATUS. +Without this option a second Ctrl\-C in one second is required to forcibly +terminate the job and \fBsrun\fR will immediately exit. May also be +set via the environment variable SLURM_DISABLE_STATUS. .TP \fB\-x\fR, \fB\-\-exclude\fR=<\fIhost1,host2,...\fR or \fIfilename\fR> diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3 index c1695350105c37f709bc0a203b026f8c946bff76..c1d3de090120a69e2702f00500b51968e7161fcc 100644 --- a/doc/man/man3/slurm_kill_job.3 +++ b/doc/man/man3/slurm_kill_job.3 @@ -80,11 +80,14 @@ This function may only be successfully executed by the job's owner or user root. .LP \fBslurm_kill_job_step\fR Request that a signal be sent to a specific job step. This function may only be successfully executed by the job's owner or user root. -\fBslurm_signal_job\fR Request that send the specified signal to all +.LP +\fBslurm_signal_job\fR Request that the specified signal be sent to all steps of an existing job. -\fBslurm_signal_job_step\fR Request that send the specified signal to +.LP +\fBslurm_signal_job_step\fR Request that the specified signal be sent to an existing job step. -\fBslurm_terminate_job\fR Request that terminates all steps of an +.LP +\fBslurm_terminate_job\fR Request termination of all steps of an existing job by sending a REQUEST_TERMINATE_JOB rpc to all slurmd in the the job allocation, and then calls slurm_complete_job(). \fBslurm_signal_job_step\fR Request that terminates a job step by diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5 index 835dfbaa888f364334ffc24683276c0bf5771d3e..1fc3d6d6ceb82567029b85ff8d24ef54a0c26203 100644 --- a/doc/man/man5/slurm.conf.5 +++ b/doc/man/man5/slurm.conf.5 @@ -46,12 +46,16 @@ TrackWckey being set. By enforcing Associations no new job is allowed to run unless a corresponding association exists in the system. If limits are enforced users can be limited by association to how many nodes or how long jobs can run or other limits. With wckeys enforced jobs will not be scheduled -unless a valid workload characterization key is specified. +unless a valid workload characterization key is specified. This value may not +be reset via "scontrol reconfig". It only takes effect upon restart +of the slurmctld daemon. .TP \fBAccountingStorageBackupHost\fR The name of the backup machine hosting the accounting storage database. -Only used for accounting_storage/slurmdbd plugin, ignored otherwise. +If used with the accounting_storage/slurmdbd plugin, this is where the backup +slurmdbd would be running. +Only used for database type storage plugins, ignored otherwise. .TP \fBAccountingStorageHost\fR @@ -180,12 +184,24 @@ The default value is 0 to disable caching group data. 
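To go with the slurm_kill_job(3) wording fixes above, here is a minimal sketch of a client that first signals and then terminates a job through that API. The job ID is hypothetical, the signatures (uint32_t job ID, uint16_t signal) are assumed from the slurm.h shipped with this release, and the program would be linked with -lslurm; check the installed header before relying on it.

#include <signal.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

int main(void)
{
	uint32_t job_id = 1234;		/* hypothetical job ID */

	/* Send SIGUSR1 to all steps of the job. */
	if (slurm_signal_job(job_id, SIGUSR1) != SLURM_SUCCESS)
		slurm_perror("slurm_signal_job");

	/* Request termination of all steps of the job. */
	if (slurm_terminate_job(job_id) != SLURM_SUCCESS)
		slurm_perror("slurm_terminate_job");

	return 0;
}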
The system\-initiated checkpoint method to be used for user jobs. The slurmctld daemon must be restarted for a change in \fBCheckpointType\fR to take effect. -Acceptable values at present include -"checkpoint/aix" (only on AIX systems), -"checkpoint/ompi" (requires OpenMPI version 1.3 or higher), -"checkpoint/xlch" (for XLCH, requires that SlurmUser be root), and -"checkpoint/none". -The default value is "checkpoint/none". +Supported values presently include: +.RS +.TP 18 +\fBcheckpoint/aix\fR +for AIX systems only +.TP +\fBcheckpoint/blcr\fR +Berkeley Lab Checkpoint Restart (BLCR) +.TP +\fBcheckpoint/none\fR +no checkpoint support (default) +.TP +\fBcheckpoint/ompi\fR +OpenMPI (version 1.3 or higher) +.TP +\fBcheckpoint/xlch\fR +XLCH (requires that SlurmUser be root) +.RE .TP \fBClusterName\fR @@ -849,16 +865,15 @@ prevents regular users from viewing reservations. \fBusage\fR (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing usage of any other user. This applys to sreport. +.TP \fBusers\fR (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing information of any user other than themselves, this also makes it so users can only see associations they deal with. Coordinators can see associations of all users they are coordinator of, but can only see themselves when listing users. -.TP .RE - .TP \fBProctrackType\fR Identifies the plugin to be used for process tracking. @@ -1006,7 +1021,7 @@ be removed from power savings mode (using SLURM's hostlist expression format). By default no program is run. Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR, -\fBSuspendRate\fR, \fBSuspendTime\fR, \fBResumeTimeout\fR, \fBSuspendProgram\fR, +\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR. More information is available at the SLURM web site (https://computing.llnl.gov/linux/slurm/power_save.html). @@ -1021,7 +1036,7 @@ assigned work at the same time (e.g. a large job starts). A value of zero results in no limits being imposed. The default value is 300 nodes per minute. Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, -\fBSuspendRate\fR, \fBSuspendTime\fR, \fBResumeTimeout\fR, \fBSuspendProgram\fR, +\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR. .TP @@ -1061,11 +1076,10 @@ explicitly changes its state (even if the slurmd daemon registers and resumes communications). .TP \fB1\fR -A non\-responding (DOWN) node will become available for use upon -registration. Note that DOWN node's state will be changed only if -it was set DOWN due to being non\-responsive. If the node was -set DOWN for any other reason (low memory, prolog failure, epilog -failure, etc.), its state will not automatically be changed. +A DOWN node will become available for use upon registration with a +valid configuration only if it was set DOWN due to being non\-responsive. +If the node was set DOWN for any other reason (low memory, prolog failure, +epilog failure, etc.), its state will not automatically be changed. .TP \fB2\fR A DOWN node will become available for use upon registration with a @@ -1202,7 +1216,8 @@ The following values are supported for \fBSelectType=select/cons_res\fR: .TP \fBCR_CPU\fR CPUs are consumable resources. -There is no notion of sockets, cores or threads. +There is no notion of sockets, cores or threads; +do not define those values in the node specification. 
On a multi\-core system, each core will be considered a CPU. On a multi\-core and hyperthreaded system, each thread will be considered a CPU. @@ -1210,6 +1225,8 @@ On single\-core systems, each CPUs will be considered a CPU. .TP \fBCR_CPU_Memory\fR CPUs and memory are consumable resources. +There is no notion of sockets, cores or threads; +do not define those values in the node specification. Setting a value for \fBDefMemPerCPU\fR is strongly recommended. .TP \fBCR_Core\fR @@ -1406,7 +1423,7 @@ Use SLURM's hostlist expression to identify nodes. By default no nodes are excluded. Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, \fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, -\fBResumeTimeout\fR, and \fBSuspendExcParts\fR. +\fBSuspendTimeout\fR, and \fBSuspendExcParts\fR. .TP \fBSuspendExcParts\fR @@ -1416,7 +1433,7 @@ Multiple partitions can be identified and separated by commas. By default no nodes are excluded. Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, \fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR -\fBResumeTimeout\fR, and \fBSuspendExcNodes\fR. +\fBSuspendTimeout\fR, and \fBSuspendExcNodes\fR. .TP \fBSuspendProgram\fR @@ -1431,7 +1448,7 @@ be placed into power savings mode (using SLURM's hostlist expression format). By default, no program is run. Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, -\fBResumeRate\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBResumeTimeout\fR, +\fBResumeRate\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR. .TP @@ -2159,7 +2176,11 @@ The four options most likely to be used are: \fBProlog\fR and \fBEpilog\fR (executed once on each compute node for each job) plus \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR (executed once on the \fBControlMachine\fR for each job). - + +NOTE: Standard output and error messages are normally not preserved. +Explicitly write output and error messages to an appropriate location +if you wish to preserve that information. + NOTE: The Prolog script is ONLY run on any individual node when it first sees a job step from a new allocation; it does not run the Prolog immediately when an allocation is granted. If no job steps diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5 index a119ea62691605a71cd5f34107925fae4f5e9cbd..f8998b2c74c6874324998480d8e55f4558fd4c07 100644 --- a/doc/man/man5/slurmdbd.conf.5 +++ b/doc/man/man5/slurmdbd.conf.5 @@ -247,7 +247,10 @@ Ideally this should be the host on which slurmdbd executes. .TP \fBStorageBackupHost\fR Define the name of the backup host the database is running where we are going -to store the data. +to store the data. This can be viewed as a backup solution when the +StorageHost is not responding. It is up to the backup solution to enforce the +coherency of the accounting information between the two hosts. With clustered +database solutions (active/passive HA), you would not need to use this feature. Default is none. .TP diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8 index 29495c78ad113f8880df3741e17accea0baac078..80c0203463264af25cbbba37c5067c754e743fd9 100644 --- a/doc/man/man8/spank.8 +++ b/doc/man/man8/spank.8 @@ -53,9 +53,9 @@ option processing. \fBslurm_spank_init_post_opt\fR Called at the same point as \fBslurm_spank_init\fR, but after all user options to the plugin have been processed.
The reason that the -\fBunit\fR and \fBinit_post_opt\fR callbacks are separated is so that +\fBinit\fR and \fBinit_post_opt\fR callbacks are separated is so that plugins can process system-wide options specified in plugstack.conf in -the \fBinit\fR callback, then process user options, and finaly take some +the \fBinit\fR callback, then process user options, and finally take some action in \fBslurm_spank_init_post_opt\fR if necessary. .TP \fBslurm_spank_local_user_init\fR diff --git a/slurm.spec b/slurm.spec index 67f6a2ccb5ad26a32e93a27cc2739eb97d659aba..6c870083098d31b001a59dc3436878e1d7b9e121 100644 --- a/slurm.spec +++ b/slurm.spec @@ -1,4 +1,4 @@ -# $Id: slurm.spec 17631 2009-05-28 21:18:15Z jette $ +# $Id: slurm.spec 18573 2009-08-27 18:14:02Z jette $ # # Note that this package is not relocatable @@ -75,14 +75,14 @@ %endif Name: slurm -Version: 2.0.4 +Version: 2.0.5 Release: 1%{?dist} Summary: Simple Linux Utility for Resource Management License: GPL Group: System Environment/Base -Source: slurm-2.0.4.tar.bz2 +Source: slurm-2.0.5.tar.bz2 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release} URL: https://computing.llnl.gov/linux/slurm/ @@ -269,7 +269,7 @@ SLURM process tracking plugin for SGI job containers. ############################################################################# %prep -%setup -n slurm-2.0.4 +%setup -n slurm-2.0.5 %build %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \ @@ -369,6 +369,7 @@ rm -rf $RPM_BUILD_ROOT %{_libdir}/slurm/src/* %{_mandir}/man1/* %{_mandir}/man5/slurm.* +%{_mandir}/man5/topology.* %{_mandir}/man5/wiki.* %{_mandir}/man8/slurmctld.* %{_mandir}/man8/slurmd.* diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in index 12f9218b8c687c143e2414377bdb838e97b09a16..a5b81bf2ce7f0fd23f39e656b6fce1323c25a892 100644 --- a/slurm/slurm.h.in +++ b/slurm/slurm.h.in @@ -182,7 +182,7 @@ BEGIN_C_DECLS /* eg. the maximum count of nodes any job may use in some partition */ #define INFINITE (0xffffffff) #define NO_VAL (0xfffffffe) -#define MAX_TASKS_PER_NODE 64 +#define MAX_TASKS_PER_NODE 128 /* Job step ID of batch scripts */ #define SLURM_BATCH_SCRIPT (0xfffffffe) diff --git a/src/api/allocate.c b/src/api/allocate.c index 4088095189777b9c84b96347609da24469d8c389..9bf33f95186aa6f9977b455ecd25eb6cb24887dd 100644 --- a/src/api/allocate.c +++ b/src/api/allocate.c @@ -1,6 +1,6 @@ /*****************************************************************************\ * allocate.c - allocate nodes for a job or step with supplied contraints - * $Id: allocate.c 17903 2009-06-19 18:04:48Z jette $ + * $Id: allocate.c 18263 2009-07-30 19:09:07Z da $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Copyright (C) 2008-2009 Lawrence Livermore National Security. 
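Referring back to the spank.8 clarification above, the following is a minimal sketch of the callback split it describes: system-wide arguments from plugstack.conf are examined in slurm_spank_init(), while any decision that depends on user-supplied plugin options waits until slurm_spank_init_post_opt(). The plugin name "demo" is hypothetical and the sketch registers no options of its own.

#include <slurm/spank.h>

SPANK_PLUGIN(demo, 1);

int slurm_spank_init(spank_t sp, int ac, char **av)
{
	/* Only the system-wide arguments from plugstack.conf (av) are
	 * available here; user options have not been parsed yet. */
	slurm_info("demo: init with %d plugstack.conf argument(s)", ac);
	return ESPANK_SUCCESS;
}

int slurm_spank_init_post_opt(spank_t sp, int ac, char **av)
{
	/* All user options registered by this plugin have now been
	 * processed, so the combined configuration can be acted on here. */
	return ESPANK_SUCCESS;
}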
@@ -317,9 +317,9 @@ int slurm_job_will_run (job_desc_msg_t *req) slurm_make_time_str(&will_run_resp->start_time, buf, sizeof(buf)); info("Job %u to start at %s using %u processors on %s", - will_run_resp->job_id, buf, - will_run_resp->proc_cnt, - will_run_resp->node_list); + will_run_resp->job_id, buf, + will_run_resp->proc_cnt, + will_run_resp->node_list); slurm_free_will_run_response_msg(will_run_resp); break; default: diff --git a/src/api/config_info.c b/src/api/config_info.c index c645df051f14f3b72876d538481bfdcf366d6648..58d042a3cdc9648fe132d066ffd15ab95bc8c21d 100644 --- a/src/api/config_info.c +++ b/src/api/config_info.c @@ -166,7 +166,7 @@ void slurm_print_ctl_conf ( FILE* out, slurm_ctl_conf_ptr->backup_addr); fprintf(out, "BackupController = %s\n", slurm_ctl_conf_ptr->backup_controller); - fprintf(out, "BatchStartTime = %u sec\n", + fprintf(out, "BatchStartTimeout = %u sec\n", slurm_ctl_conf_ptr->batch_start_timeout); slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->boot_time, time_str, sizeof(time_str)); diff --git a/src/api/job_info.c b/src/api/job_info.c index c3f00de88f119241da2c504ef3b5cb001a47affb..4c9fb8ebeba05ef77159b43e5ae726bff4ed39e3 100644 --- a/src/api/job_info.c +++ b/src/api/job_info.c @@ -380,9 +380,9 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) } else tmp3_ptr = "Node"; convert_num_unit((float)job_ptr->job_min_memory, tmp1, sizeof(tmp1), - UNIT_NONE); + UNIT_MEGA); convert_num_unit((float)job_ptr->job_min_tmp_disk, tmp2, sizeof(tmp2), - UNIT_NONE); + UNIT_MEGA); snprintf(tmp_line, sizeof(tmp_line), "MinMemory%s=%s MinTmpDisk=%s Features=%s", tmp3_ptr, tmp1, tmp2, job_ptr->features); diff --git a/src/api/pmi.c b/src/api/pmi.c index 549226efa9478fc1ca864fc553656a5d40bf3b2a..87082bef8e69df2e9fe42e0fbbe4d4ae138c7afb 100644 --- a/src/api/pmi.c +++ b/src/api/pmi.c @@ -1141,8 +1141,9 @@ static int _kvs_put( const char kvsname[], const char key[], const char value[], rc = PMI_FAIL; /* malloc error */ else { rc = PMI_SUCCESS; - strncpy(kvs_recs[i].kvs_values[j], value, PMI_MAX_VAL_LEN); - strncpy(kvs_recs[i].kvs_keys[j], key, PMI_MAX_KEY_LEN); + strncpy(kvs_recs[i].kvs_values[j], value, + PMI_MAX_VAL_LEN); + strncpy(kvs_recs[i].kvs_keys[j], key, PMI_MAX_KEY_LEN); } goto fini; } @@ -1307,13 +1308,14 @@ int PMI_KVS_Get( const char kvsname[], const char key[], char value[], int lengt if (strncmp(kvs_recs[i].kvs_name, kvsname, PMI_MAX_KVSNAME_LEN)) continue; for (j=0; j<kvs_recs[i].kvs_cnt; j++) { - if (strncmp(kvs_recs[i].kvs_keys[j], key, PMI_MAX_KEY_LEN)) + if (strncmp(kvs_recs[i].kvs_keys[j], key, + PMI_MAX_KEY_LEN)) continue; if (strlen(kvs_recs[i].kvs_values[j]) > (length-1)) rc = PMI_ERR_INVALID_LENGTH; else { strncpy(value, kvs_recs[i].kvs_values[j], - PMI_MAX_VAL_LEN); + length); rc = PMI_SUCCESS; } goto fini; @@ -1390,9 +1392,10 @@ int PMI_KVS_Iter_first(const char kvsname[], char key[], int key_len, char val[] rc = PMI_ERR_INVALID_VAL_LENGTH; } else { strncpy(key, kvs_recs[i].kvs_keys[kvs_recs[i].kvs_inx], - PMI_MAX_KEY_LEN); - strncpy(val, kvs_recs[i].kvs_values[kvs_recs[i].kvs_inx], - PMI_MAX_VAL_LEN); + key_len); + strncpy(val, + kvs_recs[i].kvs_values[kvs_recs[i].kvs_inx], + val_len); rc = PMI_SUCCESS; } goto fini; @@ -1467,9 +1470,10 @@ int PMI_KVS_Iter_next(const char kvsname[], char key[], int key_len, rc = PMI_ERR_INVALID_VAL_LENGTH; } else { strncpy(key, kvs_recs[i].kvs_keys[kvs_recs[i].kvs_inx], - PMI_MAX_KEY_LEN); - strncpy(val, kvs_recs[i].kvs_values[kvs_recs[i].kvs_inx], - PMI_MAX_VAL_LEN); + key_len); + strncpy(val, + 
kvs_recs[i].kvs_values[kvs_recs[i].kvs_inx], + val_len); rc = PMI_SUCCESS; } goto fini; @@ -1569,7 +1573,8 @@ specific arguments in the args array, this function may parse more than one argument as long as the options are contiguous in the args array. @*/ -int PMI_Parse_option(int num_args, char *args[], int *num_parsed, PMI_keyval_t **keyvalp, +int PMI_Parse_option(int num_args, char *args[], int *num_parsed, + PMI_keyval_t **keyvalp, int *size) { int i, n, s, len; diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c index e02746c9b41186833d0a81da8900ec2d7ce23546..cd21fdbde1556db5d49ef31f592ff924ba7f4e14 100644 --- a/src/common/assoc_mgr.c +++ b/src/common/assoc_mgr.c @@ -987,26 +987,23 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc, if (assoc_pptr) *assoc_pptr = ret_assoc; assoc->id = ret_assoc->id; - if(!assoc->user) - assoc->user = ret_assoc->user; - assoc->uid = ret_assoc->uid; if(!assoc->acct) assoc->acct = ret_assoc->acct; if(!assoc->cluster) assoc->cluster = ret_assoc->cluster; - if(!assoc->partition) - assoc->partition = ret_assoc->partition; - - assoc->shares_raw = ret_assoc->shares_raw; - assoc->grp_cpu_mins = ret_assoc->grp_cpu_mins; + assoc->grp_cpu_mins = ret_assoc->grp_cpu_mins; assoc->grp_cpus = ret_assoc->grp_cpus; assoc->grp_jobs = ret_assoc->grp_jobs; assoc->grp_nodes = ret_assoc->grp_nodes; assoc->grp_submit_jobs = ret_assoc->grp_submit_jobs; assoc->grp_wall = ret_assoc->grp_wall; + assoc->grp_used_cpus = ret_assoc->grp_used_cpus; + assoc->grp_used_nodes = ret_assoc->grp_used_nodes; + assoc->grp_used_wall = ret_assoc->grp_used_wall; + assoc->max_cpu_mins_pj = ret_assoc->max_cpu_mins_pj; assoc->max_cpus_pj = ret_assoc->max_cpus_pj; assoc->max_jobs = ret_assoc->max_jobs; @@ -1022,6 +1019,26 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc, assoc->parent_assoc_ptr = ret_assoc->parent_assoc_ptr; assoc->parent_id = ret_assoc->parent_id; + if(!assoc->partition) + assoc->partition = ret_assoc->partition; + + assoc->rgt = ret_assoc->rgt; + + assoc->shares_norm = ret_assoc->shares_norm; + assoc->shares_raw = ret_assoc->shares_raw; + + assoc->uid = ret_assoc->uid; + + assoc->usage_efctv = ret_assoc->usage_efctv; + assoc->usage_norm = ret_assoc->usage_norm; + assoc->usage_raw = ret_assoc->usage_raw; + + assoc->used_jobs = ret_assoc->used_jobs; + assoc->used_submit_jobs = ret_assoc->used_submit_jobs; + + if(!assoc->user) + assoc->user = ret_assoc->user; + slurm_mutex_unlock(&assoc_mgr_association_lock); return SLURM_SUCCESS; @@ -1058,7 +1075,7 @@ extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user, if(!found_user) { slurm_mutex_unlock(&assoc_mgr_user_lock); - if(enforce) + if(enforce & ACCOUNTING_ENFORCE_ASSOCS) return SLURM_ERROR; else return SLURM_SUCCESS; @@ -1111,7 +1128,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos, while((found_qos = list_next(itr))) { if(qos->id == found_qos->id) break; - else if(qos->name && strcasecmp(qos->name, found_qos->name)) + else if(qos->name && !strcasecmp(qos->name, found_qos->name)) break; } list_iterator_destroy(itr); @@ -1169,7 +1186,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos, qos->user_limit_list = found_qos->user_limit_list; slurm_mutex_unlock(&assoc_mgr_qos_lock); - return SLURM_ERROR; + return SLURM_SUCCESS; } extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey, @@ -1838,7 +1855,8 @@ extern int assoc_mgr_update_wckeys(acct_update_object_t *update) while((object = 
list_pop(update->objects))) { if(object->cluster && assoc_mgr_cluster_name) { /* only update the local clusters assocs */ - if(strcasecmp(object->cluster, assoc_mgr_cluster_name)) { + if(strcasecmp(object->cluster, + assoc_mgr_cluster_name)) { destroy_acct_wckey_rec(object); continue; } diff --git a/src/common/env.c b/src/common/env.c index 78498e26510c8724493a2dc745c375efaa172285..5839383f34a970b39c3b87d18146885ed6412b31 100644 --- a/src/common/env.c +++ b/src/common/env.c @@ -305,7 +305,7 @@ char *getenvp(char **env, const char *name) return NULL; } -int setup_env(env_t *env) +int setup_env(env_t *env, bool preserve_env) { int rc = SLURM_SUCCESS; char *dist = NULL, *lllp_dist = NULL; @@ -320,7 +320,7 @@ int setup_env(env_t *env) rc = SLURM_FAILURE; } - if (env->nprocs + if (!preserve_env && env->nprocs && setenvf(&env->env, "SLURM_NPROCS", "%d", env->nprocs)) { error("Unable to set SLURM_NPROCS environment variable"); rc = SLURM_FAILURE; @@ -686,7 +686,7 @@ int setup_env(env_t *env) rc = SLURM_FAILURE; } - if (env->nhosts + if (!preserve_env && env->nhosts && setenvf(&env->env, "SLURM_NNODES", "%d", env->nhosts)) { error("Unable to set SLURM_NNODES environment var"); rc = SLURM_FAILURE; @@ -698,7 +698,7 @@ int setup_env(env_t *env) rc = SLURM_FAILURE; } - if (env->task_count + if (!preserve_env && env->task_count && setenvf (&env->env, "SLURM_TASKS_PER_NODE", "%s", env->task_count)) { error ("Can't set SLURM_TASKS_PER_NODE env variable"); @@ -882,7 +882,8 @@ extern char *uint32_compressed_to_str(uint32_t array_len, * SLURM_JOB_NODELIST * SLURM_JOB_CPUS_PER_NODE * LOADLBATCH (AIX only) - * MPIRUN_PARTITION, MPIRUN_NOFREE, and MPIRUN_NOALLOCATE (BGL only) + * SLURM_BG_NUM_NODES, MPIRUN_PARTITION, MPIRUN_NOFREE, and + * MPIRUN_NOALLOCATE (BGL only) * * Sets OBSOLETE variables (needed for MPI, do not remove): * SLURM_JOBID @@ -890,7 +891,7 @@ extern char *uint32_compressed_to_str(uint32_t array_len, * SLURM_NODELIST * SLURM_TASKS_PER_NODE */ -void +int env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc, const job_desc_msg_t *desc) { @@ -901,6 +902,15 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc, char *dist = NULL, *lllp_dist = NULL; slurm_step_layout_t *step_layout = NULL; uint32_t num_tasks = desc->num_tasks; + int rc = SLURM_SUCCESS; + +#ifdef HAVE_BG + uint32_t node_cnt = alloc->node_cnt; + select_g_get_jobinfo(alloc->select_jobinfo, + SELECT_DATA_NODE_CNT, + &node_cnt); + env_array_overwrite_fmt(dest, "SLURM_BG_NUM_NODES", "%u", node_cnt); +#endif env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", alloc->job_id); env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u", @@ -986,21 +996,32 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc, && desc->cpus_per_task != (uint16_t)NO_VAL) num_tasks /= desc->cpus_per_task; //num_tasks = desc->num_procs; - } + } + + if(desc->task_dist == SLURM_DIST_ARBITRARY) { + tmp = desc->req_nodes; + env_array_overwrite_fmt(dest, "SLURM_ARBITRARY_NODELIST", + "%s", tmp); + } else + tmp = alloc->node_list; //info("got %d and %d", num_tasks, desc->cpus_per_task); - step_layout = slurm_step_layout_create(alloc->node_list, - alloc->cpus_per_node, - alloc->cpu_count_reps, - alloc->node_cnt, - num_tasks, - desc->cpus_per_task, - desc->task_dist, - desc->plane_size); + if(!(step_layout = slurm_step_layout_create(tmp, + alloc->cpus_per_node, + alloc->cpu_count_reps, + alloc->node_cnt, + num_tasks, + desc->cpus_per_task, + desc->task_dist, + desc->plane_size))) + 
return SLURM_ERROR; + + tmp = _uint16_array_to_str(step_layout->node_cnt, step_layout->tasks); slurm_step_layout_destroy(step_layout); env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp); xfree(tmp); + return rc; } /* @@ -1026,7 +1047,7 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc, * SLURM_NPROCS * SLURM_TASKS_PER_NODE */ -extern void +extern int env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch, const char *node_name) { @@ -1037,7 +1058,15 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch, slurm_step_layout_t *step_layout = NULL; uint32_t num_tasks = batch->nprocs; uint16_t cpus_per_task; + uint16_t task_dist; +#ifdef HAVE_BG + uint32_t node_cnt = 0; + select_g_get_jobinfo(batch->select_jobinfo, + SELECT_DATA_NODE_CNT, + &node_cnt); + env_array_overwrite_fmt(dest, "SLURM_BG_NUM_NODES", "%u", node_cnt); +#endif /* There is no explicit node count in the batch structure, * so we need to calculate the node count. */ for (i = 0; i < batch->num_cpu_groups; i++) { @@ -1050,8 +1079,8 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch, env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s", batch->nodes); tmp = uint32_compressed_to_str(batch->num_cpu_groups, - batch->cpus_per_node, - batch->cpu_count_reps); + batch->cpus_per_node, + batch->cpu_count_reps); env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp); xfree(tmp); @@ -1080,21 +1109,33 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch, env_array_overwrite_fmt(dest, "SLURM_CPUS_PER_TASK", "%u", cpus_per_task); } - num_tasks = num_cpus / cpus_per_task; - step_layout = slurm_step_layout_create(batch->nodes, - batch->cpus_per_node, - batch->cpu_count_reps, - num_nodes, - num_tasks, - cpus_per_task, - (uint16_t)SLURM_DIST_BLOCK, - (uint16_t)NO_VAL); + if((tmp = getenvp(*dest, "SLURM_ARBITRARY_NODELIST"))) { + task_dist = SLURM_DIST_ARBITRARY; + num_tasks = batch->nprocs; + } else { + tmp = batch->nodes; + task_dist = SLURM_DIST_BLOCK; + num_tasks = num_cpus / cpus_per_task; + } + + if(!(step_layout = slurm_step_layout_create(tmp, + batch->cpus_per_node, + batch->cpu_count_reps, + num_nodes, + num_tasks, + cpus_per_task, + task_dist, + (uint16_t)NO_VAL))) + return SLURM_ERROR; + tmp = _uint16_array_to_str(step_layout->node_cnt, step_layout->tasks); slurm_step_layout_destroy(step_layout); env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp); xfree(tmp); + return SLURM_SUCCESS; + } /* @@ -1103,7 +1144,8 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch, * pointed to by "dest" is NULL, memory will automatically be xmalloc'ed. * The array is terminated by a NULL pointer, and thus is suitable for * use by execle() and other env_array_* functions. If preserve_env is - * true, the variables SLURM_NNODES and SLURM_NPROCS remain unchanged. + * true, the variables SLURM_NNODES, SLURM_NPROCS and SLURM_TASKS_PER_NODE + * remain unchanged. 
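Because env_array_for_job() and env_array_for_batch_job() change from void to int in this patch, a caller can now detect a failed step-layout calculation instead of launching with a bad SLURM_TASKS_PER_NODE. The fragment below is only a sketch of such a caller under that assumption; build_batch_env() is a hypothetical wrapper, not code from this patch.

#include <slurm/slurm_errno.h>
#include "src/common/env.h"
#include "src/common/log.h"

/* Hypothetical helper: build the environment for a batch launch request
 * and report failure rather than continuing with incomplete variables. */
static char **build_batch_env(const batch_job_launch_msg_t *batch,
			      const char *node_name)
{
	char **env = env_array_create();

	if (env_array_for_batch_job(&env, batch, node_name) != SLURM_SUCCESS) {
		error("job %u: unable to build batch environment",
		      batch->job_id);
		env_array_free(env);
		return NULL;
	}
	return env;
}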
* * Sets variables: * SLURM_STEP_ID @@ -1156,8 +1198,9 @@ env_array_for_step(char ***dest, "%hu", step->step_layout->node_cnt); env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u", step->step_layout->task_cnt); + env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", + tmp); } - env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp); env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT", "%hu", launcher_port); diff --git a/src/common/env.h b/src/common/env.h index 94a715e53de22c6a2273ccbe74bc2ab903ce5df9..1a0bec38e96707d1c90f4e364c28944cf12c63e3 100644 --- a/src/common/env.h +++ b/src/common/env.h @@ -84,7 +84,7 @@ int setenvfs(const char *fmt, ...); int setenvf(char ***envp, const char *name, const char *fmt, ...); void unsetenvp(char **env, const char *name); char * getenvp(char **env, const char *name); -int setup_env(env_t *env); +int setup_env(env_t *env, bool preserve_env); /********************************************************************** * Newer environment variable handling scheme @@ -106,9 +106,9 @@ int setup_env(env_t *env); * Sets OBSOLETE variables: * ? probably only needed for users... */ -void env_array_for_job(char ***dest, - const resource_allocation_response_msg_t *alloc, - const job_desc_msg_t *desc); +int env_array_for_job(char ***dest, + const resource_allocation_response_msg_t *alloc, + const job_desc_msg_t *desc); /* * Set in "dest" the environment variables relevant to a SLURM batch @@ -133,9 +133,9 @@ void env_array_for_job(char ***dest, * SLURM_TASKS_PER_NODE <- poorly named, really CPUs per node * ? probably only needed for users... */ -extern void env_array_for_batch_job(char ***dest, - const batch_job_launch_msg_t *batch, - const char* node_name); +extern int env_array_for_batch_job(char ***dest, + const batch_job_launch_msg_t *batch, + const char* node_name); /* * Set in "dest" the environment variables relevant to a SLURM job step, diff --git a/src/common/list.h b/src/common/list.h index 9039c8fd65116c09d2fe037820773d4803c913ed..53667c2d775900bb714f0e9e8822c9c0865ddc25 100644 --- a/src/common/list.h +++ b/src/common/list.h @@ -231,7 +231,6 @@ void * list_peek (List l); * Note: The item is not removed from the list. */ - /**************************** * Queue Access Functions * ****************************/ diff --git a/src/common/node_select.c b/src/common/node_select.c index e2b1f713042ae43427dc11644417f9e6439e6747..5a4ae881b9ee5dc1cdc9900ecf5a6034fb74e2b3 100644 --- a/src/common/node_select.c +++ b/src/common/node_select.c @@ -1516,7 +1516,7 @@ extern int select_g_unpack_node_info( int i, record_count = 0; node_select_info_msg_t *buf; - buf = xmalloc(sizeof(bg_info_record_t)); + buf = xmalloc(sizeof(node_select_info_msg_t)); safe_unpack32(&(buf->record_count), buffer); safe_unpack_time(&(buf->last_update), buffer); buf->bg_info_array = xmalloc(sizeof(bg_info_record_t) * @@ -1530,10 +1530,7 @@ extern int select_g_unpack_node_info( return SLURM_SUCCESS; unpack_error: - for(i=0; i<record_count; i++) - _free_node_info(&(buf->bg_info_array[i])); - xfree(buf->bg_info_array); - xfree(buf); + select_g_free_node_info(&buf); return SLURM_ERROR; } diff --git a/src/common/parse_config.c b/src/common/parse_config.c index b8a9f986a11dc43a50d48bd01d9d16065f24f69d..9a36b137450e34069cca6029c859574efccee654 100644 --- a/src/common/parse_config.c +++ b/src/common/parse_config.c @@ -4,7 +4,7 @@ * NOTE: when you see the prefix "s_p_", think "slurm parser". 
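The node_select.c change above fixes an allocation that used the size of a related record type instead of the message type actually being filled in, and the earlier pmi.c changes bound strncpy() by the destination's capacity rather than a source maximum. A generic sketch of the safer idioms, with hypothetical msg_t and record_t types rather than SLURM's own, is:

#include <stdlib.h>
#include <string.h>

typedef struct { int id; } record_t;
typedef struct { unsigned record_count; record_t *records; } msg_t;

static msg_t *msg_alloc(unsigned n)
{
	/* sizeof(*msg) always tracks the declared type of msg, so it
	 * cannot silently allocate a smaller, unrelated structure. */
	msg_t *msg = calloc(1, sizeof(*msg));

	if (!msg)
		return NULL;
	msg->records = calloc(n, sizeof(*msg->records));
	if (!msg->records) {
		free(msg);
		return NULL;
	}
	msg->record_count = n;
	return msg;
}

static void copy_value(char *dst, size_t dst_len, const char *src)
{
	/* Bound the copy by the destination buffer, then terminate it;
	 * strncpy() alone does not guarantee a trailing NUL. */
	strncpy(dst, src, dst_len - 1);
	dst[dst_len - 1] = '\0';
}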
***************************************************************************** * Copyright (C) 2006-2007 The Regents of the University of California. - * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2008-2009 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov>. * CODE-OCEC-09-009. All rights reserved. @@ -19,7 +19,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -377,7 +377,8 @@ static int _handle_string(s_p_values_t *v, const char *value, const char *line, char **leftover) { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, latest value used", + v->key); xfree(v->data); v->data_count = 0; } @@ -401,7 +402,8 @@ static int _handle_long(s_p_values_t *v, const char *value, const char *line, char **leftover) { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, latest value used", + v->key); xfree(v->data); v->data_count = 0; } @@ -443,7 +445,8 @@ static int _handle_uint16(s_p_values_t *v, const char *value, const char *line, char **leftover) { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, latest value used", + v->key); xfree(v->data); v->data_count = 0; } @@ -475,7 +478,8 @@ static int _handle_uint16(s_p_values_t *v, error("%s value (%s) is out of range", v->key, value); return -1; } else if (value[0] == '-') { - error("%s value (%s) is less than zero", v->key, value); + error("%s value (%s) is less than zero", v->key, + value); return -1; } else if (num > 0xffff) { error("%s value (%s) is greater than 65535", v->key, @@ -494,7 +498,8 @@ static int _handle_uint32(s_p_values_t *v, const char *value, const char *line, char **leftover) { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, latest value used", + v->key); xfree(v->data); v->data_count = 0; } @@ -530,7 +535,8 @@ static int _handle_uint32(s_p_values_t *v, error("%s value (%s) is out of range", v->key, value); return -1; } else if (value[0] == '-') { - error("%s value (%s) is less than zero", v->key, value); + error("%s value (%s) is less than zero", v->key, + value); return -1; } else if (num > 0xffffffff) { error("%s value (%s) is greater than 4294967295", @@ -546,7 +552,8 @@ static int _handle_uint32(s_p_values_t *v, } static int _handle_pointer(s_p_values_t *v, - const char *value, const char *line, char **leftover) + const char *value, const char *line, + char **leftover) { if (v->handler != NULL) { /* call the handler function */ @@ -557,7 +564,8 @@ static int _handle_pointer(s_p_values_t *v, return rc == 0 ? 
0 : -1; } else { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, " + "latest value used", v->key); xfree(v->data); v->data_count = 0; } @@ -593,10 +601,12 @@ static int _handle_array(s_p_values_t *v, } static int _handle_boolean(s_p_values_t *v, - const char *value, const char *line, char **leftover) + const char *value, const char *line, + char **leftover) { if (v->data_count != 0) { - debug("%s specified more than once", v->key); + error("%s specified more than once, latest value used", + v->key); xfree(v->data); v->data_count = 0; } @@ -809,7 +819,7 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, char *filename) int merged_lines; int inc_rc; - if(!filename) { + if (!filename) { error("s_p_parse_file: No filename given."); return SLURM_ERROR; } @@ -824,7 +834,7 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, char *filename) } line_number = 1; - while((merged_lines = _get_next_line(line, BUFFER_SIZE, f)) > 0) { + while ((merged_lines = _get_next_line(line, BUFFER_SIZE, f)) > 0) { /* skip empty lines */ if (line[0] == '\0') { line_number += merged_lines; @@ -1065,7 +1075,7 @@ int s_p_get_pointer(void **ptr, const char *key, const s_p_hashtbl_t *hashtbl) * that element contains a pointer to the newly parsed value. You can * think of this as being an array of S_P_POINTER types. * - * OUT ptr_array - pointer to a void pointer-pointer where the value is returned + * OUT ptr_array - pointer to void pointer-pointer where the value is returned * OUT count - length of ptr_array * IN key - hash table key * IN hashtbl - hash table created by s_p_hashtbl_create() diff --git a/src/common/proc_args.c b/src/common/proc_args.c index 540f5fac5b53216fd0fe0adc2b752ab39551a66c..563053bf3e0a3ae5152c4cd6d1faaac52903f237 100644 --- a/src/common/proc_args.c +++ b/src/common/proc_args.c @@ -336,6 +336,41 @@ bool verify_node_count(const char *arg, int *min_nodes, int *max_nodes) return true; } +/* + * If the node list supplied is a file name, translate that into + * a list of nodes, we orphan the data pointed to + * RET true if the node list is a valid one + */ +bool verify_node_list(char **node_list_pptr, enum task_dist_states dist, + int task_count) +{ + char *nodelist = NULL; + + xassert (node_list_pptr); + xassert (*node_list_pptr); + + if (strchr(*node_list_pptr, '/') == NULL) + return true; /* not a file name */ + + /* If we are using Arbitrary grab count out of the hostfile + using them exactly the way we read it in since we are + saying, lay it out this way! 
*/ + if(dist == SLURM_DIST_ARBITRARY) + nodelist = slurm_read_hostfile(*node_list_pptr, task_count); + else + nodelist = slurm_read_hostfile(*node_list_pptr, NO_VAL); + + if (!nodelist) + return false; + + xfree(*node_list_pptr); + *node_list_pptr = xstrdup(nodelist); + free(nodelist); + + return true; +} + + /* * get either 1 or 2 integers for a resource count in the form of either * (count, min-max, or '*') diff --git a/src/common/proc_args.h b/src/common/proc_args.h index 7e715e1c3d689b68ba8fa81019dc3198b916aebe..4835f48bbb142f442552378cef7681e5ecbbb4e3 100644 --- a/src/common/proc_args.h +++ b/src/common/proc_args.h @@ -72,6 +72,9 @@ long str_to_bytes(const char *arg); /* verify that a node count in arg is of a known form (count or min-max) */ bool verify_node_count(const char *arg, int *min_nodes, int *max_nodes); +/* verify a node list is valid based on the dist and task count given */ +bool verify_node_list(char **node_list_pptr, enum task_dist_states dist, + int task_count); /* parse a possible range of values from the form: count, min-max, or '*' */ bool get_resource_arg_range(const char *arg, const char *what, int* min, int *max, bool isFatal); diff --git a/src/common/read_config.c b/src/common/read_config.c index 26f5502a63ebb793c8571cd023518cb4fc529a57..ec6e36d768009f1d2ca3d8831ceed72d1c8ff1b4 100644 --- a/src/common/read_config.c +++ b/src/common/read_config.c @@ -1227,8 +1227,8 @@ extern int slurm_conf_get_addr(const char *node_name, slurm_addr *address) * Returns SLURM_SUCCESS on success, SLURM_FAILURE on failure. */ extern int slurm_conf_get_cpus_sct(const char *node_name, - uint16_t *cpus, uint16_t *sockets, - uint16_t *cores, uint16_t *threads) + uint16_t *cpus, uint16_t *sockets, + uint16_t *cores, uint16_t *threads) { int idx; names_ll_t *p; @@ -2467,6 +2467,12 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) if (!s_p_get_uint16(&conf->unkillable_timeout, "UnkillableStepTimeout", hashtbl)) conf->unkillable_timeout = DEFAULT_UNKILLABLE_TIMEOUT; + +#ifdef HAVE_BG + if (conf->node_prefix == NULL) + fatal("No valid node name prefix identified"); +#endif + xfree(default_storage_type); xfree(default_storage_loc); xfree(default_storage_host); diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c index 07f1fce00a55bc6656a6d7e6a11c8b40b77e3599..e0d44ea222cc40a57c312dd3759854935290c776 100644 --- a/src/common/slurm_accounting_storage.c +++ b/src/common/slurm_accounting_storage.c @@ -6959,7 +6959,7 @@ extern char *acct_qos_str(List qos_list, uint32_t level) return NULL; } else if(!level) { debug2("no level"); - return "None"; + return ""; } itr = list_iterator_create(qos_list); diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c index 5c73501a795508b7784b803d7a56731c22d3e570..13f9d46feebdeea8f54c0a1b42237618386a6aa2 100644 --- a/src/common/slurm_protocol_api.c +++ b/src/common/slurm_protocol_api.c @@ -3466,7 +3466,10 @@ extern void convert_num_unit(float num, char *buf, int buf_size, int orig_type) char *unit = "\0KMGP?"; int i = (int)num % 512; - if((i > 0 && num < 1024) || (int)num == 0) { + if((int)num == 0) { + snprintf(buf, buf_size, "%d", (int)num); + return; + } else if((i > 0 && num < 1024)) { snprintf(buf, buf_size, "%d%c", (int)num, unit[orig_type]); return; } diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c index c17e974c60e0eb54e346305346e0994cae2835a7..6ac8ccbee18619ad64e3332d2ff5371d146a34fb 100644 --- a/src/common/slurm_protocol_defs.c +++ 
b/src/common/slurm_protocol_defs.c @@ -159,13 +159,19 @@ extern int slurm_addto_char_list(List char_list, char *names) if(!strcasecmp(tmp_char, name)) break; } - - if(!tmp_char) { - _make_lower(name); - list_append(char_list, name); + /* If we get a duplicate remove the + first one and tack this on the end. + This is needed for get associations + with qos. + */ + if(tmp_char) + list_delete_item(itr); + else count++; - } else - xfree(name); + + _make_lower(name); + list_append(char_list, name); + list_iterator_reset(itr); i++; @@ -187,12 +193,18 @@ extern int slurm_addto_char_list(List char_list, char *names) break; } - if(!tmp_char) { - _make_lower(name); - list_append(char_list, name); + /* If we get a duplicate remove the + first one and tack this on the end. + This is needed for get associations + with qos. + */ + if(tmp_char) + list_delete_item(itr); + else count++; - } else - xfree(name); + + _make_lower(name); + list_append(char_list, name); } list_iterator_destroy(itr); return count; diff --git a/src/common/slurm_step_layout.c b/src/common/slurm_step_layout.c index db7390cbe484cbf508e4dfca01ea4ded0c5bd1b8..075a8495b0af772925cf5a68c7613b1eeb8fabc9 100644 --- a/src/common/slurm_step_layout.c +++ b/src/common/slurm_step_layout.c @@ -117,7 +117,7 @@ slurm_step_layout_t *slurm_step_layout_create( } else { step_layout->node_list = xstrdup(tlist); } - + step_layout->task_cnt = num_tasks; #ifdef HAVE_FRONT_END /* Limited job step support */ @@ -499,9 +499,10 @@ static int _task_layout_hostfile(slurm_step_layout_t *step_layout, debug2("list is %s", arbitrary_nodes); step_alloc_hosts = hostlist_create(arbitrary_nodes); if(hostlist_count(step_alloc_hosts) != step_layout->task_cnt) { - error("Asked for %u tasks have %d in the nodelist." - "Check your nodelist", + error("Asked for %u tasks have %d in the nodelist. " + "Check your nodelist, or set the -n option to be %d", step_layout->task_cnt, + hostlist_count(step_alloc_hosts), hostlist_count(step_alloc_hosts)); return SLURM_ERROR; } diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c index b7c38cbe8d898d8802dd26c71817d5f41da33fce..1978b310750218dc9a0efa1633a3f72ed6da2da0 100644 --- a/src/common/slurmdbd_defs.c +++ b/src/common/slurmdbd_defs.c @@ -89,9 +89,11 @@ static pthread_t agent_tid = 0; static time_t agent_shutdown = 0; static pthread_mutex_t slurmdbd_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t slurmdbd_cond = PTHREAD_COND_INITIALIZER; static slurm_fd slurmdbd_fd = -1; static char * slurmdbd_auth_info = NULL; static bool rollback_started = 0; +static bool halt_agent = 0; static void * _agent(void *x); static void _agent_queue_del(void *x); @@ -232,15 +234,21 @@ extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version, xassert(req); xassert(resp); + /* To make sure we can get this to send instead of the agent + sending stuff that can happen anytime we set halt_agent and + then after we get into the mutex we unset. 
+ */ + halt_agent = 1; read_timeout = SLURMDBD_TIMEOUT * 1000; slurm_mutex_lock(&slurmdbd_lock); + halt_agent = 0; if (slurmdbd_fd < 0) { /* Either slurm_open_slurmdbd_conn() was not executed or * the connection to Slurm DBD has been closed */ _open_slurmdbd_fd(); if (slurmdbd_fd < 0) { - slurm_mutex_unlock(&slurmdbd_lock); - return SLURM_ERROR; + rc = SLURM_ERROR; + goto end_it; } } @@ -251,16 +259,15 @@ extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version, if (rc != SLURM_SUCCESS) { error("slurmdbd: Sending message type %u: %d: %m", req->msg_type, rc); - slurm_mutex_unlock(&slurmdbd_lock); - return rc; + goto end_it; } buffer = _recv_msg(read_timeout); if (buffer == NULL) { error("slurmdbd: Getting response to message type %u", req->msg_type); - slurm_mutex_unlock(&slurmdbd_lock); - return SLURM_ERROR; + rc = SLURM_ERROR; + goto end_it; } rc = unpack_slurmdbd_msg(rpc_version, resp, buffer); @@ -270,6 +277,8 @@ extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version, rc = ((dbd_id_rc_msg_t *)resp->data)->return_code; free_buf(buffer); +end_it: + pthread_cond_signal(&slurmdbd_cond); slurm_mutex_unlock(&slurmdbd_lock); return rc; @@ -315,8 +324,9 @@ extern int slurm_send_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req) error("slurmdbd: agent queue is full, discarding request"); rc = SLURM_ERROR; } - slurm_mutex_unlock(&agent_lock); + pthread_cond_broadcast(&agent_cond); + slurm_mutex_unlock(&agent_lock); return rc; } @@ -1688,8 +1698,10 @@ static void *_agent(void *x) xsignal_unblock(sigarray); while (agent_shutdown == 0) { - slurm_mutex_lock(&slurmdbd_lock); + if(halt_agent) + pthread_cond_wait(&slurmdbd_cond, &slurmdbd_lock); + if ((slurmdbd_fd < 0) && (difftime(time(NULL), fail_time) >= 10)) { /* The connection to Slurm DBD is not open */ diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c index 0e515fef073e3c1a272f77749fd81c6950e2ea78..46deb4e9746609f5efb48165559a6f7d879c9f0b 100644 --- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c +++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c @@ -636,6 +636,9 @@ static int _setup_association_limits(acct_association_rec_t *assoc, list_iterator_create(assoc->qos_list); while((tmp_char = list_next(qos_itr))) { + /* we don't want to include blank names */ + if(!tmp_char[0]) + continue; if(!set) { if(tmp_char[0] == '+' || tmp_char[0] == '-') qos_type = "delta_qos"; @@ -645,11 +648,12 @@ static int _setup_association_limits(acct_association_rec_t *assoc, } list_iterator_destroy(qos_itr); - - xstrfmtcat(*cols, ", %s", qos_type); - xstrfmtcat(*vals, ", '%s'", qos_val); - xstrfmtcat(*extra, ", %s=\"%s\"", qos_type, qos_val); - xfree(qos_val); + if(qos_val) { + xstrfmtcat(*cols, ", %s", qos_type); + xstrfmtcat(*vals, ", '%s'", qos_val); + xstrfmtcat(*extra, ", %s=\"%s\"", qos_type, qos_val); + xfree(qos_val); + } } else if((qos_level == QOS_LEVEL_SET) && default_qos_str) { /* Add default qos to the account */ xstrcat(*cols, ", qos"); @@ -3035,7 +3039,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn) { "max_vsize", "bigint unsigned default 0 not null" }, { "max_vsize_task", "smallint unsigned default 0 not null" }, { "max_vsize_node", "int unsigned default 0 not null" }, - { "ave_vsize", "double default 0.0 not null" }, + { "ave_vsize", "double unsigned default 0.0 not null" }, { "max_rss", "bigint unsigned default 0 not null" }, { "max_rss_task", "smallint unsigned default 0 not null" }, { 
"max_rss_node", "int unsigned default 0 not null" }, @@ -3153,7 +3157,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn) "end if; " "if @qos = '' then set @s = CONCAT(" "@s, '@qos := qos, " - "@delta_qos := CONCAT(@delta_qos, delta_qos), '); " + "@delta_qos := CONCAT(delta_qos, @delta_qos), '); " "end if; " "set @s = concat(@s, ' @my_acct := parent_acct from ', " "my_table, ' where acct = \"', @my_acct, '\" && " @@ -5644,9 +5648,11 @@ extern List acct_storage_p_modify_associations( ", qos=if(qos='', '', " "replace(qos, ',%s', ''))" ", delta_qos=if(qos='', " - "concat(replace(delta_qos, " - "',%s', ''), ',%s'), '')", - new_qos+1, new_qos, new_qos); + "concat(replace(replace(" + "delta_qos, ',+%s', ''), " + "',-%s', ''), ',%s'), '')", + new_qos+1, new_qos+1, + new_qos+1, new_qos); } else if(new_qos[0] == '+') { xstrfmtcat(vals, ", qos=if(qos='', '', " @@ -5654,10 +5660,12 @@ extern List acct_storage_p_modify_associations( "replace(qos, ',%s', ''), " "\"%s\")), delta_qos=if(" "qos='', concat(" - "replace(delta_qos, " - "',%s', ''), ',%s'), '')", + "replace(replace(" + "delta_qos, ',+%s', ''), " + "',-%s', ''), ',%s'), '')", new_qos+1, new_qos+1, - new_qos, new_qos); + new_qos+1, new_qos+1, + new_qos); } else if(new_qos[0]) xstrfmtcat(tmp_qos, ",%s", new_qos); else @@ -8553,9 +8561,10 @@ empty: parent_qos+1); /* then add the parents delta */ - if(parent_delta_qos) + if(parent_delta_qos) slurm_addto_char_list(delta_qos_list, parent_delta_qos+1); + /* now add the associations */ if(row[ASSOC_REQ_DELTA_QOS][0]) slurm_addto_char_list( diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c index 801331617c0864def287236327ce2f4c04e4da35..e01bc40307f7ce954c527e4d0e27629e50624f00 100644 --- a/src/plugins/accounting_storage/mysql/mysql_rollup.c +++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c @@ -375,6 +375,17 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn, } xfree(query); + /* If a reservation overlaps another reservation we + total up everything here as if they didn't but when + calculating the total time for a cluster we will + remove the extra time received. This may result in + unexpected results with association based reports + since the association is given the total amount of + time of each reservation, thus equaling more time + that is available. Job/Cluster/Reservation reports + should be fine though since we really don't over + allocate resources. 
+ */ while((row = mysql_fetch_row(result))) { int row_start = atoi(row[RESV_REQ_START]); int row_end = atoi(row[RESV_REQ_END]); @@ -942,6 +953,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn, list_flush(assoc_usage_list); list_flush(cluster_usage_list); list_flush(wckey_usage_list); + list_flush(resv_usage_list); curr_start = curr_end; curr_end = curr_start + add_sec; } diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c index f97d954b02ba8718ba5ddac8b5cb681557f3a1df..730247c93d61ce58ed23fd30d648125d63ae1b01 100644 --- a/src/plugins/sched/backfill/backfill.c +++ b/src/plugins/sched/backfill/backfill.c @@ -343,6 +343,10 @@ static void _attempt_backfill(void) bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL; time_t now = time(NULL), start_res; node_space_map_t node_space[MAX_BACKFILL_JOB_CNT + 2]; + static int sched_timeout = 0; + + if(!sched_timeout) + sched_timeout = MIN(slurm_get_msg_timeout(), 10); if (slurm_get_root_filter()) filter_root = true; @@ -466,9 +470,15 @@ static void _attempt_backfill(void) j = _try_sched(job_ptr, &avail_bitmap, min_nodes, max_nodes, req_nodes); - if (j != SLURM_SUCCESS) + if (j != SLURM_SUCCESS) { + if((time(NULL) - now) >= sched_timeout) { + debug("backfill: loop taking too long, " + "breaking out"); + break; + } continue; /* not runable */ - + } + job_ptr->start_time = MAX(job_ptr->start_time, start_res); if (job_ptr->start_time <= now) { int rc = _start_job(job_ptr, resv_bitmap); @@ -500,6 +510,10 @@ static void _attempt_backfill(void) #if __DEBUG _dump_node_space_table(node_space); #endif + if((time(NULL) - now) >= sched_timeout) { + debug("backfill: loop taking too long, breaking out"); + break; + } } FREE_NULL_BITMAP(avail_bitmap); FREE_NULL_BITMAP(resv_bitmap); diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c index 024acb08ec77bb1e01c4948a05e2019a3c5a7c35..cfb308776c6d1cd03e666566c5c577e82b4bbcc9 100644 --- a/src/plugins/sched/wiki2/get_jobs.c +++ b/src/plugins/sched/wiki2/get_jobs.c @@ -16,7 +16,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -414,7 +414,7 @@ static void _get_job_comment(struct job_record *job_ptr, /* SHARED NODES */ if (cr_enabled) { /* consumable resources */ if (job_ptr->part_ptr && - (job_ptr->part_ptr->max_share == 0)) /* Exclusive use */ + (job_ptr->part_ptr->max_share == 0)) /* Exclusive use */ sharing = 0; else if (job_ptr->details && job_ptr->details->shared) sharing = 1; diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c index 2f13cc288ab953b3887aced7883679f77f6b8977..00026b999547e906606c6b64a6b669b8fea0071f 100644 --- a/src/plugins/sched/wiki2/get_nodes.c +++ b/src/plugins/sched/wiki2/get_nodes.c @@ -17,7 +17,7 @@ * any later version.
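The backfill change above caps how long one scheduling pass may run by rechecking the elapsed time after each job and breaking out once a timeout derived from the message timeout is exceeded. A stand-alone sketch of that pattern follows; try_schedule() is a hypothetical stub and the fixed 10 second cap simply mirrors the upper bound used in the patch.

#include <stdio.h>
#include <time.h>

static void try_schedule(int job_id)
{
	/* Placeholder for the real scheduling attempt. */
	printf("considering job %d\n", job_id);
}

static void attempt_backfill_bounded(const int *job_queue, int job_cnt)
{
	const double max_sched_secs = 10.0;	/* cap, as in the patch */
	time_t start = time(NULL);
	int i;

	for (i = 0; i < job_cnt; i++) {
		try_schedule(job_queue[i]);
		/* Recheck the clock after every job so one pass cannot
		 * monopolize the controller indefinitely. */
		if (difftime(time(NULL), start) >= max_sched_secs)
			break;
	}
}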
* * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -295,9 +295,9 @@ static char * _dump_node(struct node_record *node_ptr, hostlist_t hl, /* Strip out any quotes, they confuse Moab */ char *reason, *bad_char; reason = xstrdup(node_ptr->reason); - while ((bad_char = strchr(node_ptr->reason, '\''))) + while ((bad_char = strchr(reason, '\''))) bad_char[0] = ' '; - while ((bad_char = strchr(node_ptr->reason, '\"'))) + while ((bad_char = strchr(reason, '\"'))) bad_char[0] = ' '; snprintf(tmp, sizeof(tmp), "CAT=\"%s\";", reason); xstrcat(buf, tmp); diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c index 201d889e19802b6af71c153670b211a560b5b435..90d70127497188343f5462d1fe05e90c6ead9568 100644 --- a/src/plugins/select/bluegene/block_allocator/block_allocator.c +++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * block_allocator.c - Assorted functions for layout of bluegene blocks, * wiring, mapping for smap, etc. - * $Id: block_allocator.c 18102 2009-07-09 20:45:13Z jette $ + * $Id: block_allocator.c 18612 2009-09-02 19:00:21Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -1255,10 +1255,11 @@ extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state) /* basically set the node as used */ if((node_base_state == NODE_STATE_DOWN) - || (ba_node->state & NODE_STATE_DRAIN)) + || (state & NODE_STATE_DRAIN)) ba_node->used = true; else ba_node->used = false; + ba_node->state = state; } @@ -1506,14 +1507,26 @@ extern int check_and_set_node_list(List nodes) grid[ba_node->coord[X]] [ba_node->coord[Y]] [ba_node->coord[Z]]; + if(ba_node->used && curr_ba_node->used) { - debug4("I have already been to " - "this node %c%c%c", - alpha_num[ba_node->coord[X]], - alpha_num[ba_node->coord[Y]], - alpha_num[ba_node->coord[Z]]); - rc = SLURM_ERROR; - goto end_it; + /* Only error if the midplane isn't already + * marked down or in a error state outside of + * the bluegene block. 
+ */ + uint16_t base_state, node_flags; + base_state = curr_ba_node->state & NODE_STATE_BASE; + node_flags = curr_ba_node->state & NODE_STATE_FLAGS; + if (!(node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) + && (base_state != NODE_STATE_DOWN)) { + debug4("I have already been to " + "this node %c%c%c %s", + alpha_num[ba_node->coord[X]], + alpha_num[ba_node->coord[Y]], + alpha_num[ba_node->coord[Z]], + node_state_string(curr_ba_node->state)); + rc = SLURM_ERROR; + goto end_it; + } } if(ba_node->used) @@ -3787,7 +3800,6 @@ requested_end: static bool _node_used(ba_node_t* ba_node, int x_size) { ba_switch_t* ba_switch = NULL; - /* if we've used this node in another block already */ if (!ba_node || ba_node->used) { debug4("node %c%c%c used", @@ -3819,7 +3831,7 @@ static bool _node_used(ba_node_t* ba_node, int x_size) return true; } } - + return false; } @@ -4026,6 +4038,35 @@ static int _set_external_wires(int dim, int count, ba_node_t* source, /* set up split x */ if(DIM_SIZE[X] == 1) { + } else if(DIM_SIZE[X] == 4) { + switch(count) { + case 0: + case 3: + /* 0 and 3rd Node */ + /* nothing */ + break; + case 1: + /* 1st Node */ + target = &ba_system_ptr->grid[0] + [source->coord[Y]] + [source->coord[Z]]; + /* 4->3 of 0th */ + _switch_config(source, target, dim, 4, 3); + break; + case 2: + /* 2nd Node */ + target = &ba_system_ptr->grid[3] + [source->coord[Y]] + [source->coord[Z]]; + /* 4->3 of 3rd and back */ + _switch_config(source, target, dim, 4, 3); + _switch_config(source, target, dim, 3, 4); + break; + default: + fatal("got %d for a count on a %d X-dim system", + count, DIM_SIZE[X]); + break; + } } else if(DIM_SIZE[X] == 5) { /* 4 X dim fixes for wires */ switch(count) { diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c index 114334712df40f567969a9b84a01b90be6424350..cec58fa4d7b2f44aad3a4646e4a5f31b7fbe26dd 100644 --- a/src/plugins/select/bluegene/plugin/bg_block_info.c +++ b/src/plugins/select/bluegene/plugin/bg_block_info.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * bg_block_info.c - bluegene block information from the db2 database. * - * $Id: bg_block_info.c 17534 2009-05-19 00:58:46Z da $ + * $Id: bg_block_info.c 18147 2009-07-15 16:25:53Z da $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -412,7 +412,18 @@ extern int update_block_list() } remove_from_bg_list(bg_lists->booted, bg_record); - } + } else if(bg_record->state == RM_PARTITION_ERROR) { + if(bg_record->boot_state == 1) + error("Block %s in an error " + "state while booting.", + bg_record->bg_block_id); + else + error("Block %s in an error state.", + bg_record->bg_block_id); + remove_from_bg_list(bg_lists->booted, + bg_record); + trigger_block_error(); + } updated = 1; } @@ -433,32 +444,14 @@ extern int update_block_list() break; case RM_PARTITION_ERROR: - bg_record->boot_state = 0; - bg_record->boot_count = 0; - if(bg_record->job_running > NO_JOB_RUNNING) { - error("Block %s in an error " - "state while booting. 
" - "Failing job %u.", - bg_record->bg_block_id, - bg_record->job_running); - freeit = xmalloc( - sizeof(kill_job_struct_t)); - freeit->jobid = bg_record->job_running; - list_push(kill_job_list, freeit); - if(remove_from_bg_list( - bg_lists->job_running, - bg_record) - == SLURM_SUCCESS) { - num_unused_cpus += - bg_record->cpu_cnt; - } - } else - error("block %s in an error " - "state while booting.", - bg_record->bg_block_id); - remove_from_bg_list(bg_lists->booted, - bg_record); - trigger_block_error(); + /* If we get an error on boot that + * means it is a transparent L3 error + * and should be trying to fix + * itself. If this is the case we + * just hang out waiting for the state + * to go to free where we will try to + * boot again below. + */ break; case RM_PARTITION_FREE: if(bg_record->boot_count < RETRY_BOOT_COUNT) { diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c index 0174a675a08943b69f916f63528588c4370b4d87..f6aad49f5f4ee7f33fad0ca4082a4f25f19f464e 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_place.c +++ b/src/plugins/select/bluegene/plugin/bg_job_place.c @@ -97,7 +97,8 @@ static int _check_for_booted_overlapping_blocks( bool test_only); static int _dynamically_request(List block_list, int *blocks_added, ba_request_t *request, - char *user_req_nodes); + char *user_req_nodes, + bool test_only); static int _find_best_block_match(List block_list, int *blocks_added, struct job_record* job_ptr, bitstr_t* slurm_block_bitmap, @@ -668,6 +669,10 @@ static int _check_for_booted_overlapping_blocks( bg_record); } + debug("Removing unusable block %s " + "from the system.", + bg_record->bg_block_id); + if(!found_record) { debug("This record %s wasn't " "found in the " @@ -679,10 +684,6 @@ static int _check_for_booted_overlapping_blocks( } else destroy_bg_record(bg_record); - debug("removing the block %s" - "from the system", - bg_record->bg_block_id); - list_push(temp_list, found_record); free_block_list(temp_list); list_destroy(temp_list); @@ -708,7 +709,8 @@ static int _check_for_booted_overlapping_blocks( static int _dynamically_request(List block_list, int *blocks_added, ba_request_t *request, - char *user_req_nodes) + char *user_req_nodes, + bool test_only) { List list_of_lists = NULL; List temp_list = NULL; @@ -755,7 +757,10 @@ static int _dynamically_request(List block_list, int *blocks_added, while((bg_record = list_pop(new_blocks))) { if(block_exist_in_list(block_list, bg_record)) destroy_bg_record(bg_record); - else { + else if(test_only) { + list_append(block_list, bg_record); + (*blocks_added) = 1; + } else { if(job_block_test_list == bg_lists->job_running) { if(configure_block(bg_record) @@ -774,18 +779,18 @@ static int _dynamically_request(List block_list, int *blocks_added, list_append(block_list, bg_record); print_bg_record(bg_record); (*blocks_added) = 1; - } + } } list_destroy(new_blocks); if(!*blocks_added) { - memcpy(request->geometry, start_geo, + memcpy(request->geometry, start_geo, sizeof(int)*BA_SYSTEM_DIMENSIONS); rc = SLURM_ERROR; continue; } list_sort(block_list, (ListCmpF)_bg_record_sort_aval_dec); - + rc = SLURM_SUCCESS; break; } else if (errno == ESLURM_INTERCONNECT_FAILURE) { @@ -907,57 +912,8 @@ static int _find_best_block_match(List block_list, } if(!req_nodes) req_nodes = min_nodes; - } - if (target_size == 0) { /* no geometry specified */ - if(job_ptr->details->req_nodes - && !start_req) { - bg_record_t *tmp_record = NULL; - char *tmp_nodes= job_ptr->details->req_nodes; - int len = 
strlen(tmp_nodes); - - i = 0; - while(i<len - && tmp_nodes[i] != '[' - && (tmp_nodes[i] < '0' || tmp_nodes[i] > 'Z' - || (tmp_nodes[i] > '9' - && tmp_nodes[i] < 'A'))) - i++; - - if(i<len) { - len -= i; - tmp_record = xmalloc(sizeof(bg_record_t)); - tmp_record->bg_block_list = - list_create(destroy_ba_node); - - len += strlen(bg_conf->slurm_node_prefix)+1; - tmp_record->nodes = xmalloc(len); - - snprintf(tmp_record->nodes, - len, - "%s%s", - bg_conf->slurm_node_prefix, - tmp_nodes+i); - - - process_nodes(tmp_record, false); - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) { - req_geometry[i] = tmp_record->geo[i]; - start[i] = tmp_record->start[i]; - } - destroy_bg_record(tmp_record); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &req_geometry); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_START, - &start); - start_req = 1; - } else - error("BPs=%s is in a weird format", - tmp_nodes); - } else { - req_geometry[X] = (uint16_t)NO_VAL; - } + } else { + req_geometry[X] = (uint16_t)NO_VAL; target_size = min_nodes; } @@ -1090,7 +1046,8 @@ static int _find_best_block_match(List block_list, if((rc = _dynamically_request(block_list, blocks_added, &request, - job_ptr->details->req_nodes)) + job_ptr->details->req_nodes, + test_only)) == SLURM_SUCCESS) { create_try = 1; continue; @@ -1147,8 +1104,17 @@ static int _find_best_block_match(List block_list, destroy_bg_record(bg_record); if(errno == ESLURM_INTERCONNECT_FAILURE || !list_count(job_list)) { - error("this job will never " - "run on this system"); + char *nodes; + if (slurmctld_conf. + slurmctld_debug < 5) + break; + nodes = bitmap2node_name( + slurm_block_bitmap); + debug("job %u not " + "runable on %s", + job_ptr->job_id, + nodes); + xfree(nodes); break; } continue; @@ -1238,6 +1204,11 @@ static int _sync_block_lists(List full_list, List incomp_list) itr = list_iterator_create(full_list); itr2 = list_iterator_create(incomp_list); while((new_record = list_next(itr))) { + /* Make sure we aren't adding any block that doesn't + have a block_id. 
+ */ + if(!new_record->bg_block_id) + continue; while((bg_record = list_next(itr2))) { if(bit_equal(bg_record->bitmap, new_record->bitmap) && bit_equal(bg_record->ionode_bitmap, @@ -1246,10 +1217,9 @@ static int _sync_block_lists(List full_list, List incomp_list) } if(!bg_record) { - bg_record = xmalloc(sizeof(bg_record_t)); - copy_bg_record(new_record, bg_record); - debug4("adding %s", bg_record->bg_block_id); - list_append(incomp_list, bg_record); + list_remove(itr); + debug4("adding %s", new_record->bg_block_id); + list_append(incomp_list, new_record); count++; } list_iterator_reset(itr2); @@ -1385,8 +1355,8 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap, } select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), SELECT_PRINT_MIXED); - debug("bluegene:submit_job: %s nodes=%u-%u-%u", - buf, min_nodes, req_nodes, max_nodes); + debug("bluegene:submit_job: %d %s nodes=%u-%u-%u", + mode, buf, min_nodes, req_nodes, max_nodes); select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), SELECT_PRINT_BLRTS_IMAGE); #ifdef HAVE_BGL @@ -1450,29 +1420,29 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap, bg_record->ionodes); if(!bg_record->bg_block_id) { - uint16_t geo[BA_SYSTEM_DIMENSIONS]; - debug2("%d can start unassigned job %u at " "%u on %s", test_only, job_ptr->job_id, starttime, bg_record->nodes); select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_BLOCK_ID, - "unassigned"); - + SELECT_DATA_BLOCK_ID, + "unassigned"); + min_nodes = bg_record->node_cnt; select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_NODE_CNT, - &min_nodes); - memset(geo, 0, - sizeof(uint16_t) * BA_SYSTEM_DIMENSIONS); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &geo); + SELECT_DATA_NODE_CNT, + &min_nodes); /* This is a fake record so we need to * destroy it after we get the info from - * it */ - destroy_bg_record(bg_record); + * it. if it was just testing then + * we added this record to the + * block_list. If this is the case + * it will be set below, but set + * blocks_added to 0 since we don't + * want to sync this with the list. */ + if(!blocks_added) + destroy_bg_record(bg_record); + blocks_added = 0; } else { if((bg_record->ionodes) && (job_ptr->part_ptr->max_share <= 1)) @@ -1490,9 +1460,6 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap, select_g_set_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_NODE_CNT, &bg_record->node_cnt); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &bg_record->geo); /* tmp16 = bg_record->conn_type; */ /* select_g_set_jobinfo(job_ptr->select_jobinfo, */ diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c index 86aa2c87a96ff21a0225d91daecc24a99380bc91..2befbf484335387bd16f4cb399d5bb946a463b9d 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_run.c +++ b/src/plugins/select/bluegene/plugin/bg_job_run.c @@ -2,7 +2,7 @@ * bg_job_run.c - blue gene job execution (e.g. initiation and termination) * functions. * - * $Id: bg_job_run.c 18063 2009-07-06 23:56:31Z da $ + * $Id: bg_job_run.c 18162 2009-07-15 23:23:06Z da $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
@@ -163,7 +163,8 @@ static int _remove_job(db_job_id_t job_id) else if(job_state == RM_JOB_DYING) { if(count > MAX_POLL_RETRIES) error("Job %d isn't dying, trying for " - "%d seconds", count*POLL_INTERVAL); + "%d seconds", job_id, + count*POLL_INTERVAL); continue; } else if(job_state == RM_JOB_ERROR) { error("job %d is in a error state.", job_id); @@ -215,14 +216,16 @@ static int _reset_block(bg_record_t *bg_record) /* remove user from list */ if(bg_record->target_name) { - if(strcmp(bg_record->target_name, bg_conf->slurm_user_name)) { + if(strcmp(bg_record->target_name, + bg_conf->slurm_user_name)) { xfree(bg_record->target_name); bg_record->target_name = xstrdup(bg_conf->slurm_user_name); } update_block_user(bg_record, 1); } else { - bg_record->target_name = xstrdup(bg_conf->slurm_user_name); + bg_record->target_name = + xstrdup(bg_conf->slurm_user_name); } @@ -230,10 +233,17 @@ static int _reset_block(bg_record_t *bg_record) bg_record->boot_count = 0; last_bg_update = time(NULL); - if(remove_from_bg_list(bg_lists->job_running, bg_record) - == SLURM_SUCCESS) { - num_unused_cpus += bg_record->cpu_cnt; - } + /* Only remove from the job_running list if + job_running == NO_JOB_RUNNING, since blocks in + error state could also be in this list and we don't + want to remove them. + */ + if(bg_record->job_running == NO_JOB_RUNNING) + if(remove_from_bg_list(bg_lists->job_running, + bg_record) + == SLURM_SUCCESS) { + num_unused_cpus += bg_record->cpu_cnt; + } } else { error("No block given to reset"); rc = SLURM_ERROR; diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c index 36043e6e2e38287a865dd7c547817efe514a6501..f0fa4a7e88b3e3e1d972818e7a939d7992192224 100644 --- a/src/plugins/select/bluegene/plugin/bg_record_functions.c +++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c @@ -168,6 +168,7 @@ extern void process_nodes(bg_record_t *bg_record, bool startup) int best_start[BA_SYSTEM_DIMENSIONS]; int start[BA_SYSTEM_DIMENSIONS]; int end[BA_SYSTEM_DIMENSIONS]; + bool start_set=0; ListIterator itr; ba_node_t* ba_node = NULL; @@ -273,6 +274,7 @@ extern void process_nodes(bg_record_t *bg_record, bool startup) bg_record->start[X] = best_start[X]; bg_record->start[Y] = best_start[Y]; bg_record->start[Z] = best_start[Z]; + start_set = 1; debug2("process_nodes: " "start is %dx%dx%d", bg_record->start[X], @@ -286,6 +288,11 @@ extern void process_nodes(bg_record_t *bg_record, bool startup) end[X] = -1; end[Y] = -1; end[Z] = -1; + if(!start_set) { + bg_record->start[X] = HOSTLIST_BASE; + bg_record->start[Y] = HOSTLIST_BASE; + bg_record->start[Z] = HOSTLIST_BASE; + } list_sort(bg_record->bg_block_list, (ListCmpF) _ba_node_cmpf_inc); @@ -311,14 +318,28 @@ extern void process_nodes(bg_record_t *bg_record, bool startup) bg_record->geo[Z]++; end[Z] = ba_node->coord[Z]; } + if(!start_set) { + if(ba_node->coord[X]<bg_record->start[X]) { + bg_record->start[X] = ba_node->coord[X]; + } + if(ba_node->coord[Y]<bg_record->start[Y]) { + bg_record->start[Y] = ba_node->coord[Y]; + } + if(ba_node->coord[Z]<bg_record->start[Z]) { + bg_record->start[Z] = ba_node->coord[Z]; + } + } } list_iterator_destroy(itr); debug3("process_nodes: " - "geo = %c%c%c bp count is %d\n", + "geo = %c%c%c bp count is %d start is %c%c%c\n", alpha_num[bg_record->geo[X]], alpha_num[bg_record->geo[Y]], alpha_num[bg_record->geo[Z]], - bg_record->bp_count); + bg_record->bp_count, + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + 
alpha_num[bg_record->start[Z]]); /* This check is for sub midplane systems to figure out what the largest block can be. */ @@ -502,8 +523,7 @@ extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id) itr = list_iterator_create(my_list); while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { if(bg_record->bg_block_id) - if (!strcmp(bg_record->bg_block_id, - bg_block_id)) + if (!strcasecmp(bg_record->bg_block_id, bg_block_id)) break; } list_iterator_destroy(itr); @@ -1059,26 +1079,19 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start) if(bg_record->job_running > NO_JOB_RUNNING) slurm_fail_job(bg_record->job_running); - /* mark every one of these in an error state */ - if(bg_conf->layout_mode != LAYOUT_DYNAMIC) { - if(!delete_list) - delete_list = list_create(NULL); - list_append(delete_list, bg_record); - continue; - } - - /* below is only for dynamic modes since there are - never overlapping blocks there */ - /* if the block is smaller than the create size just - continue on. + /* If Running Dynamic mode and the the block is + smaller than the create size just continue on. */ - if(bg_record->node_cnt < create_size) { + if((bg_conf->layout_mode == LAYOUT_DYNAMIC) + && (bg_record->node_cnt < create_size)) { if(!delete_list) delete_list = list_create(NULL); list_append(delete_list, bg_record); continue; } + /* keep track of the smallest size that is at least + the size of create_size. */ if(!smallest_bg_record || (smallest_bg_record->node_cnt > bg_record->node_cnt)) smallest_bg_record = bg_record; @@ -1088,31 +1101,29 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start) if(bg_conf->layout_mode != LAYOUT_DYNAMIC) { debug3("running non-dynamic mode"); - if(delete_list) { - int cnt_set = 0; - /* don't lock here since it is handled inside - the put_block_in_error_state - */ - itr = list_iterator_create(delete_list); - while ((bg_record = list_next(itr))) { - /* we already handled this */ - if(bg_record->state == RM_PARTITION_ERROR) { - rc = SLURM_NO_CHANGE_IN_DATA; - continue; - } - - rc = put_block_in_error_state( - bg_record, BLOCK_ERROR_STATE); - cnt_set++; - } - if(cnt_set) - rc = SLURM_SUCCESS; - list_iterator_destroy(itr); + + /* This should never happen, but just in case... */ + if(delete_list) list_destroy(delete_list); + + /* If we found a block that is smaller or equal to a + midplane we will just mark it in an error state as + opposed to draining the node. + */ + if(smallest_bg_record + && (smallest_bg_record->node_cnt < bg_conf->bp_node_cnt)){ + if(smallest_bg_record->state == RM_PARTITION_ERROR) { + rc = SLURM_NO_CHANGE_IN_DATA; + goto cleanup; + } + + rc = put_block_in_error_state( + smallest_bg_record, BLOCK_ERROR_STATE); goto cleanup; } - debug("didn't get a smallest block"); + debug("No block under 1 midplane available for this nodecard. 
" + "Draining the whole node."); if(!node_already_down(bp_name)) { time_t now = time(NULL); char reason[128], time_str[32]; diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c index e2d5e2e49d065f2c3940e0bd27628fa27ed59c09..069bc7537f481ac554abcf316d24114a74411ba2 100755 --- a/src/plugins/select/bluegene/plugin/block_sys.c +++ b/src/plugins/select/bluegene/plugin/block_sys.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * block_sys.c - component used for wiring up the blocks * - * $Id: block_sys.c 18102 2009-07-09 20:45:13Z jette $ + * $Id: block_sys.c 18612 2009-09-02 19:00:21Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -1080,7 +1080,7 @@ extern int load_state_file(List curr_block_list, char *dir_name) #endif slurm_mutex_lock(&block_state_mutex); - reset_ba_system(false); + reset_ba_system(true); /* Locks are already in place to protect part_list here */ bitmap = bit_alloc(node_record_count); diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c index fca8258831362da4dd1650c46c44fd596139612f..3b4d745a1b132587108b710b4a0379bbd7a477d8 100644 --- a/src/plugins/select/bluegene/plugin/dynamic_block.c +++ b/src/plugins/select/bluegene/plugin/dynamic_block.c @@ -330,15 +330,28 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record, found_record->user_name = xstrdup(bg_record->user_name); found_record->user_uid = bg_record->user_uid; found_record->bg_block_list = list_create(destroy_ba_node); - ba_node = list_peek(bg_record->bg_block_list); + if(bg_record->bg_block_list) + ba_node = list_peek(bg_record->bg_block_list); if(!ba_node) { - hostlist_t hl = hostlist_create(bg_record->nodes); - char *host = hostlist_shift(hl); - hostlist_destroy(hl); - found_record->nodes = xstrdup(host); - free(host); - error("you gave me a list with no ba_nodes using %s", - found_record->nodes); + if(bg_record->nodes) { + hostlist_t hl = hostlist_create(bg_record->nodes); + char *host = hostlist_shift(hl); + hostlist_destroy(hl); + found_record->nodes = xstrdup(host); + free(host); + error("you gave me a list with no ba_nodes using %s", + found_record->nodes); + } else { + found_record->nodes = xstrdup_printf( + "%s%c%c%c", + bg_conf->slurm_node_prefix, + alpha_num[found_record->start[X]], + alpha_num[found_record->start[Y]], + alpha_num[found_record->start[Z]]); + error("you gave me a record with no ba_nodes " + "and no nodes either using %s", + found_record->nodes); + } } else { int i=0,j=0; new_ba_node = ba_copy_node(ba_node); @@ -737,7 +750,6 @@ again: } if(bg_record) { - List temp_list = NULL; bg_record_t *found_record = NULL; if(bg_record->original) { @@ -769,13 +781,6 @@ again: goto finished; } _split_block(block_list, new_blocks, found_record, cnodes); - remove_from_bg_list(block_list, bg_record); - destroy_bg_record(bg_record); - remove_from_bg_list(bg_lists->main, found_record); - temp_list = list_create(NULL); - list_push(temp_list, found_record); - free_block_list(temp_list); - list_destroy(temp_list); rc = SLURM_SUCCESS; goto finished; } diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c index 36c20d92a78799fe0906c56d3d091d54bcfe8ba6..27a943c34303f71a3c2b40d30f2dd71e4991da72 100644 
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c +++ b/src/plugins/select/bluegene/plugin/select_bluegene.c @@ -657,17 +657,19 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr) rc = SLURM_ERROR; goto end_it; } + /* make sure we are asking for a correct name */ for(i = 0; i < BA_SYSTEM_DIMENSIONS; i++) { - if((part_desc_ptr->name[i] >= '0' - && part_desc_ptr->name[i] <= '9') - || (part_desc_ptr->name[i] >= 'A' - && part_desc_ptr->name[i] <= 'Z')) { - error("update_sub_node: " - "misformatted name given %s", - part_desc_ptr->name); - rc = SLURM_ERROR; - goto end_it; - } + if((part_desc_ptr->name[j+i] >= '0' + && part_desc_ptr->name[j+i] <= '9') + || (part_desc_ptr->name[j+i] >= 'A' + && part_desc_ptr->name[j+i] <= 'Z')) + continue; + + error("update_sub_node: " + "misformatted name given %s", + part_desc_ptr->name); + rc = SLURM_ERROR; + goto end_it; } strncpy(coord, part_desc_ptr->name+j, @@ -750,7 +752,6 @@ extern int select_p_update_node_config (int index) extern int select_p_update_node_state (int index, uint16_t state) { int x, y, z; - for (y = DIM_SIZE[Y] - 1; y >= 0; y--) { for (z = 0; z < DIM_SIZE[Z]; z++) { for (x = 0; x < DIM_SIZE[X]; x++) { diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/plugin/sfree.c index 5c043f7525b617097fad0a60cfab1f40848f852b..c8460347d6c908b57d253b74b6a7b0b9c34e8f2a 100644 --- a/src/plugins/select/bluegene/plugin/sfree.c +++ b/src/plugins/select/bluegene/plugin/sfree.c @@ -1,6 +1,6 @@ /*****************************************************************************\ * sfree.c - free specified block or all blocks. - * $Id: sfree.c 17366 2009-04-28 23:04:14Z da $ + * $Id: sfree.c 18185 2009-07-17 19:02:22Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
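A short standalone sketch (not part of the patch) of the corrected check in select_p_update_sub_node() above: digits and capital letters at the coordinate offset are now accepted and anything else is rejected, rather than the reverse. DIMS and the offset handling below are simplified stand-ins for BA_SYSTEM_DIMENSIONS and the part_desc_ptr name parsing.

    #include <stdbool.h>
    #include <stdio.h>

    #define DIMS 3  /* stands in for BA_SYSTEM_DIMENSIONS */

    /* Accept a coordinate suffix only when every character is 0-9 or A-Z,
     * the per-character test the corrected loop applies at name[j + i]. */
    static bool valid_coord_suffix(const char *name, int offset)
    {
            for (int i = 0; i < DIMS; i++) {
                    char c = name[offset + i];
                    if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'))
                            continue;
                    fprintf(stderr, "misformatted name given %s\n", name);
                    return false;
            }
            return true;
    }

    int main(void)
    {
            /* e.g. a block named "bg000", coordinates starting at offset 2 */
            printf("%d\n", valid_coord_suffix("bg000", 2));  /* 1 */
            printf("%d\n", valid_coord_suffix("bg0x0", 2));  /* 0 */
            return 0;
    }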
@@ -572,7 +572,8 @@ static int _remove_job(db_job_id_t job_id) else if(job_state == RM_JOB_DYING) { if(count > MAX_POLL_RETRIES) error("Job %d isn't dying, trying for " - "%d seconds", count*POLL_INTERVAL); + "%d seconds", job_id, + count*POLL_INTERVAL); continue; } else if(job_state == RM_JOB_ERROR) { error("job %d is in a error state.", job_id); diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c index adf6872ea711d6e74bb504296e9e964732701841..6a2a4d66871208900b21c08e365318d524c1f817 100644 --- a/src/plugins/select/cons_res/select_cons_res.c +++ b/src/plugins/select/cons_res/select_cons_res.c @@ -933,6 +933,13 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr, /* reconstruct rows with remaining jobs */ struct part_res_record *p_ptr; + if(!job_ptr->part_ptr) { + error("error: 'rm' job %u does not have a " + "partition assigned", + job_ptr->job_id); + return SLURM_ERROR; + } + for (p_ptr = part_record_ptr; p_ptr; p_ptr = p_ptr->next) { if (strcmp(p_ptr->name, job_ptr->part_ptr->name) == 0) break; diff --git a/src/plugins/task/affinity/dist_tasks.c b/src/plugins/task/affinity/dist_tasks.c index 633bb4417a423e65c4897c49b0ed8457d0d4073d..f3d82e703eb51e5f6a0e3cf75cda20a1d2d4b5c3 100644 --- a/src/plugins/task/affinity/dist_tasks.c +++ b/src/plugins/task/affinity/dist_tasks.c @@ -170,7 +170,7 @@ void batch_bind(batch_job_launch_msg_t *req) bitstr_t *req_map, *hw_map; slurm_cred_arg_t arg; uint16_t sockets=0, cores=0, num_procs; - int hw_size, start, p, t, task_cnt=0; + int start, p, t, task_cnt=0; char *str; if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) { @@ -184,16 +184,18 @@ void batch_bind(batch_job_launch_msg_t *req) return; } - hw_size = conf->sockets * conf->cores * conf->threads; num_procs = MIN((sockets * cores), (conf->sockets * conf->cores)); req_map = (bitstr_t *) bit_alloc(num_procs); - hw_map = (bitstr_t *) bit_alloc(hw_size); + hw_map = (bitstr_t *) bit_alloc(conf->block_map_size); if (!req_map || !hw_map) { error("task/affinity: malloc error"); - bit_free(req_map); - bit_free(hw_map); + if (req_map) + bit_free(req_map); + if (hw_map) + bit_free(hw_map); slurm_cred_free_args(&arg); + return; } /* Transfer core_bitmap data to local req_map. 
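The batch_bind() hunk above sizes the hardware map by conf->block_map_size and, when either allocation fails, releases only what was actually obtained and returns instead of falling through. The generic sketch below (not part of the patch) shows that cleanup pattern in plain C; the byte-array "maps" are placeholders for SLURM's bitstr_t and bit_alloc()/bit_free(), and where the patch guards each bit_free() with a NULL check, the sketch simply relies on free(NULL) being a no-op.

    #include <stdio.h>
    #include <stdlib.h>

    /* Placeholder "bitmap": a zeroed byte array big enough for nbits bits. */
    static unsigned char *map_alloc(size_t nbits)
    {
            return calloc((nbits + 7) / 8, 1);
    }

    /* Allocate both maps or neither: on failure free whichever half
     * succeeded and report an error, rather than freeing unconditionally
     * and continuing as the old code did. */
    static int alloc_task_maps(size_t req_bits, size_t hw_bits,
                               unsigned char **req_map, unsigned char **hw_map)
    {
            *req_map = map_alloc(req_bits);
            *hw_map = map_alloc(hw_bits);
            if (!*req_map || !*hw_map) {
                    fprintf(stderr, "task/affinity: malloc error\n");
                    free(*req_map);     /* free(NULL) is a no-op */
                    free(*hw_map);
                    *req_map = *hw_map = NULL;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char *req_map, *hw_map;

            if (alloc_task_maps(64, 128, &req_map, &hw_map) == 0) {
                    puts("maps allocated");
                    free(req_map);
                    free(hw_map);
            }
            return 0;
    }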
@@ -216,8 +218,13 @@ void batch_bind(batch_job_launch_msg_t *req) * add them here but limit them to what the job * requested */ for (t = 0; t < conf->threads; t++) { - uint16_t bit = p * conf->threads + t; - bit_set(hw_map, bit); + uint16_t pos = p * conf->threads + t; + if (pos >= conf->block_map_size) { + info("more resources configured than exist"); + p = num_procs; + break; + } + bit_set(hw_map, pos); task_cnt++; } } @@ -582,7 +589,7 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req, { bitstr_t *req_map, *hw_map; slurm_cred_arg_t arg; - uint16_t p, t, num_procs, num_threads, sockets, cores, hw_size; + uint16_t p, t, num_procs, num_threads, sockets, cores; uint32_t job_node_id; int start; char *str; @@ -590,7 +597,6 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req, *hw_sockets = conf->sockets; *hw_cores = conf->cores; *hw_threads = conf->threads; - hw_size = (*hw_sockets) * (*hw_cores) * (*hw_threads); if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) { error("task/affinity: job lacks a credential"); @@ -613,7 +619,7 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req, num_procs = MIN((sockets * cores), ((*hw_sockets)*(*hw_cores))); req_map = (bitstr_t *) bit_alloc(num_procs); - hw_map = (bitstr_t *) bit_alloc(hw_size); + hw_map = (bitstr_t *) bit_alloc(conf->block_map_size); if (!req_map || !hw_map) { error("task/affinity: malloc error"); bit_free(req_map); @@ -786,7 +792,7 @@ static int _task_layout_lllp_multi(launch_tasks_request_msg_t *req, continue; if (masks[taskcount] == NULL) masks[taskcount] = - (bitstr_t *)bit_alloc(size); + bit_alloc(conf->block_map_size); bit_set(masks[taskcount], bit); if (++i < req->cpus_per_task) continue; @@ -886,7 +892,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req, continue; if (masks[taskcount] == NULL) masks[taskcount] = - (bitstr_t *)bit_alloc(size); + (bitstr_t *)bit_alloc(conf->block_map_size); bit_set(masks[taskcount], bit); if (++i < req->cpus_per_task) continue; @@ -1016,7 +1022,7 @@ static int _task_layout_lllp_block(launch_tasks_request_msg_t *req, for (i = 0; i < size; i++) { for (t = 0; t < task_array[i]; t++) { if (masks[taskcount] == NULL) - masks[taskcount] = (bitstr_t *)bit_alloc(size); + masks[taskcount] = (bitstr_t *)bit_alloc(conf->block_map_size); bit_set(masks[taskcount++], i); } } @@ -1077,7 +1083,12 @@ static bitstr_t *_lllp_map_abstract_mask(bitstr_t *bitmask) for (i = 0; i < num_bits; i++) { if (bit_test(bitmask,i)) { bit = BLOCK_MAP(i); - bit_set(newmask, bit); + if(bit < bit_size(newmask)) + bit_set(newmask, bit); + else + error("_lllp_map_abstract_mask: can't go from " + "%d -> %d since we only have %d bits", + i, bit, bit_size(newmask)); } } return newmask; diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c index 5b6596bcc75d906abbe9492aff65e910a358cfca..c1f86f3493cf405e9a1f3a348e863f1735452858 100644 --- a/src/plugins/task/affinity/task_affinity.c +++ b/src/plugins/task/affinity/task_affinity.c @@ -178,15 +178,12 @@ extern int task_slurmd_launch_request (uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id) { - int hw_sockets, hw_cores, hw_threads; char buf_type[100]; debug("task_slurmd_launch_request: %u %u", job_id, node_id); - hw_sockets = conf->sockets; - hw_cores = conf->cores; - hw_threads = conf->threads; - if (((hw_sockets >= 1) && ((hw_cores > 1) || (hw_threads > 1))) + if (((conf->sockets >= 1) + && ((conf->cores > 1) || (conf->threads > 1))) || (!(req->cpu_bind_type & 
CPU_BIND_NONE))) { _update_bind_type(req); diff --git a/src/plugins/topology/3d_torus/hilbert_slurm.c b/src/plugins/topology/3d_torus/hilbert_slurm.c index 466aa96015e0a9662583a76c3e09eb3a8bd43954..01dba5ead26479ac2aa63da7cb245443352f27b4 100644 --- a/src/plugins/topology/3d_torus/hilbert_slurm.c +++ b/src/plugins/topology/3d_torus/hilbert_slurm.c @@ -50,7 +50,7 @@ static int _coord(char coord) if ((coord >= '0') && (coord <= '9')) return (coord - '0'); if ((coord >= 'A') && (coord <= 'Z')) - return (coord - 'A'); + return (coord - 'A' + 10); return -1; } @@ -153,7 +153,8 @@ extern void nodes_to_hilbert_curve(void) node_ptr2->comm_name = tmp_name; tmp_val = node_ptr->hilbert_integer; - node_ptr->hilbert_integer = node_ptr2->hilbert_integer; + node_ptr->hilbert_integer = node_ptr2-> + hilbert_integer; node_ptr2->hilbert_integer = tmp_val; } } diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c index fda490558395c0812793e53a001b275207f5c3f7..cd52913eed962b4904487a452bc42ab71efa600e 100644 --- a/src/sacctmgr/sacctmgr.c +++ b/src/sacctmgr/sacctmgr.c @@ -819,10 +819,12 @@ sacctmgr [<OPTION>] [<COMMAND>] \n\ list cluster - Format=, Names= \n\ add cluster - Fairshare=, GrpCPUs=, GrpJobs=, \n\ GrpNodes=, GrpSubmitJob=, MaxCPUMins= \n\ - MaxJobs=, MaxNodes=, MaxWall=, and Name= \n\ + MaxJobs=, MaxNodes=, MaxWall=, Name=, \n\ + and QosLevel= \n\ modify cluster - (set options) Fairshare=, \n\ GrpCPUs=, GrpJobs=, GrpNodes=, GrpSubmitJob=, \n\ - MaxCPUMins=, MaxJobs=, MaxNodes=, and MaxWall= \n\ + MaxCPUMins=, MaxJobs=, MaxNodes=, MaxWall=, \n\ + and QosLevel= \n\ (where options) Names= \n\ delete cluster - Names= \n\ \n\ diff --git a/src/salloc/opt.c b/src/salloc/opt.c index fcbce27b33978813086ac17c4b00125811916097..1a5c1c1546de96760c76c764388f7710eab6b115 100644 --- a/src/salloc/opt.c +++ b/src/salloc/opt.c @@ -191,6 +191,31 @@ int initialize_and_process_args(int argc, char *argv[]) } +/* + * If the node list supplied is a file name, translate that into + * a list of nodes, we orphan the data pointed to + * RET true if the node list is a valid one + */ +static bool _valid_node_list(char **node_list_pptr) +{ + int count = NO_VAL; + + /* If we are using Arbitrary and we specified the number of + procs to use then we need exactly this many since we are + saying, lay it out this way! Same for max and min nodes. 
+ Other than that just read in as many in the hostfile */ + if(opt.nprocs_set) + count = opt.nprocs; + else if(opt.nodes_set) { + if(opt.max_nodes) + count = opt.max_nodes; + else if(opt.min_nodes) + count = opt.min_nodes; + } + + return verify_node_list(node_list_pptr, opt.distribution, count); +} + /* * print error message to stderr with opt.progname prepended */ @@ -770,6 +795,8 @@ void set_options(const int argc, char **argv) case 'x': xfree(opt.exc_nodes); opt.exc_nodes = xstrdup(optarg); + if (!_valid_node_list(&opt.exc_nodes)) + exit(1); break; case LONG_OPT_CONT: opt.contiguous = true; @@ -1263,6 +1290,49 @@ static bool _opt_verify(void) } /* else if (opt.nprocs_set && !opt.nodes_set) */ + if(!opt.nodelist) { + if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) { + /* make sure the file being read in has a / in + it to make sure it is a file in the + valid_node_list function */ + if(!strstr(opt.nodelist, "/")) { + char *add_slash = xstrdup("./"); + xstrcat(add_slash, opt.nodelist); + xfree(opt.nodelist); + opt.nodelist = add_slash; + } + opt.distribution = SLURM_DIST_ARBITRARY; + if (!_valid_node_list(&opt.nodelist)) { + error("Failure getting NodeNames from " + "hostfile"); + exit(1); + } else { + debug("loaded nodes (%s) from hostfile", + opt.nodelist); + } + } + } else { + if (!_valid_node_list(&opt.nodelist)) + exit(1); + } + + /* set up the proc and node counts based on the arbitrary list + of nodes */ + if((opt.distribution == SLURM_DIST_ARBITRARY) + && (!opt.nodes_set || !opt.nprocs_set)) { + hostlist_t hl = hostlist_create(opt.nodelist); + if(!opt.nprocs_set) { + opt.nprocs_set = 1; + opt.nprocs = hostlist_count(hl); + } + if(!opt.nodes_set) { + opt.nodes_set = 1; + hostlist_uniq(hl); + opt.min_nodes = opt.max_nodes = hostlist_count(hl); + } + hostlist_destroy(hl); + } + if (opt.time_limit_str) { opt.time_limit = time_str2mins(opt.time_limit_str); if ((opt.time_limit < 0) && (opt.time_limit != INFINITE)) { diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c index bd2e88edbc90aed9acd2ffc4eb3b85adfcd8883e..64d237b7db6751085342c20693d1d98b0711a9fd 100644 --- a/src/salloc/salloc.c +++ b/src/salloc/salloc.c @@ -278,7 +278,9 @@ int main(int argc, char *argv[]) /* * Run the user's command. */ - env_array_for_job(&env, alloc, &desc); + if(env_array_for_job(&env, alloc, &desc) != SLURM_SUCCESS) + goto relinquish; + /* Add default task count for srun, if not already set */ if (opt.nprocs_set) env_array_append_fmt(&env, "SLURM_NPROCS", "%d", opt.nprocs); diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c index 197c3366c7a08e68649c00e8c84c3fba7bb17298..0bc67e2fb347b0434afefbd6033a29569ae02dc1 100644 --- a/src/sbatch/opt.c +++ b/src/sbatch/opt.c @@ -209,6 +209,31 @@ static void argerror(const char *msg, ...) # define argerror error #endif /* USE_ARGERROR */ +/* + * If the node list supplied is a file name, translate that into + * a list of nodes, we orphan the data pointed to + * RET true if the node list is a valid one + */ +static bool _valid_node_list(char **node_list_pptr) +{ + int count = NO_VAL; + + /* If we are using Arbitrary and we specified the number of + procs to use then we need exactly this many since we are + saying, lay it out this way! Same for max and min nodes. 
+ Other than that just read in as many in the hostfile */ + if(opt.nprocs_set) + count = opt.nprocs; + else if(opt.nodes_set) { + if(opt.max_nodes) + count = opt.max_nodes; + else if(opt.min_nodes) + count = opt.min_nodes; + } + + return verify_node_list(node_list_pptr, opt.distribution, count); +} + /* * _opt_default(): used by initialize_and_process_args to set defaults */ @@ -1190,6 +1215,8 @@ static void _set_options(int argc, char **argv) case 'x': xfree(opt.exc_nodes); opt.exc_nodes = xstrdup(optarg); + if (!_valid_node_list(&opt.exc_nodes)) + exit(1); break; case LONG_OPT_CONT: opt.contiguous = true; @@ -2033,6 +2060,50 @@ static bool _opt_verify(void) } /* else if (opt.nprocs_set && !opt.nodes_set) */ + if(!opt.nodelist) { + if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) { + /* make sure the file being read in has a / in + it to make sure it is a file in the + valid_node_list function */ + if(!strstr(opt.nodelist, "/")) { + char *add_slash = xstrdup("./"); + xstrcat(add_slash, opt.nodelist); + xfree(opt.nodelist); + opt.nodelist = add_slash; + } + opt.distribution = SLURM_DIST_ARBITRARY; + if (!_valid_node_list(&opt.nodelist)) { + error("Failure getting NodeNames from " + "hostfile"); + exit(1); + } else { + debug("loaded nodes (%s) from hostfile", + opt.nodelist); + } + } + } else { + if (!_valid_node_list(&opt.nodelist)) + exit(1); + } + + /* set up the proc and node counts based on the arbitrary list + of nodes */ + if((opt.distribution == SLURM_DIST_ARBITRARY) + && (!opt.nodes_set || !opt.nprocs_set)) { + hostlist_t hl = hostlist_create(opt.nodelist); + if(!opt.nprocs_set) { + opt.nprocs_set = 1; + opt.nprocs = hostlist_count(hl); + } + if(!opt.nodes_set) { + opt.nodes_set = 1; + hostlist_uniq(hl); + opt.min_nodes = opt.max_nodes + = hostlist_count(hl); + } + hostlist_destroy(hl); + } + if (opt.time_limit_str) { opt.time_limit = time_str2mins(opt.time_limit_str); if ((opt.time_limit < 0) && (opt.time_limit != INFINITE)) { diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c index 752a511798c81eb2fc4fb16593d3d1338540bf14..ce9f58bd67c5cf4849ae8cd399828b3492deb0ba 100644 --- a/src/sbatch/sbatch.c +++ b/src/sbatch/sbatch.c @@ -292,6 +292,12 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) "SLURM_GET_USER_ENV", "1"); } env_array_merge(&desc->environment, (const char **)environ); + if(opt.distribution == SLURM_DIST_ARBITRARY) { + env_array_overwrite_fmt(&desc->environment, + "SLURM_ARBITRARY_NODELIST", + "%s", desc->req_nodes); + } + desc->env_size = envcount (desc->environment); desc->argv = opt.script_argv; desc->argc = opt.script_argc; diff --git a/src/scancel/scancel.c b/src/scancel/scancel.c index 93184be23ef808bd914d1bd6d2b9f1055be3cb9a..3166bd7a7837de43cc8a6fff1d234645d6a6624b 100644 --- a/src/scancel/scancel.c +++ b/src/scancel/scancel.c @@ -89,6 +89,10 @@ typedef struct job_cancel_info { pthread_cond_t *num_active_threads_cond; } job_cancel_info_t; +static pthread_attr_t attr; +static int num_active_threads = 0; +static pthread_mutex_t num_active_threads_lock; +static pthread_cond_t num_active_threads_cond; int main (int argc, char *argv[]) @@ -247,70 +251,58 @@ _filter_job_records (void) } } - -/* _cancel_jobs - filter then cancel jobs or job steps per request */ static void -_cancel_jobs (void) +_cancel_jobs_by_state(uint16_t job_state) { int i, j, err; - job_info_t *job_ptr = NULL; - pthread_attr_t attr; job_cancel_info_t *cancel_info; + job_info_t *job_ptr = job_buffer_ptr->job_array; pthread_t dummy; - int num_active_threads = 0; - pthread_mutex_t 
num_active_threads_lock; - pthread_cond_t num_active_threads_cond; - - slurm_attr_init(&attr); - if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) - error("pthread_attr_setdetachstate error %m"); - - slurm_mutex_init(&num_active_threads_lock); - - if (pthread_cond_init(&num_active_threads_cond, NULL)) - error("pthread_cond_init error %m"); - - job_ptr = job_buffer_ptr->job_array ; /* Spawn a thread to cancel each job or job step marked for * cancellation */ for (i = 0; i < job_buffer_ptr->record_count; i++) { - if (job_ptr[i].job_id == 0) + if (job_ptr[i].job_id == 0) continue; - /* If cancelling a list of jobs, see if the current job + if ((job_state < JOB_END) && + (job_ptr[i].job_state != job_state)) + continue; + + /* If cancelling a list of jobs, see if the current job * included a step id */ if (opt.job_cnt) { for (j = 0; j < opt.job_cnt; j++ ) { if (job_ptr[i].job_id != opt.job_id[j]) continue; - if (opt.interactive && + if (opt.interactive && (_confirmation(i, opt.step_id[j]) == 0)) continue; - cancel_info = - (job_cancel_info_t *) + cancel_info = + (job_cancel_info_t *) xmalloc(sizeof(job_cancel_info_t)); cancel_info->job_id = job_ptr[i].job_id; cancel_info->sig = opt.signal; - cancel_info->num_active_threads = + cancel_info->num_active_threads = &num_active_threads; - cancel_info->num_active_threads_lock = + cancel_info->num_active_threads_lock = &num_active_threads_lock; - cancel_info->num_active_threads_cond = + cancel_info->num_active_threads_cond = &num_active_threads_cond; - pthread_mutex_lock( &num_active_threads_lock ); + pthread_mutex_lock(&num_active_threads_lock); num_active_threads++; while (num_active_threads > MAX_THREADS) { - pthread_cond_wait(&num_active_threads_cond, - &num_active_threads_lock); + pthread_cond_wait( + &num_active_threads_cond, + &num_active_threads_lock); } - pthread_mutex_unlock( &num_active_threads_lock ); + pthread_mutex_unlock(&num_active_threads_lock); if (opt.step_id[j] == SLURM_BATCH_SCRIPT) { - err = pthread_create(&dummy, &attr, + err = pthread_create(&dummy, &attr, _cancel_job_id, cancel_info); if (err) @@ -318,47 +310,63 @@ _cancel_jobs (void) break; } else { cancel_info->step_id = opt.step_id[j]; - err = pthread_create(&dummy, &attr, + err = pthread_create(&dummy, &attr, _cancel_step_id, cancel_info); if (err) _cancel_step_id(cancel_info); - /* Don't break here. Keep looping in - * case other steps from the same job + /* Don't break here. Keep looping in + * case other steps from the same job * are cancelled. 
*/ } } } else { - if (opt.interactive && + if (opt.interactive && (_confirmation(i, SLURM_BATCH_SCRIPT) == 0)) continue; - cancel_info = - (job_cancel_info_t *) + cancel_info = (job_cancel_info_t *) xmalloc(sizeof(job_cancel_info_t)); cancel_info->job_id = job_ptr[i].job_id; cancel_info->sig = opt.signal; cancel_info->num_active_threads = &num_active_threads; - cancel_info->num_active_threads_lock = + cancel_info->num_active_threads_lock = &num_active_threads_lock; - cancel_info->num_active_threads_cond = + cancel_info->num_active_threads_cond = &num_active_threads_cond; pthread_mutex_lock( &num_active_threads_lock ); num_active_threads++; while (num_active_threads > MAX_THREADS) { - pthread_cond_wait( &num_active_threads_cond, - &num_active_threads_lock ); + pthread_cond_wait(&num_active_threads_cond, + &num_active_threads_lock); } - pthread_mutex_unlock( &num_active_threads_lock ); + pthread_mutex_unlock(&num_active_threads_lock); - err = pthread_create(&dummy, &attr, - _cancel_job_id, + err = pthread_create(&dummy, &attr, _cancel_job_id, cancel_info); if (err) _cancel_job_id(cancel_info); } + job_ptr[i].job_id = 0; } +} + +/* _cancel_jobs - filter then cancel jobs or job steps per request */ +static void +_cancel_jobs (void) +{ + slurm_attr_init(&attr); + if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) + error("pthread_attr_setdetachstate error %m"); + + slurm_mutex_init(&num_active_threads_lock); + + if (pthread_cond_init(&num_active_threads_cond, NULL)) + error("pthread_cond_init error %m"); + + _cancel_jobs_by_state(JOB_PENDING); + _cancel_jobs_by_state(JOB_END); /* Wait for any spawned threads that have not finished */ pthread_mutex_lock( &num_active_threads_lock ); diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c index 7f388adf9be224afc5251e82834ef61fb500991a..43c84051c189265cebd4c7d49f12bccc2cd5f4f0 100644 --- a/src/sinfo/sinfo.c +++ b/src/sinfo/sinfo.c @@ -92,6 +92,7 @@ static void _update_nodes_for_bg(int node_scaling, enum { SINFO_BG_IDLE_STATE, SINFO_BG_ALLOC_STATE, + SINFO_BG_DRAINING_STATE, SINFO_BG_ERROR_STATE }; #endif @@ -347,16 +348,18 @@ static int _build_sinfo_data(List sinfo_list, for (i=0; i<node_msg->record_count; i++) { node_ptr = &(node_msg->node_array[i]); - /* in each node_ptr we overload the threads var - * with the number of cnodes in the used_cpus var - * will be used to tell how many cnodes are - * allocated and the cores will represent the cnodes + /* In each node_ptr we overload the threads var + * with the number of cnodes in drained state, the + * sockets var with the nodes in draining state, and + * the used_cpus var will be used to tell how many cnodes are + * allocated. The cores will also represent the cnodes * in an error state. So we can get an idle count by - * subtracting those 2 numbers from the total possible + * subtracting those 3 numbers from the total possible * cnodes (which are the idle cnodes). 
*/ node_ptr->threads = node_scaling; node_ptr->cores = 0; + node_ptr->sockets = 0; node_ptr->used_cpus = 0; if((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) continue; @@ -423,7 +426,7 @@ static int _build_sinfo_data(List sinfo_list, } else block_error = 0; node_ptr->threads = node_scaling; - for(i=0; i<3; i++) { + for(i=0; i<4; i++) { int norm = 0; switch(i) { case SINFO_BG_IDLE_STATE: @@ -446,9 +449,11 @@ static int _build_sinfo_data(List sinfo_list, * as it's current state */ node_ptr->threads -= - (node_ptr->cores + (node_ptr->cores + + node_ptr->sockets + node_ptr->used_cpus); - + if((int16_t)node_ptr->threads < 0) + node_ptr->threads = 0; if(node_ptr->threads == node_scaling) norm = 1; else { @@ -470,10 +475,21 @@ static int _build_sinfo_data(List sinfo_list, node_ptr->threads = node_ptr->used_cpus; break; + case SINFO_BG_DRAINING_STATE: + /* get the drained node count */ + if(!node_ptr->sockets) + continue; + node_ptr->node_state = + NODE_STATE_ALLOCATED; + node_ptr->node_state |= + NODE_STATE_DRAIN; + node_ptr->threads = node_ptr->sockets; + break; case SINFO_BG_ERROR_STATE: /* get the error node count */ if(!node_ptr->cores) continue; + node_ptr->node_state &= NODE_STATE_FLAGS; node_ptr->node_state |= @@ -861,18 +877,23 @@ static void _update_nodes_for_bg(int node_scaling, */ if(((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) - || (node_ptr->node_state & NODE_STATE_DRAIN)) + || (node_ptr->node_state & NODE_STATE_DRAIN)) { + if(bg_info_record->job_running + > NO_JOB_RUNNING) { + node_ptr->sockets += node_scaling; + node_ptr->cores -= node_scaling; + } continue; + } - if(bg_info_record->state - == RM_PARTITION_ERROR) { + if(bg_info_record->state == RM_PARTITION_ERROR) { node_ptr->cores += node_scaling; node_ptr->node_state |= NODE_STATE_DRAIN; node_ptr->node_state |= NODE_STATE_FAIL; - } else if(bg_info_record->job_running - > NO_JOB_RUNNING) + } else if(bg_info_record->job_running + > NO_JOB_RUNNING) node_ptr->used_cpus += node_scaling; else error("Hey we didn't get anything here"); diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am index 03613a8219dd6073e79f0c43811f8b7d95f1331d..e88328435912313e1846626d16d407593ea7ed7f 100644 --- a/src/slurmctld/Makefile.am +++ b/src/slurmctld/Makefile.am @@ -10,7 +10,7 @@ sbin_PROGRAMS = slurmctld slurmctld_LDADD = \ $(top_builddir)/src/common/libdaemonize.la \ - $(top_builddir)/src/common/libcommon.o -ldl + $(top_builddir)/src/api/libslurm.o -ldl slurmctld_SOURCES = \ diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in index 18cd8d4ab0b0d96c2d5e39726eab61b55577a815..386f375665d41e86412f1606b39f10d22c3de629 100644 --- a/src/slurmctld/Makefile.in +++ b/src/slurmctld/Makefile.in @@ -93,7 +93,7 @@ am_slurmctld_OBJECTS = acct_policy.$(OBJEXT) agent.$(OBJEXT) \ trigger_mgr.$(OBJEXT) slurmctld_OBJECTS = $(am_slurmctld_OBJECTS) slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \ - $(top_builddir)/src/common/libcommon.o + $(top_builddir)/src/api/libslurm.o slurmctld_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurmctld_LDFLAGS) $(LDFLAGS) -o $@ @@ -299,7 +299,7 @@ CLEANFILES = core.* INCLUDES = -I$(top_srcdir) slurmctld_LDADD = \ $(top_builddir)/src/common/libdaemonize.la \ - $(top_builddir)/src/common/libcommon.o -ldl + $(top_builddir)/src/api/libslurm.o -ldl slurmctld_SOURCES = \ acct_policy.c \ diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c index 
8138a14c9c704b09d947b3df6f69e89c9f55af1d..d8c27b27448c526dcb2bd66f7bb51db121976922 100644 --- a/src/slurmctld/acct_policy.c +++ b/src/slurmctld/acct_policy.c @@ -296,6 +296,8 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr) job_ptr->details->min_nodes, assoc_ptr->grp_nodes, assoc_ptr->acct); _cancel_job(job_ptr); + rc = false; + goto end_it; } else if ((assoc_ptr->grp_used_nodes + job_ptr->details->min_nodes) > assoc_ptr->grp_nodes) { diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c index 6887928a5775db7a6709b3b28c6cb0112ea77767..dfebbe10154a3c3291ce0ab1313c39321acb2afe 100644 --- a/src/slurmctld/agent.c +++ b/src/slurmctld/agent.c @@ -1476,8 +1476,6 @@ extern void mail_job_info (struct job_record *job_ptr, uint16_t mail_type) return; } -/* return true if the requests is to launch a batch job and the message - * destination is not yet powered up, otherwise return false */ /* Test if a batch launch request should be defered * RET -1: abort the request, pending job cancelled * 0: execute the request now @@ -1503,7 +1501,9 @@ static int _batch_launch_defer(queued_request_t *queued_req_ptr) launch_msg_ptr = (batch_job_launch_msg_t *)agent_arg_ptr->msg_args; job_ptr = find_job_record(launch_msg_ptr->job_id); - if ((job_ptr == NULL) || (job_ptr->job_state != JOB_RUNNING)) { + if ((job_ptr == NULL) || + ((job_ptr->job_state != JOB_RUNNING) && + (job_ptr->job_state != JOB_SUSPENDED))) { info("agent(batch_launch): removed pending request for " "cancelled job %u", launch_msg_ptr->job_id); @@ -1515,7 +1515,7 @@ static int _batch_launch_defer(queued_request_t *queued_req_ptr) node_ptr = find_node_record(hostname); if (node_ptr == NULL) { error("agent(batch_launch) removed pending request for job " - "%s, missing node %s", + "%u, missing node %s", launch_msg_ptr->job_id, agent_arg_ptr->hostlist); return -1; /* invalid request?? */ } diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c index cc7fb68b8274fc86873615c4741647d923d09f38..7ba26033685fc367021a4e480329c50369e5e52a 100644 --- a/src/slurmctld/controller.c +++ b/src/slurmctld/controller.c @@ -691,7 +691,7 @@ static void _init_config(void) * _slurm_rpc_reconfigure_controller function inside proc_req.c try * to keep these in sync. */ -extern int slurm_reconfigure(void) +static int _reconfigure_slurm(void) { /* Locks: Write configuration, job, node, and partition */ slurmctld_lock_t config_write_lock = { @@ -711,11 +711,14 @@ extern int slurm_reconfigure(void) _update_cred_key(); set_slurmctld_state_loc(); } + select_g_reconfigure(); /* notify select + * plugin too. This + * needs to happen + * inside the lock. 
*/ unlock_slurmctld(config_write_lock); start_power_mgr(&slurmctld_config.thread_id_power); trigger_reconfig(); slurm_sched_partition_change(); /* notify sched plugin */ - select_g_reconfigure(); /* notify select plugin too */ priority_g_reconfig(); /* notify priority plugin too */ return rc; @@ -764,7 +767,7 @@ static void *_slurmctld_signal_hand(void *no_data) break; case SIGHUP: /* kill -1 */ info("Reconfigure signal (SIGHUP) received"); - slurm_reconfigure(); + _reconfigure_slurm(); break; case SIGABRT: /* abort */ info("SIGABRT received"); diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c index 79900139287b505d4a842f5a9e51cd258dc2fa02..34bc923ad83421b4516622fc5cc3a63dfc1f6766 100644 --- a/src/slurmctld/job_mgr.c +++ b/src/slurmctld/job_mgr.c @@ -2376,7 +2376,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, if (job_desc->account == NULL) job_desc->account = xstrdup(assoc_rec.acct); if ((accounting_enforce & ACCOUNTING_ENFORCE_LIMITS) && - (!_validate_acct_policy(job_desc, part_ptr, &assoc_rec))) { + (!_validate_acct_policy(job_desc, part_ptr, assoc_ptr))) { info("_job_create: exceeded association's node or time limit " "for user %u", job_desc->user_id); error_code = ESLURM_ACCOUNTING_POLICY; @@ -3291,11 +3291,16 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, detail_ptr->cpus_per_task = 1; if (job_desc->job_min_procs != (uint16_t) NO_VAL) detail_ptr->job_min_procs = job_desc->job_min_procs; + if (job_desc->overcommit != (uint8_t) NO_VAL) + detail_ptr->overcommit = job_desc->overcommit; if (job_desc->ntasks_per_node != (uint16_t) NO_VAL) { detail_ptr->ntasks_per_node = job_desc->ntasks_per_node; - detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs, - (detail_ptr->cpus_per_task * - detail_ptr->ntasks_per_node)); + if (detail_ptr->overcommit == 0) { + detail_ptr->job_min_procs = + MAX(detail_ptr->job_min_procs, + (detail_ptr->cpus_per_task * + detail_ptr->ntasks_per_node)); + } } else { detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs, detail_ptr->cpus_per_task); @@ -3318,8 +3323,6 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, detail_ptr->out = xstrdup(job_desc->out); if (job_desc->work_dir) detail_ptr->work_dir = xstrdup(job_desc->work_dir); - if (job_desc->overcommit != (uint8_t) NO_VAL) - detail_ptr->overcommit = job_desc->overcommit; if (job_desc->begin_time > time(NULL)) detail_ptr->begin_time = job_desc->begin_time; job_ptr->select_jobinfo = @@ -3552,7 +3555,7 @@ void job_time_limit(void) info("Job %u timed out, " "assoc %u is at or exceeds " "group max cpu minutes limit %llu " - "with %Lf for account %s", + "with %llu for account %s", job_ptr->job_id, assoc->id, assoc->grp_cpu_mins, usage_mins, assoc->acct); @@ -3580,7 +3583,7 @@ void job_time_limit(void) info("Job %u timed out, " "assoc %u is at or exceeds " "max cpu minutes limit %llu " - "with %Lf for account %s", + "with %llu for account %s", job_ptr->job_id, assoc->id, assoc->max_cpu_mins_pj, job_cpu_usage_mins, assoc->acct); @@ -4693,6 +4696,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) error_code = ESLURM_DISABLED; else if (super_user || (job_ptr->priority > job_specs->priority)) { + job_ptr->details->nice = NICE_OFFSET; if(job_specs->priority == INFINITE) { job_ptr->direct_set_prio = 0; _set_job_prio(job_ptr); @@ -4715,8 +4719,11 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) if (IS_JOB_FINISHED(job_ptr)) error_code = ESLURM_DISABLED; else if (super_user || (job_specs->nice < NICE_OFFSET)) { + int64_t new_prio = 
job_ptr->priority; + new_prio += job_ptr->details->nice; + new_prio -= job_specs->nice; + job_ptr->priority = MAX(new_prio, 2); job_ptr->details->nice = job_specs->nice; - _set_job_prio(job_ptr); info("update_job: setting priority to %u for " "job_id %u", job_ptr->priority, @@ -6504,7 +6511,9 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, int timelimit_set = 0; int max_nodes_set = 0; char *user_name = assoc_ptr->user; + bool rc = true; + slurm_mutex_lock(&assoc_mgr_association_lock); while(assoc_ptr) { /* for validation we don't need to look at * assoc_ptr->grp_cpu_mins. @@ -6529,7 +6538,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, job_desc->min_nodes, assoc_ptr->grp_nodes, assoc_ptr->acct); - return false; + rc = false; + break; } else if (job_desc->max_nodes == 0 || (max_nodes_set && (job_desc->max_nodes @@ -6560,7 +6570,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, job_desc->user_id, assoc_ptr->grp_submit_jobs, assoc_ptr->acct); - return false; + rc = false; + break; } @@ -6599,7 +6610,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, job_desc->user_id, job_desc->min_nodes, assoc_ptr->max_nodes_pj); - return false; + rc = false; + break; } else if (job_desc->max_nodes == 0 || (max_nodes_set && (job_desc->max_nodes @@ -6618,7 +6630,7 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, job_desc->max_nodes = assoc_ptr->max_nodes_pj; } } - + if ((assoc_ptr->max_submit_jobs != NO_VAL) && (assoc_ptr->max_submit_jobs != INFINITE) && (assoc_ptr->used_submit_jobs @@ -6628,7 +6640,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, user_name, job_desc->user_id, assoc_ptr->max_submit_jobs); - return false; + rc = false; + break; } if ((assoc_ptr->max_wall_pj != NO_VAL) && @@ -6651,14 +6664,17 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc, user_name, job_desc->user_id, job_desc->time_limit, time_limit); - return false; + rc = false; + break; } } assoc_ptr = assoc_ptr->parent_assoc_ptr; parent = 1; } - return true; + slurm_mutex_unlock(&assoc_mgr_association_lock); + + return rc; } /* diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c index 3cd339a22922f35574f55dea51c5ade25caa18c0..3bd46a2b2d5a3e146458cbcbf9afdd3ef335a369 100644 --- a/src/slurmctld/job_scheduler.c +++ b/src/slurmctld/job_scheduler.c @@ -287,9 +287,13 @@ extern int schedule(void) static bool backfill_sched = false; static bool sched_test = false; static bool wiki_sched = false; + static int sched_timeout = 0; time_t now = time(NULL); DEF_TIMERS; + + if(!sched_timeout) + sched_timeout = MIN(slurm_get_msg_timeout(), 10); START_TIMER; if (!sched_test) { @@ -447,6 +451,11 @@ extern int schedule(void) delete_job_details(job_ptr); } } + + if((time(NULL) - now) >= sched_timeout) { + debug("schedule: loop taking to long breaking out"); + break; + } } bit_free(avail_node_bitmap); diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c index 57b730bf11b2283b42b77f5b2ec36ccb0f0759fb..f6a8f949a2eb29b8c2733ef1d98c59f256c9a380 100644 --- a/src/slurmctld/node_mgr.c +++ b/src/slurmctld/node_mgr.c @@ -97,6 +97,7 @@ static void _dump_node_state (struct node_record *dump_node_ptr, Buf buffer); static struct node_record * _find_alias_node_record (char *name); static int _hash_index (char *name); +static bool _is_node_drain(struct node_record *node_ptr); static void _list_delete_config (void *config_entry); static int _list_find_config (void *config_entry, void *key); static void _make_node_down(struct node_record 
*node_ptr, @@ -582,6 +583,20 @@ extern int load_all_node_state ( bool state_only ) node_ptr->last_idle = now; } xfree (node_name); + if(node_ptr) { + /* If the state is UNKNOWN we will assume IDLE + until the nodes check in. This is needed + for bluegene to set up drained nodes + correctly. */ + if((node_ptr->node_state & NODE_STATE_BASE) + == NODE_STATE_UNKNOWN) + node_ptr->node_state |= NODE_STATE_IDLE; + + select_g_update_node_state( + (node_ptr - node_record_table_ptr), + node_ptr->node_state); + } + } fini: info("Recovered state of %d nodes", node_cnt); @@ -1112,9 +1127,7 @@ int update_node ( update_node_msg_t * update_node_msg ) if (state_val == NODE_RESUME) { base_state &= NODE_STATE_BASE; if ((base_state == NODE_STATE_IDLE) && - ((node_ptr->node_state & NODE_STATE_DRAIN) - || (node_ptr->node_state & - NODE_STATE_FAIL))) { + _is_node_drain(node_ptr)) { clusteracct_storage_g_node_up( acct_db_conn, slurmctld_cluster_name, @@ -1264,11 +1277,10 @@ int update_node ( update_node_msg_t * update_node_msg ) } base_state = node_ptr->node_state & NODE_STATE_BASE; - if ((base_state != NODE_STATE_DOWN) - && ((node_ptr->node_state & (NODE_STATE_DRAIN | - NODE_STATE_FAIL)) == 0)) + if ((base_state != NODE_STATE_DOWN) && + !_is_node_drain(node_ptr)) { xfree(node_ptr->reason); - + } free (this_node_name); } hostlist_destroy (host_list); @@ -1825,11 +1837,12 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg) node_flags; node_ptr->last_idle = now; } - if ((node_flags & NODE_STATE_DRAIN) == 0) + if (!_is_node_drain(node_ptr)) { xfree(node_ptr->reason); - clusteracct_storage_g_node_up(acct_db_conn, + clusteracct_storage_g_node_up(acct_db_conn, slurmctld_cluster_name, node_ptr, now); + } } else if ((base_state == NODE_STATE_DOWN) && ((slurmctld_conf.ret2service == 2) || ((slurmctld_conf.ret2service == 1) && @@ -1846,12 +1859,14 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg) node_ptr->last_idle = now; } info ("node %s returned to service", reg_msg->node_name); - xfree(node_ptr->reason); reset_job_priority(); trigger_node_up(node_ptr); - clusteracct_storage_g_node_up(acct_db_conn, - slurmctld_cluster_name, - node_ptr, now); + if (!_is_node_drain(node_ptr)) { + xfree(node_ptr->reason); + clusteracct_storage_g_node_up(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); + } } else if ((base_state == NODE_STATE_ALLOCATED) && (reg_msg->job_count == 0)) { /* job vanished */ last_node_update = now; @@ -2027,16 +2042,21 @@ extern int validate_nodes_via_front_end( node_flags; node_ptr->last_idle = now; } - xfree(node_ptr->reason); - if ((node_flags & - (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) + if (!_is_node_drain(node_ptr)) { + xfree(node_ptr->reason); clusteracct_storage_g_node_up( acct_db_conn, slurmctld_cluster_name, node_ptr, now); + } + } else if ((base_state == NODE_STATE_DOWN) && - (slurmctld_conf.ret2service == 1)) { + ((slurmctld_conf.ret2service == 2) || + ((slurmctld_conf.ret2service == 1) && + (node_ptr->reason != NULL) && + (strncmp(node_ptr->reason, + "Not responding", 14) == 0)))) { updated_job = true; if (jobs_on_node) { node_ptr->node_state = @@ -2055,12 +2075,14 @@ extern int validate_nodes_via_front_end( else return_hostlist = hostlist_create( node_ptr->name); - xfree(node_ptr->reason); trigger_node_up(node_ptr); - clusteracct_storage_g_node_up( - acct_db_conn, - slurmctld_cluster_name, - node_ptr, now); + if (!_is_node_drain(node_ptr)) { + xfree(node_ptr->reason); + clusteracct_storage_g_node_up( + acct_db_conn, + 
slurmctld_cluster_name, + node_ptr, now); + } } else if ((base_state == NODE_STATE_ALLOCATED) && (jobs_on_node == 0)) { /* job vanished */ @@ -2195,12 +2217,13 @@ static void _node_did_resp(struct node_record *node_ptr) node_ptr->node_state = NODE_STATE_IDLE | node_flags; info("node_did_resp: node %s returned to service", node_ptr->name); - xfree(node_ptr->reason); trigger_node_up(node_ptr); - if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) + if (!_is_node_drain(node_ptr)) { + xfree(node_ptr->reason); clusteracct_storage_g_node_up(acct_db_conn, slurmctld_cluster_name, node_ptr, now); + } } base_state = node_ptr->node_state & NODE_STATE_BASE; if ((base_state == NODE_STATE_IDLE) @@ -2323,8 +2346,8 @@ void set_node_down (char *name, char *reason) return; } - if ((node_ptr->reason == NULL) - || (strncmp(node_ptr->reason, "Not responding", 14) == 0)) { + if ((node_ptr->reason == NULL) || + (strncmp(node_ptr->reason, "Not responding", 14) == 0)) { time_t now; char time_buf[64], time_str[32]; @@ -2760,3 +2783,11 @@ extern void build_config_feature_array(struct config_record *config_ptr) xfree(tmp_str); } } + +/* Return true if the node state is DRAIN or FAIL */ +static bool _is_node_drain(struct node_record *node_ptr) +{ + if (node_ptr->node_state & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) + return true; + return false; +} diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c index 7dda23dc8e33a88726beccea760161a3d0ce45d8..56c760bdd29eab08256aa23228d01da1aae6259e 100644 --- a/src/slurmctld/node_scheduler.c +++ b/src/slurmctld/node_scheduler.c @@ -349,26 +349,25 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size, int error_code = SLURM_SUCCESS, i; bitstr_t *feature_bitmap, *accumulate_bitmap = NULL; bitstr_t *save_avail_node_bitmap = NULL, *resv_bitmap; + time_t start_res = time(NULL); /* Mark nodes reserved for other jobs as off limit for this job */ - if (job_ptr->resv_name == NULL) { - time_t start_res = time(NULL); - rc = job_test_resv(job_ptr, &start_res, false, &resv_bitmap); - if ((rc != SLURM_SUCCESS) || - (job_ptr->details->req_node_bitmap && - (!bit_super_set(job_ptr->details->req_node_bitmap, - resv_bitmap)))) { - FREE_NULL_BITMAP(resv_bitmap); - return ESLURM_NODES_BUSY; /* reserved */ - } - if (resv_bitmap && - (!bit_equal(resv_bitmap, avail_node_bitmap))) { - bit_and(resv_bitmap, avail_node_bitmap); - save_avail_node_bitmap = avail_node_bitmap; - avail_node_bitmap = resv_bitmap; - } else - FREE_NULL_BITMAP(resv_bitmap); + rc = job_test_resv(job_ptr, &start_res, false, &resv_bitmap); + if ((rc != SLURM_SUCCESS) || + (bit_set_count(resv_bitmap) < min_nodes) || + (job_ptr->details->req_node_bitmap && + (!bit_super_set(job_ptr->details->req_node_bitmap, + resv_bitmap)))) { + FREE_NULL_BITMAP(resv_bitmap); + return ESLURM_NODES_BUSY; /* reserved */ } + if (resv_bitmap && + (!bit_equal(resv_bitmap, avail_node_bitmap))) { + bit_and(resv_bitmap, avail_node_bitmap); + save_avail_node_bitmap = avail_node_bitmap; + avail_node_bitmap = resv_bitmap; + } else + FREE_NULL_BITMAP(resv_bitmap); /* save job and request state */ saved_min_nodes = min_nodes; @@ -1304,7 +1303,8 @@ static int _build_node_list(struct job_record *job_ptr, if (job_ptr->resv_name) { /* Limit node selection to those in selected reservation */ time_t start_res = time(NULL); - rc = job_test_resv(job_ptr, &start_res, false, &usable_node_mask); + rc = job_test_resv(job_ptr, &start_res, false, + &usable_node_mask); if (rc != SLURM_SUCCESS) { job_ptr->state_reason = 
WAIT_RESERVATION; xfree(job_ptr->state_desc); diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c index d4b8356244f2848ebaec126462e76ee0253b0347..6708561648f3d2d87fee7cf09a55aa75c0879313 100644 --- a/src/slurmctld/proc_req.c +++ b/src/slurmctld/proc_req.c @@ -1771,6 +1771,10 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg) msg_to_slurmd(REQUEST_RECONFIGURE); } in_progress = false; + select_g_reconfigure(); /* notify select + * plugin too. This + * needs to happen + * inside the lock. */ unlock_slurmctld(config_write_lock); start_power_mgr(&slurmctld_config.thread_id_power); trigger_reconfig(); @@ -1787,7 +1791,6 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg) TIME_STR); slurm_send_rc_msg(msg, SLURM_SUCCESS); slurm_sched_partition_change(); /* notify sched plugin */ - select_g_reconfigure(); /* notify select plugin too */ priority_g_reconfig(); /* notify priority plugin too */ schedule(); /* has its own locks */ save_all_state(); diff --git a/src/slurmctld/reservation.c b/src/slurmctld/reservation.c index fb7715e2e262a4dd41307d9e22a81c6d4052955a..24f876de89db6ed2423a5c6053a510f5ce77cce3 100644 --- a/src/slurmctld/reservation.c +++ b/src/slurmctld/reservation.c @@ -73,6 +73,7 @@ #include "src/slurmctld/state_save.h" #define _RESV_DEBUG 0 +#define ONE_YEAR (365 * 24 * 60 * 60) #define RESV_MAGIC 0x3b82 /* Change RESV_STATE_VERSION value when changing the state save format @@ -98,7 +99,8 @@ static void _generate_resv_id(void); static void _generate_resv_name(resv_desc_msg_t *resv_ptr); static bool _is_account_valid(char *account); static bool _is_resv_used(slurmctld_resv_t *resv_ptr); -static bool _job_overlap(time_t start_time, uint16_t flags, bitstr_t *node_bitmap); +static bool _job_overlap(time_t start_time, uint16_t flags, + bitstr_t *node_bitmap); static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer, bool internal); static int _post_resv_create(slurmctld_resv_t *resv_ptr); @@ -527,10 +529,10 @@ static int _post_resv_update(slurmctld_resv_t *resv_ptr, resv.nodes = resv_ptr->node_list; /* Here if the reservation has started already we need - to mark a new start time for it if certain - variables are needed in accounting. Right now if - the assocs, nodes, flags or cpu count changes we need a - new start time of now. */ + * to mark a new start time for it if certain + * variables are needed in accounting. Right now if + * the assocs, nodes, flags or cpu count changes we need a + * new start time of now. 
*/ if((resv_ptr->start_time < now) && (resv.assocs || resv.nodes @@ -978,7 +980,8 @@ static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer, * Test if a new/updated reservation request will overlap running jobs * RET true if overlap */ -static bool _job_overlap(time_t start_time, uint16_t flags, bitstr_t *node_bitmap) +static bool _job_overlap(time_t start_time, uint16_t flags, + bitstr_t *node_bitmap) { ListIterator job_iterator; struct job_record *job_ptr; @@ -2349,15 +2352,29 @@ extern int job_test_resv_now(struct job_record *job_ptr) extern int job_test_resv(struct job_record *job_ptr, time_t *when, bool move_time, bitstr_t **node_bitmap) { - slurmctld_resv_t * resv_ptr; + slurmctld_resv_t * resv_ptr, *res2_ptr; time_t job_start_time, job_end_time; uint32_t duration; ListIterator iter; int i, rc = SLURM_SUCCESS; + if (job_ptr->time_limit == INFINITE) + duration = ONE_YEAR; + else if (job_ptr->time_limit != NO_VAL) + duration = (job_ptr->time_limit * 60); + else { /* partition time limit */ + if (job_ptr->part_ptr->max_time == INFINITE) + duration = ONE_YEAR; + else + duration = (job_ptr->part_ptr->max_time * 60); + } + job_start_time = job_end_time = *when; + job_end_time += duration; + *node_bitmap = (bitstr_t *) NULL; if (job_ptr->resv_name) { + bool overlap_resv = false; resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, _find_resv_name, job_ptr->resv_name); job_ptr->resv_ptr = resv_ptr; @@ -2376,7 +2393,38 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when, job_ptr->priority = 0; /* administrative hold */ return ESLURM_RESERVATION_INVALID; } + if (job_ptr->details->req_node_bitmap && + !bit_super_set(job_ptr->details->req_node_bitmap, + resv_ptr->node_bitmap)) { + return ESLURM_RESERVATION_INVALID; + } *node_bitmap = bit_copy(resv_ptr->node_bitmap); + + /* if there are any overlapping reservations, we need to + * prevent the job from using those nodes (e.g. MAINT nodes) */ + iter = list_iterator_create(resv_list); + if (!iter) + fatal("malloc: list_iterator_create"); + while ((res2_ptr = (slurmctld_resv_t *) list_next(iter))) { + if ((resv_ptr->flags & RESERVE_FLAG_MAINT) || + (res2_ptr == resv_ptr) || + (res2_ptr->node_bitmap == NULL) || + (res2_ptr->start_time >= job_end_time) || + (res2_ptr->end_time <= job_start_time)) + continue; + bit_not(res2_ptr->node_bitmap); + bit_and(*node_bitmap, res2_ptr->node_bitmap); + bit_not(res2_ptr->node_bitmap); + overlap_resv = true; + } + list_iterator_destroy(iter); +#if _RESV_DEBUG +{ + char *nodes=bitmap2node_name(*node_bitmap); + info("nodes:%s", nodes); + xfree(nodes); +} +#endif return SLURM_SUCCESS; } @@ -2388,20 +2436,7 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when, /* Job has no reservation, try to find time when this can * run and get it's required nodes (if any) */ - if (job_ptr->time_limit == INFINITE) - duration = 365 * 24 * 60 * 60; - else if (job_ptr->time_limit != NO_VAL) - duration = (job_ptr->time_limit * 60); - else { /* partition time limit */ - if (job_ptr->part_ptr->max_time == INFINITE) - duration = 365 * 24 * 60 * 60; - else - duration = (job_ptr->part_ptr->max_time * 60); - } for (i=0; ; i++) { - job_start_time = job_end_time = *when; - job_end_time += duration; - iter = list_iterator_create(resv_list); if (!iter) fatal("malloc: list_iterator_create"); @@ -2560,7 +2595,7 @@ extern void fini_job_resv_check(void) /* send all reservations to accounting. 
Only needed at * first registration */ -extern int send_resvs_to_accounting() +extern int send_resvs_to_accounting(void) { ListIterator itr = NULL; slurmctld_resv_t *resv_ptr; diff --git a/src/slurmctld/reservation.h b/src/slurmctld/reservation.h index d05ee8ee08079aa034776f862feb804753b2bbc3..6b6395f5e265eb0c200dcccdceade7c495da6cb4 100644 --- a/src/slurmctld/reservation.h +++ b/src/slurmctld/reservation.h @@ -68,7 +68,7 @@ extern void resv_fini(void); /* send all reservations to accounting. Only needed at * first registration */ -extern int send_resvs_to_accounting(); +extern int send_resvs_to_accounting(void); /* Set or clear NODE_STATE_MAINT for node_state as needed */ extern void set_node_maint_mode(void); diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h index bb9edfd1e0137c7491e33213ea9efaf1fa1690c8..2cadba0b37699e205f90fcb4ca6b2551933c321b 100644 --- a/src/slurmctld/slurmctld.h +++ b/src/slurmctld/slurmctld.h @@ -1486,10 +1486,6 @@ extern void set_slurmd_addr (void); */ extern void signal_step_tasks(struct step_record *step_ptr, uint16_t signal); -/* Read configuration file. - * Same name as API function for use in accounting_storage plugin */ -extern int slurm_reconfigure(void); - /* * slurmctld_shutdown - wake up slurm_rpc_mgr thread via signal * RET 0 or error code diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c index b08bb0fabc805baa9826b71f782c8674df4473f5..7e86dff234c91d1f17119be7e9d80fed141c537a 100644 --- a/src/slurmctld/step_mgr.c +++ b/src/slurmctld/step_mgr.c @@ -1360,12 +1360,14 @@ step_create(job_step_create_request_msg_t *step_specs, if ((step_specs->resv_port_cnt != (uint16_t) NO_VAL) && (step_specs->resv_port_cnt == 0)) { - /* reserved port count set to max task count any node */ + /* reserved port count set to maximum task count on + * any node plus one */ for (i=0; i<step_ptr->step_layout->node_cnt; i++) { step_specs->resv_port_cnt = MAX(step_specs->resv_port_cnt, step_ptr->step_layout->tasks[i]); } + step_specs->resv_port_cnt++; } if (step_specs->resv_port_cnt != (uint16_t) NO_VAL) { step_ptr->resv_port_cnt = step_specs->resv_port_cnt; diff --git a/src/slurmd/slurmd/get_mach_stat.c b/src/slurmd/slurmd/get_mach_stat.c index c5c83f145a64d67b9c61242eb42ace34a8e6b906..22af247c36382b554424b23fd51b4d9417653760 100644 --- a/src/slurmd/slurmd/get_mach_stat.c +++ b/src/slurmd/slurmd/get_mach_stat.c @@ -89,7 +89,7 @@ int chk_cpuinfo_float(char *buffer, char *keyword, float *val); #if DEBUG_MODULE #define DEBUG_DETAIL 1 -#define debug0 printf +#define debug printf #define debug1 printf #define debug2 printf #define debug3 printf diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c index 93e95b3e0e90ae413add51ff8ed78655297e6670..39bb28ca70241cc1527c5bbce9551f008cc81da3 100644 --- a/src/slurmd/slurmd/req.c +++ b/src/slurmd/slurmd/req.c @@ -740,7 +740,7 @@ _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid, if (i_last_bit <= i_first_bit) error("step credential has no CPUs selected"); else { - i = conf->conf_cpus / (i_last_bit - i_first_bit); + i = conf->cpus / (i_last_bit - i_first_bit); if (i > 1) alloc_lps *= i; } @@ -1000,7 +1000,7 @@ _set_batch_job_limits(slurm_msg_t *msg) if (last_bit < 1) error("Batch job credential allocates no CPUs"); else { - i = conf->conf_cpus / last_bit; + i = conf->cpus / last_bit; if (i > 1) alloc_lps *= i; } @@ -2850,7 +2850,17 @@ _rpc_terminate_job(slurm_msg_t *msg) slurm_send_rc_msg(msg, ESLURMD_KILL_JOB_ALREADY_COMPLETE); slurm_cred_begin_expiration(conf->vctx, req->job_id); + 
save_cred_state(conf->vctx); _waiter_complete(req->job_id); + + /* + * The controller needs to get MESSAGE_EPILOG_COMPLETE to bring + * the job out of "completing" state. Otherwise, the job + * could remain "completing" unnecessarily, until the request + * to terminate is resent. + */ + _sync_messages_kill(req); + _epilog_complete(req->job_id, rc); return; } #endif diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c index c9dd0d66f75d08976bd3c04f55399f4abb34453a..e002a1739279de15e245271a605cf442fb7a5c67 100644 --- a/src/slurmd/slurmd/slurmd.c +++ b/src/slurmd/slurmd/slurmd.c @@ -1,6 +1,6 @@ /*****************************************************************************\ * src/slurmd/slurmd/slurmd.c - main slurm node server daemon - * $Id: slurmd.c 17951 2009-06-23 22:51:55Z da $ + * $Id: slurmd.c 18662 2009-09-09 23:09:09Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Copyright (C) 2008-2009 Lawrence Livermore National Security. @@ -78,6 +78,8 @@ #include "src/common/forward.h" #include "src/common/bitstring.h" #include "src/common/stepd_api.h" +#include "src/common/node_select.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/slurmd/slurmd/slurmd.h" #include "src/slurmd/slurmd/req.h" @@ -674,11 +676,31 @@ _read_config() &conf->block_map_size, &conf->block_map, &conf->block_map_inv); - conf->cpus = conf->actual_cpus; - conf->sockets = conf->actual_sockets; - conf->cores = conf->actual_cores; - conf->threads = conf->actual_threads; + if(cf->fast_schedule && + ((conf->conf_cpus != conf->actual_cpus) || + (conf->sockets != conf->actual_sockets) || + (conf->cores != conf->actual_cores) || + (conf->threads != conf->actual_threads))) { + info("Node configuration differs from hardware\n" + " Procs=%u:%u(hw) Sockets=%u:%u(hw)\n" + " CoresPerSocket%u:%u(hw) ThreadsPerCore:%u:%u(hw)", + conf->conf_cpus, conf->actual_cpus, + conf->conf_sockets, conf->actual_sockets, + conf->conf_cores, conf->actual_cores, + conf->conf_threads, conf->actual_threads); + } + if((cf->fast_schedule == 0) || (conf->actual_cpus < conf->conf_cpus)) { + conf->cpus = conf->actual_cpus; + conf->sockets = conf->actual_sockets; + conf->cores = conf->actual_cores; + conf->threads = conf->actual_threads; + } else { + conf->cpus = conf->conf_cpus; + conf->sockets = conf->conf_sockets; + conf->cores = conf->conf_cores; + conf->threads = conf->conf_threads; + } get_memory(&conf->real_memory_size); cf = slurm_conf_lock(); @@ -1101,7 +1123,6 @@ static int _slurmd_fini() { save_cred_state(conf->vctx); - int slurm_proctrack_init(); switch_fini(); slurmd_task_fini(); slurm_conf_destroy(); @@ -1109,6 +1130,8 @@ _slurmd_fini() slurm_auth_fini(); slurmd_req(NULL); /* purge memory allocated by slurmd_req() */ fini_setproctitle(); + slurm_select_fini(); + slurm_jobacct_gather_fini(); return SLURM_SUCCESS; } diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c index fc1bb51006aa86976f6bf4cee47e9e98130ce7f9..772f07718ba2fe3ffec1fd8089b1d05f2196e73c 100644 --- a/src/slurmd/slurmstepd/mgr.c +++ b/src/slurmd/slurmstepd/mgr.c @@ -1,6 +1,6 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd - * $Id: mgr.c 18075 2009-07-07 23:39:17Z jette $ + * $Id: mgr.c 18638 2009-09-08 21:54:27Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The 
Regents of the University of California. * Copyright (C) 2008-2009 Lawrence Livermore National Security. @@ -290,7 +290,7 @@ slurmd_job_t * mgr_launch_batch_job_setup(batch_job_launch_msg_t *msg, slurm_addr *cli) { slurmd_job_t *job = NULL; - + if (!(job = job_batch_job_create(msg))) { error("job_batch_job_create() failed: %m"); return NULL; @@ -332,8 +332,8 @@ cleanup1: if (job->aborted) verbose("job %u abort complete", job->jobid); else if (msg->step_id == SLURM_BATCH_SCRIPT) { - _send_complete_batch_script_msg(job, - ESLURMD_CREATE_BATCH_DIR_ERROR, -1); + _send_complete_batch_script_msg( + job, ESLURMD_CREATE_BATCH_DIR_ERROR, -1); } else _send_step_complete_msgs(job); @@ -1388,7 +1388,7 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag) job->envtp->distribution = -1; job->envtp->batch_flag = job->batch; - setup_env(job->envtp); + setup_env(job->envtp, false); job->env = job->envtp->env; if (job->task_epilog) { _run_script_as_user("user task_epilog", diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c index ffde558c3f93a5f33d3500314cc631f146b3e57c..0ac6d6c44843d8aa1d5465978d72e9ec692e4f82 100644 --- a/src/slurmd/slurmstepd/task.c +++ b/src/slurmd/slurmstepd/task.c @@ -382,7 +382,7 @@ exec_task(slurmd_job_t *job, int i, int waitfd) job->envtp->distribution = -1; job->envtp->ckpt_dir = xstrdup(job->ckpt_dir); job->envtp->batch_flag = job->batch; - setup_env(job->envtp); + setup_env(job->envtp, false); setenvf(&job->envtp->env, "SLURMD_NODENAME", "%s", conf->node_name); job->env = job->envtp->env; job->envtp->env = NULL; diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c index 3ac60f7dbce721cbcbcce22dec6c838f6284b114..52c22b5423f8198c8e9cd8d0f287eccab7a31e36 100644 --- a/src/smap/job_functions.c +++ b/src/smap/job_functions.c @@ -235,7 +235,7 @@ static void _print_header_job(void) main_xcord = 1; main_ycord++; } else { - printf("JOBID "); + printf(" JOBID "); printf("PARTITION "); #ifdef HAVE_BG printf(" BG_BLOCK "); diff --git a/src/squeue/opts.c b/src/squeue/opts.c index e8552e8e706ed0d039fb98398a28ab4aef040e8e..c31b8089a85eada81c54e83a8a0d250ab0e1cd92 100644 --- a/src/squeue/opts.c +++ b/src/squeue/opts.c @@ -1,9 +1,10 @@ /****************************************************************************\ * opts.c - srun command line option parsing * - * $Id: opts.c 16616 2009-02-20 17:00:27Z jette $ + * $Id: opts.c 18308 2009-08-04 16:31:06Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008-2009 Lawrence Livermore National Security * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov> * CODE-OCEC-09-009. All rights reserved. @@ -18,7 +19,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -124,8 +125,9 @@ parse_command_line( int argc, char* argv[] ) if ( ( env_val = getenv("SQUEUE_SORT") ) ) params.sort = xstrdup(env_val); - while((opt_char = getopt_long(argc, argv, "ahi:j::ln:o:p:s::S:t:u:U:vV", - long_options, &option_index)) != -1) { + while((opt_char = getopt_long(argc, argv, + "ahi:j::ln:o:p:s::S:t:u:U:vV", + long_options, &option_index)) != -1) { switch (opt_char) { case (int)'?': fprintf(stderr, "Try \"squeue --help\" " @@ -186,7 +188,7 @@ parse_command_line( int argc, char* argv[] ) if (optarg) { params.steps = xstrdup(optarg); params.step_list = - _build_step_list( params.steps ); + _build_step_list(params.steps); } params.step_flag = true; break; @@ -438,7 +440,7 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'S') - step_format_add_time_start( params.format_list, + step_format_add_time_start( params.format_list, field_size, right_justify, suffix ); @@ -453,8 +455,9 @@ extern int parse_format( char* format ) right_justify, suffix ); else - error ("Invalid job step format specification: %c", - field[0] ); + error ( "Invalid job step format " + "specification: %c", + field[0] ); } else { if (field[0] == 'a') job_format_add_account( params.format_list, @@ -518,7 +521,7 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'H') - job_format_add_min_sockets( params.format_list, + job_format_add_min_sockets( params.format_list, field_size, right_justify, suffix ); @@ -537,7 +540,7 @@ extern int parse_format( char* format ) field_size, right_justify, suffix ); else if (field[0] == 'J') - job_format_add_min_threads( params.format_list, + job_format_add_min_threads( params.format_list, field_size, right_justify, suffix ); @@ -546,6 +549,11 @@ extern int parse_format( char* format ) field_size, right_justify, suffix ); + else if (field[0] == 'L') + job_format_add_time_left( params.format_list, + field_size, + right_justify, + suffix ); else if (field[0] == 'm') job_format_add_min_memory( params.format_list, field_size, @@ -591,7 +599,8 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'Q') - job_format_add_priority_long( params.format_list, + job_format_add_priority_long( + params.format_list, field_size, right_justify, suffix ); @@ -601,12 +610,13 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'R') - job_format_add_reason_list( params.format_list, + job_format_add_reason_list( params.format_list, field_size, right_justify, suffix ); else if (field[0] == 's') - job_format_add_select_jobinfo( params.format_list, + job_format_add_select_jobinfo( + params.format_list, field_size, right_justify, suffix ); @@ -637,7 +647,7 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'v') - job_format_add_reservation( params.format_list, + job_format_add_reservation( params.format_list, field_size, right_justify, suffix ); @@ -652,7 +662,7 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'X') - job_format_add_num_sockets( params.format_list, + job_format_add_num_sockets( params.format_list, field_size, right_justify, suffix ); @@ -662,7 +672,7 @@ extern int parse_format( char* format ) right_justify, suffix ); else if (field[0] == 'Z') - job_format_add_num_threads( params.format_list, + job_format_add_num_threads( params.format_list, field_size, 
right_justify, suffix ); diff --git a/src/squeue/print.c b/src/squeue/print.c index 2a222df3fc786f3fee5e8731ca226a19a8791f82..22832d5c2b0b1a14d7d7a751be4cc187c722c3f9 100644 --- a/src/squeue/print.c +++ b/src/squeue/print.c @@ -2,7 +2,7 @@ * print.c - squeue print job functions ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. - * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2008-2009 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, * Morris Jette <jette1@llnl.gov>, et. al. @@ -450,6 +450,24 @@ int _print_job_job_state_compact(job_info_t * job, int width, bool right, return SLURM_SUCCESS; } +int _print_job_time_left(job_info_t * job, int width, bool right, + char* suffix) +{ + if (job == NULL) /* Print the Header instead */ + _print_str("TIME_LEFT", width, right, true); + else if (job->time_limit == INFINITE) + _print_str("UNLIMITED", width, right, true); + else if (job->time_limit == NO_VAL) + _print_str("NOT_SET", width, right, true); + else { + time_t time_left = job->time_limit * 60 - job_time_used(job); + _print_secs(time_left, width, right, false); + } + if (suffix) + printf("%s", suffix); + return SLURM_SUCCESS; +} + int _print_job_time_limit(job_info_t * job, int width, bool right, char* suffix) { diff --git a/src/squeue/print.h b/src/squeue/print.h index 932648872e0f8afb1171356d258437b6656864df..c10b5fbdfee13bb1758136b224115ca8389aebcb 100644 --- a/src/squeue/print.h +++ b/src/squeue/print.h @@ -1,7 +1,8 @@ /*****************************************************************************\ * print.h - squeue print job definitions ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008-2009 Lawrence Livermore National Security * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov> * CODE-OCEC-09-009. All rights reserved. @@ -16,7 +17,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -107,6 +108,9 @@ int job_format_add_function(List list, int width, bool right_justify, #define job_format_add_job_state_compact(list,wid,right,suffix) \ job_format_add_function(list,wid,right,suffix, \ _print_job_job_state_compact) +#define job_format_add_time_left(list,wid,right,suffix) \ + job_format_add_function(list,wid,right,suffix, \ + _print_job_time_left) #define job_format_add_time_limit(list,wid,right,suffix) \ job_format_add_function(list,wid,right,suffix, \ _print_job_time_limit) @@ -202,6 +206,8 @@ int _print_job_job_state(job_info_t * job, int width, bool right_justify, char* suffix); int _print_job_job_state_compact(job_info_t * job, int width, bool right_justify, char* suffix); +int _print_job_time_left(job_info_t * job, int width, bool right_justify, + char* suffix); int _print_job_time_limit(job_info_t * job, int width, bool right_justify, char* suffix); int _print_job_time_used(job_info_t * job, int width, bool right_justify, diff --git a/src/squeue/sort.c b/src/squeue/sort.c index 1deeb837b44c86394d03d2351dd7b3fef4c12b51..900ab7f360bc742e4131710df5ea1ee38cffba9d 100644 --- a/src/squeue/sort.c +++ b/src/squeue/sort.c @@ -1,7 +1,8 @@ /*****************************************************************************\ * sort.c - squeue sorting functions ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008-2009 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, et. al. * CODE-OCEC-09-009. All rights reserved. 
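The "%L" format added in print.c above and the matching sort key in the hunk below both reduce to the same arithmetic: remaining time is the job's time limit (kept in minutes) converted to seconds, minus the seconds already used, with the INFINITE and NO_VAL sentinels meaning no finite limit. A minimal standalone sketch of that calculation, assuming stand-in sentinel values and an explicit elapsed-seconds argument in place of squeue's job_time_used(), is:

#include <stdint.h>

#define TIME_INFINITE 0xffffffffu   /* assumed stand-in for slurm.h's INFINITE */
#define TIME_NO_VAL   0xfffffffeu   /* assumed stand-in for slurm.h's NO_VAL   */

/* Remaining run time in seconds, or -1 when no finite limit applies
 * (the "%L" column prints UNLIMITED or NOT_SET in those cases). */
static long time_left_secs(uint32_t limit_mins, long used_secs)
{
	if ((limit_mins == TIME_INFINITE) || (limit_mins == TIME_NO_VAL))
		return -1;
	return (long)limit_mins * 60 - used_secs;
}

When sorting on "%L", the comparator added below treats jobs without a finite limit as having the largest possible time left, so they group together at one end of the listing.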
@@ -57,6 +58,7 @@ static int _sort_job_by_name(void *void1, void *void2); static int _sort_job_by_state(void *void1, void *void2); static int _sort_job_by_state_compact(void *void1, void *void2); static int _sort_job_by_time_end(void *void1, void *void2); +static int _sort_job_by_time_left(void *void1, void *void2); static int _sort_job_by_time_limit(void *void1, void *void2); static int _sort_job_by_time_start(void *void1, void *void2); static int _sort_job_by_time_used(void *void1, void *void2); @@ -135,6 +137,8 @@ void sort_job_list(List job_list) list_sort(job_list, _sort_job_by_min_threads); else if (params.sort[i] == 'l') list_sort(job_list, _sort_job_by_time_limit); + else if (params.sort[i] == 'L') + list_sort(job_list, _sort_job_by_time_left); else if (params.sort[i] == 'm') list_sort(job_list, _sort_job_by_min_memory); else if (params.sort[i] == 'M') @@ -527,13 +531,45 @@ static int _sort_job_by_time_end(void *void1, void *void2) return diff; } +static int _sort_job_by_time_left(void *void1, void *void2) +{ + int diff; + job_info_t *job1 = (job_info_t *) void1; + job_info_t *job2 = (job_info_t *) void2; + time_t time1, time2; + + if ((job1->time_limit == INFINITE) || (job1->time_limit == NO_VAL)) + time1 = INFINITE; + else + time1 = job1->time_limit - job_time_used(job1); + if ((job2->time_limit == INFINITE) || (job2->time_limit == NO_VAL)) + time2 = INFINITE; + else + time2 = job2->time_limit - job_time_used(job2); + if (time1 > time2) + diff = 1; + else if (time1 == time2) + diff = 0; + else + diff = -1; + + if (reverse_order) + diff = -diff; + return diff; +} + static int _sort_job_by_time_limit(void *void1, void *void2) { int diff; job_info_t *job1 = (job_info_t *) void1; job_info_t *job2 = (job_info_t *) void2; - diff = job1->time_limit - job2->time_limit; + if (job1->time_limit > job2->time_limit) + diff = 1; + else if (job1->time_limit == job2->time_limit) + diff = 0; + else + diff = -1; if (reverse_order) diff = -diff; diff --git a/src/srun/opt.c b/src/srun/opt.c index 48a444c4168736b054c4812e7648695b02f184fe..4dfea92c5c4df03be281c9e73bce26d5f8ef96f4 100644 --- a/src/srun/opt.c +++ b/src/srun/opt.c @@ -242,34 +242,22 @@ int initialize_and_process_args(int argc, char *argv[]) */ static bool _valid_node_list(char **node_list_pptr) { - char *nodelist = NULL; - - if (strchr(*node_list_pptr, '/') == NULL) - return true; /* not a file name */ + int count = NO_VAL; /* If we are using Arbitrary and we specified the number of procs to use then we need exactly this many since we are saying, lay it out this way! Same for max and min nodes. 
Other than that just read in as many in the hostfile */ - if(opt.distribution == SLURM_DIST_ARBITRARY) { - if(opt.nprocs_set) - nodelist = slurm_read_hostfile(*node_list_pptr, - opt.nprocs); - else if(opt.max_nodes) - nodelist = slurm_read_hostfile(*node_list_pptr, - opt.max_nodes); + if(opt.nprocs_set) + count = opt.nprocs; + else if(opt.nodes_set) { + if(opt.max_nodes) + count = opt.max_nodes; else if(opt.min_nodes) - nodelist = slurm_read_hostfile(*node_list_pptr, - opt.min_nodes); - } else - nodelist = slurm_read_hostfile(*node_list_pptr, NO_VAL); - - if (nodelist == NULL) - return false; - xfree(*node_list_pptr); - *node_list_pptr = xstrdup(nodelist); - free(nodelist); - return true; + count = opt.min_nodes; + } + + return verify_node_list(node_list_pptr, opt.distribution, count); } /* @@ -1580,7 +1568,24 @@ static bool _opt_verify(void) if (!_valid_node_list(&opt.nodelist)) exit(1); } - + + /* set up the proc and node counts based on the arbitrary list + of nodes */ + if((opt.distribution == SLURM_DIST_ARBITRARY) + && (!opt.nodes_set || !opt.nprocs_set)) { + hostlist_t hl = hostlist_create(opt.nodelist); + if(!opt.nprocs_set) { + opt.nprocs_set = 1; + opt.nprocs = hostlist_count(hl); + } + if(!opt.nodes_set) { + opt.nodes_set = 1; + hostlist_uniq(hl); + opt.min_nodes = opt.max_nodes = hostlist_count(hl); + } + hostlist_destroy(hl); + } + /* now if max is set make sure we have <= max_nodes in the * nodelist but only if it isn't arbitrary since the user has * laid it out how it should be so don't mess with it print an @@ -2071,7 +2076,6 @@ static void _help(void) "Usage: srun [OPTIONS...] executable [args...]\n" "\n" "Parallel run options:\n" -" -b, --batch submit as batch job for later execution\n" " --begin=time defer job until HH:MM DD/MM/YY\n" " -c, --cpus-per-task=ncpus number of cpus required per task\n" " --checkpoint=time job step checkpoint interval\n" diff --git a/src/srun/srun.c b/src/srun/srun.c index b168cf8399d4b96b3750db1ec83e8e8ca3e49727..9698b78ddacda0965f5019ac216e02a1d6fd5e01 100644 --- a/src/srun/srun.c +++ b/src/srun/srun.c @@ -351,7 +351,7 @@ int srun(int ac, char **av) env->ws_col = job->ws_col; env->ws_row = job->ws_row; } - setup_env(env); + setup_env(env, opt.preserve_env); xfree(env->task_count); xfree(env); diff --git a/src/sview/admin_info.c b/src/sview/admin_info.c index 8262aa7175d948c3f453c313a34aeeae32aa9529..b32e201057c7572b199ccb97ca4434816f5b397c 100644 --- a/src/sview/admin_info.c +++ b/src/sview/admin_info.c @@ -78,13 +78,14 @@ extern void set_menus_admin(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_admin, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_admin, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_admin); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/block_info.c b/src/sview/block_info.c index 0ad3790eef0666dd7c5b1cfc2d286132b108738f..412b95efe7dd8106ed9493f25b5623f0892937ed 100644 --- a/src/sview/block_info.c +++ b/src/sview/block_info.c @@ -1154,13 +1154,14 @@ extern void set_menus_block(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_block, SORTID_CNT); + make_fields_menu(NULL, menu, 
display_data_block, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_block); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/common.c b/src/sview/common.c index 43f69091ac18a6efe92b5b3ecc5f02508281b9af..cbb3fc8c8823b67d68b116b5576f1161c4c09d19 100644 --- a/src/sview/common.c +++ b/src/sview/common.c @@ -455,12 +455,17 @@ extern void *get_pointer(GtkTreeView *tree_view, GtkTreePath *path, int loc) return ptr; } -extern void make_fields_menu(GtkMenu *menu, display_data_t *display_data, - int count) +extern void make_fields_menu(popup_info_t *popup_win, GtkMenu *menu, + display_data_t *display_data, int count) { GtkWidget *menuitem = NULL; display_data_t *first_display_data = display_data; int i = 0; + + /* we don't want to display anything on the full info page */ + if(popup_win && popup_win->spec_info->type == INFO_PAGE) + return; + for(i=0; i<count; i++) { while(display_data++) { if(display_data->id == -1) @@ -469,15 +474,25 @@ extern void make_fields_menu(GtkMenu *menu, display_data_t *display_data, continue; if(display_data->id != i) continue; + menuitem = gtk_check_menu_item_new_with_label( display_data->name); gtk_check_menu_item_set_active( GTK_CHECK_MENU_ITEM(menuitem), display_data->show); - g_signal_connect(menuitem, "toggled", - G_CALLBACK(_toggle_state_changed), - display_data); + if(popup_win) { + display_data->user_data = popup_win; + g_signal_connect( + menuitem, "toggled", + G_CALLBACK(_popup_state_changed), + display_data); + } else { + g_signal_connect( + menuitem, "toggled", + G_CALLBACK(_toggle_state_changed), + display_data); + } gtk_menu_shell_append(GTK_MENU_SHELL(menu), menuitem); break; } @@ -513,33 +528,6 @@ extern void make_options_menu(GtkTreeView *tree_view, GtkTreePath *path, } } -extern void make_popup_fields_menu(popup_info_t *popup_win, GtkMenu *menu) -{ - GtkWidget *menuitem = NULL; - display_data_t *display_data = popup_win->display_data; - - /* we don't want to display anything on the full info page */ - if(popup_win->spec_info->type == INFO_PAGE) - return; - - while(display_data++) { - if(display_data->id == -1) - break; - if(!display_data->name) - continue; - display_data->user_data = popup_win; - menuitem = - gtk_check_menu_item_new_with_label(display_data->name); - gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(menuitem), - display_data->show); - g_signal_connect(menuitem, "toggled", - G_CALLBACK(_popup_state_changed), - display_data); - gtk_menu_shell_append(GTK_MENU_SHELL(menu), menuitem); - } -} - - extern GtkScrolledWindow *create_scrolled_window() { GtkScrolledWindow *scrolled_window = NULL; @@ -615,7 +603,8 @@ extern GtkTreeView *create_treeview_2cols_attach_to_table(GtkTable *table) { GtkTreeView *tree_view = GTK_TREE_VIEW(gtk_tree_view_new()); GtkTreeStore *treestore = - gtk_tree_store_new(2, GTK_TYPE_STRING, GTK_TYPE_STRING); + gtk_tree_store_new(3, GTK_TYPE_STRING, + GTK_TYPE_STRING, GTK_TYPE_STRING); GtkTreeViewColumn *col = gtk_tree_view_column_new(); GtkCellRenderer *renderer = gtk_cell_renderer_text_new(); @@ -628,6 +617,8 @@ extern GtkTreeView *create_treeview_2cols_attach_to_table(GtkTable *table) gtk_tree_view_column_pack_start(col, renderer, TRUE); gtk_tree_view_column_add_attribute(col, renderer, "text", DISPLAY_NAME); + gtk_tree_view_column_add_attribute(col, renderer, + "font", DISPLAY_FONT); 
gtk_tree_view_column_set_title(col, "Name"); gtk_tree_view_column_set_resizable(col, true); gtk_tree_view_column_set_expand(col, true); @@ -638,11 +629,21 @@ extern GtkTreeView *create_treeview_2cols_attach_to_table(GtkTable *table) gtk_tree_view_column_pack_start(col, renderer, TRUE); gtk_tree_view_column_add_attribute(col, renderer, "text", DISPLAY_VALUE); + gtk_tree_view_column_add_attribute(col, renderer, + "font", DISPLAY_FONT); gtk_tree_view_column_set_title(col, "Value"); gtk_tree_view_column_set_resizable(col, true); gtk_tree_view_column_set_expand(col, true); gtk_tree_view_append_column(tree_view, col); + col = gtk_tree_view_column_new(); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_column_pack_start(col, renderer, TRUE); + gtk_tree_view_column_set_visible(col, false); + gtk_tree_view_column_add_attribute(col, renderer, + "text", DISPLAY_FONT); + gtk_tree_view_append_column(tree_view, col); + g_object_unref(treestore); return tree_view; } @@ -760,7 +761,18 @@ extern gboolean row_clicked(GtkTreeView *tree_view, GdkEventButton *event, gtk_tree_selection_unselect_all(selection); gtk_tree_selection_select_path(selection, path); - if(event->x <= 20) { + if(event->x <= 2) { + /* When you try to resize a column this event happens + for some reason. Resizing always happens in the + first 2 of x so if that happens just return and + continue. */ + did_something = FALSE; + } else if(event->x <= 20) { + /* This should also be included with above since there + is no reason for us to handle this here since it is + already handled automatically. Just to make sure + we will keep it this way until 2.1 just so we + don't break anything. */ if(!gtk_tree_view_expand_row(tree_view, path, FALSE)) gtk_tree_view_collapse_row(tree_view, path); did_something = TRUE; @@ -1226,6 +1238,55 @@ found: DISPLAY_NAME, name, DISPLAY_VALUE, value, -1); + + return; +} + +extern void add_display_treestore_line_with_font( + int update, + GtkTreeStore *treestore, + GtkTreeIter *iter, + const char *name, char *value, + char *font) +{ + if(!name) { + g_print("error, name = %s and value = %s\n", + name, value); + return; + } + if(update) { + char *display_name = NULL; + GtkTreePath *path = gtk_tree_path_new_first(); + gtk_tree_model_get_iter(GTK_TREE_MODEL(treestore), iter, path); + while(1) { + /* search for the jobid and check to see if + it is in the list */ + gtk_tree_model_get(GTK_TREE_MODEL(treestore), iter, + DISPLAY_NAME, + &display_name, -1); + if(!strcmp(display_name, name)) { + /* update with new info */ + g_free(display_name); + goto found; + } + g_free(display_name); + + if(!gtk_tree_model_iter_next(GTK_TREE_MODEL(treestore), + iter)) { + return; + } + } + + } else { + gtk_tree_store_append(treestore, iter, NULL); + } +found: + gtk_tree_store_set(treestore, iter, + DISPLAY_NAME, name, + DISPLAY_VALUE, value, + DISPLAY_FONT, font, + -1); + return; } diff --git a/src/sview/grid.c b/src/sview/grid.c index d3da14894ef08c7be27fe0d3c5c8a5523d2bcab1..139c5d5a6a4cc989e0bad832856a7efef1fa36e6 100644 --- a/src/sview/grid.c +++ b/src/sview/grid.c @@ -939,16 +939,18 @@ get_bg: /* Here we need to reset the nodes off of what the blocks say */ for (i=0; i<node_info_ptr->record_count; i++) { node_ptr = &(node_info_ptr->node_array[i]); - /* in each node_ptr we overload the threads var - * with the number of cnodes in the used_cpus var - * will be used to tell how many cnodes are - * allocated and the cores will represent the cnodes + /* In each node_ptr we overload the threads var + * with the number of cnodes in 
drained state, the + * sockets var with the nodes in draining state, and + * the used_cpus var will be used to tell how many cnodes are + * allocated. The cores will also represent the cnodes * in an error state. So we can get an idle count by - * subtracting those 2 numbers from the total possible + * subtracting those 3 numbers from the total possible * cnodes (which are the idle cnodes). */ node_ptr->threads = node_scaling; node_ptr->cores = 0; + node_ptr->sockets = 0; node_ptr->used_cpus = 0; if((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) continue; @@ -1000,9 +1002,16 @@ get_bg: */ if(((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) - || (node_ptr->node_state & NODE_STATE_DRAIN)) + || (node_ptr->node_state + & NODE_STATE_DRAIN)) { + if(bg_info_record->job_running + > NO_JOB_RUNNING) { + node_ptr->sockets += alter; + node_ptr->cores -= alter; + } + continue; - + } if(bg_info_record->state == RM_PARTITION_ERROR) { node_ptr->cores += alter; @@ -1106,7 +1115,6 @@ extern void sview_reset_grid() extern void setup_popup_grid_list(popup_info_t *popup_win) { int def_color = MAKE_BLACK; - GtkTreeIter iter; if(!popup_win->model) def_color = MAKE_WHITE; @@ -1125,19 +1133,19 @@ extern void setup_popup_grid_list(popup_info_t *popup_win) /* refresh the pointer */ if(popup_win->model - && gtk_tree_model_get_iter_first(popup_win->model, &iter)) { + && gtk_tree_store_iter_is_valid(GTK_TREE_STORE(popup_win->model), + &popup_win->iter)) { gtk_tree_model_get(popup_win->model, &popup_win->iter, popup_win->node_inx_id, &popup_win->node_inx, -1); } else { - popup_win->model = NULL; popup_win->node_inx = NULL; } if(popup_win->node_inx) { int j=0; - while(popup_win->node_inx[j] >= 0) { + while(popup_win->node_inx[j] >= 0) { set_grid_used(popup_win->grid_button_list, popup_win->node_inx[j], popup_win->node_inx[j+1], true); diff --git a/src/sview/job_info.c b/src/sview/job_info.c index 12d8c29d2d4295d68a1c1436dfadb88ab89ad1cd..f2c1188d089b104546a115aed4c3285fa0e1ca25 100644 --- a/src/sview/job_info.c +++ b/src/sview/job_info.c @@ -3036,13 +3036,14 @@ extern void set_menus_job(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_job, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_job, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_job); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/node_info.c b/src/sview/node_info.c index c164836d3ff7c9a66930f91686394677cbb5a840..1942dd6bb8edaa752bfa36e165ebc576e953491c 100644 --- a/src/sview/node_info.c +++ b/src/sview/node_info.c @@ -997,7 +997,7 @@ display_it: break; else if(no_resp_flag1 && no_resp_flag2) break; - + if(node_ptr->node_state != search_info->int_data) continue; break; @@ -1022,7 +1022,6 @@ display_it: if(!found) continue; - list_push(send_info_list, sview_node_info_ptr); change_grid_color(popup_win->grid_button_list, i, i, 0, true); @@ -1052,13 +1051,14 @@ extern void set_menus_node(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_node, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_node, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_node); 
break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/part_info.c b/src/sview/part_info.c index 8678d6863e2f953a0967b107bf1efb852ec43662..e442b458049d7a489cc66e6d6ad54f2b3bfbd496 100644 --- a/src/sview/part_info.c +++ b/src/sview/part_info.c @@ -1443,7 +1443,7 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr, } else block_error = 0; node_ptr->threads = node_scaling; - for(j=0; j<3; j++) { + for(j=0; j<4; j++) { int norm = 0; switch(j) { case SVIEW_BG_IDLE_STATE: @@ -1467,7 +1467,10 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr, */ node_ptr->threads -= (node_ptr->cores + + node_ptr->sockets + node_ptr->used_cpus); + if((int16_t)node_ptr->threads < 0) + node_ptr->threads = 0; if(node_ptr->threads == node_scaling) norm = 1; else { @@ -1489,6 +1492,16 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr, node_ptr->threads = node_ptr->used_cpus; break; + case SVIEW_BG_DRAINING_STATE: + /* get the draining node count */ + if(!node_ptr->sockets) + continue; + node_ptr->node_state = + NODE_STATE_ALLOCATED; + node_ptr->node_state |= + NODE_STATE_DRAIN; + node_ptr->threads = node_ptr->sockets; + break; case SVIEW_BG_ERROR_STATE: /* get the error node count */ if(!node_ptr->cores) @@ -2231,13 +2244,14 @@ extern void set_menus_part(void *arg, GtkTreePath *path, switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_part, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_part, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_part); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); @@ -2253,7 +2267,8 @@ extern void popup_all_part(GtkTreeModel *model, GtkTreeIter *iter, int id) ListIterator itr = NULL; popup_info_t *popup_win = NULL; GError *error = NULL; - + GtkTreeIter par_iter; + gtk_tree_model_get(model, iter, SORTID_NAME, &name, -1); switch(id) { @@ -2341,7 +2356,14 @@ extern void popup_all_part(GtkTreeModel *model, GtkTreeIter *iter, int id) case RESV_PAGE: case NODE_PAGE: g_free(name); - gtk_tree_model_get(model, iter, SORTID_NODELIST, &name, -1); + /* we want to include the parent's nodes here not just + the subset */ + if(gtk_tree_model_iter_parent(model, &par_iter, iter)) + gtk_tree_model_get(model, &par_iter, + SORTID_NODELIST, &name, -1); + else + gtk_tree_model_get(model, iter, + SORTID_NODELIST, &name, -1); popup_win->spec_info->search_info->gchar_data = name; if(state && strlen(state)) { popup_win->spec_info->search_info->search_type = diff --git a/src/sview/popups.c b/src/sview/popups.c index 4c83ca03b4fe8aabe14801ec7bb43655007c85c1..8364cb59bb756f9c349f38eabdf141bf314e6e98 100644 --- a/src/sview/popups.c +++ b/src/sview/popups.c @@ -40,6 +40,53 @@ #include "sview.h" #include "src/common/parse_time.h" +static char *_select_info(uint16_t select_type_param) +{ + switch (select_type_param) { + case SELECT_TYPE_INFO_NONE: + return "NONE"; + case CR_CPU: + return "CR_CPU"; + case CR_SOCKET: + return "CR_SOCKET"; + case CR_CORE: + return "CR_CORE"; + case CR_MEMORY: + return "CR_MEMORY"; + case CR_SOCKET_MEMORY: + return "CR_SOCKET_MEMORY"; + case CR_CORE_MEMORY: + return "CR_CORE_MEMORY"; + case 
CR_CPU_MEMORY: + return "CR_CPU_MEMORY"; + default: + return "unknown"; + } +} + +static char *_reset_period_str(uint16_t reset_period) +{ + switch (reset_period) { + case PRIORITY_RESET_NONE: + return "NONE"; + case PRIORITY_RESET_NOW: + return "NOW"; + case PRIORITY_RESET_DAILY: + return "DAILY"; + case PRIORITY_RESET_WEEKLY: + return "WEEKLY"; + case PRIORITY_RESET_MONTHLY: + return "MONTHLY"; + case PRIORITY_RESET_QUARTERLY: + return "QUARTERLY"; + case PRIORITY_RESET_YEARLY: + return "YEARLY"; + default: + return "UNKNOWN"; + } +} + + void *_refresh_thr(gpointer arg) { int msg_id = GPOINTER_TO_INT(arg); @@ -220,28 +267,58 @@ static GtkTreeStore *_local_create_treestore_2cols(GtkWidget *popup, static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_ctl_conf_info_msg_t *slurm_ctl_conf_ptr) { - char temp_str[32], temp_str2[128]; + char temp_str[128]; int update = 0; GtkTreeIter iter; - + char *xbuf = NULL; + ListIterator itr = NULL; + config_key_pair_t *key_pair; + char *select_title = ""; +#ifdef HAVE_BGL + select_title = "Bluegene/L configuration"; +#endif +#ifdef HAVE_BGP + select_title = "Bluegene/P configuration"; +#endif +#ifdef HAVE_BGQ + select_title = "Bluegene/Q configuration"; +#endif + if(!slurm_ctl_conf_ptr) return; slurm_make_time_str((time_t *)&slurm_ctl_conf_ptr->last_update, temp_str, sizeof(temp_str)); + add_display_treestore_line_with_font( + update, treestore, &iter, + "Configuration data as of", temp_str, "bold"); + accounting_enforce_string( + slurm_ctl_conf_ptr->accounting_storage_enforce, + temp_str, sizeof(temp_str)); + add_display_treestore_line(update, treestore, &iter, + "AccountingStorageEnforce", + temp_str); add_display_treestore_line(update, treestore, &iter, - "Configuration data as of", temp_str); + "AccountingStorageBackupHost", + slurm_ctl_conf_ptr-> + accounting_storage_backup_host); add_display_treestore_line(update, treestore, &iter, "AccountingStorageHost", slurm_ctl_conf_ptr->accounting_storage_host); add_display_treestore_line(update, treestore, &iter, - "AccountingStorageType", - slurm_ctl_conf_ptr->accounting_storage_type); + "AccountingStorageLoc", + slurm_ctl_conf_ptr->accounting_storage_loc); + add_display_treestore_line(update, treestore, &iter, + "AccountingStoragePass", + slurm_ctl_conf_ptr->accounting_storage_pass); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->accounting_storage_port); add_display_treestore_line(update, treestore, &iter, "AccountingStoragePort", temp_str); + add_display_treestore_line(update, treestore, &iter, + "AccountingStorageType", + slurm_ctl_conf_ptr->accounting_storage_type); add_display_treestore_line(update, treestore, &iter, "AccountingStorageUser", slurm_ctl_conf_ptr->accounting_storage_user); @@ -253,6 +330,11 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "BackupController", slurm_ctl_conf_ptr->backup_controller); + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->batch_start_timeout); + add_display_treestore_line(update, treestore, &iter, + "BatchStartTimeout", + temp_str); slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->boot_time, temp_str, sizeof(temp_str)); add_display_treestore_line(update, treestore, &iter, @@ -264,6 +346,9 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "CheckpointType", slurm_ctl_conf_ptr->checkpoint_type); + add_display_treestore_line(update, treestore, &iter, + "ClusterName", + slurm_ctl_conf_ptr->cluster_name); 
snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->complete_wait); add_display_treestore_line(update, treestore, &iter, @@ -278,20 +363,54 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "CryptoType", slurm_ctl_conf_ptr->crypto_type); + + xbuf = debug_flags2str(slurm_ctl_conf_ptr->debug_flags); + add_display_treestore_line(update, treestore, &iter, + "DebugFlags", + xbuf); + xfree(xbuf); if (slurm_ctl_conf_ptr->def_mem_per_task & MEM_PER_CPU) { snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->def_mem_per_task & (~MEM_PER_CPU)); add_display_treestore_line(update, treestore, &iter, "DefMemPerCPU", temp_str); - } else { + } else if (slurm_ctl_conf_ptr->def_mem_per_task) { snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->def_mem_per_task); add_display_treestore_line(update, treestore, &iter, "DefMemPerNode", temp_str); + } else { + add_display_treestore_line(update, treestore, &iter, + "DefMemPerCPU", "UNLIMITED"); } + + if(slurm_ctl_conf_ptr->disable_root_jobs) + xbuf = "YES"; + else + xbuf = "NO"; + add_display_treestore_line(update, treestore, &iter, + "DisableRootJobs", + xbuf); + + if(slurm_ctl_conf_ptr->enforce_part_limits) + xbuf = "YES"; + else + xbuf = "NO"; + add_display_treestore_line(update, treestore, &iter, + "EnforcePartLimits", + xbuf); + add_display_treestore_line(update, treestore, &iter, "Epilog", slurm_ctl_conf_ptr->epilog); + snprintf(temp_str, sizeof(temp_str), "%u usec", + slurm_ctl_conf_ptr->epilog_msg_time); + add_display_treestore_line(update, treestore, &iter, + "EpilogMsgTime", + temp_str); + add_display_treestore_line(update, treestore, &iter, + "EpilogSlurmctld", + slurm_ctl_conf_ptr->epilog_slurmctld); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->fast_schedule); add_display_treestore_line(update, treestore, &iter, @@ -302,6 +421,19 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "FirstJobId", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->get_env_timeout); + add_display_treestore_line(update, treestore, &iter, + "GetEnvTimeout", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->health_check_interval); + add_display_treestore_line(update, treestore, &iter, + "HealthCheckInterval", + temp_str); + add_display_treestore_line(update, treestore, &iter, + "HealthCheckProgram", + slurm_ctl_conf_ptr->health_check_program); #ifdef HAVE_XCPU add_display_treestore_line(update, treestore, &iter, "HAVE_XCPU", "1"); @@ -312,14 +444,18 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, "InactiveLimit", temp_str); - add_display_treestore_line(update, treestore, &iter, - "JobAcctGatherType", - slurm_ctl_conf_ptr->job_acct_gather_type); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->job_acct_gather_freq); add_display_treestore_line(update, treestore, &iter, "JobAcctGatherFrequency", temp_str); + add_display_treestore_line(update, treestore, &iter, + "JobAcctGatherType", + slurm_ctl_conf_ptr->job_acct_gather_type); + + add_display_treestore_line(update, treestore, &iter, + "JobCheckpointDir", + slurm_ctl_conf_ptr->job_ckpt_dir); add_display_treestore_line(update, treestore, &iter, "JobCompHost", @@ -327,6 +463,9 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "JobCompLoc", slurm_ctl_conf_ptr->job_comp_loc); + add_display_treestore_line(update, treestore, 
&iter, + "JobCompPass", + slurm_ctl_conf_ptr->job_comp_pass); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->job_comp_port); add_display_treestore_line(update, treestore, &iter, @@ -351,11 +490,23 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_ctl_conf_ptr->job_file_append); add_display_treestore_line(update, treestore, &iter, "JobFileAppend", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->job_requeue); + add_display_treestore_line(update, treestore, &iter, + "JobRequeue", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->kill_on_bad_exit); + add_display_treestore_line(update, treestore, &iter, + "KillOnBadExit", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u sec", slurm_ctl_conf_ptr->kill_wait); add_display_treestore_line(update, treestore, &iter, "KillWait", temp_str); + add_display_treestore_line(update, treestore, &iter, + "Licenses", + slurm_ctl_conf_ptr->licenses); add_display_treestore_line(update, treestore, &iter, "MailProg", slurm_ctl_conf_ptr->mail_prog); @@ -369,11 +520,14 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_ctl_conf_ptr->max_mem_per_task & (~MEM_PER_CPU)); add_display_treestore_line(update, treestore, &iter, "MaxMemPerCPU", temp_str); - } else { + } else if (slurm_ctl_conf_ptr->max_mem_per_task) { snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->max_mem_per_task); add_display_treestore_line(update, treestore, &iter, "MaxMemPerNode", temp_str); + } else { + add_display_treestore_line(update, treestore, &iter, + "MaxMemPerCPU", "UNLIMITED"); } snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->msg_timeout); @@ -400,15 +554,79 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "NEXT_JOB_ID", temp_str); + if (slurm_ctl_conf_ptr->over_time_limit == (uint16_t) INFINITE) + snprintf(temp_str, sizeof(temp_str), "UNLIMITED"); + else + snprintf(temp_str, sizeof(temp_str), "%u min", + slurm_ctl_conf_ptr->over_time_limit); + add_display_treestore_line(update, treestore, &iter, + "OverTimeLimit", + temp_str); + add_display_treestore_line(update, treestore, &iter, "PluginDir", slurm_ctl_conf_ptr->plugindir); add_display_treestore_line(update, treestore, &iter, "PlugStackConfig", slurm_ctl_conf_ptr->plugstack); + + if (strcmp(slurm_ctl_conf_ptr->priority_type, "priority/basic") == 0) { + add_display_treestore_line(update, treestore, &iter, + "PriorityType", + slurm_ctl_conf_ptr->priority_type); + } else { + secs2time_str((time_t) slurm_ctl_conf_ptr->priority_decay_hl, + temp_str, sizeof(temp_str)); + add_display_treestore_line(update, treestore, &iter, + "PriorityDecayHalfLife", temp_str); + + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_favor_small); + add_display_treestore_line(update, treestore, &iter, + "PriorityFavorSmall", + temp_str); + secs2time_str((time_t) slurm_ctl_conf_ptr->priority_max_age, + temp_str, sizeof(temp_str)); + add_display_treestore_line(update, treestore, &iter, + "PriorityMaxAge", temp_str); + add_display_treestore_line(update, treestore, &iter, + "PriorityUsageResetPeriod", + _reset_period_str( + slurm_ctl_conf_ptr-> + priority_reset_period)); + add_display_treestore_line(update, treestore, &iter, + "PriorityType", + slurm_ctl_conf_ptr->priority_type); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_weight_age); + add_display_treestore_line(update, treestore, &iter, + "PriorityWeightAge", + temp_str); + 
snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_weight_fs); + add_display_treestore_line(update, treestore, &iter, + "PriorityWeightFairShare", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_weight_js); + add_display_treestore_line(update, treestore, &iter, + "PriorityWeightJobSize", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_weight_part); + add_display_treestore_line(update, treestore, &iter, + "PriorityWeightPartition", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->priority_weight_qos); + add_display_treestore_line(update, treestore, &iter, + "PriorityWeightQOS", + temp_str); + } + + private_data_string(slurm_ctl_conf_ptr->private_data, - temp_str2, sizeof(temp_str2)); - snprintf(temp_str, sizeof(temp_str), "%s", temp_str2); + temp_str, sizeof(temp_str)); add_display_treestore_line(update, treestore, &iter, "PrivateData", temp_str); @@ -418,6 +636,9 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "Prolog", slurm_ctl_conf_ptr->prolog); + add_display_treestore_line(update, treestore, &iter, + "PrologSlurmctld", + slurm_ctl_conf_ptr->prolog_slurmctld); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->propagate_prio_process); add_display_treestore_line(update, treestore, &iter, @@ -431,14 +652,31 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, propagate_rlimits_except); add_display_treestore_line(update, treestore, &iter, "ResumeProgram", temp_str); - snprintf(temp_str, sizeof(temp_str), "%u", + snprintf(temp_str, sizeof(temp_str), "%u nodes/min", slurm_ctl_conf_ptr->resume_rate); add_display_treestore_line(update, treestore, &iter, "ResumeRate", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->resume_timeout); + add_display_treestore_line(update, treestore, &iter, + "ResumeTimeout", temp_str); + if (slurm_ctl_conf_ptr->resv_over_run == (uint16_t) INFINITE) + snprintf(temp_str, sizeof(temp_str), "UNLIMITED"); + else + snprintf(temp_str, sizeof(temp_str), "%u min", + slurm_ctl_conf_ptr->resv_over_run); + add_display_treestore_line(update, treestore, &iter, + "ResvOverRun", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->ret2service); add_display_treestore_line(update, treestore, &iter, "ReturnToService", temp_str); + + add_display_treestore_line(update, treestore, &iter, + "SallocDefaultCommand", + slurm_ctl_conf_ptr->salloc_default_command); + snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->schedport); add_display_treestore_line(update, treestore, &iter, @@ -457,6 +695,13 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "SelectType", slurm_ctl_conf_ptr->select_type); + if (slurm_ctl_conf_ptr->select_type_param) { + add_display_treestore_line(update, treestore, &iter, + "SelectTypeParameters", + _select_info(slurm_ctl_conf_ptr-> + select_type_param)); + } + snprintf(temp_str, sizeof(temp_str), "%s(%u)", slurm_ctl_conf_ptr->slurm_user_name, slurm_ctl_conf_ptr->slurm_user_id); @@ -503,14 +748,25 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_ctl_conf_ptr->slurmd_timeout); add_display_treestore_line(update, treestore, &iter, "SlurmdTimeout", temp_str); + snprintf(temp_str, sizeof(temp_str), "%s(%u)", + slurm_ctl_conf_ptr->slurmd_user_name, + slurm_ctl_conf_ptr->slurmd_user_id); + add_display_treestore_line(update, 
treestore, &iter, + "SlurmdUser", + temp_str); add_display_treestore_line(update, treestore, &iter, - "SLURM_CONFIG_FILE", + "SLURM_CONF", slurm_ctl_conf_ptr->slurm_conf); add_display_treestore_line(update, treestore, &iter, "SLURM_VERSION", SLURM_VERSION); add_display_treestore_line(update, treestore, &iter, "SrunEpilog", slurm_ctl_conf_ptr->srun_epilog); + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->srun_io_timeout); + add_display_treestore_line(update, treestore, &iter, + "SrunIOTimeout", + temp_str); add_display_treestore_line(update, treestore, &iter, "SrunProlog", slurm_ctl_conf_ptr->srun_prolog); @@ -526,14 +782,22 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "SuspendProgram", slurm_ctl_conf_ptr->suspend_program); - snprintf(temp_str, sizeof(temp_str), "%u", + snprintf(temp_str, sizeof(temp_str), "%u nodes/min", slurm_ctl_conf_ptr->suspend_rate); add_display_treestore_line(update, treestore, &iter, "SuspendRate", temp_str); - snprintf(temp_str, sizeof(temp_str), "%d", - ((int)slurm_ctl_conf_ptr->suspend_time - 1)); + if (!slurm_ctl_conf_ptr->suspend_time) + snprintf(temp_str, sizeof(temp_str), "NONE"); + else + snprintf(temp_str, sizeof(temp_str), "%d sec", + ((int)slurm_ctl_conf_ptr->suspend_time - 1)); add_display_treestore_line(update, treestore, &iter, "SuspendTime", temp_str); + + snprintf(temp_str, sizeof(temp_str), "%u sec", + slurm_ctl_conf_ptr->suspend_timeout); + add_display_treestore_line(update, treestore, &iter, + "SuspendTimeOut", temp_str); add_display_treestore_line(update, treestore, &iter, "SwitchType", slurm_ctl_conf_ptr->switch_type); @@ -553,6 +817,13 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, add_display_treestore_line(update, treestore, &iter, "TmpFS", slurm_ctl_conf_ptr->tmp_fs); + add_display_treestore_line(update, treestore, &iter, + "TopologyPlugin", + slurm_ctl_conf_ptr->topology_plugin); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->track_wckey); + add_display_treestore_line(update, treestore, &iter, + "TrackWCKey", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->tree_width); add_display_treestore_line(update, treestore, &iter, @@ -572,6 +843,19 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_ctl_conf_ptr->wait_time); add_display_treestore_line(update, treestore, &iter, "WaitTime", temp_str); + + if (!slurm_ctl_conf_ptr->select_conf_key_pairs) + return; + + add_display_treestore_line_with_font(update, treestore, &iter, + select_title, NULL, "bold"); + itr = list_iterator_create( + (List)slurm_ctl_conf_ptr->select_conf_key_pairs); + while((key_pair = list_next(itr))) { + add_display_treestore_line(update, treestore, &iter, + key_pair->name, key_pair->value); + } + list_iterator_destroy(itr); } extern void create_config_popup(GtkAction *action, gpointer user_data) diff --git a/src/sview/resv_info.c b/src/sview/resv_info.c index 068cac31abc2066bb6c1b0e74d458877f7b2f932..f84aef71c57311c7bea3cd1f0a8d330586ca8b77 100644 --- a/src/sview/resv_info.c +++ b/src/sview/resv_info.c @@ -1194,13 +1194,14 @@ extern void set_menus_resv(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_resv, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_resv, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_resv); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, 
menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/submit_info.c b/src/sview/submit_info.c index c767ae7700c1ce9ad327b4628687ab8a1b55e5c7..d016ab1293895cbae19234a64a12f1c2e0c812e6 100644 --- a/src/sview/submit_info.c +++ b/src/sview/submit_info.c @@ -77,13 +77,14 @@ extern void set_menus_submit(void *arg, GtkTreePath *path, popup_info_t *popup_win = (popup_info_t *)arg; switch(type) { case TAB_CLICKED: - make_fields_menu(menu, display_data_submit, SORTID_CNT); + make_fields_menu(NULL, menu, display_data_submit, SORTID_CNT); break; case ROW_CLICKED: make_options_menu(tree_view, path, menu, options_data_submit); break; case POPUP_CLICKED: - make_popup_fields_menu(popup_win, menu); + make_fields_menu(popup_win, menu, + popup_win->display_data, SORTID_CNT); break; default: g_error("UNKNOWN type %d given to set_fields\n", type); diff --git a/src/sview/sview.h b/src/sview/sview.h index e86b555701043837bac9fccb15f56ff59605f564..311eb241024962b0d6ac834015f49344b4dd02b3 100644 --- a/src/sview/sview.h +++ b/src/sview/sview.h @@ -112,7 +112,8 @@ enum { STATUS_ADMIN_MODE, }; enum { DISPLAY_NAME, - DISPLAY_VALUE + DISPLAY_VALUE, + DISPLAY_FONT }; enum { EDIT_NONE, @@ -127,6 +128,7 @@ enum { EDIT_NONE, enum { SVIEW_BG_IDLE_STATE, SVIEW_BG_ALLOC_STATE, + SVIEW_BG_DRAINING_STATE, SVIEW_BG_ERROR_STATE }; #endif @@ -404,9 +406,8 @@ extern int get_row_number(GtkTreeView *tree_view, GtkTreePath *path); extern int find_col(display_data_t *display_data, int type); extern const char *find_col_name(display_data_t *display_data, int type); extern void load_header(GtkTreeView *tree_view, display_data_t *display_data); -extern void make_fields_menu(GtkMenu *menu, display_data_t *display_data, - int count); -extern void make_popup_fields_menu(popup_info_t *popup_win, GtkMenu *men); +extern void make_fields_menu(popup_info_t *popup_win, GtkMenu *menu, + display_data_t *display_data, int count); extern void make_options_menu(GtkTreeView *tree_view, GtkTreePath *path, GtkMenu *menu, display_data_t *display_data); extern GtkScrolledWindow *create_scrolled_window(); @@ -444,4 +445,10 @@ extern void add_display_treestore_line(int update, GtkTreeStore *treestore, GtkTreeIter *iter, const char *name, char *value); +extern void add_display_treestore_line_with_font( + int update, + GtkTreeStore *treestore, + GtkTreeIter *iter, + const char *name, char *value, + char *font); #endif diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am index c96b2cb6c4c6520d112b6cd88dde726aac0e6088..6aa9a258e4ec7eb9920da8e0b422483d60f663e5 100644 --- a/testsuite/expect/Makefile.am +++ b/testsuite/expect/Makefile.am @@ -318,6 +318,7 @@ EXTRA_DIST = \ test21.23 \ test21.24 \ test21.25 \ + test21.26 \ test22.1 \ test22.2 \ test23.1 \ diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in index 7e5181785e350ad34e6474abfc6c4b973b125c8e..531c4b51ff76c382fbf0c05884531789e0870402 100644 --- a/testsuite/expect/Makefile.in +++ b/testsuite/expect/Makefile.in @@ -574,6 +574,7 @@ EXTRA_DIST = \ test21.23 \ test21.24 \ test21.25 \ + test21.26 \ test22.1 \ test22.2 \ test23.1 \ diff --git a/testsuite/expect/README b/testsuite/expect/README index 63b73a230579bdd43c1478b3ccbea365fcfd2dc0..64529ccf2d1183851eeac40364bdf6b7b7d3984e 100644 --- a/testsuite/expect/README +++ b/testsuite/expect/README @@ -526,6 +526,7 @@ test21.22 sacctmgr load file test21.23 sacctmgr wckey test21.24 sacctmgr dump file 
 test21.25  sacctmgr show config
+test21.26  sacctmgr +=, -= modify QoS test
 
 test22.#   Testing of sreport commands and options.
            These also test the sacctmgr archive dump/load functions.
diff --git a/testsuite/expect/globals_accounting b/testsuite/expect/globals_accounting
index 9df5cd2c9a601f093a68b70c5ebf34e5c9e6cbe0..9a4745042621e11866559234c564805a8ce3a4e0 100644
--- a/testsuite/expect/globals_accounting
+++ b/testsuite/expect/globals_accounting
@@ -114,7 +114,10 @@ proc add_cluster {name qos fairshare grpcpu grpjob grpnode grpsubmit maxcpumin m
 	}
 
 	if { [string length $qos] } {
-		set command "$command qoslevel=$qos"
+		if { ![string compare $qos " "] } {
+			set qos ""
+		}
+		set command "$command qoslevel='$qos'"
 	}
 
 	set my_pid [eval spawn $sacctmgr -i add cluster $command]
diff --git a/testsuite/expect/test1.60 b/testsuite/expect/test1.60
index 09d003c4ae07c49a14e41082a82a6006bc5593f8..e3d23acd5ae3887fa0e6bdebc101947dc8dfcccb 100755
--- a/testsuite/expect/test1.60
+++ b/testsuite/expect/test1.60
@@ -48,6 +48,11 @@ set file_out_n_glob ""
 
 print_header $test_id
 
+if {[test_bluegene] != 0} {
+	send_user "\nWARNING: This test is incompatible with bluegene systems\n"
+	exit 0
+}
+
 #
 # Spawn a program that generates "task_id" (%t) in stdout file names
 # and confirm they are created
diff --git a/testsuite/expect/test1.89 b/testsuite/expect/test1.89
index b8cf2676d60469da5d50f99016c740a40c65473b..d847bdb1fc9e22e0ad783e9f40ca6f9a74bb8c3d 100755
--- a/testsuite/expect/test1.89
+++ b/testsuite/expect/test1.89
@@ -484,7 +484,8 @@ if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 } else {
 	send_user "\nNOTE: This test can fail if the node configuration in slurm.conf \n"
-	send_user "      (sockets, cores, threads) differs from the actual configuration\n"
+	send_user "      (sockets, cores, threads) differs from the actual configuration \n"
+	send_user "      or if Shared=FORCE for the default partition.\n"
 }
 
 exit $exit_code
diff --git a/testsuite/expect/test17.6 b/testsuite/expect/test17.6
index d254b4c7a30598b6c9a561070c412e5269ed9521..4115de2b2ca66c22a46fa4edd8fcd69f18bb7b04 100755
--- a/testsuite/expect/test17.6
+++ b/testsuite/expect/test17.6
@@ -101,6 +101,9 @@ if {$job_id != 0} {
 		set exit_code 1
 	}
 }
+if {$exit_code != 0} {
+	exit $exit_code
+}
 
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
diff --git a/testsuite/expect/test21.26 b/testsuite/expect/test21.26
new file mode 100755
index 0000000000000000000000000000000000000000..1115adbb4f7dfe8058d726578b1364dbffd329da
--- /dev/null
+++ b/testsuite/expect/test21.26
@@ -0,0 +1,489 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          sacctmgr +=, -= modify QoS test
+#
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals_accounting + +set test_id "test21.26" +set exit_code 0 +set cluster1 qclustest +set account1 qacctest1 +set account2 qacctest2 +set qos1 qqostest +set user1 qusertest +set access_err 0 + +print_header $test_id + +set timeout 60 + +# +# Check accounting config and bail if not found +# +if { [test_account_storage] == 0 } { + send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n" + exit 0 +} + +# +#make sure we have permission to do this work +# +if { [string compare [check_accounting_admin_level] "Administrator"] } { + send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n" + exit 0 +} + +# +# remove test associations to make sure we have a clean system +# +remove_user "" "" "$user1" +remove_acct "" "$account1,$account2" +remove_qos "$qos1" +remove_cluster "$cluster1" +if {$access_err != 0} { + send_user "\nWARNING: not authorized to perform this test\n" + exit $exit_code +} + +# Build test associations +#=====Done Cleaning System=========Begin Add Cluster====== +#add cluster +incr exit_code [add_cluster "$cluster1" " " "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} +#=====Done Add Cluster========Begin Add QoS========== +#add qos +incr exit_code [add_qos "$qos1"] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} +#====Done Add QoS===========Begin Add First Account======== +#add default account +incr exit_code [add_acct "$cluster1" "" "$account1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} +#====Done Add First Account====Begin Add Second Account==== +#add account +incr exit_code [add_acct "$cluster1" "$account1" "$account2" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} +#=====Done Add Second Account========Begin Add User========= +#add user +incr exit_code [add_user "$cluster1" "$account1,$account2" "$user1" "" "" "$account1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} +#====Done add user====Done building test associations===Begin test section======= + +#======Verify 
initial associations============= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "\n.root.(|root)." { + incr matches + exp_continue + } + -re "\n.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "\n.$account2.(|$user1)." { + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: Initial sacctmgr add failed with ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + +#======Begin Modification to Add $qos1 to $account1======= +#modify test1 account to add test QoS +incr exit_code [mod_acct "$cluster1" "" "$account1" "" "" "+$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] + +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "\n.root.(|root)." { + incr matches + exp_continue + } + -re "$qos1.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "$qos1.$account2.(|$user1)." { + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: failed on verify of +$qos1 to account $account1 ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + +#======Begin Modification to Subtract $qos1 from $account2======= +#modify test2 account to remove test QoS +incr exit_code [mod_acct "$cluster1" "" "$account2" "" "" "-$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "\n.root.(|root)." { + incr matches + exp_continue + } + -re "$qos1.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "\n.$account2.(|$user1)." 
{ + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: verify of -$qos1 from account $account2 ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + + +#======Begin Modification to Subtract $qos1 from $account1======= +#modify test1 account to remove test QoS +incr exit_code [mod_acct "$cluster1" "" "$account1" "" "" "-$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "\n.root.(|root)." { + incr matches + exp_continue + } + -re "\n.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "\n.$account2.(|$user1)." { + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: verify of -$qos1 from account $account1 ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + +#=====Begin Modification to Add $qos1 from $account2======= +#modify test2 account to add test QoS +incr exit_code [mod_acct "$cluster1" "" "$account2" "" "" "+$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] + +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "\n.root.(|root)." { + incr matches + exp_continue + } + -re "\n.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "$qos1.$account2.(|$user1)." 
{ + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: failed on verify of +$qos1 to account $account2 ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + +#=======Begin Modification to Add $qos1 from $account2======= +#modify root account to add test QoS +incr exit_code [mod_acct "$cluster1" "" "root" "" "" "+$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next Is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] + +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "$qos1.root.(|root)." { + incr matches + exp_continue + } + -re "\n.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "$qos1.$account2.(|$user1)." { + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: failed on verify of +$qos1 to account root ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + +#======Begin Modification to Add $qos1 from $account2======= +#modify test2 account to remove test QoS +incr exit_code [mod_acct "$cluster1" "" "$account2" "" "" "-$qos1" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""] +if { $exit_code } { + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit $exit_code +} + +#=======Done Modifying Account=======Next is Verify======= +set matches 0 +set my_pid [eval spawn $sacctmgr list assoc cluster=$cluster1 format=qos,acct,user -p] + +expect { + -re "There was a problem" { + send_user "FAILURE: there was a problem with the sacctmgr command\n" + exit 1 + } + -re "$qos1.root.(|root)." { + incr matches + exp_continue + } + -re "\n.$account1.(|$user1)." { + incr matches + exp_continue + } + -re "\n.$account2.(|$user1)." 
{ + incr matches + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr list associations not responding\n" + slow_kill $my_pid + exit 1 + } + eof { + wait + } +} + +if {$matches != 6} { + send_user "\nFAILURE: failed on verify of -$qos1 to account $account2 ($matches)\n" + remove_user "" "" "$user1" + remove_acct "" "$account1,$account2" + remove_qos "$qos1" + remove_cluster "$cluster1" + exit 1 +} + + +#======Done With List====== Ending======= +# This is the end below here +# +incr exit_code [remove_user "" "" "$user1"] +incr exit_code [remove_acct "" "$account1,$account2"] +incr exit_code [remove_qos "$qos1"] +incr exit_code [remove_cluster "$cluster1"] + +if {$exit_code == 0} { + send_user "\nSUCCESS: $test_id\n" +} else { + send_user "\nFAILURE: $test_id\n" +} +exit $exit_code + diff --git a/testsuite/expect/test24.1 b/testsuite/expect/test24.1 index 318e7ac44f95d52920449b83509b85e276ba1d0c..b3097c795ae03577e2f09f2dce02c075a36d2b6f 100755 --- a/testsuite/expect/test24.1 +++ b/testsuite/expect/test24.1 @@ -48,11 +48,11 @@ file delete $test_prog send_user "build_dir is $build_dir\n" if {[test_aix]} { - send_user "$bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o\n" - exec $bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o + send_user "$bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/api/libslurm.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o\n" + exec $bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/api/libslurm.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o } else { - send_user "$bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o -ldl -export-dynamic \n" - exec $bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o -ldl -export-dynamic + send_user "$bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/api/libslurm.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o -ldl -export-dynamic \n" + exec $bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/api/libslurm.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o -ldl -export-dynamic } exec $bin_chmod 700 $test_prog diff --git a/testsuite/expect/test6.11 b/testsuite/expect/test6.11 index de4e295b2181aa16b0bcd9f08669c48130757ab5..e1bf2423117e05374d21f4bcc5d49df073d0853b 100755 --- a/testsuite/expect/test6.11 +++ b/testsuite/expect/test6.11 @@ -110,7 +110,7 @@ if 
{$matches == 0} {
 # Test of --quiet option
 spawn $scancel --quiet $job_id
 expect {
-	re "error" {
+	-re "error" {
 		set exit_code 1
 		exp_continue
 	}
diff --git a/testsuite/slurm_unit/slurmctld/security_2_1.bash b/testsuite/slurm_unit/slurmctld/security_2_1.bash
index 9baad8f660b7388ed90cee7e24fd9a159f2d2c83..5048d8ccdf68cc519440c116408fd8347ea009d9 100755
--- a/testsuite/slurm_unit/slurmctld/security_2_1.bash
+++ b/testsuite/slurm_unit/slurmctld/security_2_1.bash
@@ -4,7 +4,7 @@
 # Create private config file
 # Set AuthType=auth/dummy
-file_orig=`${slurm_bin}scontrol show config | awk '{ if ( $1 ~ /SLURM_CONFIG_FILE/ ) { print $3 } }'`
+file_orig=`${slurm_bin}scontrol show config | awk '{ if ( $1 ~ /SLURM_CONF/ ) { print $3 } }'`
 grep -iv AuthType <$file_orig >tmp.$$
 echo "AuthType=auth/dummy" >>tmp.$$