From 55f1dc8d4e5f3d80a1496b097a64f8bdf7f4d74e Mon Sep 17 00:00:00 2001
From: green <green>
Date: Thu, 3 Apr 2008 03:21:19 +0000
Subject: [PATCH] r=shadow,umka b=14257 Do not include LDLM_FLOCK locks into
 ldlm_pool sanity calculations.

---
 lustre/ChangeLog        |  7 +++++++
 lustre/ldlm/ldlm_pool.c | 10 ++++++++++
 2 files changed, 17 insertions(+)

diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index 73ef20b577..96b469dcef 100644
--- a/lustre/ChangeLog
+++ b/lustre/ChangeLog
@@ -478,6 +478,13 @@
 Details    : On SLES10/PPC, fs.h includes idr.h which requires
              BITS_PER_LONG to be defined. Add a hack in mkfs_lustre.c
              to work around this compile issue.
 
+Severity   : normal
+Bugzilla   : 14257
+Description: LASSERT on MDS when client holding flock lock dies
+Details    : ldlm pool logic depends on number of granted locks equal to
+             number of released locks which is not true for flock locks, so
+             just exclude such locks from consideration.
+
 --------------------------------------------------------------------------------
 
 2007-12-07  Cluster File Systems, Inc. <info@clusterfs.com>
diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
index 541afe19fc..dcae25431d 100644
--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -667,7 +667,15 @@ EXPORT_SYMBOL(ldlm_pool_fini);
 
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
+        /* FLOCK locks are special in a sense that they are almost never
+         * cancelled, instead special kind of lock is used to drop them.
+         * also there is no LRU for flock locks, so no point in tracking
+         * them anyway */
+        if (lock->l_resource->lr_type == LDLM_FLOCK)
+                return;
+
         ENTRY;
+
         atomic_inc(&pl->pl_granted);
         atomic_inc(&pl->pl_grant_rate);
         atomic_inc(&pl->pl_grant_speed);
@@ -686,6 +694,8 @@ EXPORT_SYMBOL(ldlm_pool_add);
 
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
+        if (lock->l_resource->lr_type == LDLM_FLOCK)
+                return;
         ENTRY;
         LASSERT(atomic_read(&pl->pl_granted) > 0);
         atomic_dec(&pl->pl_granted);
-- 
GitLab