From 4fcbc3b43b69a7290d3add3f08d3504fc4b75425 Mon Sep 17 00:00:00 2001
From: vitaly <vitaly>
Date: Wed, 9 Apr 2008 20:21:48 +0000
Subject: [PATCH] Branch HEAD b=14661 i=green i=shadow

-- remove a BL_AST lock from the LRU as soon as we mark it as such
   (see the first sketch below);
-- do not place a BL_AST lock into the LRU even if the lock is matched
   and put later;
   => no BL_AST locks in the LRU at all;
-- CANCELING locks are not numerous, but to spare another thread from
   traversing them again, drop them from the unused list as soon as we
   come across them (see the second sketch below).
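
For review convenience, a minimal standalone userspace sketch of the
BL_AST half of this change. sketch_lock, the FL_* flags and the in_lru
field are illustrative stand-ins, not the real ldlm structures; the
actual logic lives in ldlm_lock_decref_internal() and the two callback
handlers patched below:

#include <assert.h>
#include <stdio.h>

#define FL_NO_LRU  0x1
#define FL_BL_AST  0x2

struct sketch_lock {
        int readers;
        int writers;
        unsigned flags;
        int in_lru;             /* stands in for real list membership */
};

/* Mirrors the new decref condition: the last reference is gone and the
 * lock is neither NO_LRU nor BL_AST, so it may be parked in the LRU. */
static void decref(struct sketch_lock *lk)
{
        if (lk->readers == 0 && lk->writers == 0 &&
            !(lk->flags & FL_NO_LRU) && !(lk->flags & FL_BL_AST))
                lk->in_lru = 1;
}

/* Mirrors the callback-handler hunks: drop the lock from the LRU
 * before marking it BL_AST, so ldlm_cancel_lru() never sees it. */
static void handle_bl_ast(struct sketch_lock *lk)
{
        lk->in_lru = 0;
        lk->flags |= FL_BL_AST;
}

int main(void)
{
        struct sketch_lock lk = { 0 };

        decref(&lk);
        assert(lk.in_lru);      /* a plain unused lock goes to the LRU */

        handle_bl_ast(&lk);
        decref(&lk);
        assert(!lk.in_lru);     /* a BL_AST lock never re-enters it */

        puts("BL_AST lock kept out of the LRU");
        return 0;
}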
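
And a second standalone sketch of the CANCELING half. The lru_lock type
and list helpers are stand-ins for the kernel list_head API; the loop
mirrors the rewritten list_for_each_entry_safe() scan in
ldlm_cancel_lru_local():

#include <assert.h>
#include <stdio.h>

#define FL_BL_AST    0x2
#define FL_CANCELING 0x4

struct lru_lock {
        unsigned flags;
        struct lru_lock *prev, *next;
};

static void unlink_lock(struct lru_lock *lk)
{
        lk->prev->next = lk->next;
        lk->next->prev = lk->prev;
}

/* Returns the first cancel candidate, unlinking CANCELING entries on
 * the way, or NULL when the list is exhausted. */
static struct lru_lock *pick_victim(struct lru_lock *head)
{
        struct lru_lock *lk, *next;

        for (lk = head->next; lk != head; lk = next) {
                next = lk->next;
                /* Invariant from the other hunks: no BL_AST locks here. */
                assert(!(lk->flags & FL_BL_AST));
                if (!(lk->flags & FL_CANCELING))
                        return lk;      /* nobody is cancelling it yet */
                unlink_lock(lk);        /* do not traverse it again */
        }
        return NULL;
}

int main(void)
{
        struct lru_lock head = { 0, &head, &head };
        struct lru_lock a = { FL_CANCELING }, b = { 0 };

        /* head -> a (CANCELING) -> b */
        head.next = &a; a.prev = &head;
        a.next = &b;    b.prev = &a;
        b.next = &head; head.prev = &b;

        assert(pick_victim(&head) == &b);
        assert(head.next == &b);        /* a was dropped in passing */
        puts("CANCELING lock dropped from the unused list");
        return 0;
}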
---
 lustre/ldlm/ldlm_lock.c    |  3 ++-
 lustre/ldlm/ldlm_lockd.c   |  6 ++++++
 lustre/ldlm/ldlm_request.c | 23 ++++++++++++++---------
 3 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 6f7f5cf814..98044fa40c 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -626,7 +626,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                         ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
                    !lock->l_readers && !lock->l_writers &&
-                   !(lock->l_flags & LDLM_FL_NO_LRU)) {
+                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
+                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                 /* If this is a client-side namespace and this was the last
                  * reference, put it on the LRU. */
                 ldlm_lock_add_to_lru(lock);
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 1b0437a48d..63d3172013 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1386,6 +1386,9 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         }
 
         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+                /* BL_AST locks are not needed in the LRU;
+                 * this keeps ldlm_cancel_lru() fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
         }
@@ -1686,6 +1689,9 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                         ldlm_callback_reply(req, -EINVAL);
                         RETURN(0);
                 }
+                /* BL_AST locks are not needed in the LRU;
+                 * this keeps ldlm_cancel_lru() fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_BL_AST;
         }
         unlock_res_and_lock(lock);
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index fae763233b..6c305f4c3e 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1314,7 +1314,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, int cancel_flags, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
+        struct ldlm_lock *lock, *next;
         int added = 0, unused;
         ENTRY;
 
@@ -1332,12 +1332,16 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 if (max && added >= max)
                         break;
 
-                list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
-                        /* Somebody is already doing CANCEL or there is a
-                         * blocking request will send cancel. */
-                        if (!(lock->l_flags & LDLM_FL_CANCELING) &&
-                            !(lock->l_flags & LDLM_FL_BL_AST)) 
+                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
+                        /* No lock that got a blocking AST may be here. */
+                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+                        /* Somebody is already doing CANCEL; this lock is no
+                         * longer needed in the LRU, do not traverse it again. */
+                        if (!(lock->l_flags & LDLM_FL_CANCELING))
                                 break;
+
+                        ldlm_lock_remove_from_lru_nolock(lock);
                 }
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
@@ -1364,12 +1368,12 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 lock_res_and_lock(lock);
                 /* Check flags again under the lock. */
                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST) ||
                     (ldlm_lock_remove_from_lru(lock) == 0)) {
                         /* other thread is removing lock from lru or
                          * somebody is already doing CANCEL or
                          * there is a blocking request which will send
-                         * cancel by itseft. */
+                         * cancel by itself, or the lock was matched and
+                         * is no longer unused. */
                         unlock_res_and_lock(lock);
                         LDLM_LOCK_PUT(lock);
                         spin_lock(&ns->ns_unused_lock);
@@ -1700,7 +1704,8 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns,
                 if (list_empty(&lock->l_lru) &&
                     !lock->l_readers && !lock->l_writers &&
                     !(lock->l_flags & LDLM_FL_LOCAL) &&
-                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+                    !(lock->l_flags & LDLM_FL_CBPENDING) &&
+                    !(lock->l_flags & LDLM_FL_BL_AST)) {
                         ldlm_lock_add_to_lru(lock);
                         lock->l_flags &= ~LDLM_FL_NO_LRU;
                         LDLM_DEBUG(lock, "join lock to lru");
-- 
GitLab