diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 6f7f5cf8144013ab1cb8cf2409648297a2969a9d..98044fa40c35cd63a91bc776bb8a0406b309bbc9 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -626,7 +626,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                         ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
                    !lock->l_readers && !lock->l_writers &&
-                   !(lock->l_flags & LDLM_FL_NO_LRU)) {
+                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
+                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                 /* If this is a client-side namespace and this was the last
                  * reference, put it on the LRU. */
                 ldlm_lock_add_to_lru(lock);
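
Note on the hunk above: a lock that already carries LDLM_FL_BL_AST is guaranteed a cancel through the blocking-AST path, so parking it in the LRU would only leave dead entries for ldlm_cancel_lru() to skip. A minimal user-space sketch of the admission check, with illustrative flag values and struct layout rather than Lustre's real ones:

#include <stdbool.h>
#include <stdint.h>

#define LDLM_FL_NO_LRU  0x1ULL   /* illustrative values, not Lustre's */
#define LDLM_FL_BL_AST  0x2ULL

struct demo_lock {
        uint64_t l_flags;
        int      l_readers;
        int      l_writers;
};

/* Mirrors the patched condition in ldlm_lock_decref_internal(): a lock
 * joins the LRU only when it is fully idle and no blocking AST has
 * already promised a cancel for it. */
static bool lock_may_join_lru(const struct demo_lock *lock)
{
        return lock->l_readers == 0 && lock->l_writers == 0 &&
               !(lock->l_flags & LDLM_FL_NO_LRU) &&
               !(lock->l_flags & LDLM_FL_BL_AST);
}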
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 1b0437a48d430abfe3a12b2f0a4429fa709a70a6..63d31720137729c2bb4812041b56bd0551f600e1 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1386,6 +1386,9 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         }
 
         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+                /* BL_AST locks are not needed in the LRU.
+                 * Let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
         }
@@ -1686,6 +1689,9 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                         ldlm_callback_reply(req, -EINVAL);
                         RETURN(0);
                 }
+                /* BL_AST locks are not needed in the LRU.
+                 * Let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_BL_AST;
         }
         unlock_res_and_lock(lock);
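
Both callback paths above now perform the same two-step transition: drop the lock from the LRU, then set LDLM_FL_BL_AST so cancellation is left to the blocking-AST machinery. A hedged sketch of that transition; demo_remove_from_lru() stands in for ldlm_lock_remove_from_lru(), and the types and flag values are illustrative:

#include <stddef.h>
#include <stdint.h>

#define LDLM_FL_CBPENDING 0x4ULL  /* illustrative values */
#define LDLM_FL_BL_AST    0x2ULL

struct demo_lock {
        uint64_t          l_flags;
        struct demo_lock *l_lru_prev;
        struct demo_lock *l_lru_next;
};

/* Unlink the lock from a doubly linked LRU if it is on one; a stand-in
 * for ldlm_lock_remove_from_lru(), which also takes ns_unused_lock. */
static void demo_remove_from_lru(struct demo_lock *lock)
{
        if (lock->l_lru_prev != NULL) {
                lock->l_lru_prev->l_lru_next = lock->l_lru_next;
                lock->l_lru_next->l_lru_prev = lock->l_lru_prev;
                lock->l_lru_prev = lock->l_lru_next = NULL;
        }
}

/* The shared transition: once a blocking AST is known, the lock must
 * not linger in the LRU, or ldlm_cancel_lru() wastes time skipping it. */
static void demo_mark_blocking_ast(struct demo_lock *lock)
{
        demo_remove_from_lru(lock);
        lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
}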
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index fae763233b2685a2e856b126ead76eb1312d3394..6c305f4c3e82709a2c49d6efd1fb553dee2d4314 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1314,7 +1314,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, int cancel_flags, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
+        struct ldlm_lock *lock, *next;
         int added = 0, unused;
         ENTRY;
 
@@ -1332,12 +1332,16 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 if (max && added >= max)
                         break;
 
-                list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
-                        /* Somebody is already doing CANCEL or there is a
-                         * blocking request will send cancel. */
-                        if (!(lock->l_flags & LDLM_FL_CANCELING) &&
-                            !(lock->l_flags & LDLM_FL_BL_AST)) 
+                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
+                        /* No lock with a blocking request may be in the LRU. */
+                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+                        /* Somebody is already doing CANCEL; this lock has no
+                         * further use in the LRU, do not traverse it again. */
+                        if (!(lock->l_flags & LDLM_FL_CANCELING))
                                 break;
+
+                        ldlm_lock_remove_from_lru_nolock(lock);
                 }
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
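
The scan above (together with the lock/next declaration change in the first hunk) moves to list_for_each_entry_safe() because the loop body now unlinks the entry being visited; the safe variant caches the next node before the body runs. A self-contained sketch of that pattern using kernel-style list macros (GCC typeof); the types and flag value are demo stand-ins:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Same shape as the kernel macro: 'n' holds the next entry, so 'pos'
 * may be unlinked inside the body without losing the iterator. */
#define list_for_each_entry_safe(pos, n, head, member)                  \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
             n = list_entry(pos->member.next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;
}

#define LDLM_FL_CANCELING 0x8UL   /* illustrative value */

struct demo_lock {
        unsigned long    l_flags;
        struct list_head l_lru;
};

/* Mirrors the patched loop: stop at the first cancellable lock, and
 * unlink locks another thread is already canceling so later scans
 * never revisit them. */
static struct demo_lock *find_cancel_candidate(struct list_head *lru)
{
        struct demo_lock *lock, *next;

        list_for_each_entry_safe(lock, next, lru, l_lru) {
                if (!(lock->l_flags & LDLM_FL_CANCELING))
                        return lock;
                list_del_init(&lock->l_lru);
        }
        return NULL;
}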
@@ -1364,12 +1368,12 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 lock_res_and_lock(lock);
                 /* Check flags again under the lock. */
                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST) ||
                     (ldlm_lock_remove_from_lru(lock) == 0)) {
                         /* other thread is removing lock from lru or
                          * somebody is already doing CANCEL or
                          * there is a blocking request which will send
-                         * cancel by itseft. */
+                         * cancel by itself, or the lock was matched
+                         * and is no longer unused. */
                         unlock_res_and_lock(lock);
                         LDLM_LOCK_PUT(lock);
                         spin_lock(&ns->ns_unused_lock);
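
The recheck above keeps the optimistic double-check used throughout the scan: flags are first tested during the LRU walk, then re-tested under the per-lock lock before the cancel is committed, since another thread may have started a CANCEL or re-matched the lock in between. A hedged sketch of the pattern with a pthread mutex standing in for lock_res_and_lock(); every name here is a demo stand-in:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define LDLM_FL_CANCELING 0x8ULL  /* illustrative value */

struct demo_lock {
        pthread_mutex_t l_mutex;   /* stands in for lock_res_and_lock() */
        uint64_t        l_flags;
        bool            l_on_lru;
};

/* Optimistic pass: the caller saw the lock looking cancellable without
 * the per-lock mutex. Re-check under the mutex and either claim the
 * cancel or back off, as ldlm_cancel_lru_local() does when
 * ldlm_lock_remove_from_lru() reports the lock already left the LRU. */
static bool demo_try_claim_cancel(struct demo_lock *lock)
{
        bool claimed = false;

        pthread_mutex_lock(&lock->l_mutex);
        if (!(lock->l_flags & LDLM_FL_CANCELING) && lock->l_on_lru) {
                lock->l_flags |= LDLM_FL_CANCELING;
                lock->l_on_lru = false;
                claimed = true;
        }
        pthread_mutex_unlock(&lock->l_mutex);

        return claimed;
}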
@@ -1700,7 +1704,8 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns,
                 if (list_empty(&lock->l_lru) &&
                     !lock->l_readers && !lock->l_writers &&
                     !(lock->l_flags & LDLM_FL_LOCAL) &&
-                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+                    !(lock->l_flags & LDLM_FL_CBPENDING) &&
+                    !(lock->l_flags & LDLM_FL_BL_AST)) {
                         ldlm_lock_add_to_lru(lock);
                         lock->l_flags &= ~LDLM_FL_NO_LRU;
                         LDLM_DEBUG(lock, "join lock to lru");
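
Taken together, the three files enforce a single invariant: a lock with LDLM_FL_BL_AST set is never on the namespace's unused list, which is what makes the new LASSERT() in ldlm_cancel_lru_local() safe. A minimal sketch of the two sides of that invariant; the helpers and the l_on_lru field are hypothetical, for illustration only:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define LDLM_FL_BL_AST 0x2ULL    /* illustrative value */

struct demo_lock {
        uint64_t l_flags;
        bool     l_on_lru;       /* hypothetical LRU-membership marker */
};

/* Admission side: every path that links a lock into the LRU (decref,
 * ldlm_cli_join_lru) refuses BL_AST locks first. */
static void demo_add_to_lru(struct demo_lock *lock)
{
        assert(!(lock->l_flags & LDLM_FL_BL_AST));
        lock->l_on_lru = true;
}

/* Eviction side: every path that sets BL_AST (both callback handlers)
 * pulls the lock off the LRU first, so the scanner's LASSERT() holds. */
static void demo_set_bl_ast(struct demo_lock *lock)
{
        lock->l_on_lru = false;
        lock->l_flags |= LDLM_FL_BL_AST;
}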