Commit 4fcbc3b4 authored by Vitaly Fertman

Branch HEAD

b=14661
i=green
i=shadow

-- remove a BL_AST lock from the lru as soon as we mark it as such;
-- do not place a BL_AST lock into the lru even if the lock is matched and put later;
   => no BL_AST locks in the lru at all;
-- CANCELING locks are not so numerous, but to avoid having another thread
   traverse them again, drop them from the unused list once we come across them.
parent c98f3957
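Condensed, the patch turns lru admission into a single predicate. The following is a minimal, self-contained C sketch, not the Lustre code: struct lock, the FL_* bits and may_add_to_lru() are simplified stand-ins for struct ldlm_lock, the LDLM_FL_* flags and the checks made in ldlm_lock_decref_internal() and ldlm_cli_join_lru().

#include <stdio.h>

/* simplified stand-ins for the LDLM_FL_* bits touched by this patch */
#define FL_NO_LRU    0x01   /* caller asked to keep the lock off the lru */
#define FL_BL_AST    0x02   /* a blocking AST arrived; cancel will follow */

struct lock {
        unsigned int flags;
        int readers, writers;
};

/* The admission rule this patch enforces in both ldlm_lock_decref_internal()
 * and ldlm_cli_join_lru(): an unused lock goes to the lru only if no
 * blocking AST was seen, so ldlm_cancel_lru() never meets BL_AST locks. */
static int may_add_to_lru(const struct lock *lk)
{
        return lk->readers == 0 && lk->writers == 0 &&
               !(lk->flags & FL_NO_LRU) &&
               !(lk->flags & FL_BL_AST);
}

int main(void)
{
        struct lock lk = { .flags = FL_BL_AST, .readers = 0, .writers = 0 };

        printf("BL_AST lock -> lru? %s\n", may_add_to_lru(&lk) ? "yes" : "no");
        lk.flags &= ~FL_BL_AST;
        printf("plain unused lock -> lru? %s\n", may_add_to_lru(&lk) ? "yes" : "no");
        return 0;
}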
@@ -626,7 +626,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                 ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
                    !lock->l_readers && !lock->l_writers &&
-                   !(lock->l_flags & LDLM_FL_NO_LRU)) {
+                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
+                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                 /* If this is a client-side namespace and this was the last
                  * reference, put it on the LRU. */
                 ldlm_lock_add_to_lru(lock);
...
@@ -1386,6 +1386,9 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         }

         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+                /* BL_AST locks are not needed in the lru.
+                 * Let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
         }
@@ -1686,6 +1689,9 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                         ldlm_callback_reply(req, -EINVAL);
                         RETURN(0);
                 }
+                /* BL_AST locks are not needed in the lru.
+                 * Let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_BL_AST;
         }
         unlock_res_and_lock(lock);
...
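Both callback paths above apply the same two-step ordering while the resource lock is held: take the lock off the lru first, then set the flag, so no lru walker ever observes a BL_AST lock. A hedged sketch of that ordering follows; note_blocking_ast() and the surrounding types are illustrative stand-ins, not the real API.

#include <stdio.h>

#define FL_CBPENDING 0x01   /* callback pending: lock must be cancelled */
#define FL_BL_AST    0x02   /* blocking AST received */

struct lock {
        unsigned int flags;
        int on_lru;          /* stand-in for a non-empty l_lru linkage */
};

/* Assumed to run with the resource lock held, as in the two hunks above:
 * unlinking from the lru before setting FL_BL_AST guarantees that any
 * concurrent lru scan never encounters a BL_AST lock. */
static void note_blocking_ast(struct lock *lk)
{
        lk->on_lru = 0;                        /* ldlm_lock_remove_from_lru() */
        lk->flags |= FL_CBPENDING | FL_BL_AST;
}

int main(void)
{
        struct lock lk = { .flags = 0, .on_lru = 1 };

        note_blocking_ast(&lk);
        printf("on lru: %d, flags: 0x%x\n", lk.on_lru, lk.flags);
        return 0;
}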
@@ -1314,7 +1314,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, int cancel_flags, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
+        struct ldlm_lock *lock, *next;
         int added = 0, unused;
         ENTRY;
@@ -1332,12 +1332,16 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 if (max && added >= max)
                         break;

-                list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
-                        /* Somebody is already doing CANCEL or there is a
-                         * blocking request which will send the cancel. */
-                        if (!(lock->l_flags & LDLM_FL_CANCELING) &&
-                            !(lock->l_flags & LDLM_FL_BL_AST))
+                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
+                        /* No locks which got blocking requests. */
+                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+                        /* Somebody is already doing CANCEL. No need to keep
+                         * this lock in the lru, do not traverse it again. */
+                        if (!(lock->l_flags & LDLM_FL_CANCELING))
                                 break;
+
+                        ldlm_lock_remove_from_lru_nolock(lock);
                 }
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
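The switch from list_for_each_entry() to list_for_each_entry_safe() is what makes the in-loop removal legal: the iterator caches the next pointer before the current entry can be unlinked. Below is a self-contained sketch of the same pattern on a bare circular list; unlink_node() and first_victim() are illustrative names, not LDLM functions.

#include <stdio.h>

struct node {
        struct node *prev, *next;
        int canceling;               /* stand-in for LDLM_FL_CANCELING */
};

static void unlink_node(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;       /* mark as off-list */
}

/* Walk a circular list anchored at a sentinel head, dropping CANCELING
 * entries as they are passed so no later scan pays for them again, and
 * stop at the first entry that is still a cancellation candidate.  The
 * next pointer is saved *before* the current node may be unlinked --
 * the property list_for_each_entry_safe() provides. */
static struct node *first_victim(struct node *head)
{
        struct node *n = head->next;

        while (n != head) {
                struct node *next = n->next;   /* save before unlink */

                if (!n->canceling)
                        return n;              /* victim found */
                unlink_node(n);                /* do not traverse it again */
                n = next;
        }
        return NULL;                           /* lru exhausted */
}

int main(void)
{
        struct node head, a = { .canceling = 1 }, b = { .canceling = 0 };

        /* head <-> a <-> b, circular */
        head.next = &a; a.prev = &head;
        a.next = &b;    b.prev = &a;
        b.next = &head; head.prev = &b;

        printf("victim is b? %s\n", first_victim(&head) == &b ? "yes" : "no");
        printf("a unlinked?  %s\n", a.next == &a ? "yes" : "no");
        return 0;
}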
@@ -1364,12 +1368,12 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 lock_res_and_lock(lock);
                 /* Check flags again under the lock. */
                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST) ||
                     (ldlm_lock_remove_from_lru(lock) == 0)) {
                         /* other thread is removing lock from lru or
                          * somebody is already doing CANCEL or
                          * there is a blocking request which will send
-                         * cancel by itself. */
+                         * cancel by itself or the lock is matched
+                         * and is already not unused. */
                         unlock_res_and_lock(lock);
                         LDLM_LOCK_PUT(lock);
                         spin_lock(&ns->ns_unused_lock);
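The hunk above is the re-validation half of a check/lock/re-check pattern: candidates are picked off the lru under only the list spinlock, so their flags must be checked again once lock_res_and_lock() is taken; with this patch the FL_BL_AST re-check can be dropped, since such locks are never on the lru at all. A hedged pthread sketch of the pattern's shape, with illustrative names standing in for the LDLM originals:

#include <pthread.h>
#include <stdio.h>

#define FL_CANCELING 0x01

struct lock {
        pthread_mutex_t res_lock;    /* stand-in for lock_res_and_lock() */
        unsigned int flags;
        int on_lru;
};

/* Optimistically picked off the lru without the resource lock, a candidate
 * may have been claimed by another thread in the meantime: re-read the
 * flags and the lru linkage under the lock before committing to cancel. */
static int try_claim_for_cancel(struct lock *lk)
{
        int claimed = 0;

        pthread_mutex_lock(&lk->res_lock);
        if (!(lk->flags & FL_CANCELING) && lk->on_lru) {
                lk->flags |= FL_CANCELING;   /* we will send the cancel */
                lk->on_lru = 0;              /* remove from the lru */
                claimed = 1;
        }
        pthread_mutex_unlock(&lk->res_lock);
        return claimed;                      /* 0: somebody beat us to it */
}

int main(void)
{
        struct lock lk = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

        printf("first claim:  %d\n", try_claim_for_cancel(&lk));
        printf("second claim: %d\n", try_claim_for_cancel(&lk));
        return 0;
}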
@@ -1700,7 +1704,8 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns,
                 if (list_empty(&lock->l_lru) &&
                     !lock->l_readers && !lock->l_writers &&
                     !(lock->l_flags & LDLM_FL_LOCAL) &&
-                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+                    !(lock->l_flags & LDLM_FL_CBPENDING) &&
+                    !(lock->l_flags & LDLM_FL_BL_AST)) {
                         ldlm_lock_add_to_lru(lock);
                         lock->l_flags &= ~LDLM_FL_NO_LRU;
                         LDLM_DEBUG(lock, "join lock to lru");
...