diff --git a/lustre/ChangeLog b/lustre/ChangeLog index 865ce021b52b59e7e3318fee507874643fcf19c8..7201156ccf088947436f7973672a870aae4d7029 100644 --- a/lustre/ChangeLog +++ b/lustre/ChangeLog @@ -97,6 +97,13 @@ Details : In reconstruct_* functions, LASSERTs on both the data supplied by a client, and the data on disk are dangerous and incorrect. Change them with client eviction. +Severity : normal +Bugzilla : 15346 +Description: skiplist implementation simplification +Details : skiplists are used to group compatible locks on the granted list. + That was previously implemented by tracking the first and last lock of + each lock group; the patch changes that to use doubly linked lists. + ------------------------------------------------------------------------------- diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h index ea0292c1945d5f3821d0245d48f5def6fc7c829e..a1865c31145f84555cbf565dfc999cf145b9b35d 100644 --- a/lustre/include/lustre_dlm.h +++ b/lustre/include/lustre_dlm.h @@ -152,12 +152,6 @@ typedef enum { #define LDLM_CB_BLOCKING 1 #define LDLM_CB_CANCELING 2 -/* position flag of skip list pointers */ -#define LDLM_SL_HEAD(skip_list) ((skip_list)->next != NULL) -#define LDLM_SL_TAIL(skip_list) ((skip_list)->prev != NULL) -#define LDLM_SL_EMPTY(skip_list) ((skip_list)->next == NULL && \ - (skip_list)->prev == NULL) - /* compatibility matrix */ #define LCK_COMPAT_EX LCK_NL #define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR) @@ -414,11 +408,8 @@ struct ldlm_lock { /* protected by ns_hash_lock. 
FIXME */ struct list_head l_lru; - /* protected by lr_lock */ - struct list_head l_res_link; // position in one of three res lists - - struct list_head l_sl_mode; // skip pointer for request mode - struct list_head l_sl_policy; // skip pointer for inodebits + /* protected by lr_lock, linkage to resource's lock queues */ + struct list_head l_res_link; struct ldlm_interval *l_tree_node; /* tree node for ldlm_extent */ @@ -477,6 +468,10 @@ struct ldlm_lock { struct list_head l_cp_ast; struct ldlm_lock *l_blocking_lock; int l_bl_ast_run; + + /* protected by lr_lock, linkages to "skip lists" */ + struct list_head l_sl_mode; + struct list_head l_sl_policy; }; struct ldlm_resource { diff --git a/lustre/ldlm/ldlm_inodebits.c b/lustre/ldlm/ldlm_inodebits.c index c378c28ff5b295744c87f2063869425d611e5dcb..67d72aede942538ce248fda94d3f7541eb79d83b 100644 --- a/lustre/ldlm/ldlm_inodebits.c +++ b/lustre/ldlm/ldlm_inodebits.c @@ -37,7 +37,7 @@ static int ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req, struct list_head *work_list) { - struct list_head *tmp, *tmp_tail; + struct list_head *tmp; struct ldlm_lock *lock; ldlm_mode_t req_mode = req->l_req_mode; __u64 req_bits = req->l_policy_data.l_inodebits.bits; @@ -47,28 +47,36 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req, LASSERT(req_bits); /* There is no sense in lock with no bits set, I think. 
Also such a lock would be compatible with any other bit lock */ + list_for_each(tmp, queue) { + struct list_head *mode_tail; + lock = list_entry(tmp, struct ldlm_lock, l_res_link); if (req == lock) RETURN(compat); + /* last lock in mode group */ + LASSERT(lock->l_sl_mode.prev != NULL); + mode_tail = &list_entry(lock->l_sl_mode.prev, + struct ldlm_lock, + l_sl_mode)->l_res_link; + /* locks are compatible, bits don't matter */ if (lockmode_compat(lock->l_req_mode, req_mode)) { - /* jump to next mode group */ - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - tmp = &list_entry(lock->l_sl_mode.next, - struct ldlm_lock, - l_sl_mode)->l_res_link; + /* jump to last lock in mode group */ + tmp = mode_tail; continue; } - tmp_tail = tmp; - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - tmp_tail = &list_entry(lock->l_sl_mode.next, - struct ldlm_lock, - l_sl_mode)->l_res_link; for (;;) { + struct list_head *head; + + /* last lock in policy group */ + tmp = &list_entry(lock->l_sl_policy.prev, + struct ldlm_lock, + l_sl_policy)->l_res_link; + /* locks with bits overlapped are conflicting locks */ if (lock->l_policy_data.l_inodebits.bits & req_bits) { /* conflicting policy */ @@ -76,36 +84,26 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req, RETURN(0); compat = 0; + + /* add locks of the policy group to + * @work_list as blocking locks for + * @req */ if (lock->l_blocking_ast) - ldlm_add_ast_work_item(lock, req, + ldlm_add_ast_work_item(lock, req, work_list); - /* add all members of the policy group */ - if (LDLM_SL_HEAD(&lock->l_sl_policy)) { - do { - tmp = lock->l_res_link.next; - lock = list_entry(tmp, - struct ldlm_lock, - l_res_link); - if (lock->l_blocking_ast) - ldlm_add_ast_work_item( - lock, - req, - work_list); - } while (!LDLM_SL_TAIL(&lock->l_sl_policy)); - } - } else if (LDLM_SL_HEAD(&lock->l_sl_policy)) { - /* jump to next policy group */ - tmp = &list_entry(lock->l_sl_policy.next, - struct ldlm_lock, - l_sl_policy)->l_res_link; + head = &lock->l_sl_policy; 
+ list_for_each_entry(lock, head, l_sl_policy) + if (lock->l_blocking_ast) + ldlm_add_ast_work_item(lock, req, + work_list); } - if (tmp == tmp_tail) + if (tmp == mode_tail) break; - else - tmp = tmp->next; + + tmp = tmp->next; lock = list_entry(tmp, struct ldlm_lock, l_res_link); - } /* for locks in a mode group */ - } /* for each lock in the queue */ + } /* loop over policy groups within one mode group */ + } /* loop over mode groups within @queue */ RETURN(compat); } diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index f76d5479fa3e67cf8c066de4536e831edb013a6e..64efe3f7adeaf8cb242ab8d79bc090756ec6529a 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -39,13 +39,6 @@ //struct lustre_lock ldlm_everything_lock; -/* lock's skip list pointers fix mode */ -#define LDLM_JOIN_NONE 0 -#define LDLM_MODE_JOIN_RIGHT 1 -#define LDLM_MODE_JOIN_LEFT (1 << 1) -#define LDLM_POLICY_JOIN_RIGHT (1 << 2) -#define LDLM_POLICY_JOIN_LEFT (1 << 3) - /* lock types */ char *ldlm_lockname[] = { [0] "--", @@ -340,10 +333,8 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) CFS_INIT_LIST_HEAD(&lock->l_cp_ast); cfs_waitq_init(&lock->l_waitq); lock->l_blocking_lock = NULL; - lock->l_sl_mode.prev = NULL; - lock->l_sl_mode.next = NULL; - lock->l_sl_policy.prev = NULL; - lock->l_sl_policy.next = NULL; + CFS_INIT_LIST_HEAD(&lock->l_sl_mode); + CFS_INIT_LIST_HEAD(&lock->l_sl_policy); atomic_inc(&resource->lr_namespace->ns_locks); CFS_INIT_LIST_HEAD(&lock->l_handle.h_link); @@ -681,6 +672,12 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) LDLM_LOCK_PUT(lock); } +struct sl_insert_point { + struct list_head *res_link; + struct list_head *mode_link; + struct list_head *policy_link; +}; + /* * search_granted_lock * @@ -689,109 +686,98 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) * Parameters: * queue [input]: the granted list where search acts on; * req [input]: the lock whose position 
to be located; - * lockp [output]: the position where the lock should be inserted before, or - * NULL indicating @req should be appended to @queue. - * Return Values: - * Bit-masks combination of following values indicating in which way the - * lock need to be inserted. - * - LDLM_JOIN_NONE: noting about skip list needs to be fixed; - * - LDLM_MODE_JOIN_RIGHT: @req needs join right becoming the head of a - * mode group; - * - LDLM_POLICY_JOIN_RIGHT: @req needs join right becoming the head of - * a policy group. + * prev [output]: positions within 3 lists to insert @req to + * Return Value: + * filled @prev * NOTE: called by * - ldlm_grant_lock_with_skiplist */ -static int search_granted_lock(struct list_head *queue, - struct ldlm_lock *req, - struct ldlm_lock **lockp) +static void search_granted_lock(struct list_head *queue, + struct ldlm_lock *req, + struct sl_insert_point *prev) { - struct list_head *tmp, *tmp_tail; - struct ldlm_lock *lock, *mode_head_lock; - int rc = LDLM_JOIN_NONE; + struct list_head *tmp; + struct ldlm_lock *lock, *mode_end, *policy_end; ENTRY; list_for_each(tmp, queue) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); + mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock, + l_sl_mode); + if (lock->l_req_mode != req->l_req_mode) { - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - tmp = &list_entry(lock->l_sl_mode.next, - struct ldlm_lock, - l_sl_mode)->l_res_link; + /* jump to last lock of mode group */ + tmp = &mode_end->l_res_link; continue; } - - /* found the same mode group */ + + /* suitable mode group is found */ if (lock->l_resource->lr_type == LDLM_PLAIN) { - *lockp = lock; - rc = LDLM_MODE_JOIN_RIGHT; - GOTO(out, rc); + /* insert point is last lock of the mode group */ + prev->res_link = &mode_end->l_res_link; + prev->mode_link = &mode_end->l_sl_mode; + prev->policy_link = &req->l_sl_policy; + EXIT; + return; } else if (lock->l_resource->lr_type == LDLM_IBITS) { - tmp_tail = tmp; - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - tmp_tail 
= &list_entry(lock->l_sl_mode.next, - struct ldlm_lock, - l_sl_mode)->l_res_link; - mode_head_lock = lock; for (;;) { + policy_end = list_entry(lock->l_sl_policy.prev, + struct ldlm_lock, + l_sl_policy); + if (lock->l_policy_data.l_inodebits.bits == req->l_policy_data.l_inodebits.bits) { - /* matched policy lock is found */ - *lockp = lock; - rc |= LDLM_POLICY_JOIN_RIGHT; - - /* if the policy group head is also a - * mode group head or a single mode - * group lock */ - if (LDLM_SL_HEAD(&lock->l_sl_mode) || - (tmp == tmp_tail && - LDLM_SL_EMPTY(&lock->l_sl_mode))) - rc |= LDLM_MODE_JOIN_RIGHT; - GOTO(out, rc); + /* insert point is last lock of + * the policy group */ + prev->res_link = + &policy_end->l_res_link; + prev->mode_link = + &policy_end->l_sl_mode; + prev->policy_link = + &policy_end->l_sl_policy; + EXIT; + return; } - if (LDLM_SL_HEAD(&lock->l_sl_policy)) - tmp = &list_entry(lock->l_sl_policy.next, - struct ldlm_lock, - l_sl_policy)->l_res_link; - - if (tmp == tmp_tail) + if (policy_end == mode_end) + /* done with mode group */ break; - else - tmp = tmp->next; + + /* jump to next policy group within the mode group */ + tmp = policy_end->l_res_link.next; lock = list_entry(tmp, struct ldlm_lock, l_res_link); - } /* for all locks in the matched mode group */ - - /* no matched policy group is found, insert before - * the mode group head lock */ - *lockp = mode_head_lock; - rc = LDLM_MODE_JOIN_RIGHT; - GOTO(out, rc); + } /* loop over policy groups within the mode group */ + + /* insert point is last lock of the mode group, + * new policy group is started */ + prev->res_link = &mode_end->l_res_link; + prev->mode_link = &mode_end->l_sl_mode; + prev->policy_link = &req->l_sl_policy; + EXIT; + return; } else { LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock"); LBUG(); } } - /* no matched mode group is found, append to the end */ - *lockp = NULL; - rc = LDLM_JOIN_NONE; + /* insert point is last lock on the queue, + * new mode group and new policy group are 
started */ + prev->res_link = queue->prev; + prev->mode_link = &req->l_sl_mode; + prev->policy_link = &req->l_sl_policy; EXIT; -out: - return rc; + return; } static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, - struct ldlm_lock *lockp, - int join) + struct sl_insert_point *prev) { struct ldlm_resource *res = lock->l_resource; ENTRY; - LASSERT(lockp || join == LDLM_JOIN_NONE); - check_res_locked(res); ldlm_resource_dump(D_OTHER, res); @@ -804,72 +790,25 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, } LASSERT(list_empty(&lock->l_res_link)); + LASSERT(list_empty(&lock->l_sl_mode)); + LASSERT(list_empty(&lock->l_sl_policy)); - if (!lockp) - list_add_tail(&lock->l_res_link, &lock->l_resource->lr_granted); - else if ((join & LDLM_MODE_JOIN_LEFT) || (join & LDLM_POLICY_JOIN_LEFT)) - list_add(&lock->l_res_link, &lockp->l_res_link); - else - list_add_tail(&lock->l_res_link, &lockp->l_res_link); - - /* fix skip lists */ - if (join & LDLM_MODE_JOIN_RIGHT) { - LASSERT(! LDLM_SL_TAIL(&lockp->l_sl_mode)); - if (LDLM_SL_EMPTY(&lockp->l_sl_mode)) { - lock->l_sl_mode.next = &lockp->l_sl_mode; - lockp->l_sl_mode.prev = &lock->l_sl_mode; - } else if (LDLM_SL_HEAD(&lockp->l_sl_mode)) { - lock->l_sl_mode.next = lockp->l_sl_mode.next; - lockp->l_sl_mode.next = NULL; - lock->l_sl_mode.next->prev = &lock->l_sl_mode; - } - } else if (join & LDLM_MODE_JOIN_LEFT) { - LASSERT(! LDLM_SL_HEAD(&lockp->l_sl_mode)); - if (LDLM_SL_EMPTY(&lockp->l_sl_mode)) { - lock->l_sl_mode.prev = &lockp->l_sl_mode; - lockp->l_sl_mode.next = &lock->l_sl_mode; - } else if (LDLM_SL_TAIL(&lockp->l_sl_mode)) { - lock->l_sl_mode.prev = lockp->l_sl_mode.prev; - lockp->l_sl_mode.prev = NULL; - lock->l_sl_mode.prev->next = &lock->l_sl_mode; - } - } - - if (join & LDLM_POLICY_JOIN_RIGHT) { - LASSERT(! 
LDLM_SL_TAIL(&lockp->l_sl_policy)); - if (LDLM_SL_EMPTY(&lockp->l_sl_policy)) { - lock->l_sl_policy.next = &lockp->l_sl_policy; - lockp->l_sl_policy.prev = &lock->l_sl_policy; - } else if (LDLM_SL_HEAD(&lockp->l_sl_policy)) { - lock->l_sl_policy.next = lockp->l_sl_policy.next; - lockp->l_sl_policy.next = NULL; - lock->l_sl_policy.next->prev = &lock->l_sl_policy; - } - } else if (join & LDLM_POLICY_JOIN_LEFT) { - LASSERT(! LDLM_SL_HEAD(&lockp->l_sl_policy)); - if (LDLM_SL_EMPTY(&lockp->l_sl_policy)) { - lock->l_sl_policy.prev = &lockp->l_sl_policy; - lockp->l_sl_policy.next = &lock->l_sl_policy; - } else if (LDLM_SL_TAIL(&lockp->l_sl_policy)) { - lock->l_sl_policy.prev = lockp->l_sl_policy.prev; - lockp->l_sl_policy.prev = NULL; - lock->l_sl_policy.prev->next = &lock->l_sl_policy; - } - } + list_add(&lock->l_res_link, prev->res_link); + list_add(&lock->l_sl_mode, prev->mode_link); + list_add(&lock->l_sl_policy, prev->policy_link); EXIT; } static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) { - int join = LDLM_JOIN_NONE; - struct ldlm_lock *lockp = NULL; + struct sl_insert_point prev; ENTRY; LASSERT(lock->l_req_mode == lock->l_granted_mode); - join = search_granted_lock(&lock->l_resource->lr_granted, lock, &lockp); - ldlm_granted_list_add_lock(lock, lockp, join); + search_granted_lock(&lock->l_resource->lr_granted, lock, &prev); + ldlm_granted_list_add_lock(lock, &prev); EXIT; } @@ -921,7 +860,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue, lock = list_entry(tmp, struct ldlm_lock, l_res_link); if (lock == old_lock) - continue; + break; /* llite sometimes wants to match locks that will be * canceled when their users drop, but we allow it to match @@ -1553,55 +1492,12 @@ void ldlm_cancel_callback(struct ldlm_lock *lock) void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) { - struct ldlm_lock *lock; - if (req->l_resource->lr_type != LDLM_PLAIN && req->l_resource->lr_type != LDLM_IBITS) return; - - if (LDLM_SL_HEAD(&req->l_sl_mode)) { 
- lock = list_entry(req->l_res_link.next, struct ldlm_lock, - l_res_link); - if (req->l_sl_mode.next == &lock->l_sl_mode) { - lock->l_sl_mode.prev = NULL; - } else { - lock->l_sl_mode.next = req->l_sl_mode.next; - lock->l_sl_mode.next->prev = &lock->l_sl_mode; - } - req->l_sl_mode.next = NULL; - } else if (LDLM_SL_TAIL(&req->l_sl_mode)) { - lock = list_entry(req->l_res_link.prev, struct ldlm_lock, - l_res_link); - if (req->l_sl_mode.prev == &lock->l_sl_mode) { - lock->l_sl_mode.next = NULL; - } else { - lock->l_sl_mode.prev = req->l_sl_mode.prev; - lock->l_sl_mode.prev->next = &lock->l_sl_mode; - } - req->l_sl_mode.prev = NULL; - } - if (LDLM_SL_HEAD(&req->l_sl_policy)) { - lock = list_entry(req->l_res_link.next, struct ldlm_lock, - l_res_link); - if (req->l_sl_policy.next == &lock->l_sl_policy) { - lock->l_sl_policy.prev = NULL; - } else { - lock->l_sl_policy.next = req->l_sl_policy.next; - lock->l_sl_policy.next->prev = &lock->l_sl_policy; - } - req->l_sl_policy.next = NULL; - } else if (LDLM_SL_TAIL(&req->l_sl_policy)) { - lock = list_entry(req->l_res_link.prev, struct ldlm_lock, - l_res_link); - if (req->l_sl_policy.prev == &lock->l_sl_policy) { - lock->l_sl_policy.next = NULL; - } else { - lock->l_sl_policy.prev = req->l_sl_policy.prev; - lock->l_sl_policy.prev->next = &lock->l_sl_policy; - } - req->l_sl_policy.prev = NULL; - } + list_del_init(&req->l_sl_policy); + list_del_init(&req->l_sl_mode); } void ldlm_lock_cancel(struct ldlm_lock *lock) @@ -1691,8 +1587,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, struct ldlm_namespace *ns; int granted = 0; int old_mode, rc; - struct ldlm_lock *mark_lock = NULL; - int join= LDLM_JOIN_NONE; + struct sl_insert_point prev; ldlm_error_t err; struct ldlm_interval *node; ENTRY; @@ -1722,27 +1617,10 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, /* remember the lock position where the lock might be * added back to the granted list later and also * remember 
the join mode for skiplist fixing. */ - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - join = LDLM_MODE_JOIN_RIGHT; - else if (LDLM_SL_TAIL(&lock->l_sl_mode)) - join = LDLM_MODE_JOIN_LEFT; - if (LDLM_SL_HEAD(&lock->l_sl_policy)) - join |= LDLM_POLICY_JOIN_RIGHT; - else if (LDLM_SL_TAIL(&lock->l_sl_policy)) - join |= LDLM_POLICY_JOIN_LEFT; - - LASSERT(!((join & LDLM_MODE_JOIN_RIGHT) && - (join & LDLM_POLICY_JOIN_LEFT))); - LASSERT(!((join & LDLM_MODE_JOIN_LEFT) && - (join & LDLM_POLICY_JOIN_RIGHT))); - - if ((join & LDLM_MODE_JOIN_LEFT) || - (join & LDLM_POLICY_JOIN_LEFT)) - mark_lock = list_entry(lock->l_res_link.prev, - struct ldlm_lock, l_res_link); - else if (lock->l_res_link.next != &res->lr_granted) - mark_lock = list_entry(lock->l_res_link.next, - struct ldlm_lock, l_res_link); + prev.res_link = lock->l_res_link.prev; + prev.mode_link = lock->l_sl_mode.prev; + prev.policy_link = lock->l_sl_policy.prev; + ldlm_resource_unlink_lock(lock); } else { ldlm_resource_unlink_lock(lock); if (res->lr_type == LDLM_EXTENT) { @@ -1782,8 +1660,8 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, if (res->lr_type == LDLM_EXTENT) ldlm_extent_add_lock(res, lock); else - ldlm_granted_list_add_lock(lock, mark_lock, - join); + ldlm_granted_list_add_lock(lock, &prev); + res = NULL; } else { *flags |= LDLM_FL_BLOCK_GRANTED; diff --git a/lustre/ldlm/ldlm_plain.c b/lustre/ldlm/ldlm_plain.c index 71778cd072c031b1d4088e327a02f7994c43616d..68b5bf338e97fa3b7e983528e639a24593e0e806 100644 --- a/lustre/ldlm/ldlm_plain.c +++ b/lustre/ldlm/ldlm_plain.c @@ -54,31 +54,32 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req, if (req == lock) RETURN(compat); - if (lockmode_compat(lock->l_req_mode, req_mode)) { - /* jump to next mode group */ - if (LDLM_SL_HEAD(&lock->l_sl_mode)) - tmp = &list_entry(lock->l_sl_mode.next, - struct ldlm_lock, - l_sl_mode)->l_res_link; + /* last lock in mode group */ + tmp = &list_entry(lock->l_sl_mode.prev, + struct 
ldlm_lock, + l_sl_mode)->l_res_link; + + if (lockmode_compat(lock->l_req_mode, req_mode)) continue; - } if (!work_list) RETURN(0); compat = 0; + + /* add locks of the mode group to @work_list as + * blocking locks for @req */ if (lock->l_blocking_ast) ldlm_add_ast_work_item(lock, req, work_list); - if (LDLM_SL_HEAD(&lock->l_sl_mode)) { - /* add all members of the mode group */ - do { - tmp = lock->l_res_link.next; - lock = list_entry(tmp, struct ldlm_lock, - l_res_link); + + { + struct list_head *head; + + head = &lock->l_sl_mode; + list_for_each_entry(lock, head, l_sl_mode) if (lock->l_blocking_ast) - ldlm_add_ast_work_item( - lock, req, work_list); - } while (!LDLM_SL_TAIL(&lock->l_sl_mode)); + ldlm_add_ast_work_item(lock, req, + work_list); } } diff --git a/lustre/mdc/mdc_locks.c b/lustre/mdc/mdc_locks.c index 91d923d9e8acbcdbd88a23ceec9ddf5839a11011..d06c73913448aa5aa49ddd2bf108aeeb91982300 100644 --- a/lustre/mdc/mdc_locks.c +++ b/lustre/mdc/mdc_locks.c @@ -766,6 +766,7 @@ int mdc_intent_lock(struct obd_export *exp, struct mdc_op_data *op_data, op_data->namelen, op_data->name, op_data->fid1.id, ldlm_it2str(it->it_op), it->it_flags); + lockh.cookie = 0; if (op_data->fid2.id && (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR)) { rc = mdc_revalidate_lock(exp, it, &op_data->fid2);