From a56ee6e3b000875ae04bc867acc0cc077b17a3d9 Mon Sep 17 00:00:00 2001
From: yury <yury>
Date: Fri, 14 Sep 2007 16:14:11 +0000
Subject: [PATCH] b=13595,13608 r=nathan,adilger,shadow,green

- separate client and server namespaces: each side now has its own list and
  its own lock;
- split the pool shrinker into separate client and server shrinkers, each
  working on its own list. This is needed to avoid mixing up server and
  client pool cached resources, which behave too differently. Client locks
  may be canceled synchronously, so we can report to the VM the number of
  resources still cached. Server resources (locks) are not removed
  synchronously; we only change the SLV and expect clients to cancel
  something, so we report 0 canceled locks to the VM;
- in ldlm_pools_shrink(), use down_trylock() to avoid taking the namespace
  semaphore when it is already held. This fixes the hang in test 116 under
  memory pressure, which was caused by a deadlock between the shrinker and
  the pool thread when client and server run on the same host;
- move the LRU-add code into a separate function;
- update l_last_used and move the lock to the tail of the LRU in the
  FL_TEST_LOCK case, so that the lock still stays in the LRU for some time
  afterwards: if a lookup matched the lock even with FL_TEST_LOCK, its
  resource may be needed again soon, and it is better to keep the lock
  cached.
---
Note: short illustrative C sketches of the per-side selection helpers, the
trylock-based shrinker back-off, and the LRU "touch" are appended after the
patch.

 lustre/include/lustre_dlm.h |  5 +--
 lustre/ldlm/ldlm_internal.h | 29 +++++++++++++
 lustre/ldlm/ldlm_lock.c     | 66 +++++++++++++++++++++--------
 lustre/ldlm/ldlm_lockd.c    | 14 ++++---
 lustre/ldlm/ldlm_pool.c     | 83 ++++++++++++++++++++++---------------
 lustre/ldlm/ldlm_request.c  |  7 +---
 lustre/ldlm/ldlm_resource.c | 42 ++++++++++---------
 7 files changed, 161 insertions(+), 85 deletions(-)

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 40c03e092b..a7ccb9903e 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -672,7 +672,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                             struct ldlm_lock *lock);
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(int level);
+void ldlm_dump_all_namespaces(int level, ldlm_side_t client);
 void ldlm_namespace_dump(int level, struct ldlm_namespace *);
 void ldlm_resource_dump(int level, struct ldlm_resource *);
 int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
@@ -758,10 +758,9 @@ void unlock_res_and_lock(struct ldlm_lock *lock);
 
 /* ldlm_pool.c */
 int ldlm_pools_init(ldlm_side_t client);
+void ldlm_pools_recalc(ldlm_side_t client);
 void ldlm_pools_fini(void);
 void ldlm_pools_wakeup(void);
-void ldlm_pools_recalc(void);
-int ldlm_pools_shrink(int nr, unsigned int gfp_mask);
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                    int idx, ldlm_side_t client);
 
diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index 936170fcf9..440a21e857 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -4,6 +4,31 @@
 
 #define MAX_STRING_SIZE 128
 
+extern atomic_t ldlm_srv_namespace_nr;
+extern atomic_t ldlm_cli_namespace_nr;
+extern struct semaphore ldlm_srv_namespace_lock;
+extern struct list_head ldlm_srv_namespace_list;
+extern struct semaphore ldlm_cli_namespace_lock;
+extern struct list_head ldlm_cli_namespace_list;
+
+static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+{
+        return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr; +} + +static inline struct list_head *ldlm_namespace_list(ldlm_side_t client) +{ + return client == LDLM_NAMESPACE_SERVER ? + &ldlm_srv_namespace_list : &ldlm_cli_namespace_list; +} + +static inline struct semaphore *ldlm_namespace_lock(ldlm_side_t client) +{ + return client == LDLM_NAMESPACE_SERVER ? + &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; +} + /* ldlm_request.c */ typedef enum { LDLM_ASYNC, @@ -54,7 +79,11 @@ int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, struct list_head *work_list); int ldlm_run_bl_ast_work(struct list_head *rpc_list); int ldlm_run_cp_ast_work(struct list_head *rpc_list); +int ldlm_lock_remove_from_lru(struct ldlm_lock *lock); int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock); +void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock); +void ldlm_lock_add_to_lru(struct ldlm_lock *lock); +void ldlm_lock_touch_in_lru(struct ldlm_lock *lock); void ldlm_lock_destroy_nolock(struct ldlm_lock *lock); /* ldlm_lockd.c */ diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index cc3bbb5c98..4e62e68e9a 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -172,10 +172,11 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) { int rc = 0; if (!list_empty(&lock->l_lru)) { + struct ldlm_namespace *ns = lock->l_resource->lr_namespace; LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); list_del_init(&lock->l_lru); - lock->l_resource->lr_namespace->ns_nr_unused--; - LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0); + ns->ns_nr_unused--; + LASSERT(ns->ns_nr_unused >= 0); rc = 1; } return rc; @@ -183,15 +184,49 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) { + struct ldlm_namespace *ns = lock->l_resource->lr_namespace; int rc; ENTRY; - spin_lock(&lock->l_resource->lr_namespace->ns_unused_lock); + spin_lock(&ns->ns_unused_lock); rc = ldlm_lock_remove_from_lru_nolock(lock); - spin_unlock(&lock->l_resource->lr_namespace->ns_unused_lock); + spin_unlock(&ns->ns_unused_lock); EXIT; return rc; } +void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) +{ + struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + lock->l_last_used = cfs_time_current(); + LASSERT(list_empty(&lock->l_lru)); + list_add_tail(&lock->l_lru, &ns->ns_unused_list); + LASSERT(ns->ns_nr_unused >= 0); + ns->ns_nr_unused++; +} + +void ldlm_lock_add_to_lru(struct ldlm_lock *lock) +{ + struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + ENTRY; + spin_lock(&ns->ns_unused_lock); + ldlm_lock_add_to_lru_nolock(lock); + spin_unlock(&ns->ns_unused_lock); + EXIT; +} + +void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) +{ + struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + ENTRY; + spin_lock(&ns->ns_unused_lock); + if (!list_empty(&lock->l_lru)) { + ldlm_lock_remove_from_lru_nolock(lock); + ldlm_lock_add_to_lru_nolock(lock); + } + spin_unlock(&ns->ns_unused_lock); + EXIT; +} + /* This used to have a 'strict' flag, which recovery would use to mark an * in-use lock as needing-to-die. 
Lest I am ever tempted to put it back, I * shall explain why it's gone: with the new hash table scheme, once you call @@ -531,7 +566,6 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) lock->l_readers++; if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) lock->l_writers++; - lock->l_last_used = cfs_time_current(); LDLM_LOCK_GET(lock); LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]); } @@ -603,17 +637,13 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) !(lock->l_flags & LDLM_FL_NO_LRU)) { /* If this is a client-side namespace and this was the last * reference, put it on the LRU. */ - LASSERT(list_empty(&lock->l_lru)); - LASSERT(ns->ns_nr_unused >= 0); - lock->l_last_used = cfs_time_current(); - spin_lock(&ns->ns_unused_lock); - list_add_tail(&lock->l_lru, &ns->ns_unused_list); - ns->ns_nr_unused++; - spin_unlock(&ns->ns_unused_lock); + ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); - /* Call ldlm_cancel_lru() only if EARLY_CANCEL is not supported - * by the server, otherwise, it is done on enqueue. */ - if (!exp_connect_cancelset(lock->l_conn_export)) + /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE + * are not supported by the server, otherwise, it is done on + * enqueue. */ + if (!exp_connect_cancelset(lock->l_conn_export) && + !exp_connect_lru_resize(lock->l_conn_export)) ldlm_cancel_lru(ns, 0, LDLM_ASYNC); } else { unlock_res_and_lock(lock); @@ -926,10 +956,12 @@ static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode, !(lock->l_flags & LDLM_FL_LOCAL)) continue; - if (flags & LDLM_FL_TEST_LOCK) + if (flags & LDLM_FL_TEST_LOCK) { LDLM_LOCK_GET(lock); - else + ldlm_lock_touch_in_lru(lock); + } else { ldlm_lock_addref_internal_nolock(lock, mode); + } return lock; } diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c index 011cd703ca..ca5f7f8b0c 100644 --- a/lustre/ldlm/ldlm_lockd.c +++ b/lustre/ldlm/ldlm_lockd.c @@ -43,8 +43,6 @@ extern cfs_mem_cache_t *ldlm_resource_slab; extern cfs_mem_cache_t *ldlm_lock_slab; extern struct lustre_lock ldlm_handle_lock; -extern struct list_head ldlm_namespace_list; -extern struct semaphore ldlm_namespace_lock; static struct semaphore ldlm_ref_sem; static int ldlm_refcount; @@ -1334,7 +1332,8 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req, if (lock->l_granted_mode == LCK_PW && !lock->l_readers && !lock->l_writers && cfs_time_after(cfs_time_current(), - cfs_time_add(lock->l_last_used, cfs_time_seconds(10)))) { + cfs_time_add(lock->l_last_used, + cfs_time_seconds(10)))) { unlock_res_and_lock(lock); if (ldlm_bl_to_thread(ns, NULL, lock, 0)) ldlm_handle_bl_callback(ns, NULL, lock); @@ -1845,9 +1844,11 @@ static int ldlm_cleanup(ldlm_side_t client, int force) #endif ENTRY; - if (!list_empty(&ldlm_namespace_list)) { + if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) || + !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) { CERROR("ldlm still has namespaces; clean these up first.\n"); - ldlm_dump_all_namespaces(D_DLMTRACE); + ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE); + ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE); RETURN(-EBUSY); } @@ -1892,7 +1893,8 @@ static int ldlm_cleanup(ldlm_side_t client, int force) int __init ldlm_init(void) { init_mutex(&ldlm_ref_sem); - init_mutex(&ldlm_namespace_lock); + init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER)); + init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources", 
sizeof(struct ldlm_resource), 0, SLAB_HWCACHE_ALIGN); diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c index 4a53927dd4..35840bfeec 100644 --- a/lustre/ldlm/ldlm_pool.c +++ b/lustre/ldlm/ldlm_pool.c @@ -114,11 +114,6 @@ extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir; #endif -extern atomic_t ldlm_srv_namespace_nr; -extern atomic_t ldlm_cli_namespace_nr; -extern struct list_head ldlm_namespace_list; -extern struct semaphore ldlm_namespace_lock; - #define avg(src, add) \ ((src) = ((src) + (add)) / 2) @@ -707,7 +702,8 @@ static int ldlm_pool_granted(struct ldlm_pool *pl) } static struct ptlrpc_thread *ldlm_pools_thread; -static struct shrinker *ldlm_pools_shrinker; +static struct shrinker *ldlm_pools_srv_shrinker; +static struct shrinker *ldlm_pools_cli_shrinker; static struct completion ldlm_pools_comp; void ldlm_pools_wakeup(void) @@ -724,7 +720,8 @@ EXPORT_SYMBOL(ldlm_pools_wakeup); /* Cancel @nr locks from all namespaces (if possible). Returns number of * cached locks after shrink is finished. All namespaces are asked to * cancel approximately equal amount of locks. */ -int ldlm_pools_shrink(int nr, unsigned int gfp_mask) +static int ldlm_pools_shrink(ldlm_side_t client, int nr, + unsigned int gfp_mask) { struct ldlm_namespace *ns; int total = 0, cached = 0; @@ -732,45 +729,57 @@ int ldlm_pools_shrink(int nr, unsigned int gfp_mask) if (nr != 0 && !(gfp_mask & __GFP_FS)) return -1; - CDEBUG(D_DLMTRACE, "request to shrink %d locks from all pools\n", - nr); - mutex_down(&ldlm_namespace_lock); - list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) + CDEBUG(D_DLMTRACE, "request to shrink %d %s locks from all pools\n", + nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server"); + + if (down_trylock(ldlm_namespace_lock(client))) + return nr != 0 ? -1 : 0; + + list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) total += ldlm_pool_granted(&ns->ns_pool); if (nr == 0) { - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); return total; } /* Check all namespaces. */ - list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) { + list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) { struct ldlm_pool *pl = &ns->ns_pool; int cancel, nr_locks; nr_locks = ldlm_pool_granted(&ns->ns_pool); cancel = 1 + nr_locks * nr / total; - cancel = ldlm_pool_shrink(pl, cancel, gfp_mask); + ldlm_pool_shrink(pl, cancel, gfp_mask); cached += ldlm_pool_granted(&ns->ns_pool); } - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); return cached; } -EXPORT_SYMBOL(ldlm_pools_shrink); -void ldlm_pools_recalc(void) +static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask) +{ + return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask); +} + +static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask) +{ + return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask); +} + +void ldlm_pools_recalc(ldlm_side_t client) { __u32 nr_l = 0, nr_p = 0, l; struct ldlm_namespace *ns; int rc, equal = 0; /* Check all modest namespaces. 
*/ - mutex_down(&ldlm_namespace_lock); - list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) { + mutex_down(ldlm_namespace_lock(client)); + list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) { if (ns->ns_appetite != LDLM_NAMESPACE_MODEST) continue; - if (ns->ns_client == LDLM_NAMESPACE_SERVER) { + if (client == LDLM_NAMESPACE_SERVER) { l = ldlm_pool_granted(&ns->ns_pool); if (l == 0) l = 1; @@ -798,22 +807,22 @@ void ldlm_pools_recalc(void) } /* The rest is given to greedy namespaces. */ - list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) { + list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) { if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY) continue; - if (ns->ns_client == LDLM_NAMESPACE_SERVER) { + if (client == LDLM_NAMESPACE_SERVER) { if (equal) { /* In the case 2/3 locks are eaten out by * modest pools, we re-setup equal limit * for _all_ pools. */ l = LDLM_POOL_HOST_L / - atomic_read(&ldlm_srv_namespace_nr); + atomic_read(ldlm_namespace_nr(client)); } else { /* All the rest of greedy pools will have * all locks in equal parts.*/ l = (LDLM_POOL_HOST_L - nr_l) / - (atomic_read(&ldlm_srv_namespace_nr) - + (atomic_read(ldlm_namespace_nr(client)) - nr_p); } ldlm_pool_setup(&ns->ns_pool, l); @@ -825,7 +834,7 @@ void ldlm_pools_recalc(void) CERROR("%s: pool recalculation error " "%d\n", ns->ns_pool.pl_name, rc); } - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); } EXPORT_SYMBOL(ldlm_pools_recalc); @@ -846,7 +855,8 @@ static int ldlm_pools_thread_main(void *arg) struct l_wait_info lwi; /* Recal all pools on this tick. */ - ldlm_pools_recalc(); + ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT); + ldlm_pools_recalc(LDLM_NAMESPACE_SERVER); /* Wait until the next check time, or until we're * stopped. 
*/ @@ -933,18 +943,25 @@ int ldlm_pools_init(ldlm_side_t client) ENTRY; rc = ldlm_pools_thread_start(client); - if (rc == 0) - ldlm_pools_shrinker = set_shrinker(DEFAULT_SEEKS, - ldlm_pools_shrink); + if (rc == 0) { + ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS, + ldlm_pools_srv_shrink); + ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS, + ldlm_pools_cli_shrink); + } RETURN(rc); } EXPORT_SYMBOL(ldlm_pools_init); void ldlm_pools_fini(void) { - if (ldlm_pools_shrinker != NULL) { - remove_shrinker(ldlm_pools_shrinker); - ldlm_pools_shrinker = NULL; + if (ldlm_pools_srv_shrinker != NULL) { + remove_shrinker(ldlm_pools_srv_shrinker); + ldlm_pools_srv_shrinker = NULL; + } + if (ldlm_pools_cli_shrinker != NULL) { + remove_shrinker(ldlm_pools_cli_shrinker); + ldlm_pools_cli_shrinker = NULL; } ldlm_pools_thread_stop(); } @@ -1038,7 +1055,7 @@ void ldlm_pools_wakeup(void) } EXPORT_SYMBOL(ldlm_pools_wakeup); -void ldlm_pools_recalc(void) +void ldlm_pools_recalc(ldlm_side_t client) { return; } diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c index 6a78cd29f3..651dfd1893 100644 --- a/lustre/ldlm/ldlm_request.c +++ b/lustre/ldlm/ldlm_request.c @@ -1421,12 +1421,7 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns, !lock->l_readers && !lock->l_writers && !(lock->l_flags & LDLM_FL_LOCAL) && !(lock->l_flags & LDLM_FL_CBPENDING)) { - lock->l_last_used = cfs_time_current(); - spin_lock(&ns->ns_unused_lock); - LASSERT(ns->ns_nr_unused >= 0); - list_add_tail(&lock->l_lru, &ns->ns_unused_list); - ns->ns_nr_unused++; - spin_unlock(&ns->ns_unused_lock); + ldlm_lock_add_to_lru(lock); lock->l_flags &= ~LDLM_FL_NO_LRU; LDLM_DEBUG(lock, "join lock to lru"); count++; diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c index 78ad0859a3..af7c34ec49 100644 --- a/lustre/ldlm/ldlm_resource.c +++ b/lustre/ldlm/ldlm_resource.c @@ -38,8 +38,15 @@ cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab; atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0); atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0); -struct semaphore ldlm_namespace_lock; -struct list_head ldlm_namespace_list = CFS_LIST_HEAD_INIT(ldlm_namespace_list); + +struct semaphore ldlm_srv_namespace_lock; +struct list_head ldlm_srv_namespace_list = + CFS_LIST_HEAD_INIT(ldlm_srv_namespace_list); + +struct semaphore ldlm_cli_namespace_lock; +struct list_head ldlm_cli_namespace_list = + CFS_LIST_HEAD_INIT(ldlm_cli_namespace_list); + cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL; cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL; cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL; @@ -48,7 +55,8 @@ cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL; static int ldlm_proc_dump_ns(struct file *file, const char *buffer, unsigned long count, void *data) { - ldlm_dump_all_namespaces(D_DLMTRACE); + ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE); + ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE); RETURN(count); } @@ -253,12 +261,6 @@ void ldlm_proc_namespace(struct ldlm_namespace *ns) #define ldlm_proc_namespace(ns) do {} while (0) #endif /* LPROCFS */ -static atomic_t *ldlm_namespace_nr(ldlm_side_t client) -{ - return client == LDLM_NAMESPACE_SERVER ? 
- &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr; -} - struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, ldlm_appetite_t apt) { @@ -310,11 +312,11 @@ struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, spin_lock_init(&ns->ns_unused_lock); ns->ns_connect_flags = 0; - mutex_down(&ldlm_namespace_lock); - list_add(&ns->ns_list_chain, &ldlm_namespace_list); + mutex_down(ldlm_namespace_lock(client)); + list_add(&ns->ns_list_chain, ldlm_namespace_list(client)); idx = atomic_read(ldlm_namespace_nr(client)); atomic_inc(ldlm_namespace_nr(client)); - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); ldlm_proc_namespace(ns); @@ -326,10 +328,10 @@ struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, RETURN(ns); out_del: - mutex_down(&ldlm_namespace_lock); + mutex_down(ldlm_namespace_lock(client)); list_del(&ns->ns_list_chain); atomic_dec(ldlm_namespace_nr(client)); - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); out_hash: POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE); OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE); @@ -471,11 +473,11 @@ int ldlm_namespace_free_prior(struct ldlm_namespace *ns) if (!ns) RETURN(ELDLM_OK); - mutex_down(&ldlm_namespace_lock); + mutex_down(ldlm_namespace_lock(ns->ns_client)); list_del(&ns->ns_list_chain); atomic_dec(ldlm_namespace_nr(ns->ns_client)); ldlm_pool_fini(&ns->ns_pool); - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(ns->ns_client)); /* At shutdown time, don't call the cancellation callback */ ldlm_namespace_cleanup(ns, 0); @@ -862,22 +864,22 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc) desc->lr_name = res->lr_name; } -void ldlm_dump_all_namespaces(int level) +void ldlm_dump_all_namespaces(int level, ldlm_side_t client) { struct list_head *tmp; if (!((libcfs_debug | D_ERROR) & level)) return; - mutex_down(&ldlm_namespace_lock); + mutex_down(ldlm_namespace_lock(client)); - list_for_each(tmp, &ldlm_namespace_list) { + list_for_each(tmp, ldlm_namespace_list(client)) { struct ldlm_namespace *ns; ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain); ldlm_namespace_dump(level, ns); } - mutex_up(&ldlm_namespace_lock); + mutex_up(ldlm_namespace_lock(client)); } void ldlm_namespace_dump(int level, struct ldlm_namespace *ns) -- GitLab
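The hunks above replace the single global namespace list with a list, a lock
and a counter per side, always selected through ldlm_namespace_nr(),
ldlm_namespace_list() and ldlm_namespace_lock(). Below is a stand-alone sketch
of that selection pattern in user-space C; the side_t enum, the pthread
mutexes standing in for the kernel semaphores, and every ns_*/side_* name are
invented for the illustration and are not Lustre API.

/*
 * Stand-alone sketch, not the Lustre code itself: only the "select every
 * piece of global state by side" pattern mirrors the patched helpers.
 */
#include <pthread.h>
#include <stdio.h>

typedef enum { SIDE_SERVER, SIDE_CLIENT } side_t;

struct ns_entry {
        const char      *name;
        struct ns_entry *next;
};

static int srv_nr, cli_nr;
static struct ns_entry *srv_list, *cli_list;
static pthread_mutex_t srv_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cli_lock = PTHREAD_MUTEX_INITIALIZER;

static int *side_nr(side_t side)
{
        return side == SIDE_SERVER ? &srv_nr : &cli_nr;
}

static struct ns_entry **side_list(side_t side)
{
        return side == SIDE_SERVER ? &srv_list : &cli_list;
}

static pthread_mutex_t *side_lock(side_t side)
{
        return side == SIDE_SERVER ? &srv_lock : &cli_lock;
}

/* Add a namespace to its own side's list only, as ldlm_namespace_new() does. */
static void ns_register(side_t side, struct ns_entry *ns)
{
        pthread_mutex_lock(side_lock(side));
        ns->next = *side_list(side);
        *side_list(side) = ns;
        (*side_nr(side))++;
        pthread_mutex_unlock(side_lock(side));
}

int main(void)
{
        struct ns_entry mds = { "example-server-ns", NULL };
        struct ns_entry osc = { "example-client-ns", NULL };

        ns_register(SIDE_SERVER, &mds);
        ns_register(SIDE_CLIENT, &osc);
        printf("server namespaces: %d, client namespaces: %d\n",
               srv_nr, cli_nr);
        return 0;
}

Because every accessor is keyed by the same selector, a caller cannot end up
holding the client lock while walking the server list, which is the property
the client/server split relies on.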
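ldlm_pools_shrink() now takes the per-side namespace semaphore with
down_trylock() and backs off when it is already held, returning -1 for a real
shrink request and 0 for a pure query instead of sleeping. A minimal
user-space model of that rule, with pthread_mutex_trylock() standing in for
down_trylock() and an invented cached_locks counter in place of the real
per-namespace pools:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ns_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_locks = 128;          /* pretend pool contents */

static int pools_shrink(int nr)
{
        int cached;

        /* List lock already held (e.g. by the pool thread)?  Back off:
         * report "nothing freed" for a shrink, "nothing cached" for a query,
         * rather than sleeping and deadlocking. */
        if (pthread_mutex_trylock(&ns_list_lock) != 0)
                return nr != 0 ? -1 : 0;

        if (nr > 0)                     /* pretend we canceled nr locks */
                cached_locks -= nr < cached_locks ? nr : cached_locks;
        cached = cached_locks;

        pthread_mutex_unlock(&ns_list_lock);
        return cached;                  /* locks still cached after the pass */
}

int main(void)
{
        printf("query only: %d cached\n", pools_shrink(0));
        printf("shrink 32:  %d cached\n", pools_shrink(32));
        return 0;
}

The same back-off in the patched code keeps the shrinker from sleeping on a
semaphore the pool thread already holds, which is the test 116 deadlock
described in the commit message.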
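For FL_TEST_LOCK matches, search_queue() now calls ldlm_lock_touch_in_lru(),
which refreshes l_last_used and re-queues the lock at the LRU tail so a
recently tested lock becomes the last cancellation candidate. The sketch
below reproduces that touch operation on a simplified intrusive circular
list; the lru_node type and the lru_* helpers are made up for the example.

#include <stdio.h>
#include <time.h>

struct lru_node {
        struct lru_node *prev, *next;
        const char      *name;
        time_t           last_used;
};

static struct lru_node lru = { &lru, &lru, "head", 0 };  /* circular list head */

static void lru_del(struct lru_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;          /* mark as unqueued */
}

static void lru_add_tail(struct lru_node *n)
{
        n->prev = lru.prev;
        n->next = &lru;
        lru.prev->next = n;
        lru.prev = n;
        n->last_used = time(NULL);      /* like l_last_used = cfs_time_current() */
}

/* Re-queue at the tail only if the node is already on the LRU. */
static void lru_touch(struct lru_node *n)
{
        if (n->next != n) {
                lru_del(n);
                lru_add_tail(n);
        }
}

int main(void)
{
        struct lru_node a = { &a, &a, "lock-a", 0 };
        struct lru_node b = { &b, &b, "lock-b", 0 };
        struct lru_node *it;

        lru_add_tail(&a);
        lru_add_tail(&b);
        lru_touch(&a);                  /* "a" was just tested: newest again */

        for (it = lru.next; it != &lru; it = it->next)
                printf("%s\n", it->name);       /* prints lock-b, then lock-a */
        return 0;
}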