From cae83542968d4abe44acebbd761f52b258c7c6f0 Mon Sep 17 00:00:00 2001
From: bobijam <bobijam>
Date: Wed, 5 Nov 2008 02:03:57 +0000
Subject: [PATCH] Branch HEAD
b=16578
i=adilger

Description: ldlm_cancel_pack()) ASSERTION(max >= dlm->lock_count + count)
Details    : If there is no extra space in the request for early cancels,
             ldlm_req_handles_avail() returns 0 instead of a negative value.
---
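Note for reviewers (not part of the commit message): below is a minimal
standalone sketch of the arithmetic this patch fixes. The constants, the
handle struct, and the function names are stand-ins, not the real Lustre
definitions. The point is that dividing a negative int by an unsigned
sizeof() value promotes it to an unsigned type, so the old code reported a
nonsense handle count instead of "no room", and ldlm_cancel_pack() later
tripped ASSERTION(max >= dlm->lock_count + count).

    #include <stdio.h>

    /* Stand-ins for LDLM_MAXREQSIZE, CFS_PAGE_SIZE, LDLM_LOCKREQ_HANDLES. */
    #define MAXREQSIZE      5120
    #define PAGE_SZ         4096
    #define LOCKREQ_HANDLES 2

    struct handle { unsigned long long cookie; };

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Old behaviour: when req_size leaves no room, avail goes negative and
     * is then divided by an unsigned sizeof() value, so the usual arithmetic
     * conversions yield a meaningless quotient instead of "no room". */
    static int handles_avail_old(int req_size, int off)
    {
            int avail = min_int(MAXREQSIZE, PAGE_SZ - 512) - req_size;

            avail /= sizeof(struct handle);       /* unsigned division */
            return avail + LOCKREQ_HANDLES - off;
    }

    /* Fixed behaviour: clamp a negative avail to 0, divide as signed. */
    static int handles_avail_new(int req_size, int off)
    {
            int avail = min_int(MAXREQSIZE, PAGE_SZ - 512) - req_size;

            if (avail >= 0)
                    avail /= (int)sizeof(struct handle);
            else
                    avail = 0;
            return avail + LOCKREQ_HANDLES - off;
    }

    int main(void)
    {
            /* A long symlink target makes the request exceed the buffer. */
            int req_size = 4000;

            printf("old: %d handles\n", handles_avail_old(req_size, 0));
            printf("new: %d handles\n", handles_avail_new(req_size, 0));
            return 0;
    }

On a typical host the old variant prints a bogus count, while the fixed one
prints just the built-in lockreq slots.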
 lustre/ChangeLog           |  31 +++++---
 lustre/ldlm/ldlm_request.c | 157 +++++++++++++++++++------------------
 lustre/tests/sanity.sh     |  30 ++++---
 3 files changed, 119 insertions(+), 99 deletions(-)

diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index 8cc4fcb23a..ff4b4f940c 100644
--- a/lustre/ChangeLog
+++ b/lustre/ChangeLog
@@ -13,6 +13,13 @@ tbd Sun Microsystems, Inc.
         removed cwd "./" (refer to Bugzilla 14399).
       * File join has been disabled in this release, refer to Bugzilla 16929.
 
+Severity   : normal
+Frequency  : Create a symlink file with a very long name
+Bugzilla   : 16578
+Description: ldlm_cancel_pack()) ASSERTION(max >= dlm->lock_count + count)
+Details    : If there is no extra space in the request for early cancels,
+             ldlm_req_handles_avail() returns 0 instead of a negative value.
+
 Severity   : enhancement
 Bugzilla   : 1819
 Description: Add /proc entry for import status
@@ -1549,9 +1556,9 @@ Details    : Kill unused ldlm_handle2lock_ns() function.
 Severity   : normal
 Bugzilla   : 16450
 Description: Add lu_ref support to ldlm_lock
-Details    : lu_ref support for ldlm_lock and ldlm_resource. See lu_ref patch. 
-             lu_ref fields ->l_reference and ->lr_reference are added to ldlm_lock 
-             and ldlm_resource. LDLM interface has to be changed, because code that 
+Details    : lu_ref support for ldlm_lock and ldlm_resource. See lu_ref patch.
+             lu_ref fields ->l_reference and ->lr_reference are added to ldlm_lock
+             and ldlm_resource. LDLM interface has to be changed, because code that
              releases a reference on a lock, has to "know" what reference this is.
              In the most frequent case
 
@@ -1559,12 +1566,12 @@ Details    : lu_ref support for ldlm_lock and ldlm_resource. See lu_ref patch.
             ...
             LDLM_LOCK_PUT(lock);
 
-             no changes are required. When any other reference (received _not_ from 
-             ldlm_handle2lock()) is released, LDLM_LOCK_RELEASE() has to be called 
+             no changes are required. When any other reference (received _not_ from
+             ldlm_handle2lock()) is released, LDLM_LOCK_RELEASE() has to be called
              instead of LDLM_LOCK_PUT().
 
              Arguably, changes are pervasive, and interface requires some discipline
-             for proper use. On the other hand, it was very instrumental in finding 
+             for proper use. On the other hand, it was very instrumental in finding
              a few leaked lock references.
 
 Severity   : normal
 
@@ -1577,7 +1584,7 @@ Details    : Introduce ldlm_lock_addref_try() function (used by CLIO) that
 Severity   : normal
 Bugzilla   : 16450
 Description: Add ldlm_weigh_callback().
-Details    : Add new ->l_weigh_ast() call-back to ldlm_lock. It is called 
+Details    : Add new ->l_weigh_ast() call-back to ldlm_lock. It is called
              by ldlm_cancel_shrink_policy() to estimate lock "value", instead
              of hard-coded `number of pages' logic.
 
@@ -1617,8 +1624,8 @@ Details    : Introduce new lu_context functions that are needed on the client
 Severity   : normal
 Bugzilla   : 16450
 Description: Add start and stop methods to lu_device_type_operations.
-Details    : Introduce two new methods in lu_device_type_operations, that are 
-             invoked when first instance of a given type is created and last one 
+Details    : Introduce two new methods in lu_device_type_operations, that are
+             invoked when first instance of a given type is created and last one
              is destroyed respectively. This is need by CLIO.
 
 Severity   : normal
 
@@ -1663,7 +1670,7 @@ Severity   : normal
 Bugzilla   : 16450
 Description: Introduce struct md_site and move meta-data specific parts of
              struct lu_site here.
-Details    : Move md-specific fields out of struct lu_site into special struct 
+Details    : Move md-specific fields out of struct lu_site into special struct
              md_site, so that lu_site can be used on a client.
 
 Severity   : minor
 
@@ -1747,8 +1754,8 @@ Severity   : normal
 Bugzilla   : 17197
 Description: (rw.c:1323:ll_read_ahead_pages()) ASSERTION(page_idx > ria->ria_stoff) failed
 Details    : Once the unmatched stride IO mode is detected, shrink the stride-ahead
-             window to 0. If it does hit cache miss, and read-pattern is still 
-             stride-io mode, does not reset the stride window, but also does not 
+             window to 0. If it does hit cache miss, and read-pattern is still
+             stride-io mode, does not reset the stride window, but also does not
              increase the stride window length in this case.
 
 --------------------------------------------------------------------------------
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 44f5fb2a93..6bf1efe2a5 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -607,7 +607,10 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
         int avail;
 
         avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
-        avail /= sizeof(struct lustre_handle);
+        if (likely(avail >= 0))
+                avail /= (int)sizeof(struct lustre_handle);
+        else
+                avail = 0;
         avail += LDLM_LOCKREQ_HANDLES - off;
 
         return avail;
@@ -650,12 +653,12 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                 bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
                 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
 
-                flags = ns_connect_lru_resize(ns) ? 
+                flags = ns_connect_lru_resize(ns) ?
                         LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                 to_free = !ns_connect_lru_resize(ns) && opc == LDLM_ENQUEUE ? 1 : 0;
 
-                /* Cancel lru locks here _only_ if the server supports 
+                /* Cancel lru locks here _only_ if the server supports
                  * EARLY_CANCEL. Otherwise we have to send extra CANCEL
                  * rpc, what will make us slower. */
                 if (avail > count)
@@ -957,7 +960,7 @@ static int ldlm_cli_cancel_local(struct ldlm_lock *lock)
 {
         int rc = LDLM_FL_LOCAL_ONLY;
         ENTRY;
-        
+
         if (lock->l_conn_export) {
                 int local_only;
 
@@ -1006,7 +1009,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
         LASSERT(dlm != NULL);
 
         /* Check the room in the request buffer. */
-        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) - 
+        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
                 sizeof(struct ldlm_request);
         max /= sizeof(struct lustre_handle);
         max += LDLM_LOCKREQ_HANDLES;
@@ -1132,28 +1135,28 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
         __u64 old_slv, new_slv;
         __u32 new_limit;
         ENTRY;
-        
-        if (unlikely(!req->rq_import || !req->rq_import->imp_obd || 
+
+        if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
                      !imp_connect_lru_resize(req->rq_import))) {
-                /* 
-                 * Do nothing for corner cases. 
+                /*
+                 * Do nothing for corner cases.
                  */
                 RETURN(0);
         }
 
-        /* 
-         * In some cases RPC may contain slv and limit zeroed out. This is
+        /*
+         * In some cases RPC may contain slv and limit zeroed out. This is
          * the case when server does not support lru resize feature. This is
          * also possible in some recovery cases when server side reqs have no
-         * ref to obd export and thus access to server side namespace is no 
-         * possible. 
+         * ref to obd export and thus access to server side namespace is not
+         * possible.
          */
-        if (lustre_msg_get_slv(req->rq_repmsg) == 0 || 
+        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
             lustre_msg_get_limit(req->rq_repmsg) == 0) {
                 DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
-                          "(SLV: "LPU64", Limit: %u)", 
-                          lustre_msg_get_slv(req->rq_repmsg), 
+                          "(SLV: "LPU64", Limit: %u)",
+                          lustre_msg_get_slv(req->rq_repmsg),
                           lustre_msg_get_limit(req->rq_repmsg));
                 RETURN(0);
         }
 
@@ -1162,12 +1165,12 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
         new_slv = lustre_msg_get_slv(req->rq_repmsg);
         obd = req->rq_import->imp_obd;
 
-        /* 
-         * Set new SLV and Limit to obd fields to make accessible for pool 
+        /*
+         * Set new SLV and Limit to obd fields to make accessible for pool
          * thread. We do not access obd_namespace and pool directly here
          * as there is no reliable way to make sure that they are still
          * alive in cleanup time. Evil races are possible which may cause
-         * oops in that time. 
+         * oops in that time.
          */
         write_lock(&obd->obd_pool_lock);
         old_slv = obd->obd_pool_slv;
@@ -1265,7 +1268,7 @@ static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
         RETURN(count);
 }
 
-/** 
+/**
  * Callback function for shrink policy. Makes decision whether to keep
  * \a lock in LRU for current \a LRU size \a unused, added in current scan
  * \a added and number of locks to be preferably canceled \a count.
  *
  * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
  *
  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
  */
 static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
-                                                   int unused, int added, 
+                                                   int unused, int added,
                                                    int count)
 {
         int lock_cost;
         __u64 page_nr;
 
-        /* 
-         * Stop lru processing when we reached passed @count or checked all 
-         * locks in lru. 
+        /*
+         * Stop lru processing when we reached passed @count or checked all
+         * locks in lru.
          */
         if (count && added >= count)
                 return LDLM_POLICY_KEEP_LOCK;
@@ -1299,9 +1302,9 @@ static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
         } else {
                 struct ldlm_extent *l_extent;
 
-                /* 
+                /*
                  * For all extent locks cost is 1 + number of pages in
-                 * their extent. 
+                 * their extent.
                  */
                 l_extent = &lock->l_policy_data.l_extent;
                 page_nr = l_extent->end - l_extent->start;
@@ -1309,18 +1312,18 @@ static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
                 }
                 lock_cost = 1 + page_nr;
         } else {
-                /* 
-                 * For all locks which are not extent ones cost is 1 
+                /*
+                 * For all locks which are not extent ones cost is 1
                  */
                 lock_cost = 1;
         }
 
-        /* 
-         * Keep all expensive locks in lru for the memory pressure time
-         * cancel policy. They anyways may be canceled by lru resize
-         * pplicy if they have not small enough CLV. 
+        /*
+         * Keep all expensive locks in lru for the memory pressure time
+         * cancel policy. They anyway may be canceled by lru resize
+         * policy if their CLV is not small enough.
          */
-        return lock_cost > ns->ns_shrink_thumb ? 
+        return lock_cost > ns->ns_shrink_thumb ?
                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
 
@@ -1334,8 +1337,8 @@ static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
  */
 static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
-                                                 struct ldlm_lock *lock, 
-                                                 int unused, int added, 
+                                                 struct ldlm_lock *lock,
+                                                 int unused, int added,
                                                  int count)
 {
         cfs_time_t cur = cfs_time_current();
@@ -1343,8 +1346,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
         __u64 slv, lvf, lv;
         cfs_time_t la;
 
-        /* 
-         * Stop lru processing when we reached passed @count or checked all 
+        /*
+         * Stop lru processing when we reached passed @count or checked all
          * locks in lru.
          */
         if (count && added >= count)
@@ -1352,20 +1355,20 @@
         slv = ldlm_pool_get_slv(pl);
         lvf = ldlm_pool_get_lvf(pl);
-        la = cfs_duration_sec(cfs_time_sub(cur, 
+        la = cfs_duration_sec(cfs_time_sub(cur,
                                            lock->l_last_used));
 
-        /* 
-         * Stop when slv is not yet come from server or lv is smaller than 
+        /*
+         * Stop when slv has not yet come from server or lv is smaller than
          * it is.
          */
         lv = lvf * la * unused;
-        
-        /* 
-         * Inform pool about current CLV to see it via proc. 
+
+        /*
+         * Inform pool about current CLV to see it via proc.
          */
         ldlm_pool_set_clv(pl, lv);
-        return (slv == 1 || lv < slv) ? 
+        return (slv == 1 || lv < slv) ?
                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
 
@@ -1379,15 +1382,15 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
  */
 static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
-                                                   struct ldlm_lock *lock, 
+                                                   struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
 {
-        /* 
-         * Stop lru processing when we reached passed @count or checked all 
-         * locks in lru. 
+        /*
+         * Stop lru processing when we reached passed @count or checked all
+         * locks in lru.
          */
-        return (added >= count) ? 
+        return (added >= count) ?
                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
 
@@ -1401,18 +1404,18 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
  */
 static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
-                                                 struct ldlm_lock *lock, 
+                                                 struct ldlm_lock *lock,
                                                  int unused, int added,
                                                  int count)
 {
-        /* 
-         * Stop lru processing if young lock is found and we reached passed 
-         * @count. 
+        /*
+         * Stop lru processing if young lock is found and we reached passed
+         * @count.
          */
-        return ((added >= count) && 
+        return ((added >= count) &&
                 cfs_time_before(cfs_time_current(),
                                 cfs_time_add(lock->l_last_used,
-                                             ns->ns_max_age))) ? 
+                                             ns->ns_max_age))) ?
                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
 
@@ -1426,20 +1429,20 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
  * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
  */
 static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
-                                                    struct ldlm_lock *lock, 
+                                                    struct ldlm_lock *lock,
                                                     int unused, int added,
                                                     int count)
 {
-        /* 
-         * Stop lru processing when we reached passed @count or checked all 
-         * locks in lru. 
+        /*
+         * Stop lru processing when we reached passed @count or checked all
+         * locks in lru.
          */
-        return (added >= count) ? 
+        return (added >= count) ?
                 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
 }
 
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *, 
-                                                      struct ldlm_lock *, int, 
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+                                                      struct ldlm_lock *, int,
                                                       int, int);
 
 static ldlm_cancel_lru_policy_t
 
@@ -1456,10 +1459,10 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
                 if (flags & LDLM_CANCEL_AGED)
                         return ldlm_cancel_aged_policy;
         }
-        
+
         return ldlm_cancel_default_policy;
 }
-  
+
 /* - Free space in lru for @count new locks,
  *   redundant unused locks are canceled locally;
  * - also cancel locally unused aged locks;
 
@@ -1502,7 +1505,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
 
         pf = ldlm_cancel_lru_policy(ns, flags);
         LASSERT(pf != NULL);
-        
+
         while (!list_empty(&ns->ns_unused_list)) {
                 /* For any flags, stop scanning if @max is reached. */
                 if (max && added >= max)
                         break;
@@ -1533,11 +1536,11 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                  * we find a lock that should stay in the cache.
                  * We should take into account lock age anyway
                  * as new lock even if it is small of weight is
-                 * valuable resource. 
+                 * valuable resource.
                  *
                  * That is, for shrinker policy we drop only
                  * old locks, but additionally chose them by
-                 * their weight. Big extent locks will stay in 
+                 * their weight. Big extent locks will stay in
                  * the cache. */
                 if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK) {
@@ -1568,8 +1571,8 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
 
                 /* If we have chosen to cancel this lock voluntarily, we
                  * better send cancel notification to server, so that it
-                 * frees appropriate state. This might lead to a race 
-                 * where while we are doing cancel here, server is also 
+                 * frees appropriate state. This might lead to a race
+                 * where while we are doing cancel here, server is also
                  * silently cancelling this lock. */
                 lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
@@ -1599,7 +1602,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
 }
 
-/* Returns number of locks which could be canceled next time when 
+/* Returns number of locks which could be canceled next time when
  * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
 int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
                              int count, int max, int flags)
@@ -1625,10 +1628,10 @@ int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
                         break;
 
                 /* Somebody is already doing CANCEL or there is a
-                 * blocking request will send cancel. Let's not count 
+                 * blocking request will send cancel. Let's not count
                  * this lock. */
                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST)) 
+                    (lock->l_flags & LDLM_FL_BL_AST))
                         continue;
 
                 LDLM_LOCK_GET(lock);
@@ -1658,7 +1661,7 @@
  * in a thread and this function will return after the thread has been
  * asked to call the callback. when called with LDLM_SYNC the blocking
  * callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync, 
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
                     int flags)
 {
         CFS_LIST_HEAD(cancels);
@@ -1713,7 +1716,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
 
                 /* If somebody is already doing CANCEL, or blocking ast came,
                  * skip this lock. */
-                if (lock->l_flags & LDLM_FL_BL_AST || 
+                if (lock->l_flags & LDLM_FL_BL_AST ||
                     lock->l_flags & LDLM_FL_CANCELING)
                         continue;
 
@@ -1741,10 +1744,10 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
         RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
 }
 
-/* If @req is NULL, send CANCEL request to server with handles of locks 
- * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests 
+/* If @req is NULL, send CANCEL request to server with handles of locks
+ * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
  * separately per lock.
- * If @req is not NULL, put handles of locks in @cancels into the request 
+ * If @req is not NULL, put handles of locks in @cancels into the request
  * buffer at the offset @off.
  * Destroy @cancels at the end. */
 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
@@ -1756,8 +1759,8 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
 
         if (list_empty(cancels) || count == 0)
                 RETURN(0);
-        
-        /* XXX: requests (both batched and not) could be sent in parallel. 
+
+        /* XXX: requests (both batched and not) could be sent in parallel.
          * Usually it is enough to have just 1 RPC, but it is possible that
          * there are to many locks to be cancelled in LRU or on a resource.
         * It would also speed up the case when the server does not support
diff --git a/lustre/tests/sanity.sh b/lustre/tests/sanity.sh
index 4c462480e9..89ea9f6a52 100644
--- a/lustre/tests/sanity.sh
+++ b/lustre/tests/sanity.sh
@@ -464,6 +464,16 @@ test_17f() {
 }
 run_test 17f "symlinks: long and very long symlink name ========================"
 
+test_17g() {
+        mkdir -p $DIR/$tdir
+        for ((i = 0; i < 511; ++i)); do
+                LONGSYMLINK="${LONGSYMLINK}01234567"
+        done
+        ln -s $LONGSYMLINK $DIR/$tdir/$tfile
+        ls -l $DIR/$tdir
+}
+run_test 17g "symlinks: really long symlink name ==============================="
+
 test_18() {
         touch $DIR/f
         ls $DIR || error
@@ -958,7 +968,7 @@ exhaust_all_precreations() {
 test_27n() {
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         reset_enospc
         rm -f $DIR/d27/f27n
@@ -973,7 +983,7 @@ run_test 27n "create file with some full OSTs =================="
 test_27o() {
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         reset_enospc
         rm -f $DIR/d27/f27o
@@ -990,7 +1000,7 @@ run_test 27o "create file with all full OSTs (should error) ===="
 test_27p() {
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         reset_enospc
         rm -f $DIR/d27/f27p
@@ -1010,7 +1020,7 @@ run_test 27p "append to a truncated file with some full OSTs ==="
 test_27q() {
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         reset_enospc
         rm -f $DIR/d27/f27q
@@ -1031,7 +1041,7 @@ run_test 27q "append to truncated file with all OSTs full (should error) ==="
 test_27r() {
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         reset_enospc
         rm -f $DIR/d27/f27r
@@ -1083,7 +1093,7 @@ run_test 27u "skip object creation on OSC w/o objects =========="
 test_27v() { # bug 4900
         [ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
         remote_mds_nodsh && skip "remote MDS with nodsh" && return
-        remote_ost_nodsh && skip "remote OST with nodsh" && return 
+        remote_ost_nodsh && skip "remote OST with nodsh" && return
 
         exhaust_all_precreations
 
@@ -2213,7 +2223,7 @@ test_51b() {
 run_test 51b "mkdir .../t-0 --- .../t-$NUMTEST ===================="
 
 test_51bb() {
-        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
 
         local ndirs=${TEST51BB_NDIRS:-10}
         local nfiles=${TEST51BB_NFILES:-100}
@@ -2235,7 +2245,7 @@ test_51bb() {
         declare -a dirs
         for ((i=0; i < $ndirs; i++)); do
                 dirs[i]=$dir/$RANDOM
-                echo Creating directory ${dirs[i]} 
+                echo Creating directory ${dirs[i]}
                 mkdir -p ${dirs[i]}
                 ls $dir
                 echo Creating $nfiles in dir ${dirs[i]} ...
@@ -3699,7 +3709,7 @@ test_100() {
                         error "local: $LPORT > 1024, remote: $RPORT"
                 fi
         done
-        [ "$rc" = 0 ] || error "privileged port not found" ) 
+        [ "$rc" = 0 ] || error "privileged port not found" )
 }
 run_test 100 "check local port using privileged port ==========="
@@ -5729,7 +5739,7 @@ test_151() {
 
         $LCTL set_param -n obdfilter.*.writethrough_cache_enable 1
 
-        # pages should be in the case right after write 
+        # pages should be in the cache right after write
         dd if=/dev/urandom of=$DIR/$tfile bs=4k count=$CPAGES || error "dd failed"
         BEFORE=`roc_hit`
         cancel_lru_locks osc
-- 
GitLab
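
For reference, a by-hand reproducer equivalent to the new test_17g, in case
the failure needs to be confirmed on an unpatched client. The mount point
below is a stand-in for whatever $DIR resolves to in your setup:

    # Build a ~4 KB symlink target (511 x 8 characters), as test_17g does,
    # so the enqueue request leaves no room for early-cancel handles.
    TARGET=$(printf '01234567%.0s' $(seq 1 511))
    ln -s "$TARGET" /mnt/lustre/longsymlink
    ls -l /mnt/lustre

On an unpatched client the ln -s trips the ldlm_cancel_pack() assertion;
with this patch applied it simply creates the symlink.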