diff --git a/build/autoconf/lustre-build.m4 b/build/autoconf/lustre-build.m4
index 51780bc2f2b21bdb4622506e3a44afe0e7e39e25..4a95cd93ab76a5af58ce6e03e160c0b8ed5f53d3 100644
--- a/build/autoconf/lustre-build.m4
+++ b/build/autoconf/lustre-build.m4
@@ -305,7 +305,7 @@ AC_ARG_ENABLE([bgl],
         [enable_bgl='yes'],[enable_bgl='no'])
 AC_MSG_RESULT([$enable_bgl])
 if test x$enable_bgl != xno; then
-        AC_DEFINE(BGL_SUPPORT, 1, Enable BGL Features)
+        AC_DEFINE(HAVE_BGL_SUPPORT, 1, Enable BGL Features)
         enable_doc='no'
         enable_tests='no'
         enable_server='no'
diff --git a/libcfs/libcfs/debug.c b/libcfs/libcfs/debug.c
index f1ccfc5e6611df1e0ff2cf0626cfa290adb56d46..25e442def2bc94b65adfc02a4d33ba80bed8176d 100644
--- a/libcfs/libcfs/debug.c
+++ b/libcfs/libcfs/debug.c
@@ -112,7 +112,9 @@ EXPORT_SYMBOL(libcfs_kmemory);
 
 static cfs_waitq_t debug_ctlwq;
 
-#ifdef __arch_um__
+#ifdef HAVE_BGL_SUPPORT
+char debug_file_path[1024] = "/bgl/ion/tmp/lustre-log";
+#elif defined(__arch_um__)
 char debug_file_path[1024] = "/r/tmp/lustre-log";
 #else
 char debug_file_path[1024] = "/tmp/lustre-log";
diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index b7f25c21311811c7f1ad21993cb0c8a59ef716a4..15cefc045c4f10e090df625d8ad5b1dc9f9e6b35 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -59,7 +59,12 @@ struct obd_device;
 
 #define OBD_LDLM_DEVICENAME "ldlm"
 
+#ifdef HAVE_BGL_SUPPORT
+/* 1.5 times the maximum 128 tasks available in VN mode */
+#define LDLM_DEFAULT_LRU_SIZE 196
+#else
 #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
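
Note on the three hunks above: configure's --enable-bgl switch now AC_DEFINEs HAVE_BGL_SUPPORT (renamed from BGL_SUPPORT so it matches the guard the C files actually test), and that single define selects the Blue Gene/L compile-time defaults: the debug log moves to /bgl/ion/tmp on the I/O node, and the LDLM client LRU becomes a fixed 196 entries instead of 100 * num_online_cpus() (the in-tree comment says 1.5 times the 128 VN-mode tasks, i.e. 192; the constant actually used is 196). The sketch below is a standalone user-space illustration of that selection, not code from the patch; the num_online_cpus() stub and the main() driver are assumptions made only for the example. Build it with and without -DHAVE_BGL_SUPPORT to see both configurations.

    /* Standalone sketch: how the configure-generated HAVE_BGL_SUPPORT
     * macro picks the BGL defaults at compile time.  num_online_cpus()
     * is a user-space stub here; in the kernel it comes from the SMP
     * headers. */
    #include <stdio.h>

    int num_online_cpus(void) { return 4; }   /* stub for the example */

    #ifdef HAVE_BGL_SUPPORT
    static const char debug_file_path[] = "/bgl/ion/tmp/lustre-log";
    #define LDLM_DEFAULT_LRU_SIZE 196   /* fixed: ~1.5 * 128 VN-mode tasks */
    #elif defined(__arch_um__)
    static const char debug_file_path[] = "/r/tmp/lustre-log";
    #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
    #else
    static const char debug_file_path[] = "/tmp/lustre-log";
    #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
    #endif

    int main(void)
    {
            printf("debug_file_path       = %s\n", debug_file_path);
            printf("LDLM_DEFAULT_LRU_SIZE = %d\n", LDLM_DEFAULT_LRU_SIZE);
            return 0;
    }
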
@@ -171,7 +176,7 @@ typedef enum {
  * the 1st operation, whereas the 2nd operation has canceled this lock and
  * is waiting for rpc_lock which is taken by the 1st operation.
  * LDLM_FL_BL_AST is to be set by ldlm_callback_handler() to the lock not allow
- * ELC code to cancel it. 
+ * ELC code to cancel it.
  * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
  * droped to let ldlm_callback_handler() return EINVAL to the server. It is
  * used when ELC rpc is already prepared and is waiting for rpc_lock, too late
@@ -249,7 +254,7 @@ struct ldlm_namespace;
 
 struct ldlm_pool_ops {
         int (*po_recalc)(struct ldlm_pool *pl);
-        int (*po_shrink)(struct ldlm_pool *pl, int nr, 
+        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                          unsigned int gfp_mask);
         int (*po_setup)(struct ldlm_pool *pl, int limit);
 };
@@ -264,69 +269,69 @@ struct ldlm_pool_ops {
 #define LDLM_POOLS_FAST_SLV_CHANGE (50)
 
 struct ldlm_pool {
-        /** 
-         * Pool proc directory. 
+        /**
+         * Pool proc directory.
          */
         cfs_proc_dir_entry_t  *pl_proc_dir;
         /**
         * Pool name, should be long enough to contain compound proc entry name.
         */
         char                   pl_name[100];
-        /** 
-         * Lock for protecting slv/clv updates. 
+        /**
+         * Lock for protecting slv/clv updates.
         */
         spinlock_t             pl_lock;
         /**
-         * Number of allowed locks in in pool, both, client and server side. 
+         * Number of allowed locks in in pool, both, client and server side.
         */
         atomic_t               pl_limit;
-        /** 
+        /**
         * Number of granted locks in
         */
         atomic_t               pl_granted;
-        /** 
-         * Grant rate per T. 
+        /**
+         * Grant rate per T.
         */
         atomic_t               pl_grant_rate;
-        /** 
-         * Cancel rate per T. 
+        /**
+         * Cancel rate per T.
         */
         atomic_t               pl_cancel_rate;
-        /** 
-         * Grant speed (GR-CR) per T. 
+        /**
+         * Grant speed (GR-CR) per T.
         */
         atomic_t               pl_grant_speed;
-        /** 
+        /**
         * Server lock volume. Protected by pl_lock.
         */
         __u64                  pl_server_lock_volume;
-        /** 
+        /**
         * Current biggest client lock volume. Protected by pl_lock.
         */
         __u64                  pl_client_lock_volume;
-        /** 
+        /**
         * Lock volume factor. SLV on client is calculated as following:
         * server_slv * lock_volume_factor.
         */
         atomic_t               pl_lock_volume_factor;
-        /** 
-         * Time when last slv from server was obtained. 
+        /**
+         * Time when last slv from server was obtained.
         */
         time_t                 pl_recalc_time;
        /**
-        * Recalc and shrink ops. 
-        */ 
+        * Recalc and shrink ops.
+        */
         struct ldlm_pool_ops  *pl_ops;
        /**
        * Planned number of granted locks for next T.
        */
         int                    pl_grant_plan;
-        /** 
-         * Grant plan step for next T. 
+        /**
+         * Grant plan step for next T.
         */
         int                    pl_grant_step;
-        /** 
-         * Pool statistics. 
+        /**
+         * Pool statistics.
         */
         struct lprocfs_stats  *pl_stats;
 };
@@ -346,17 +351,17 @@ typedef enum {
         LDLM_NAMESPACE_MODEST = 1 << 1
 } ldlm_appetite_t;
 
-/* 
- * Default value for ->ns_shrink_thumb. If lock is not extent one its cost 
+/*
+ * Default value for ->ns_shrink_thumb. If lock is not extent one its cost
  * is one page. Here we have 256 pages which is 1M on i386. Thus by default
  * all extent locks which have more than 1M long extent will be kept in lru,
- * others (including ibits locks) will be canceled on memory pressure event. 
+ * others (including ibits locks) will be canceled on memory pressure event.
  */
 #define LDLM_LOCK_SHRINK_THUMB 256
 
-/* 
- * Default values for the "max_nolock_size", "contention_time" and 
- * "contended_locks" namespace tunables. 
+/*
+ * Default values for the "max_nolock_size", "contention_time" and
+ * "contended_locks" namespace tunables.
  */
 #define NS_DEFAULT_MAX_NOLOCK_BYTES 0
 #define NS_DEFAULT_CONTENTION_SECONDS 2
@@ -368,47 +373,47 @@ struct ldlm_namespace {
          */
         char                  *ns_name;
 
-        /** 
-         * Is this a client-side lock tree? 
+        /**
+         * Is this a client-side lock tree?
          */
         ldlm_side_t            ns_client;
 
-        /** 
+        /**
          * Namespce connect flags supported by server (may be changed via proc,
          * lru resize may be disabled/enabled).
          */
         __u64                  ns_connect_flags;
 
-        /** 
-         * Client side orig connect flags supported by server. 
+        /**
+         * Client side orig connect flags supported by server.
          */
         __u64                  ns_orig_connect_flags;
 
-        /** 
+        /**
          * Hash table for namespace.
          */
         struct list_head      *ns_hash;
         spinlock_t             ns_hash_lock;
 
         /**
-         * Count of resources in the hash. 
+         * Count of resources in the hash.
          */
         __u32                  ns_refcount;
 
-        /** 
-         * All root resources in namespace. 
+        /**
+         * All root resources in namespace.
          */
         struct list_head       ns_root_list;
 
-        /** 
+        /**
          * Position in global namespace list.
          */
-        struct list_head       ns_list_chain; 
+        struct list_head       ns_list_chain;
 
-        /** 
-         * All root resources in namespace. 
+        /**
+         * All root resources in namespace.
          */
-        struct list_head       ns_unused_list; 
+        struct list_head       ns_unused_list;
         int                    ns_nr_unused;
         spinlock_t             ns_unused_lock;
@@ -419,8 +424,8 @@ struct ldlm_namespace {
          * Seconds.
          */
         unsigned int           ns_ctime_age_limit;
-        
-        /** 
+
+        /**
          * Lower limit to number of pages in lock to keep it in cache.
          */
         unsigned int           ns_shrink_thumb;
@@ -439,25 +444,25 @@ struct ldlm_namespace {
         struct ldlm_pool       ns_pool;
         ldlm_appetite_t        ns_appetite;
 
-        /** 
+        /**
          * If more than @ns_contented_locks found, the resource considered
          * as contended.
          */
         unsigned               ns_contended_locks;
 
-        /** 
+        /**
          * The resource remembers contended state during @ns_contention_time,
          * in seconds.
          */
         unsigned               ns_contention_time;
 
-        /** 
+        /**
          * Limit size of nolock requests, in bytes.
         */
         unsigned               ns_max_nolock_size;
 
         /**
-         * Backward link to obd, required for ldlm pool to store new SLV. 
+         * Backward link to obd, required for ldlm pool to store new SLV.
          */
         struct obd_device     *ns_obd;
@@ -467,7 +472,7 @@ static inline int ns_is_client(struct ldlm_namespace *ns)
 {
         LASSERT(ns != NULL);
-        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT | 
+        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                     LDLM_NAMESPACE_SERVER)));
         LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                 ns->ns_client == LDLM_NAMESPACE_SERVER);
@@ -477,7 +482,7 @@ static inline int ns_is_server(struct ldlm_namespace *ns)
 {
         LASSERT(ns != NULL);
-        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT | 
+        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                     LDLM_NAMESPACE_SERVER)));
         LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                 ns->ns_client == LDLM_NAMESPACE_SERVER);
@@ -512,7 +517,7 @@ typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
         struct interval_node li_node;  /* node for tree mgmt */
-        struct list_head     li_group; /* the locks which have the same 
+        struct list_head     li_group; /* the locks which have the same
                                         * policy - group of the policy */
 };
 #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
@@ -530,7 +535,7 @@ struct ldlm_lock {
         struct portals_handle l_handle; // must be first in the structure
         atomic_t              l_refc;
 
-        /* internal spinlock protects l_resource. we should hold this lock 
+        /* internal spinlock protects l_resource. we should hold this lock
          * first before grabbing res_lock.*/
         spinlock_t            l_lock;
@@ -832,10 +837,10 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
 
 /* resource.c */
 struct ldlm_namespace *
-ldlm_namespace_new(struct obd_device *obd, char *name, 
+ldlm_namespace_new(struct obd_device *obd, char *name,
                    ldlm_side_t client, ldlm_appetite_t apt);
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
-void ldlm_namespace_free(struct ldlm_namespace *ns, 
+void ldlm_namespace_free(struct ldlm_namespace *ns,
                          struct obd_import *imp, int force);
 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
@@ -936,7 +941,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                                int cancel_flags, void *opaque);
 int ldlm_cli_cancel_list(struct list_head *head, int count,
                          struct ptlrpc_request *req, int flags);
- 
+
 /* mds/handler.c */
 /* This has to be here because recursive inclusion sucks. */
 int intent_disposition(struct ldlm_reply *rep, int flag);
@@ -977,9 +982,9 @@ int ldlm_pools_init(void);
 void ldlm_pools_fini(void);
 void ldlm_pools_wakeup(void);
 
-int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, 
+int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                    int idx, ldlm_side_t client);
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, 
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                      unsigned int gfp_mask);
 void ldlm_pool_fini(struct ldlm_pool *pl);
 int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c
index 452873118fabf87ee0402b08f157098642c8d7fa..45f0532fb837cfd028523bc306cb894c88a6a7d5 100644
--- a/lustre/llite/llite_lib.c
+++ b/lustre/llite/llite_lib.c
@@ -73,7 +73,7 @@ static inline void ll_pglist_fini(struct ll_sb_info *sbi)
 {
         struct page *page;
         int i;
-        
+
         if (sbi->ll_pglist == NULL)
                 return;
@@ -153,11 +153,15 @@ static struct ll_sb_info *ll_init_sbi(void)
         si_meminfo(&si);
         pages = si.totalram - si.totalhigh;
-        if (pages >> (20 - CFS_PAGE_SHIFT) < 512)
+        if (pages >> (20 - CFS_PAGE_SHIFT) < 512) {
+#ifdef HAVE_BGL_SUPPORT
+                sbi->ll_async_page_max = pages / 4;
+#else
                 sbi->ll_async_page_max = pages / 2;
-        else
+#endif
+        } else {
                 sbi->ll_async_page_max = (pages / 4) * 3;
-        
+        }
         lcounter_init(&sbi->ll_async_page_count);
         spin_lock_init(&sbi->ll_async_page_reblnc_lock);
         sbi->ll_async_page_sample_max = 64 * num_online_cpus();
@@ -207,7 +211,7 @@ static struct ll_sb_info *ll_init_sbi(void)
 
 out:
         if (sbi->ll_async_page_sample)
-                OBD_FREE(sbi->ll_async_page_sample, 
+                OBD_FREE(sbi->ll_async_page_sample,
                          sizeof(long) * num_possible_cpus());
         ll_pglist_fini(sbi);
         OBD_FREE(sbi, sizeof(*sbi));
@@ -225,7 +229,7 @@ void ll_free_sbi(struct super_block *sb)
                 list_del(&sbi->ll_list);
                 spin_unlock(&ll_sb_lock);
                 lcounter_destroy(&sbi->ll_async_page_count);
-                OBD_FREE(sbi->ll_async_page_sample, 
+                OBD_FREE(sbi->ll_async_page_sample,
                          sizeof(long) * num_possible_cpus());
                 OBD_FREE(sbi, sizeof(*sbi));
         }
@@ -332,7 +336,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
         /* force vfs to use lustre handler for flock() calls - bug 10743 */
         sb->s_flags |= MS_FLOCK_LOCK;
 #endif
-        
+
         if (sbi->ll_flags & LL_SBI_FLOCK)
                 sbi->ll_fop = &ll_file_operations_flock;
         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
@@ -526,13 +530,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
                        "rc %d\n", err);
                 GOTO(out_dt, err);
         }
-        
+
         spin_lock(&sbi->ll_lco.lco_lock);
         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
         spin_unlock(&sbi->ll_lco.lco_lock);
 
         err = obd_register_page_removal_cb(sbi->ll_dt_exp,
-                                           ll_page_removal_cb, 
+                                           ll_page_removal_cb,
                                            ll_pin_extent_cb);
         if (err) {
                 CERROR("cannot register page removal callback: rc = %d\n",err);
@@ -917,7 +921,7 @@ static int ll_options(char *options, int *flags)
         char *s1 = options, *s2;
         ENTRY;
 
-        if (!options) 
+        if (!options)
                 RETURN(0);
 
         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
@@ -1053,7 +1057,7 @@ int ll_fill_super(struct super_block *sb)
         }
 
         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
-        if (err) 
+        if (err)
                 GOTO(out_free, err);
 
         /* Generate a string unique to this super, in case some joker tries
@@ -1100,10 +1104,10 @@ out_free:
         if (md)
                 OBD_FREE(md, strlen(md) + 1);
         if (dt)
                 OBD_FREE(dt, strlen(dt) + 1);
-        if (err) 
+        if (err)
                 ll_put_super(sb);
         else
-                LCONSOLE_WARN("Client %s has started\n", profilenm);        
+                LCONSOLE_WARN("Client %s has started\n", profilenm);
         RETURN(err);
 } /* ll_fill_super */
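
Note on the ll_init_sbi() hunk earlier in this file: when built with HAVE_BGL_SUPPORT, clients with less than 512 MB of low memory now cap ll_async_page_max at a quarter of their pages instead of half; larger clients keep the existing three-quarters cap. A standalone sketch of that sizing rule follows; it is not code from the patch, and CFS_PAGE_SHIFT, the ll_async_page_max() helper name and the sample size are assumptions made for the example (4 KB pages).

    /* Standalone sketch of the ll_init_sbi() page-cache cap above.
     * "pages" is total low memory in pages (si.totalram - si.totalhigh);
     * pages >> (20 - CFS_PAGE_SHIFT) converts pages to megabytes. */
    #include <stdio.h>

    #define CFS_PAGE_SHIFT 12                 /* assumed 4 KB pages */

    static unsigned long ll_async_page_max(unsigned long pages, int bgl)
    {
            if (pages >> (20 - CFS_PAGE_SHIFT) < 512)   /* under 512 MB */
                    return bgl ? pages / 4 : pages / 2;
            return (pages / 4) * 3;                     /* 3/4 of memory */
    }

    int main(void)
    {
            unsigned long pages = 256UL << (20 - CFS_PAGE_SHIFT); /* 256 MB */

            printf("default cap: %lu pages\n", ll_async_page_max(pages, 0));
            printf("BGL cap:     %lu pages\n", ll_async_page_max(pages, 1));
            return 0;
    }
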
@@ -1127,14 +1131,14 @@ void ll_put_super(struct super_block *sb)
         sprintf(ll_instance, "%p", sb);
         cfg.cfg_instance = ll_instance;
         lustre_end_log(sb, NULL, &cfg);
-        
+
         if (sbi->ll_md_exp) {
                 obd = class_exp2obd(sbi->ll_md_exp);
-                if (obd) 
+                if (obd)
                         force = obd->obd_force;
         }
-        
-        /* We need to set force before the lov_disconnect in 
+
+        /* We need to set force before the lov_disconnect in
            lustre_common_put_super, since l_d cleans up osc's as well. */
         if (force) {
                 next = 0;
@@ -1142,7 +1146,7 @@ void ll_put_super(struct super_block *sb)
                                               &next)) != NULL) {
                         obd->obd_force = force;
                 }
-        } 
+        }
 
         if (sbi->ll_lcq) {
                 /* Only if client_common_fill_super succeeded */
@@ -1162,7 +1166,7 @@ void ll_put_super(struct super_block *sb)
         lustre_common_put_super(sb);
 
         LCONSOLE_WARN("client %s umount complete\n", ll_instance);
-        
+
         cfs_module_put();
 
         EXIT;
@@ -1309,13 +1313,13 @@ int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
         struct ptlrpc_request *request = NULL;
         int rc;
         ENTRY;
-        
-        op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, 
+
+        op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
                                      LUSTRE_OPC_ANY, NULL);
         if (IS_ERR(op_data))
                 RETURN(PTR_ERR(op_data));
 
-        rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, 
+        rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
                         &request, mod);
         if (rc) {
                 ptlrpc_req_finished(request);
@@ -1364,7 +1368,7 @@ static int ll_setattr_done_writing(struct inode *inode,
         struct ll_inode_info *lli = ll_i2info(inode);
         int rc = 0;
         ENTRY;
-        
+
         LASSERT(op_data != NULL);
         if (!S_ISREG(inode->i_mode))
                 RETURN(0);
@@ -1526,12 +1530,12 @@ int ll_setattr_raw(struct inode *inode, struct iattr *attr)
                 attr->ia_valid |= ATTR_MTIME_SET;
         }
         if ((attr->ia_valid & ATTR_CTIME) && !(attr->ia_valid & ATTR_MTIME)) {
-                /* To avoid stale mtime on mds, obtain it from ost and send 
+                /* To avoid stale mtime on mds, obtain it from ost and send
                    to mds. */
                 rc = ll_glimpse_size(inode, 0);
-                if (rc) 
+                if (rc)
                         RETURN(rc);
-                
+
                 attr->ia_valid |= ATTR_MTIME_SET | ATTR_MTIME;
                 attr->ia_mtime = inode->i_mtime;
         }
@@ -1597,7 +1601,7 @@ int ll_setattr_raw(struct inode *inode, struct iattr *attr)
 
                 flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
                         OBD_MD_FLMTIME | OBD_MD_FLCTIME |
-                        OBD_MD_FLFID | OBD_MD_FLGENER | 
+                        OBD_MD_FLFID | OBD_MD_FLGENER |
                         OBD_MD_FLGROUP;
 
                 obdo_from_inode(oa, inode, flags);
@@ -1838,17 +1842,17 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
         if (body->valid & OBD_MD_FLATIME &&
             body->atime > LTIME_S(inode->i_atime))
                 LTIME_S(inode->i_atime) = body->atime;
-        
+
         /* mtime is always updated with ctime, but can be set in past.
            As write and utime(2) may happen within 1 second, and utime's
-           mtime has a priority over write's one, so take mtime from mds 
+           mtime has a priority over write's one, so take mtime from mds
            for the same ctimes. */
         if (body->valid & OBD_MD_FLCTIME &&
             body->ctime >= LTIME_S(inode->i_ctime)) {
                 LTIME_S(inode->i_ctime) = body->ctime;
                 if (body->valid & OBD_MD_FLMTIME) {
                         CDEBUG(D_INODE, "setting ino %lu mtime "
-                               "from %lu to "LPU64"\n", inode->i_ino, 
+                               "from %lu to "LPU64"\n", inode->i_ino,
                                LTIME_S(inode->i_mtime), body->mtime);
                         LTIME_S(inode->i_mtime) = body->mtime;
                 }
@@ -1884,7 +1888,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
                                " to the "DFID", inode %lu/%u(%p)\n",
                                PFID(&lli->lli_fid), PFID(&body->fid1),
                                inode->i_ino, inode->i_generation, inode);
-                } else 
+                } else
                         lli->lli_fid = body->fid1;
         }
@@ -1895,9 +1899,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
             S_ISREG(inode->i_mode) && lli->lli_smd) {
                 struct lustre_handle lockh;
                 ldlm_mode_t mode;
-                
+
                 /* As it is possible a blocking ast has been processed
-                 * by this time, we need to check there is an UPDATE 
+                 * by this time, we need to check there is an UPDATE
                  * lock on the client and set LLIF_MDS_SIZE_LOCK holding
                  * it. */
                 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
@@ -2077,7 +2081,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
                 oinfo.oi_oa->o_id = lsm->lsm_object_id;
                 oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
                 oinfo.oi_oa->o_flags = flags;
-                oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | 
+                oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
                                        OBD_MD_FLGROUP;
                 oinfo.oi_capa = ll_mdscapa_get(inode);
@@ -2310,7 +2314,7 @@ int ll_obd_statfs(struct inode *inode, void *arg)
                 exp = sbi->ll_md_exp;
         else if (type == LL_STATFS_LOV)
                 exp = sbi->ll_dt_exp;
-        else 
+        else
                 GOTO(out_statfs, rc = -ENODEV);
 
         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
@@ -2327,14 +2331,14 @@ int ll_process_config(struct lustre_cfg *lcfg)
         char *ptr;
         void *sb;
         struct lprocfs_static_vars lvars;
-        unsigned long x; 
+        unsigned long x;
         int rc = 0;
 
         lprocfs_llite_init_vars(&lvars);
 
         /* The instance name contains the sb: lustre-client-aacfe000 */
         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
-        if (!ptr || !*(++ptr)) 
+        if (!ptr || !*(++ptr))
                 return -EINVAL;
         if (sscanf(ptr, "%lx", &x) != 1)
                 return -EINVAL;
@@ -2342,7 +2346,7 @@ int ll_process_config(struct lustre_cfg *lcfg)
         /* This better be a real Lustre superblock! */
         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
 
-        /* Note we have not called client_common_fill_super yet, so 
+        /* Note we have not called client_common_fill_super yet, so
            proc fns must be able to handle that! */
         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
                                       lcfg, sb);
*/ rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars, lcfg, sb); @@ -2359,10 +2363,10 @@ struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data, if (namelen > ll_i2sbi(i1)->ll_namelen) return ERR_PTR(-ENAMETOOLONG); - + if (op_data == NULL) OBD_ALLOC_PTR(op_data); - + if (op_data == NULL) return ERR_PTR(-ENOMEM); diff --git a/lustre/obdclass/class_obd.c b/lustre/obdclass/class_obd.c index 3deec08c916cc663386f79b76c65749418102abc..581d5732707d0d609481785c90b2ec770dcb502e 100644 --- a/lustre/obdclass/class_obd.c +++ b/lustre/obdclass/class_obd.c @@ -287,7 +287,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) CERROR("ioctl buffer too small to hold version\n"); GOTO(out, err = -EINVAL); } - + obd = class_num2obd(index); if (!obd) GOTO(out, err = -ENOENT); @@ -552,18 +552,18 @@ int init_obdclass(void) cfs_waitq_init(&obd_race_waitq); obd_zombie_impexp_init(); #ifdef LPROCFS - obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM, + obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM, LPROCFS_STATS_FLAG_PERCPU); if (obd_memory == NULL) { CERROR("kmalloc of 'obd_memory' failed\n"); RETURN(-ENOMEM); } - + lprocfs_counter_init(obd_memory, OBD_MEMORY_STAT, - LPROCFS_CNTR_AVGMINMAX, + LPROCFS_CNTR_AVGMINMAX, "memused", "bytes"); lprocfs_counter_init(obd_memory, OBD_MEMORY_PAGES_STAT, - LPROCFS_CNTR_AVGMINMAX, + LPROCFS_CNTR_AVGMINMAX, "pagesused", "pages"); #endif err = obd_init_checks(); @@ -588,8 +588,13 @@ int init_obdclass(void) for (i = 0; i < class_devno_max(); i++) obd_devs[i] = NULL; - /* Default the dirty page cache cap to 1/2 of system memory */ - obd_max_dirty_pages = num_physpages / 2; + /* Default the dirty page cache cap to 1/2 of system memory. + * For clients with less memory, a larger fraction is needed + * for other purposes (mostly for BGL). */ + if (num_physpages <= 512 << (20 - CFS_PAGE_SHIFT)) + obd_max_dirty_pages = num_physpages / 4; + else + obd_max_dirty_pages = num_physpages / 2; err = obd_init_caches(); if (err) @@ -643,7 +648,7 @@ static void cleanup_obdclass(void) memory_leaked = obd_memory_sum(); pages_leaked = obd_pages_sum(); - + memory_max = obd_memory_max(); pages_max = obd_pages_max(); @@ -656,7 +661,7 @@ static void cleanup_obdclass(void) CWARN("Page leaks detected (max "LPU64", leaked "LPU64")\n", pages_max, pages_leaked); } - + EXIT; } diff --git a/lustre/obdfilter/filter_internal.h b/lustre/obdfilter/filter_internal.h index 7a76bc06baf3124c79e64c564d9c2c7ec7440273..d73a4687787ab38a51eb2c9403480177499cda7e 100644 --- a/lustre/obdfilter/filter_internal.h +++ b/lustre/obdfilter/filter_internal.h @@ -85,7 +85,7 @@ struct filter_mod_data { int fmd_refcount; /* reference counter - list holds 1 */ }; -#ifdef BGL_SUPPORT +#ifdef HAVE_BGL_SUPPORT #define FILTER_FMD_MAX_NUM_DEFAULT 128 /* many active files per client on BGL */ #else #define FILTER_FMD_MAX_NUM_DEFAULT 32