Commit 6eb89d43 authored by tianzy

Branch b1_6

Remove usage of trace_call_on_all_cpus()
b=15878
i=adilger
i=robert.read
parent 29156283
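
In short, trace_call_on_all_cpus() ran a callback on every online CPU by repeatedly calling set_cpus_allowed() to migrate the current thread, relying on execution context (plus local_irq_disable()/local_bh_disable() in trace_lock_tcd()) to protect each CPU's trace data. This commit removes the helper: callers now walk every possible CPU directly, and a new spinlock in struct trace_cpu_data (tcd_lock, taken with spin_lock_irqsave() by trace_lock_tcd()) makes it safe to touch another CPU's page lists. A minimal before/after sketch, using names from this diff with the surrounding code elided:

        /* Before: fn(arg) ran while the current thread was pinned to
         * each CPU in turn, so each tcd was only touched locally. */
        trace_call_on_all_cpus(collect_pages_on_cpu, pc);

        /* After: iterate every CPU's data from whatever CPU we are on;
         * tcd_for_each_type_lock() holds tcd->tcd_lock around the body,
         * so cross-CPU list manipulation is safe. */
        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                }
        }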
@@ -218,9 +218,3 @@ int trace_max_debug_mb(void)
 {
         return max_permit_mb;
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-#error "tbd"
-}
@@ -85,6 +85,7 @@ int tracefile_init_arch()
 
         /* arch related info initialized */
         tcd_for_each(tcd, i, j) {
+                spin_lock_init(&tcd->tcd_lock);
                 tcd->tcd_pages_factor = pages_factor[i];
                 tcd->tcd_type = i;
                 tcd->tcd_cpu = j;
@@ -173,39 +174,44 @@ trace_put_console_buffer(char *buffer)
 struct trace_cpu_data *
 trace_get_tcd(void)
 {
+        struct trace_cpu_data *tcd;
         int cpu;
 
         cpu = get_cpu();
         if (in_irq())
-                return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
         else if (in_softirq())
-                return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
-        return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
+        else
+                tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+
+        trace_lock_tcd(tcd);
+        return tcd;
 }
 
 void
 trace_put_tcd (struct trace_cpu_data *tcd)
 {
+        trace_unlock_tcd(tcd);
         put_cpu();
 }
 
 int trace_lock_tcd(struct trace_cpu_data *tcd)
 {
         __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_disable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_disable();
+        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
         return 1;
 }
 
 void trace_unlock_tcd(struct trace_cpu_data *tcd)
 {
         __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_enable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_enable();
+        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
 }
 
 int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
@@ -277,26 +283,3 @@ int trace_max_debug_mb(void)
         return MAX(512, (total_mb * 80)/100);
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-        cpumask_t cpus_allowed = current->cpus_allowed;
-        /* use cpus_allowed to quiet 2.4 UP kernel warning only */
-        cpumask_t m = cpus_allowed;
-        int cpu;
-
-        /* Run the given routine on every CPU in thread context */
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                if (!cpu_online(cpu))
-                        continue;
-
-                cpus_clear(m);
-                cpu_set(cpu, m);
-                set_cpus_allowed(current, m);
-
-                fn(arg);
-
-                set_cpus_allowed(current, cpus_allowed);
-        }
-}
@@ -496,19 +496,21 @@ panic_collect_pages(struct page_collection *pc)
         }
 }
 
-static void collect_pages_on_cpu(void *info)
+static void collect_pages_on_all_cpus(struct page_collection *pc)
 {
         struct trace_cpu_data *tcd;
-        struct page_collection *pc = info;
-        int i;
+        int i, cpu;
 
         spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
-                tcd->tcd_cur_pages = 0;
-                if (pc->pc_want_daemon_pages) {
-                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
-                        tcd->tcd_cur_daemon_pages = 0;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+                        tcd->tcd_cur_pages = 0;
+                        if (pc->pc_want_daemon_pages) {
+                                list_splice_init(&tcd->tcd_daemon_pages,
+                                                 &pc->pc_pages);
+                                tcd->tcd_cur_daemon_pages = 0;
+                        }
                 }
         }
         spin_unlock(&pc->pc_lock);
@@ -521,31 +523,33 @@ static void collect_pages(struct page_collection *pc)
         if (libcfs_panic_in_progress)
                 panic_collect_pages(pc);
         else
-                trace_call_on_all_cpus(collect_pages_on_cpu, pc);
+                collect_pages_on_all_cpus(pc);
 }
 
-static void put_pages_back_on_cpu(void *info)
+static void put_pages_back_on_all_cpus(struct page_collection *pc)
 {
-        struct page_collection *pc = info;
         struct trace_cpu_data *tcd;
         struct list_head *cur_head;
         struct trace_page *tage;
         struct trace_page *tmp;
-        int i;
+        int i, cpu;
 
         spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                cur_head = tcd->tcd_pages.next;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        cur_head = tcd->tcd_pages.next;
 
-                list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
+                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
+                                                 linkage) {
 
-                        __LASSERT_TAGE_INVARIANT(tage);
+                                __LASSERT_TAGE_INVARIANT(tage);
 
-                        if (tage->cpu != smp_processor_id() || tage->type != i)
-                                continue;
+                                if (tage->cpu != cpu || tage->type != i)
+                                        continue;
 
-                        tage_to_tail(tage, cur_head);
-                        tcd->tcd_cur_pages++;
+                                tage_to_tail(tage, cur_head);
+                                tcd->tcd_cur_pages++;
+                        }
                 }
         }
         spin_unlock(&pc->pc_lock);
@@ -554,7 +558,7 @@ static void put_pages_back_on_cpu(void *info)
 static void put_pages_back(struct page_collection *pc)
 {
         if (!libcfs_panic_in_progress)
-                trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
+                put_pages_back_on_all_cpus(pc);
 }
 
 /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
@@ -572,8 +576,7 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                 __LASSERT_TAGE_INVARIANT(tage);
 
-                if (tage->cpu != smp_processor_id() ||
-                    tage->type != tcd->tcd_type)
+                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                         continue;
 
                 tage_to_tail(tage, &tcd->tcd_daemon_pages);
@@ -595,18 +598,15 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
         spin_unlock(&pc->pc_lock);
 }
 
-static void put_pages_on_daemon_list_on_cpu(void *info)
+static void put_pages_on_daemon_list(struct page_collection *pc)
 {
         struct trace_cpu_data *tcd;
-        int i;
+        int i, cpu;
 
-        tcd_for_each_type_lock(tcd, i)
-                put_pages_on_tcd_daemon_list(info, tcd);
-}
-
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
-        trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu)
+                        put_pages_on_tcd_daemon_list(pc, tcd);
+        }
 }
 
 void trace_debug_print(void)
@@ -1116,23 +1116,27 @@ int tracefile_init(int max_pages)
         return 0;
 }
 
-static void trace_cleanup_on_cpu(void *info)
+static void trace_cleanup_on_all_cpus(void)
 {
         struct trace_cpu_data *tcd;
         struct trace_page *tage;
         struct trace_page *tmp;
-        int i;
+        int i, cpu;
 
-        tcd_for_each_type_lock(tcd, i) {
-                tcd->tcd_shutting_down = 1;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        tcd->tcd_shutting_down = 1;
 
-                list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
-                        __LASSERT_TAGE_INVARIANT(tage);
+                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
+                                                 linkage) {
+                                __LASSERT_TAGE_INVARIANT(tage);
 
-                        list_del(&tage->linkage);
-                        tage_free(tage);
+                                list_del(&tage->linkage);
+                                tage_free(tage);
+                        }
+
+                        tcd->tcd_cur_pages = 0;
                 }
-
-                tcd->tcd_cur_pages = 0;
         }
 }
@@ -1143,7 +1147,7 @@ static void trace_cleanup(void)
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
         spin_lock_init(&pc.pc_lock);
 
-        trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);
+        trace_cleanup_on_all_cpus();
 
         tracefile_fini_arch();
 }
......
@@ -104,6 +104,16 @@ extern int trace_max_debug_mb(void);
 
 union trace_data_union {
         struct trace_cpu_data {
+                /*
+                 * Even though this structure is meant to be per-CPU, locking
+                 * is needed because in some places the data may be accessed
+                 * from other CPUs. This lock is directly used in trace_get_tcd
+                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2
+                 * and tcd_for_each_type_lock.
+                 */
+                spinlock_t              tcd_lock;
+                unsigned long           tcd_lock_flags;
                 /*
                  * pages with trace records not yet processed by tracefiled.
                  */
@@ -176,9 +186,9 @@ extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
         for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd);                 \
              j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
 
-#define tcd_for_each_type_lock(tcd, i)                                  \
+#define tcd_for_each_type_lock(tcd, i, cpu)                             \
         for (i = 0; trace_data[i] &&                                    \
-             (tcd = &(*trace_data[i])[smp_processor_id()].tcd) &&       \
+             (tcd = &(*trace_data[i])[cpu].tcd) &&                      \
              trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
@@ -253,8 +263,6 @@ extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
 extern char *trace_get_console_buffer(void);
 extern void trace_put_console_buffer(char *buffer);
 
-extern void trace_call_on_all_cpus(void (*fn)(void *arg), void *arg);
-
 int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                        struct list_head *stock);
......
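
A note on the reworked macro above: trace_lock_tcd() is evaluated as the last clause of the for-loop condition and trace_unlock_tcd() runs in the increment expression, so the loop body always executes with tcd->tcd_lock held, and the lock is dropped after each type iteration. An illustrative sketch of how the macro behaves when expanded (not literal preprocessor output):

        for (i = 0; trace_data[i] &&
             (tcd = &(*trace_data[i])[cpu].tcd) &&
             trace_lock_tcd(tcd);                /* lock held on body entry */
             trace_unlock_tcd(tcd), i++) {       /* released after each pass */
                /* per-type, per-cpu work runs here under tcd_lock */
        }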
@@ -233,9 +233,3 @@ int trace_max_debug_mb(void)
         return MAX(512, (total_mb * 80)/100);
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-#error "tbd"
-}