Commit e428ed02 authored by tianzy

Branch HEAD

Remove usage of trace_call_on_all_cpus()
b=15878
i=adilger
i=robert.read
parent bc359859
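
In short: trace_call_on_all_cpus() ran a callback on every CPU by rebinding the calling thread's affinity to each CPU in turn (and the Darwin port only ever had an '#error "tbd"' stub), which requires process context and cannot work during a panic. The patch drops the helper and instead walks every CPU's trace buffers from wherever the caller happens to run, relying on a new per-buffer spinlock (tcd_lock) for mutual exclusion. Schematically, using names from the patch itself (an illustrative sketch, not a verbatim excerpt):

    /* before: migrate the current thread to each CPU in turn,
     * then invoke the callback locally on that CPU */
    trace_call_on_all_cpus(collect_pages_on_cpu, pc);

    /* after: stay on the current CPU and visit every CPU's buffers
     * remotely; tcd_for_each_type_lock() takes tcd_lock around the body */
    for_each_possible_cpu(cpu) {
            tcd_for_each_type_lock(tcd, i, cpu) {
                    /* the (type i, cpu) buffer is locked here */
            }
    }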
@@ -218,9 +218,3 @@ int trace_max_debug_mb(void)
 {
         return max_permit_mb;
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-#error "tbd"
-}
@@ -84,6 +84,7 @@ int tracefile_init_arch()
         /* arch related info initialized */
         tcd_for_each(tcd, i, j) {
                 spin_lock_init(&tcd->tcd_lock);
+                tcd->tcd_pages_factor = pages_factor[i];
                 tcd->tcd_type = i;
                 tcd->tcd_cpu = j;
@@ -174,39 +175,44 @@ trace_put_console_buffer(char *buffer)
 struct trace_cpu_data *
 trace_get_tcd(void)
 {
+        struct trace_cpu_data *tcd;
         int cpu;
 
         cpu = get_cpu();
         if (in_irq())
-                return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
         else if (in_softirq())
-                return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
-        return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
+        else
+                tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+        trace_lock_tcd(tcd);
+        return tcd;
 }
 
 void
 trace_put_tcd (struct trace_cpu_data *tcd)
 {
+        trace_unlock_tcd(tcd);
         put_cpu();
 }
 
 int trace_lock_tcd(struct trace_cpu_data *tcd)
 {
         __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_disable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_disable();
+        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
         return 1;
 }
 
 void trace_unlock_tcd(struct trace_cpu_data *tcd)
 {
         __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_enable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_enable();
+        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
 }
 
 int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
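
With the lock folded into the accessors, trace_get_tcd() now hands back the current CPU's buffer already locked, and trace_put_tcd() undoes both the lock and the get_cpu(). Callers such as libcfs_debug_vmsg2() keep the usual get/put bracket; a minimal usage sketch (assumed caller shape, not taken from the patch):

    struct trace_cpu_data *tcd;

    tcd = trace_get_tcd();  /* pick IRQ/softirq/process buffer, lock it */
    /* ... append trace pages to tcd->tcd_pages ... */
    trace_put_tcd(tcd);     /* trace_unlock_tcd(), then put_cpu() */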
@@ -278,26 +284,3 @@ int trace_max_debug_mb(void)
 {
         return MAX(512, (total_mb * 80)/100);
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-        cpumask_t cpus_allowed = current->cpus_allowed;
-        /* use cpus_allowed to quiet 2.4 UP kernel warning only */
-        cpumask_t m = cpus_allowed;
-        int cpu;
-
-        /* Run the given routine on every CPU in thread context */
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                if (!cpu_online(cpu))
-                        continue;
-
-                cpus_clear(m);
-                cpu_set(cpu, m);
-                set_cpus_allowed(current, m);
-
-                fn(arg);
-
-                set_cpus_allowed(current, cpus_allowed);
-        }
-}
@@ -499,19 +499,21 @@ panic_collect_pages(struct page_collection *pc)
         }
 }
 
-static void collect_pages_on_cpu(void *info)
+static void collect_pages_on_all_cpus(struct page_collection *pc)
 {
         struct trace_cpu_data *tcd;
-        struct page_collection *pc = info;
-        int i;
+        int i, cpu;
 
         spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
-                tcd->tcd_cur_pages = 0;
-                if (pc->pc_want_daemon_pages) {
-                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
-                        tcd->tcd_cur_daemon_pages = 0;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+                        tcd->tcd_cur_pages = 0;
+                        if (pc->pc_want_daemon_pages) {
+                                list_splice_init(&tcd->tcd_daemon_pages,
+                                                 &pc->pc_pages);
+                                tcd->tcd_cur_daemon_pages = 0;
+                        }
                 }
         }
         spin_unlock(&pc->pc_lock);
@@ -524,32 +526,35 @@ static void collect_pages(struct page_collection *pc)
         if (libcfs_panic_in_progress)
                 panic_collect_pages(pc);
         else
-                trace_call_on_all_cpus(collect_pages_on_cpu, pc);
+                collect_pages_on_all_cpus(pc);
 }
 
-static void put_pages_back_on_cpu(void *info)
+static void put_pages_back_on_all_cpus(struct page_collection *pc)
 {
-        struct page_collection *pc = info;
         struct trace_cpu_data *tcd;
         struct list_head *cur_head;
         struct trace_page *tage;
         struct trace_page *tmp;
-        int i;
+        int i, cpu;
 
         spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                cur_head = tcd->tcd_pages.next;
-
-                cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
-                                                   struct trace_page, linkage) {
-                        __LASSERT_TAGE_INVARIANT(tage);
-
-                        if (tage->cpu != smp_processor_id() || tage->type != i)
-                                continue;
-
-                        tage_to_tail(tage, cur_head);
-                        tcd->tcd_cur_pages++;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        cur_head = tcd->tcd_pages.next;
+
+                        cfs_list_for_each_entry_safe_typed(tage, tmp,
+                                                           &pc->pc_pages,
+                                                           struct trace_page,
+                                                           linkage) {
+                                __LASSERT_TAGE_INVARIANT(tage);
+
+                                if (tage->cpu != cpu || tage->type != i)
+                                        continue;
+
+                                tage_to_tail(tage, cur_head);
+                                tcd->tcd_cur_pages++;
+                        }
                 }
         }
         spin_unlock(&pc->pc_lock);
@@ -558,7 +563,7 @@ static void put_pages_back_on_cpu(void *info)
 static void put_pages_back(struct page_collection *pc)
 {
         if (!libcfs_panic_in_progress)
-                trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
+                put_pages_back_on_all_cpus(pc);
 }
 
 /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
@@ -577,8 +582,7 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                 __LASSERT_TAGE_INVARIANT(tage);
 
-                if (tage->cpu != smp_processor_id() ||
-                    tage->type != tcd->tcd_type)
+                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                         continue;
 
                 tage_to_tail(tage, &tcd->tcd_daemon_pages);
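
Because the thread doing the collection no longer runs on the CPU whose pages it inspects, page ownership can no longer be tested against smp_processor_id(); it is now derived from the tcd's own bookkeeping. Written out as a hypothetical helper (not part of the patch), the predicate is:

    /* a trace page belongs to a tcd iff it was filled on that tcd's
     * CPU and in that tcd's context type (process/softirq/irq) */
    static int tage_belongs_to_tcd(const struct trace_page *tage,
                                   const struct trace_cpu_data *tcd)
    {
            return tage->cpu == tcd->tcd_cpu &&
                   tage->type == tcd->tcd_type;
    }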
@@ -600,18 +604,15 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
         spin_unlock(&pc->pc_lock);
 }
 
-static void put_pages_on_daemon_list_on_cpu(void *info)
+static void put_pages_on_daemon_list(struct page_collection *pc)
 {
         struct trace_cpu_data *tcd;
-        int i;
-
-        tcd_for_each_type_lock(tcd, i)
-                put_pages_on_tcd_daemon_list(info, tcd);
-}
+        int i, cpu;
 
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
-        trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu)
+                        put_pages_on_tcd_daemon_list(pc, tcd);
+        }
 }
 
 void trace_debug_print(void)
@@ -1125,24 +1126,29 @@ int tracefile_init(int max_pages)
         return 0;
 }
 
-static void trace_cleanup_on_cpu(void *info)
+static void trace_cleanup_on_all_cpus(void)
 {
         struct trace_cpu_data *tcd;
         struct trace_page *tage;
         struct trace_page *tmp;
-        int i;
+        int i, cpu;
 
-        tcd_for_each_type_lock(tcd, i) {
-                tcd->tcd_shutting_down = 1;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        tcd->tcd_shutting_down = 1;
 
-                cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
-                                                   struct trace_page, linkage) {
-                        __LASSERT_TAGE_INVARIANT(tage);
+                        cfs_list_for_each_entry_safe_typed(tage, tmp,
+                                                           &tcd->tcd_pages,
+                                                           struct trace_page,
+                                                           linkage) {
+                                __LASSERT_TAGE_INVARIANT(tage);
 
-                        list_del(&tage->linkage);
-                        tage_free(tage);
+                                list_del(&tage->linkage);
+                                tage_free(tage);
+                        }
+
+                        tcd->tcd_cur_pages = 0;
                 }
-
-                tcd->tcd_cur_pages = 0;
         }
 }
@@ -1153,7 +1159,7 @@ static void trace_cleanup(void)
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
         spin_lock_init(&pc.pc_lock);
 
-        trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);
+        trace_cleanup_on_all_cpus();
 
         tracefile_fini_arch();
 }
@@ -104,6 +104,16 @@ extern int trace_max_debug_mb(void);
 union trace_data_union {
         struct trace_cpu_data {
+                /*
+                 * Even though this structure is meant to be per-CPU, locking
+                 * is needed because in some places the data may be accessed
+                 * from other CPUs. This lock is directly used in trace_get_tcd
+                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
+                 * tcd_for_each_type_lock.
+                 */
+                spinlock_t              tcd_lock;
+                unsigned long           tcd_lock_flags;
+
                 /*
                  * pages with trace records not yet processed by tracefiled.
                  */
@@ -176,9 +186,9 @@ extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
         for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd);                 \
              j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
 
-#define tcd_for_each_type_lock(tcd, i)                                  \
+#define tcd_for_each_type_lock(tcd, i, cpu)                             \
         for (i = 0; trace_data[i] &&                                    \
-             (tcd = &(*trace_data[i])[smp_processor_id()].tcd) &&       \
+             (tcd = &(*trace_data[i])[cpu].tcd) &&                      \
              trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
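
The macro now takes the CPU to visit instead of assuming smp_processor_id(), and it still locks each tcd before the loop body runs and unlocks it in the increment clause. Hand-expanded for one cpu (for illustration only; it mirrors the definition above):

    for (i = 0; trace_data[i] &&
         (tcd = &(*trace_data[i])[cpu].tcd) &&
         trace_lock_tcd(tcd);              /* lock before the body runs */
         trace_unlock_tcd(tcd), i++)       /* unlock before the next type */
            /* body sees a locked tcd; note that a 'break' would skip the
             * unlock in the increment clause, so the callers in this
             * patch always run the body to completion */
            body;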
@@ -253,8 +263,6 @@ extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
 extern char *trace_get_console_buffer(void);
 extern void trace_put_console_buffer(char *buffer);
 
-extern void trace_call_on_all_cpus(void (*fn)(void *arg), void *arg);
-
 int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                        struct list_head *stock);
@@ -259,18 +259,3 @@ int trace_max_debug_mb(void)
 {
         return MAX(512, (total_mb * 80)/100);
 }
-
-void
-trace_call_on_all_cpus(void (*fn)(void *_arg), void *arg)
-{
-        int cpu;
-        KAFFINITY mask = cfs_query_thread_affinity();
-
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                if (cfs_tie_thread_to_cpu(cpu)) {
-                        ASSERT((int)KeGetCurrentProcessorNumber() == cpu);
-                        fn(arg);
-                        cfs_set_thread_affinity(mask);
-                }
-        }
-}