author    Ralf Baechle <ralf@linux-mips.org>    1999-01-04 16:03:48 +0000
committer Ralf Baechle <ralf@linux-mips.org>    1999-01-04 16:03:48 +0000
commit    78c388aed2b7184182c08428db1de6c872d815f5 (patch)
tree      4b2003b1b4ceb241a17faa995da8dd1004bb8e45 /arch/sparc64/kernel/smp.c
parent    eb7a5bf93aaa4be1d7c6181100ab7639e74d67f7 (diff)
Merge with Linux 2.1.131 and more MIPS goodies.
(Did I mention that CVS is buggy ...)
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  |  88
1 file changed, 56 insertions(+), 32 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 3ec32e92b..4bdfca1b7 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -41,7 +41,9 @@ int smp_threads_ready = 0;
struct cpuinfo_sparc cpu_data[NR_CPUS] __attribute__ ((aligned (64)));
-static unsigned char boot_cpu_id __initdata = 0;
+/* Please don't make this initdata!!! --DaveM */
+static unsigned char boot_cpu_id = 0;
+
static int smp_activated = 0;
volatile int cpu_number_map[NR_CPUS];
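
The first hunk drops the __initdata annotation from boot_cpu_id. __initdata places a variable in a section the kernel discards once boot completes, so anything still read at run time (boot_cpu_id is checked on every per-CPU timer tick in the last hunk below) must stay in ordinary .data; DaveM's comment warns against reintroducing the annotation. A minimal user-space sketch of the section-placement idea, using GCC's section attribute (the section name and variables are illustrative, not the kernel's):

	#include <stdio.h>

	/* Placed in a custom section.  In the kernel the analogous
	 * .init.data section is freed after boot, so a late reference
	 * to it would hit recycled memory. */
	static int boot_only __attribute__((section(".mysect"))) = 42;
	static int always_there = 42;	/* ordinary .data: safe any time */

	int main(void)
	{
		printf("%d %d\n", boot_only, always_there);
		return 0;
	}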
@@ -63,7 +65,7 @@ int smp_info(char *buf)
for (i = 0; i < NR_CPUS; i++)
if(cpu_present_map & (1UL << i))
len += sprintf(buf + len,
- "CPU%d:\t\tonline\n", i
+ "CPU%d:\t\tonline\n", i);
return len;
}
@@ -82,12 +84,16 @@ int smp_bogo(char *buf)
__initfunc(void smp_store_cpu_info(int id))
{
- cpu_data[id].udelay_val = loops_per_sec;
cpu_data[id].irq_count = 0;
+ cpu_data[id].bh_count = 0;
+ /* multiplier and counter set by
+ smp_setup_percpu_timer() */
+ cpu_data[id].udelay_val = loops_per_sec;
+
cpu_data[id].pgcache_size = 0;
+ cpu_data[id].pte_cache = NULL;
cpu_data[id].pgdcache_size = 0;
cpu_data[id].pgd_cache = NULL;
- cpu_data[id].pte_cache = NULL;
}
extern void distribute_irqs(void);
@@ -137,6 +143,11 @@ __initfunc(void smp_callin(void))
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
+ /* Clear this or we will die instantly when we
+ * schedule back to this idler...
+ */
+ current->tss.flags &= ~(SPARC_FLAG_NEWCHILD);
+
while(!smp_processors_ready)
membar("#LoadLoad");
}
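
Two things happen in this hunk: the idler's SPARC_FLAG_NEWCHILD thread flag is cleared before anything can schedule back to it, and the function then parks in the file's usual spin-wait idiom, polling a flag and issuing a load barrier each iteration so the loop is guaranteed to observe the boot CPU's store. A rough C11 rendering of that wait loop (the names here are illustrative, not the kernel's):

	#include <stdatomic.h>

	static atomic_int processors_ready;	/* set by the boot CPU */

	static void wait_for_bringup(void)
	{
		/* the acquire load stands in for membar("#LoadLoad") */
		while (!atomic_load_explicit(&processors_ready,
					     memory_order_acquire))
			;	/* spin */
	}

	int main(void)
	{
		atomic_store_explicit(&processors_ready, 1,
				      memory_order_release);
		wait_for_bringup();	/* returns once the flag is seen */
		return 0;
	}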
@@ -380,7 +391,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = mm->context & 0x3ff;
- if(mm == current->mm && mm->count == 1) {
+ if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
goto local_flush_and_out;
return smp_cross_call_avoidance(mm);
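
This and the next two hunks track a 2.1.x API change: the mm_struct reference count became an atomic_t, so it must be read with atomic_read() rather than as a plain integer. The count == 1 test asks whether the current process is the mm's only user, in which case the cross-CPU flush can sometimes be avoided entirely. A hedged C11 sketch of the same kind of read (mm_stub is a stand-in, not the kernel's struct):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct mm_stub {
		atomic_int count;	/* stand-in for the kernel's atomic_t */
	};

	/* True when the caller holds the only reference, so a purely
	 * local TLB flush may be enough. */
	static bool mm_is_private(struct mm_stub *mm)
	{
		return atomic_load(&mm->count) == 1;
	}

	int main(void)
	{
		struct mm_stub mm = { .count = 1 };
		return mm_is_private(&mm) ? 0 : 1;
	}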
@@ -396,7 +407,9 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
{
u32 ctx = mm->context & 0x3ff;
- if(mm == current->mm && mm->count == 1) {
+ start &= PAGE_MASK;
+ end &= PAGE_MASK;
+ if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
goto local_flush_and_out;
return smp_cross_call_avoidance(mm);
@@ -404,8 +417,6 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
smp_cross_call(&xcall_flush_tlb_range, ctx, start, end);
local_flush_and_out:
- start &= PAGE_MASK;
- end &= PAGE_MASK;
__flush_tlb_range(ctx, start, SECONDARY_CONTEXT, end, PAGE_SIZE, (end-start));
}
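
The second fix in this hunk is easy to miss: start and end used to be page-aligned only on the local path, after the cross call had already been sent, so remote CPUs flushed an unaligned range. Hoisting the masking to the top of the function makes the local and remote flushes agree. The alignment itself is ordinary bit masking; a small standalone sketch (the 8K PAGE_SIZE matches sparc64, the addresses are arbitrary):

	#include <stdio.h>

	#define PAGE_SIZE	8192UL		/* sparc64 uses 8K pages */
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long start = 0x123456UL, end = 0x234567UL;

		start &= PAGE_MASK;	/* round down to a page boundary */
		end &= PAGE_MASK;
		printf("%#lx %#lx\n", start, end);	/* 0x122000 0x234000 */
		return 0;
	}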
@@ -413,13 +424,14 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
u32 ctx = mm->context & 0x3ff;
- if(mm == current->mm && mm->count == 1) {
+ page &= PAGE_MASK;
+ if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
goto local_flush_and_out;
return smp_cross_call_avoidance(mm);
}
#if 0 /* XXX Disabled until further notice... */
- else if(mm->count == 1) {
+ else if(atomic_read(&mm->count) == 1) {
/* Try to handle two special cases to avoid cross calls
* in common scenarios where we are swapping process
* pages out.
@@ -433,11 +445,11 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
smp_cross_call(&xcall_flush_tlb_page, ctx, page, 0);
local_flush_and_out:
- __flush_tlb_page(ctx, (page & PAGE_MASK), SECONDARY_CONTEXT);
+ __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
}
/* CPU capture. */
-#define CAPTURE_DEBUG
+/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
static atomic_t smp_capture_depth = ATOMIC_INIT(0);
@@ -446,37 +458,42 @@ static unsigned long penguins_are_doing_time = 0;
void smp_capture(void)
{
- int result = atomic_add_return(1, &smp_capture_depth);
+ if (smp_processors_ready) {
+ int result = atomic_add_return(1, &smp_capture_depth);
- membar("#StoreStore | #LoadStore");
- if(result == 1) {
- int ncpus = smp_num_cpus;
+ membar("#StoreStore | #LoadStore");
+ if(result == 1) {
+ int ncpus = smp_num_cpus;
#ifdef CAPTURE_DEBUG
- printk("CPU[%d]: Sending penguins to jail...", smp_processor_id());
+ printk("CPU[%d]: Sending penguins to jail...",
+ smp_processor_id());
#endif
- penguins_are_doing_time = 1;
- membar("#StoreStore | #LoadStore");
- atomic_inc(&smp_capture_registry);
- smp_cross_call(&xcall_capture, 0, 0, 0);
- while(atomic_read(&smp_capture_registry) != ncpus)
- membar("#LoadLoad");
+ penguins_are_doing_time = 1;
+ membar("#StoreStore | #LoadStore");
+ atomic_inc(&smp_capture_registry);
+ smp_cross_call(&xcall_capture, 0, 0, 0);
+ while(atomic_read(&smp_capture_registry) != ncpus)
+ membar("#LoadLoad");
#ifdef CAPTURE_DEBUG
- printk("done\n");
+ printk("done\n");
#endif
+ }
}
}
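
Beyond the new smp_processors_ready guard, which keeps capture from sending cross calls before the secondary CPUs exist, note the depth-counter pattern: atomic_add_return() yields the post-increment value, so only the caller that moves the depth from 0 to 1 actually jails the other CPUs; nested captures merely bump the count. A rough C11 rendering of that first-caller test (capture_depth and the function are illustrative):

	#include <stdatomic.h>

	static atomic_int capture_depth;

	static void capture(void)
	{
		/* fetch_add returns the old value; +1 mirrors the
		 * kernel's atomic_add_return() result */
		if (atomic_fetch_add(&capture_depth, 1) + 1 == 1) {
			/* first capturer: send the cross call and
			 * wait for every CPU to check in */
		}
	}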
void smp_release(void)
{
- if(atomic_dec_and_test(&smp_capture_depth)) {
+ if(smp_processors_ready) {
+ if(atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
- printk("CPU[%d]: Giving pardon to imprisoned penguins\n",
- smp_processor_id());
+ printk("CPU[%d]: Giving pardon to imprisoned penguins\n",
+ smp_processor_id());
#endif
- penguins_are_doing_time = 0;
- membar("#StoreStore | #StoreLoad");
- atomic_dec(&smp_capture_registry);
+ penguins_are_doing_time = 0;
+ membar("#StoreStore | #StoreLoad");
+ atomic_dec(&smp_capture_registry);
+ }
}
}
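
smp_release() mirrors the capture side with atomic_dec_and_test(), which is true only when the decrement reaches zero, i.e. when the outermost capturer releases; only then are the penguins pardoned. Continuing the illustrative sketch above:

	static void release(void)
	{
		/* fetch_sub returns the old value; old == 1 means the
		 * count just dropped to zero */
		if (atomic_fetch_sub(&capture_depth, 1) == 1) {
			/* last releaser: clear the flag so the
			 * captured CPUs can resume */
		}
	}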
@@ -539,8 +556,12 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
if(!--prof_counter(cpu))
{
if (cpu == boot_cpu_id) {
- extern void irq_enter(int, int);
- extern void irq_exit(int, int);
+/* XXX Keep this in sync with irq.c --DaveM */
+#define irq_enter(cpu, irq) \
+do { hardirq_enter(cpu); \
+ spin_unlock_wait(&global_irq_lock); \
+} while(0)
+#define irq_exit(cpu, irq) hardirq_exit(cpu)
irq_enter(cpu, 0);
kstat.irqs[cpu][0]++;
@@ -548,6 +569,9 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
timer_tick_interrupt(regs);
irq_exit(cpu, 0);
+
+#undef irq_enter
+#undef irq_exit
}
if(current->pid) {
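
The final hunk replaces the extern declarations with local macro copies of irq_enter()/irq_exit() (with a reminder to keep them in sync with irq.c) and #undefs them as soon as the timer-tick path is done, so the names cannot leak into the rest of the file. The do { ... } while(0) wrapper seen in the new irq_enter is the standard idiom for making a multi-statement macro behave as a single statement; a minimal standalone illustration (trace_enter is hypothetical):

	#include <stdio.h>

	/* Without the do/while(0) wrapper the two statements would
	 * break apart under an unbraced if/else. */
	#define trace_enter(cpu)			\
	do {						\
		printf("enter cpu %d\n", (cpu));	\
		fflush(stdout);				\
	} while (0)

	int main(void)
	{
		if (1)
			trace_enter(0);	/* expands safely as one statement */
		return 0;
	}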