author    Ralf Baechle <ralf@linux-mips.org>  1998-08-25 09:12:35 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1998-08-25 09:12:35 +0000
commit    c7fc24dc4420057f103afe8fc64524ebc25c5d37 (patch)
tree      3682407a599b8f9f03fc096298134cafba1c9b2f /arch/sparc64/kernel/smp.c
parent    1d793fade8b063fde3cf275bf1a5c2d381292cd9 (diff)
o Merge with Linux 2.1.116.
o New Newport console code.
o New G364 console code.
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  140
1 file changed, 52 insertions(+), 88 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index ca2aa4360..3ec32e92b 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -3,7 +3,6 @@
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tasks.h>
@@ -48,7 +47,8 @@ static int smp_activated = 0;
volatile int cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
-struct klock_info klock_info = { KLOCK_CLEAR, 0 };
+/* Kernel spinlock */
+spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
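
The klock_info structure was sparc64's private kernel-lock bookkeeping; kernel_flag is the generic spinlock that the post-2.1.116 tree uses as the "big kernel lock". As a rough, userspace-only model of the recursive acquire/release pattern this lock supports (the real kernel tracks the depth in current->lock_depth), here is a minimal C11 sketch; every name in it is invented for illustration, not the kernel's API:

/* Userspace model only: mimics lock_kernel()/unlock_kernel() semantics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag mock_kernel_flag = ATOMIC_FLAG_INIT; /* models SPIN_LOCK_UNLOCKED */
static int mock_lock_depth = 0;                         /* models current->lock_depth */

static void mock_lock_kernel(void)
{
	if (mock_lock_depth++ == 0)     /* first entry takes the lock */
		while (atomic_flag_test_and_set_explicit(&mock_kernel_flag,
							 memory_order_acquire))
			;               /* spin, as spin_lock() does */
}

static void mock_unlock_kernel(void)
{
	if (--mock_lock_depth == 0)     /* last exit releases it */
		atomic_flag_clear_explicit(&mock_kernel_flag,
					   memory_order_release);
}

int main(void)
{
	mock_lock_kernel();
	mock_lock_kernel();             /* recursive re-entry does not deadlock */
	puts("inside the (mock) big kernel lock");
	mock_unlock_kernel();
	mock_unlock_kernel();
	return 0;
}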
__initfunc(void smp_setup(char *str, int *ints))
{
@@ -63,8 +63,7 @@ int smp_info(char *buf)
for (i = 0; i < NR_CPUS; i++)
if(cpu_present_map & (1UL << i))
len += sprintf(buf + len,
- "CPU%d:\t\t%s\n",
- i, klock_info.akp == i ? "akp" : "online");
+ "CPU%d:\t\tonline\n", i
return len;
}
@@ -85,10 +84,9 @@ __initfunc(void smp_store_cpu_info(int id))
{
cpu_data[id].udelay_val = loops_per_sec;
cpu_data[id].irq_count = 0;
- cpu_data[id].last_tlbversion_seen = tlb_context_cache & CTX_VERSION_MASK;
cpu_data[id].pgcache_size = 0;
+ cpu_data[id].pgdcache_size = 0;
cpu_data[id].pgd_cache = NULL;
- cpu_data[id].pmd_cache = NULL;
cpu_data[id].pte_cache = NULL;
}
@@ -164,8 +162,6 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
-static void smp_tickoffset_init(void);
-
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern unsigned long smp_trampoline;
@@ -175,23 +171,8 @@ __initfunc(void smp_boot_cpus(void))
int cpucount = 0, i;
printk("Entering UltraSMPenguin Mode...\n");
- boot_cpu_id = hard_smp_processor_id();
- smp_tickoffset_init();
__sti();
- cpu_present_map = 0;
- for(i = 0; i < linux_num_cpus; i++)
- cpu_present_map |= (1UL << linux_cpus[i].mid);
- for(i = 0; i < NR_CPUS; i++) {
- cpu_number_map[i] = -1;
- __cpu_logical_map[i] = -1;
- }
- cpu_number_map[boot_cpu_id] = 0;
- prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
- __cpu_logical_map[0] = boot_cpu_id;
- klock_info.akp = boot_cpu_id;
- current->processor = boot_cpu_id;
smp_store_cpu_info(boot_cpu_id);
- smp_setup_percpu_timer();
if(linux_num_cpus == 1)
return;
@@ -368,16 +349,28 @@ void smp_flush_tlb_all(void)
__flush_tlb_all();
}
+/* We know that the window frames of the user have been flushed
+ * to the stack before we get here because all callers of us
+ * are flush_tlb_*() routines, and these run after flush_cache_*()
+ * which performs the flushw.
+ */
static void smp_cross_call_avoidance(struct mm_struct *mm)
{
+ u32 ctx;
+
spin_lock(&scheduler_lock);
- get_new_mmu_context(mm, &tlb_context_cache);
+ get_new_mmu_context(mm);
mm->cpu_vm_mask = (1UL << smp_processor_id());
- if(segment_eq(current->tss.current_ds,USER_DS)) {
- u32 ctx = mm->context & 0x1fff;
-
- current->tss.ctx = ctx;
- spitfire_set_secondary_context(ctx);
+ current->tss.ctx = ctx = mm->context & 0x3ff;
+ spitfire_set_secondary_context(ctx);
+ __asm__ __volatile__("flush %g6");
+ spitfire_flush_dtlb_secondary_context();
+ spitfire_flush_itlb_secondary_context();
+ __asm__ __volatile__("flush %g6");
+ if(!segment_eq(current->tss.current_ds,USER_DS)) {
+ /* Rarely happens. */
+ current->tss.ctx = 0;
+ spitfire_set_secondary_context(0);
__asm__ __volatile__("flush %g6");
}
spin_unlock(&scheduler_lock);
@@ -385,7 +378,7 @@ static void smp_cross_call_avoidance(struct mm_struct *mm)
void smp_flush_tlb_mm(struct mm_struct *mm)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
@@ -395,13 +388,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
smp_cross_call(&xcall_flush_tlb_mm, ctx, 0, 0);
local_flush_and_out:
- __flush_tlb_mm(ctx);
+ __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
}
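
The early-return path above is the point of smp_cross_call_avoidance(): a flush can stay local when the address space has a single user and has only ever been loaded on the calling CPU. A standalone sketch of that predicate, with simplified stand-in types (not the kernel's own):

#include <stdio.h>

struct mock_mm {
	int count;                  /* users of this address space */
	unsigned long cpu_vm_mask;  /* one bit per CPU the mm has run on */
};

static int needs_cross_call(const struct mock_mm *mm,
			    const struct mock_mm *current_mm, int this_cpu)
{
	if (mm == current_mm && mm->count == 1 &&
	    mm->cpu_vm_mask == (1UL << this_cpu))
		return 0;           /* a purely local flush suffices */
	return 1;                   /* other CPUs may hold this context: IPI */
}

int main(void)
{
	struct mock_mm mm = { 1, 1UL << 2 };  /* single user, ran on CPU 2 only */
	printf("cross call needed on cpu2: %d\n", needs_cross_call(&mm, &mm, 2));
	printf("cross call needed on cpu0: %d\n", needs_cross_call(&mm, &mm, 0));
	return 0;
}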
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
@@ -411,12 +404,14 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
smp_cross_call(&xcall_flush_tlb_range, ctx, start, end);
local_flush_and_out:
- __flush_tlb_range(ctx, start, end);
+ start &= PAGE_MASK;
+ end &= PAGE_MASK;
+ __flush_tlb_range(ctx, start, SECONDARY_CONTEXT, end, PAGE_SIZE, (end-start));
}
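
The new five-argument __flush_tlb_range() wants page-aligned bounds, hence the PAGE_MASK truncation above. A runnable illustration of the arithmetic, assuming sparc64's 8K pages (PAGE_SHIFT = 13); the addresses are made-up sample values:

#include <stdio.h>

#define MOCK_PAGE_SIZE 8192UL
#define MOCK_PAGE_MASK (~(MOCK_PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x40001234UL;
	unsigned long end   = 0x40007ff0UL;

	start &= MOCK_PAGE_MASK;    /* 0x40000000: round down to page base */
	end   &= MOCK_PAGE_MASK;    /* 0x40006000: round down likewise */
	printf("span = %lu bytes, %lu pages\n",
	       end - start, (end - start) / MOCK_PAGE_SIZE);
	return 0;
}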
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
@@ -424,7 +419,7 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
return smp_cross_call_avoidance(mm);
}
#if 0 /* XXX Disabled until further notice... */
- else if(mm != current->mm && mm->count == 1) {
+ else if(mm->count == 1) {
/* Try to handle two special cases to avoid cross calls
* in common scenarios where we are swapping process
* pages out.
@@ -438,7 +433,7 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
smp_cross_call(&xcall_flush_tlb_page, ctx, page, 0);
local_flush_and_out:
- __flush_tlb_page(ctx, page);
+ __flush_tlb_page(ctx, (page & PAGE_MASK), SECONDARY_CONTEXT);
}
/* CPU capture. */
@@ -500,8 +495,7 @@ void smp_penguin_jailcell(void)
static inline void sparc64_do_profile(unsigned long pc)
{
-#ifdef CONFIG_PROFILE
- if(prof_buffer && current->pid) {
+ if (prof_buffer && current->pid) {
extern int _stext;
pc -= (unsigned long) &_stext;
@@ -511,7 +505,6 @@ static inline void sparc64_do_profile(unsigned long pc)
pc = prof_len - 1;
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
-#endif
}
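
With the CONFIG_PROFILE guard gone, the binning always compiles in: the PC is rebased against _stext, scaled down by prof_shift, and clamped to the last histogram bucket. A self-contained model of that arithmetic; the base, shift, and bucket count below are made-up stand-ins for _stext, prof_shift, and prof_len:

#include <stdio.h>

#define TEXT_BASE  0x1000000UL  /* stand-in for &_stext */
#define PROF_SHIFT 2            /* one bucket per 4 bytes of text */
#define PROF_LEN   1024UL       /* number of histogram buckets */

static unsigned long prof_buffer[PROF_LEN];

static void do_profile(unsigned long pc)
{
	pc -= TEXT_BASE;            /* offset into the kernel text */
	pc >>= PROF_SHIFT;          /* scale to a bucket index */
	if (pc >= PROF_LEN)
		pc = PROF_LEN - 1;  /* out-of-range PCs land in the last bin */
	prof_buffer[pc]++;          /* kernel uses atomic_inc() here */
}

int main(void)
{
	do_profile(0x1000040UL);    /* offset 0x40 >> 2 == bucket 16 */
	do_profile(0x1000040UL);
	printf("bucket 16 hits: %lu\n", prof_buffer[16]);
	return 0;
}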
static unsigned long current_tick_offset;
@@ -543,8 +536,8 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
do {
if(!user)
sparc64_do_profile(regs->tpc);
- if(!--prof_counter(cpu)) {
-
+ if(!--prof_counter(cpu))
+ {
if (cpu == boot_cpu_id) {
extern void irq_enter(int, int);
extern void irq_exit(int, int);
@@ -563,7 +556,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
update_one_process(current, 1, user, !user, cpu);
if(--current->counter < 0) {
current->counter = 0;
- need_resched = 1;
+ current->need_resched = 1;
}
if(user) {
@@ -581,7 +574,6 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
atomic_inc((atomic_t *)inc);
atomic_inc((atomic_t *)inc2);
}
-
prof_counter(cpu) = prof_multiplier(cpu);
}
@@ -600,49 +592,6 @@ __initfunc(static void smp_setup_percpu_timer(void))
prof_counter(cpu) = prof_multiplier(cpu) = 1;
- if (cpu == boot_cpu_id) {
- extern unsigned long tl0_itick;
- extern unsigned long tl0_smp_itick;
- unsigned long flags;
-
- save_flags(flags); cli();
-
- /*
- * Steal TICK_INT interrupts from timer_interrupt().
- */
- __asm__ __volatile__("
- .globl tl0_smp_itick
- b,pt %%xcc, 1f
- nop
-
- tl0_smp_itick:
- rdpr %%pil, %%g2
- wrpr %%g0, 15, %%pil
- b,pt %%xcc, etrap_irq
- rd %%pc, %%g7
- call smp_percpu_timer_interrupt
- add %%sp, %0, %%o0
- b,pt %%xcc, rtrap
- clr %%l6
-
- 1:"
- : /* no outputs */
- : "i" (STACK_BIAS + REGWIN_SZ));
-
- memcpy(&tl0_itick, &tl0_smp_itick, 8 * 4);
-
- __asm__ __volatile__("
- membar #StoreStore
- flush %0 + 0x00
- flush %0 + 0x08
- flush %0 + 0x10
- flush %0 + 0x18"
- : /* no outputs */
- : "r" (&tl0_itick));
-
- restore_flags(flags);
- }
-
__asm__ __volatile__("rd %%tick, %%g1\n\t"
"add %%g1, %0, %%g1\n\t"
"wr %%g1, 0x0, %%tick_cmpr"
@@ -651,12 +600,27 @@ __initfunc(static void smp_setup_percpu_timer(void))
: "g1");
}
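
The inline asm above arms the per-CPU timer by writing %tick plus the offset into %tick_cmpr; the next interrupt fires when the free-running counter reaches the compare value. In plain C terms, with the register reads and writes replaced by ordinary variables and invented sample numbers:

#include <stdio.h>

int main(void)
{
	unsigned long tick = 123456UL;        /* current %tick reading */
	unsigned long tick_offset = 10000UL;  /* stand-in for current_tick_offset */
	unsigned long tick_cmpr = tick + tick_offset;

	printf("next timer interrupt when tick reaches %lu\n", tick_cmpr);
	return 0;
}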
-__initfunc(static void smp_tickoffset_init(void))
+__initfunc(void smp_tick_init(void))
{
+ int i;
+
+ boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;
+ cpu_present_map = 0;
+ for(i = 0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1UL << linux_cpus[i].mid);
+ for(i = 0; i < NR_CPUS; i++) {
+ cpu_number_map[i] = -1;
+ __cpu_logical_map[i] = -1;
+ }
+ cpu_number_map[boot_cpu_id] = 0;
+ prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
+ __cpu_logical_map[0] = boot_cpu_id;
+ current->processor = boot_cpu_id;
+ prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
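
smp_tick_init() now owns the bookkeeping that smp_boot_cpus() used to do: build a present-CPU bitmask from the PROM-reported module IDs and map the boot CPU to logical number 0. A standalone sketch of that setup, with invented sample mids:

#include <stdio.h>

#define MOCK_NR_CPUS 64

int main(void)
{
	int mids[] = { 0, 2, 5 };   /* hypothetical PROM CPU module IDs */
	int nmids = 3, boot_mid = 2, i;
	unsigned long present = 0;
	int number_map[MOCK_NR_CPUS], logical_map[MOCK_NR_CPUS];

	for (i = 0; i < nmids; i++)
		present |= 1UL << mids[i];      /* one bit per present CPU */
	for (i = 0; i < MOCK_NR_CPUS; i++)
		number_map[i] = logical_map[i] = -1;
	number_map[boot_mid] = 0;               /* boot CPU becomes logical 0 */
	logical_map[0] = boot_mid;

	printf("present map = 0x%lx, boot cpu -> logical %d\n",
	       present, number_map[boot_mid]);
	return 0;
}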
-__initfunc(int setup_profiling_timer(unsigned int multiplier))
+int __init setup_profiling_timer(unsigned int multiplier)
{
unsigned long flags;
int i;