author     Kanoj Sarcar <kanoj@engr.sgi.com>    2000-04-17 15:54:31 +0000
committer  Kanoj Sarcar <kanoj@engr.sgi.com>    2000-04-17 15:54:31 +0000
commit     0414d77d4b2ac65f2ed6be694821bbff6453ad5a (patch)
tree       3d51eeca750ed28e2aa26c8efc4e1d598dfe2a4a /arch
parent     716ec6321bcc060b053d8c27325ba84377259d9f (diff)
Intercpu interrupt changes: add dedicated intr levels for the reschedule
and call-function interrupts (tlbflush to follow later).
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips64/kernel/smp.c         |  97
-rw-r--r--  arch/mips64/sgi-ip27/ip27-irq.c  |  33
2 files changed, 106 insertions(+), 24 deletions(-)
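
The smp.c half of the patch replaces the single DOACTION level with two message
types, DORESCHED and DOCALL, each mapped to a per-slice IRQ and then to a hub
software level. As a rough illustration of that selection (not the real kernel
code), here is a minimal user-space sketch; the numeric CPU_RESCHED_A_IRQ /
CPU_CALL_A_IRQ values and the cputoslice() stand-in are placeholder assumptions
for this sketch, and only the "+ 7" offset of IRQ_TO_SWLEVEL() comes from the
diff itself.

/*
 * Minimal sketch of the per-slice interrupt selection done by the
 * patched sendintr().  All numeric IRQ values below are placeholders;
 * the real CPU_RESCHED_*_IRQ / CPU_CALL_*_IRQ definitions live in the
 * IP27 headers and may differ.
 */
#include <stdio.h>
#include <stdlib.h>

#define DORESCHED 0xab
#define DOCALL    0xbc

/* Assumed placeholder IRQ numbers: slice B immediately follows slice A. */
#define CPU_RESCHED_A_IRQ 16
#define CPU_RESCHED_B_IRQ 17
#define CPU_CALL_A_IRQ    18
#define CPU_CALL_B_IRQ    19

/* Same conversion as the diff's IRQ_TO_SWLEVEL(i), with parentheses added. */
#define IRQ_TO_SWLEVEL(i) ((i) + 7)

/* Stand-in for cputoslice(): 0 for slice A, 1 for slice B. */
static int cputoslice(int destid) { return destid & 1; }

static int pick_irq(int destid, unsigned char status)
{
        int irq;

        switch (status) {
        case DORESCHED: irq = CPU_RESCHED_A_IRQ; break;
        case DOCALL:    irq = CPU_CALL_A_IRQ;    break;
        default:        fprintf(stderr, "bad status\n"); exit(1);
        }
        return irq + cputoslice(destid);        /* slice B gets the next IRQ */
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                int r = pick_irq(cpu, DORESCHED);
                int c = pick_irq(cpu, DOCALL);
                printf("cpu%d: resched irq %d (level %d), call irq %d (level %d)\n",
                       cpu, r, IRQ_TO_SWLEVEL(r), c, IRQ_TO_SWLEVEL(c));
        }
        return 0;
}
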
diff --git a/arch/mips64/kernel/smp.c b/arch/mips64/kernel/smp.c
index 2ed180820..963d57379 100644
--- a/arch/mips64/kernel/smp.c
+++ b/arch/mips64/kernel/smp.c
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
+#include <asm/softirq.h>
#ifdef CONFIG_SGI_IP27
@@ -17,29 +18,32 @@
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
+#include <asm/sn/sn0/ip27.h>
-#define DOACTION 0xab
+#define DORESCHED 0xab
+#define DOCALL 0xbc
+
+#define IRQ_TO_SWLEVEL(i) i + 7 /* Delete this from here */
static void sendintr(int destid, unsigned char status)
{
- int level;
+ int irq;
#if (CPUS_PER_NODE == 2)
- /*
- * CPU slice A gets level CPU_ACTION_A
- * CPU slice B gets level CPU_ACTION_B
- */
- if (status == DOACTION)
- level = CPU_ACTION_A + cputoslice(destid);
- else /* DOTLBACTION */
- level = N_INTPEND_BITS + TLB_INTR_A + cputoslice(destid);
+ switch (status) {
+ case DORESCHED: irq = CPU_RESCHED_A_IRQ; break;
+ case DOCALL: irq = CPU_CALL_A_IRQ; break;
+ default: panic("sendintr");
+ }
+ irq += cputoslice(destid);
/*
* Convert the compact hub number to the NASID to get the correct
* part of the address space. Then set the interrupt bit associated
* with the CPU we want to send the interrupt to.
*/
- REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cputocnode(destid)), level);
+ REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cputocnode(destid)),
+ IRQ_TO_SWLEVEL(irq));
#else
<< Bomb! Must redefine this for more than 2 CPUS. >>
#endif
@@ -63,6 +67,8 @@ static void smp_tune_scheduling (void)
void __init smp_boot_cpus(void)
{
+ extern void allowboot(void);
+
global_irq_holder = 0;
current->processor = 0;
init_idle();
@@ -98,12 +104,13 @@ void smp_send_stop(void)
*/
void smp_send_reschedule(int cpu)
{
- panic("smp_send_reschedule\n");
+ sendintr(cpu, DORESCHED);
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
+ return 0;
}
/*
@@ -117,12 +124,70 @@ int setup_profiling_timer(unsigned int multiplier)
* Does not return until remote CPUs are nearly ready to execute <func>
* or are or have executed.
*/
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+static volatile struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+} *call_data = NULL;
+
+int smp_call_function (void (*func) (void *info), void *info, int retry,
+ int wait)
+{
+ struct call_data_struct data;
+ int i, cpus = smp_num_cpus-1;
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+
+ if (cpus == 0)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ spin_lock_bh(&lock);
+ call_data = &data;
+ /* Send a message to all other CPUs and wait for them to respond */
+ for (i = 0; i < smp_num_cpus; i++)
+ if (smp_processor_id() != i)
+ sendintr(i, DOCALL);
+
+ /* Wait for response */
+ /* FIXME: lock-up detection, backtrace on lock-up */
+ while (atomic_read(&data.started) != cpus)
+ barrier();
+
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
+ barrier();
+ spin_unlock_bh(&lock);
+ return 0;
+}
+
+void smp_call_function_interrupt(void)
{
- /* XXX - kinda important ;-) */
- panic("smp_call_function\n");
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function.
+ */
+ atomic_inc(&call_data->started);
+
+ /*
+ * At this point the info structure may be out of scope unless wait==1.
+ */
+ (*func)(info);
+ if (wait)
+ atomic_inc(&call_data->finished);
}
+
void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
unsigned long va)
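
That closes the smp.c part of the diff. Before moving on to the irq-side
changes, here is a user-space model of the rendezvous the new
smp_call_function() / smp_call_function_interrupt() pair performs: the
initiator publishes a call_data_struct, signals the other CPUs, then spins on
the started (and, if wait is set, finished) counters. Thread creation stands in
for sendintr(..., DOCALL), NCPUS is an arbitrary value chosen for the model,
and the spinlock serialization of the real function is omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4                         /* assumed CPU count for the model */

struct call_data_struct {
        void (*func)(void *info);
        void *info;
        atomic_int started;
        atomic_int finished;
        int wait;
};

static struct call_data_struct *call_data;

/* Mirrors smp_call_function_interrupt(); pthread_create stands in for the IPI. */
static void *call_function_interrupt(void *unused)
{
        void (*func)(void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        (void)unused;

        /* Tell the initiator we have grabbed the data. */
        atomic_fetch_add(&call_data->started, 1);

        /* call_data may go out of scope past this point unless wait == 1. */
        (*func)(info);
        if (wait)
                atomic_fetch_add(&call_data->finished, 1);
        return NULL;
}

/* Mirrors the new smp_call_function(), minus the spinlock and sendintr(). */
static int smp_call_function_model(void (*func)(void *info), void *info, int wait)
{
        struct call_data_struct data;
        pthread_t remote[NCPUS - 1];
        int i, cpus = NCPUS - 1;

        data.func = func;
        data.info = info;
        atomic_init(&data.started, 0);
        atomic_init(&data.finished, 0);
        data.wait = wait;

        call_data = &data;
        for (i = 0; i < cpus; i++)      /* stands in for sendintr(i, DOCALL) */
                pthread_create(&remote[i], NULL, call_function_interrupt, NULL);

        while (atomic_load(&data.started) != cpus)      /* wait for response */
                ;
        if (wait)
                while (atomic_load(&data.finished) != cpus)
                        ;

        for (i = 0; i < cpus; i++)
                pthread_join(remote[i], NULL);
        return 0;
}

static void say_hello(void *info)
{
        printf("hello from a remote thread, info=%s\n", (const char *)info);
}

int main(void)
{
        return smp_call_function_model(say_hello, "payload", 1);
}

As in the kernel version, the remote side may only touch the on-stack call_data
after bumping started if wait was set; that is what the "out of scope unless
wait==1" comment in the diff is about. The real function also serializes
concurrent callers with a spinlock and disables bottom halves, which this model
leaves out. The ip27-irq.c side of the patch follows.
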
diff --git a/arch/mips64/sgi-ip27/ip27-irq.c b/arch/mips64/sgi-ip27/ip27-irq.c
index d58e0c6c9..2e91e9bf7 100644
--- a/arch/mips64/sgi-ip27/ip27-irq.c
+++ b/arch/mips64/sgi-ip27/ip27-irq.c
@@ -198,9 +198,11 @@ static unsigned int bridge_startup(unsigned int irq)
case IOC3_ETH_INT: pin = 2; break;
case SCSI1_INT: pin = 1; break;
case SCSI0_INT: pin = 0; break;
- case SWLEVEL_TO_IRQ(CPU_ACTION_A):
- case SWLEVEL_TO_IRQ(CPU_ACTION_B):
- return;
+ case CPU_RESCHED_A_IRQ:
+ case CPU_RESCHED_B_IRQ:
+ case CPU_CALL_A_IRQ:
+ case CPU_CALL_B_IRQ:
+ return 0;
default: panic("bridge_startup: whoops? %d\n", irq);
}
@@ -258,6 +260,11 @@ static unsigned int bridge_shutdown(unsigned int irq)
case IOC3_ETH_INT: pin = 2; break;
case SCSI1_INT: pin = 1; break;
case SCSI0_INT: pin = 0; break;
+ case CPU_RESCHED_A_IRQ:
+ case CPU_RESCHED_B_IRQ:
+ case CPU_CALL_A_IRQ:
+ case CPU_CALL_B_IRQ:
+ return 0;
default: panic("bridge_startup: whoops?");
}
@@ -677,18 +684,28 @@ int intr_disconnect_level(cpuid_t cpu, int bit)
}
+void handle_resched_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Nothing, the return from intr will work for us */
+}
+
void handle_cpuintr(int irq, void *dev_id, struct pt_regs *regs)
{
- printk("HANDLE_CPUINTR: cpu%d irq%d\n", smp_processor_id(), irq);
}
void install_cpuintr(cpuid_t cpu)
{
- int intr_bit = CPU_ACTION_A + cputoslice(cpu);
+ int irq;
+ extern void smp_call_function_interrupt(void);
- intr_connect_level(cpu, intr_bit);
- if (request_irq(SWLEVEL_TO_IRQ(intr_bit), handle_cpuintr, 0,
- "intercpu", 0))
+ irq = CPU_RESCHED_A_IRQ + cputoslice(cpu);
+ intr_connect_level(cpu, IRQ_TO_SWLEVEL(irq));
+ if (request_irq(irq, handle_resched_intr, SA_SHIRQ, "resched", 0))
+ panic("intercpu intr unconnectible\n");
+ irq = CPU_CALL_A_IRQ + cputoslice(cpu);
+ intr_connect_level(cpu, IRQ_TO_SWLEVEL(irq));
+ if (request_irq(irq, smp_call_function_interrupt, SA_SHIRQ,
+ "callfunc", 0))
panic("intercpu intr unconnectible\n");
}
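
Finally, a toy model of the receive side that install_cpuintr() now sets up:
each CPU slice ends up with one dedicated IRQ whose handler is empty (returning
from the interrupt is enough to cause the reschedule) and a second one that
runs the call-function handler. The IRQ numbers, the dispatch table, and the
handler stubs here are placeholder assumptions carried over from the first
sketch, not the real IP27 interrupt layout.

#include <stdio.h>

#define MAX_LEVELS 64

/* Assumed placeholder IRQ numbers, matching the earlier sketch. */
#define CPU_RESCHED_A_IRQ 16
#define CPU_CALL_A_IRQ    18
#define IRQ_TO_SWLEVEL(i) ((i) + 7)

typedef void (*irq_handler_t)(int irq);

static irq_handler_t level_handler[MAX_LEVELS];
static const char *level_name[MAX_LEVELS];

static void handle_resched_intr(int irq)
{
        /* Nothing: returning from the (real) interrupt reschedules. */
        (void)irq;
}

static void smp_call_function_interrupt_stub(int irq)
{
        printf("irq %d: would run call_data->func here\n", irq);
}

/* Stand-ins for cputoslice() and request_irq()+intr_connect_level(). */
static int cputoslice(int cpu) { return cpu & 1; }

static void install_cpuintr(int cpu)
{
        int irq;

        irq = CPU_RESCHED_A_IRQ + cputoslice(cpu);
        level_handler[IRQ_TO_SWLEVEL(irq)] = handle_resched_intr;
        level_name[IRQ_TO_SWLEVEL(irq)] = "resched";

        irq = CPU_CALL_A_IRQ + cputoslice(cpu);
        level_handler[IRQ_TO_SWLEVEL(irq)] = smp_call_function_interrupt_stub;
        level_name[IRQ_TO_SWLEVEL(irq)] = "callfunc";
}

int main(void)
{
        int level;

        install_cpuintr(0);     /* slice A */
        install_cpuintr(1);     /* slice B */

        for (level = 0; level < MAX_LEVELS; level++)
                if (level_handler[level]) {
                        printf("level %2d -> %s\n", level, level_name[level]);
                        /* level - 7 inverts IRQ_TO_SWLEVEL() for this sketch */
                        level_handler[level](level - 7);
                }
        return 0;
}

Keeping the two purposes on separate dedicated levels is what lets the
reschedule handler stay empty; the old shared CPU_ACTION level would have had
to decode the reason for the interrupt before acting.
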