Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/hardirq.h | 26
-rw-r--r--  include/asm-alpha/smp.h | 1
-rw-r--r--  include/asm-alpha/softirq.h | 7
-rw-r--r--  include/asm-arm/hardirq.h | 15
-rw-r--r--  include/asm-i386/hardirq.h | 16
-rw-r--r--  include/asm-ia64/hardirq.h | 18
-rw-r--r--  include/asm-m68k/hardirq.h | 21
-rw-r--r--  include/asm-m68k/softirq.h | 10
-rw-r--r--  include/asm-m68k/system.h | 2
-rw-r--r--  include/asm-mips/hardirq.h | 12
-rw-r--r--  include/asm-mips/mc146818rtc.h | 1
-rw-r--r--  include/asm-mips64/hardirq.h | 17
-rw-r--r--  include/asm-mips64/processor.h | 1
-rw-r--r--  include/asm-mips64/smp.h | 1
-rw-r--r--  include/asm-ppc/hardirq.h | 13
-rw-r--r--  include/asm-ppc/softirq.h | 2
-rw-r--r--  include/asm-s390/hardirq.h | 43
-rw-r--r--  include/asm-s390/irq.h | 4
-rw-r--r--  include/asm-s390/lowcore.h | 32
-rw-r--r--  include/asm-s390/softirq.h | 6
-rw-r--r--  include/asm-sh/hardirq.h | 23
-rw-r--r--  include/asm-sh/softirq.h | 6
-rw-r--r--  include/asm-sparc/hardirq.h | 41
-rw-r--r--  include/asm-sparc/highmem.h | 149
-rw-r--r--  include/asm-sparc/irq.h | 6
-rw-r--r--  include/asm-sparc/kmap_types.h | 10
-rw-r--r--  include/asm-sparc/pgalloc.h | 6
-rw-r--r--  include/asm-sparc/softirq.h | 17
-rw-r--r--  include/asm-sparc/stat.h | 2
-rw-r--r--  include/asm-sparc/system.h | 2
-rw-r--r--  include/asm-sparc/vaddrs.h | 16
-rw-r--r--  include/asm-sparc64/display7seg.h | 79
-rw-r--r--  include/asm-sparc64/hardirq.h | 23
-rw-r--r--  include/asm-sparc64/smp.h | 2
-rw-r--r--  include/asm-sparc64/softirq.h | 7
-rw-r--r--  include/asm-sparc64/stat.h | 2
-rw-r--r--  include/asm-sparc64/system.h | 2
-rw-r--r--  include/linux/agp_backend.h | 1
-rw-r--r--  include/linux/interrupt.h | 19
-rw-r--r--  include/linux/irq_cpustat.h | 35
-rw-r--r--  include/net/ipip.h | 21
41 files changed, 516 insertions, 201 deletions
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 3961ac4a4..66ab89ce4 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -1,20 +1,19 @@
#ifndef _ALPHA_HARDIRQ_H
#define _ALPHA_HARDIRQ_H
-/* Initially just a straight copy of the i386 code. */
-
#include <linux/config.h>
#include <linux/threads.h>
-#ifndef CONFIG_SMP
-extern int __local_irq_count;
-#define local_irq_count(cpu) ((void)(cpu), __local_irq_count)
-extern unsigned long __irq_attempt[];
-#define irq_attempt(cpu, irq) ((void)(cpu), __irq_attempt[irq])
-#else
-#define local_irq_count(cpu) (cpu_data[cpu].irq_count)
-#define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq])
-#endif
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
@@ -31,6 +30,9 @@ extern unsigned long __irq_attempt[];
#ifndef CONFIG_SMP
+extern unsigned long __irq_attempt[];
+#define irq_attempt(cpu, irq) ((void)(cpu), __irq_attempt[irq])
+
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) ((void) 0)
@@ -41,6 +43,8 @@ extern unsigned long __irq_attempt[];
#else
+#define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq])
+
#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 3143b0872..44ec5e8ea 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -33,7 +33,6 @@ struct cpuinfo_alpha {
unsigned long irq_attempt[NR_IRQS];
unsigned long prof_multiplier;
unsigned long prof_counter;
- int irq_count, bh_count;
unsigned char mcheck_expected;
unsigned char mcheck_taken;
unsigned char mcheck_extra;
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index ef77f861e..dd18dc490 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -6,13 +6,6 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
-#ifndef CONFIG_SMP
-extern int __local_bh_count;
-#define local_bh_count(cpu) ((void)(cpu), __local_bh_count)
-#else
-#define local_bh_count(cpu) (cpu_data[cpu].bh_count)
-#endif
-
extern inline void cpu_bh_disable(int cpu)
{
local_bh_count(cpu)++;
diff --git a/include/asm-arm/hardirq.h b/include/asm-arm/hardirq.h
index c12ed91e3..550310ffe 100644
--- a/include/asm-arm/hardirq.h
+++ b/include/asm-arm/hardirq.h
@@ -4,11 +4,16 @@
#include <linux/config.h>
#include <linux/threads.h>
-extern unsigned int local_irq_count[NR_CPUS];
-extern unsigned int local_bh_count[NR_CPUS];
-
-#define local_irq_count(cpu) (local_irq_count[(cpu)])
-#define local_bh_count(cpu) (local_bh_count[(cpu)])
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index aef182212..3860288ad 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -5,21 +5,17 @@
#include <linux/threads.h>
#include <linux/irq.h>
+/* entry.S is sensitive to the offsets of these fields */
typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
- unsigned int __nmi_counter;
- unsigned int __pad[5];
+ unsigned int __syscall_count;
+ unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
-extern irq_cpustat_t irq_stat [NR_CPUS];
-
-/*
- * Simple wrappers reducing source bloat
- */
-#define local_irq_count(cpu) (irq_stat[(cpu)].__local_irq_count)
-#define local_bh_count(cpu) (irq_stat[(cpu)].__local_bh_count)
-#define nmi_counter(cpu) (irq_stat[(cpu)].__nmi_counter)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
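
The same conversion repeats for most architectures below: the old per-cpu counter
arrays disappear, each arch defines its own irq_cpustat_t with the entry.S-sensitive
layout, and the wrapper macros now come from <linux/irq_cpustat.h>. As a hedged
sketch (not part of the patch), this is roughly what an existing caller compiles
down to on i386 after this hunk; irq_stat[] itself is still defined in arch code,
which this diff does not show:

	/* illustrative helper only -- the name is made up */
	static inline void example_irq_window(int cpu)
	{
		local_irq_count(cpu)++;		/* irq_stat[cpu].__local_irq_count++ */
		/* ... service the interrupt ... */
		local_irq_count(cpu)--;
	}
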
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
index 7b24f385c..38a12be6e 100644
--- a/include/asm-ia64/hardirq.h
+++ b/include/asm-ia64/hardirq.h
@@ -11,23 +11,17 @@
#include <linux/threads.h>
#include <linux/irq.h>
+/* entry.S is sensitive to the offsets of these fields */
typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
- unsigned int __nmi_counter;
-# if NR_CPUS > 1
- unsigned int __pad[13]; /* this assumes 64-byte cache-lines... */
-# endif
+ unsigned int __syscall_count;
+ unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
-extern irq_cpustat_t irq_stat[NR_CPUS];
-
-/*
- * Simple wrappers reducing source bloat
- */
-#define local_irq_count(cpu) (irq_stat[(cpu)].__local_irq_count)
-#define local_bh_count(cpu) (irq_stat[(cpu)].__local_bh_count)
-#define nmi_counter(cpu) (irq_stat[(cpu)].__nmi_counter)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index aac7e8597..8a1e6a445 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -3,17 +3,26 @@
#include <linux/threads.h>
-extern unsigned int local_irq_count[NR_CPUS];
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
-#define in_interrupt() (local_irq_count[smp_processor_id()] + local_bh_count[smp_processor_id()] != 0)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define in_irq() (local_irq_count[smp_processor_id()] != 0)
+#define in_interrupt() (local_irq_count(smp_processor_id()) + local_bh_count(smp_processor_id()) != 0)
-#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define irq_enter(cpu) (local_irq_count[cpu]++)
-#define irq_exit(cpu) (local_irq_count[cpu]--)
+#define irq_enter(cpu) (local_irq_count(cpu)++)
+#define irq_exit(cpu) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
diff --git a/include/asm-m68k/softirq.h b/include/asm-m68k/softirq.h
index a51563cd7..89a3e3971 100644
--- a/include/asm-m68k/softirq.h
+++ b/include/asm-m68k/softirq.h
@@ -7,16 +7,14 @@
#include <asm/atomic.h>
-extern unsigned int local_bh_count[NR_CPUS];
-
-#define local_bh_disable() (local_bh_count[smp_processor_id()]++)
-#define local_bh_enable() (local_bh_count[smp_processor_id()]--)
+#define local_bh_disable() (local_bh_count(smp_processor_id())++)
+#define local_bh_enable() (local_bh_count(smp_processor_id())--)
#define in_softirq() (local_bh_count != 0)
/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count[cpu] ? 0 : (local_bh_count[cpu]=1))
-#define softirq_endlock(cpu) (local_bh_count[cpu] = 0)
+#define softirq_trylock(cpu) (local_bh_count(cpu) ? 0 : (local_bh_count(cpu)=1))
+#define softirq_endlock(cpu) (local_bh_count(cpu) = 0)
#define synchronize_bh() barrier()
#endif
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 64dab98d8..a33bb7c14 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -52,7 +52,7 @@ asmlinkage void resume(void);
#else
#include <asm/hardirq.h>
#define __sti() ({ \
- if (!local_irq_count[smp_processor_id()]) \
+ if (!local_irq_count(smp_processor_id())) \
asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#endif
diff --git a/include/asm-mips/hardirq.h b/include/asm-mips/hardirq.h
index 4331f1367..eead2fb87 100644
--- a/include/asm-mips/hardirq.h
+++ b/include/asm-mips/hardirq.h
@@ -14,18 +14,16 @@
#include <linux/threads.h>
#include <linux/irq.h>
+/* entry.S is sensitive to the offsets of these fields */
typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
+ unsigned int __syscall_count;
} ____cacheline_aligned irq_cpustat_t;
-extern irq_cpustat_t irq_stat [NR_CPUS];
-
-/*
- * Simple wrappers reducing source bloat
- */
-#define local_irq_count(cpu) (irq_stat[(cpu)].__local_irq_count)
-#define local_bh_count(cpu) (irq_stat[(cpu)].__local_bh_count)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
diff --git a/include/asm-mips/mc146818rtc.h b/include/asm-mips/mc146818rtc.h
index bae7fcd1a..f521b7239 100644
--- a/include/asm-mips/mc146818rtc.h
+++ b/include/asm-mips/mc146818rtc.h
@@ -10,6 +10,7 @@
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
+#include <linux/config.h>
#include <asm/io.h>
#ifndef RTC_PORT
diff --git a/include/asm-mips64/hardirq.h b/include/asm-mips64/hardirq.h
index 192b61f7b..088fb8853 100644
--- a/include/asm-mips64/hardirq.h
+++ b/include/asm-mips64/hardirq.h
@@ -14,19 +14,16 @@
#include <linux/threads.h>
#include <linux/irq.h>
+/* entry.S is sensitive to the offsets of these fields */
typedef struct {
- unsigned long __local_irq_count;
- unsigned long __local_bh_count;
- unsigned long __pad[14];
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
} ____cacheline_aligned irq_cpustat_t;
-extern irq_cpustat_t irq_stat [NR_CPUS];
-
-/*
- * Simple wrappers reducing source bloat
- */
-#define local_irq_count(cpu) (irq_stat[(cpu)].__local_irq_count)
-#define local_bh_count(cpu) (irq_stat[(cpu)].__local_bh_count)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
diff --git a/include/asm-mips64/processor.h b/include/asm-mips64/processor.h
index 565d6cb88..3e8cab4b5 100644
--- a/include/asm-mips64/processor.h
+++ b/include/asm-mips64/processor.h
@@ -63,7 +63,6 @@ struct cpuinfo_mips {
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
unsigned long last_asn;
- unsigned int irq_count, bh_count;
unsigned long asid_cache;
#if defined(CONFIG_SGI_IP27)
cpuid_t p_cpuid; /* PROM assigned cpuid */
diff --git a/include/asm-mips64/smp.h b/include/asm-mips64/smp.h
index ed0420dd4..b8a8f8d75 100644
--- a/include/asm-mips64/smp.h
+++ b/include/asm-mips64/smp.h
@@ -20,7 +20,6 @@ struct cpuinfo_mips { /* XXX */
unsigned long smp_local_irq_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
- int irq_count, bh_count;
} __attribute__((aligned(64)));
extern struct cpuinfo_mips cpu_data[NR_CPUS];
diff --git a/include/asm-ppc/hardirq.h b/include/asm-ppc/hardirq.h
index cc7425e24..07398997a 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-ppc/hardirq.h
@@ -4,19 +4,16 @@
#include <linux/config.h>
#include <asm/smp.h>
+/* entry.S is sensitive to the offsets of these fields */
typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
- unsigned int __pad[6];
+ unsigned int __syscall_count;
} ____cacheline_aligned irq_cpustat_t;
-extern irq_cpustat_t irq_stat [NR_CPUS];
-
-/*
- * Simple wrappers reducing source bloat
- */
-#define local_irq_count(cpu) (irq_stat[(cpu)].__local_irq_count)
-#define local_bh_count(cpu) (irq_stat[(cpu)].__local_bh_count)
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
diff --git a/include/asm-ppc/softirq.h b/include/asm-ppc/softirq.h
index f23ed416b..4eaed0be6 100644
--- a/include/asm-ppc/softirq.h
+++ b/include/asm-ppc/softirq.h
@@ -4,8 +4,6 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
-extern unsigned int local_bh_count[NR_CPUS];
-
#define local_bh_disable() do { local_bh_count(smp_processor_id())++; barrier(); } while (0)
#define local_bh_enable() do { barrier(); local_bh_count(smp_processor_id())--; } while (0)
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index c21db1cd7..b32a0684a 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -16,21 +16,50 @@
#include <linux/threads.h>
#include <asm/lowcore.h>
#include <linux/sched.h>
+
+/* No irq_cpustat_t for s390, the data is held directly in S390_lowcore */
+
+/*
+ * Simple wrappers reducing source bloat. S390 specific because each
+ * cpu stores its data in S390_lowcore (PSA) instead of using a cache
+ * aligned array element like most architectures.
+ */
+
+#ifdef CONFIG_SMP
+
+#define softirq_active(cpu) (safe_get_cpu_lowcore(cpu).__softirq_active)
+#define softirq_mask(cpu) (safe_get_cpu_lowcore(cpu).__softirq_mask)
+#define local_irq_count(cpu) (safe_get_cpu_lowcore(cpu).__local_irq_count)
+#define local_bh_count(cpu) (safe_get_cpu_lowcore(cpu).__local_bh_count)
+#define syscall_count(cpu) (safe_get_cpu_lowcore(cpu).__syscall_count)
+
+#else /* CONFIG_SMP */
+
+/* Optimize away the cpu calculation, it is always current PSA */
+#define softirq_active(cpu) ((void)(cpu), S390_lowcore.__softirq_active)
+#define softirq_mask(cpu) ((void)(cpu), S390_lowcore.__softirq_mask)
+#define local_irq_count(cpu) ((void)(cpu), S390_lowcore.__local_irq_count)
+#define local_bh_count(cpu) ((void)(cpu), S390_lowcore.__local_bh_count)
+#define syscall_count(cpu) ((void)(cpu), S390_lowcore.__syscall_count)
+
+#endif /* CONFIG_SMP */
+
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
+ * Special definitions for s390, always access current PSA.
*/
-#define in_interrupt() ((atomic_read(&S390_lowcore.local_irq_count) + atomic_read(&S390_lowcore.local_bh_count)) != 0)
+#define in_interrupt() ((S390_lowcore.__local_irq_count + S390_lowcore.__local_bh_count) != 0)
-#define in_irq() (atomic_read(&S390_lowcore.local_irq_count) != 0)
+#define in_irq() (S390_lowcore.__local_irq_count != 0)
#ifndef CONFIG_SMP
-#define hardirq_trylock(cpu) (atomic_read(&S390_lowcore.local_irq_count) == 0)
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define hardirq_enter(cpu) (atomic_inc(&S390_lowcore.local_irq_count))
-#define hardirq_exit(cpu) (atomic_dec(&S390_lowcore.local_irq_count))
+#define hardirq_enter(cpu) (local_irq_count(cpu)++)
+#define hardirq_exit(cpu) (local_irq_count(cpu)--)
#define synchronize_irq() do { } while (0)
@@ -54,14 +83,14 @@ static inline void release_irqlock(int cpu)
static inline void hardirq_enter(int cpu)
{
- atomic_inc(&safe_get_cpu_lowcore(cpu).local_irq_count);
+ ++local_irq_count(cpu);
atomic_inc(&global_irq_count);
}
static inline void hardirq_exit(int cpu)
{
atomic_dec(&global_irq_count);
- atomic_dec(&safe_get_cpu_lowcore(cpu).local_irq_count);
+ --local_irq_count(cpu);
}
static inline int hardirq_trylock(int cpu)
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
index 895c24649..5efcdd40b 100644
--- a/include/asm-s390/irq.h
+++ b/include/asm-s390/irq.h
@@ -734,8 +734,8 @@ static inline void irq_exit(int cpu, unsigned int irq)
#else
-#define irq_enter(cpu, irq) (++local_irq_count[cpu])
-#define irq_exit(cpu, irq) (--local_irq_count[cpu])
+#define irq_enter(cpu, irq) (++local_irq_count(cpu))
+#define irq_exit(cpu, irq) (--local_irq_count(cpu))
#endif
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 22ab31bd8..3430056da 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -41,9 +41,10 @@
#define __LC_SAVE_AREA 0xC00
#define __LC_KERNEL_STACK 0xC40
#define __LC_KERNEL_LEVEL 0xC44
-#define __LC_CPUID 0xC50
-#define __LC_CPUADDR 0xC58
-#define __LC_IPLDEV 0xC6C
+#define __LC_IRQ_STAT 0xC48
+#define __LC_CPUID 0xC60
+#define __LC_CPUADDR 0xC68
+#define __LC_IPLDEV 0xC7C
/* interrupt handler start with all io, external and mcck interrupt disabled */
@@ -145,19 +146,26 @@ struct _lowcore
__u32 save_area[16]; /* 0xc00 */
__u32 kernel_stack; /* 0xc40 */
__u32 kernel_level; /* 0xc44 */
- atomic_t local_bh_count; /* 0xc48 */
- atomic_t local_irq_count; /* 0xc4c */
- struct cpuinfo_S390 cpu_data; /* 0xc50 */
- __u32 ipl_device; /* 0xc6c */
+ /* entry.S sensitive area start */
+ /* Next 6 words are the s390 equivalent of irq_stat */
+ __u32 __softirq_active; /* 0xc48 */
+ __u32 __softirq_mask; /* 0xc4c */
+ __u32 __local_irq_count; /* 0xc50 */
+ __u32 __local_bh_count; /* 0xc54 */
+ __u32 __syscall_count; /* 0xc58 */
+ __u8 pad10[0xc60-0xc5c]; /* 0xc5c */
+ struct cpuinfo_S390 cpu_data; /* 0xc60 */
+ __u32 ipl_device; /* 0xc7c */
+ /* entry.S sensitive area end */
/* SMP info area: defined by DJB */
- __u64 jiffy_timer_cc; /* 0xc70 */
- atomic_t ext_call_fast; /* 0xc78 */
- atomic_t ext_call_queue; /* 0xc7c */
- atomic_t ext_call_count; /* 0xc80 */
+ __u64 jiffy_timer_cc; /* 0xc80 */
+ atomic_t ext_call_fast; /* 0xc88 */
+ atomic_t ext_call_queue; /* 0xc8c */
+ atomic_t ext_call_count; /* 0xc90 */
/* Align SMP info to the top 1k of prefix area */
- __u8 pad10[0x1000-0xc84]; /* 0xc84 */
+ __u8 pad11[0x1000-0xc94]; /* 0xc94 */
} __attribute__((packed)); /* End structure*/
extern __inline__ void set_prefix(__u32 address)
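
The __LC_* constants above are consumed as raw offsets by entry.S, so they have to
track struct _lowcore by hand. A hedged sketch, not part of the patch: a compile-time
cross-check along these lines would catch a mismatch (the CHECK_LC_OFFSET name is
invented for the example):

	#include <linux/stddef.h>	/* offsetof */

	#define CHECK_LC_OFFSET(sym, field) \
		extern char lc_offset_mismatch_##field[((sym) == offsetof(struct _lowcore, field)) ? 1 : -1]

	CHECK_LC_OFFSET(__LC_KERNEL_STACK, kernel_stack);	/* expects 0xC40 */
	CHECK_LC_OFFSET(__LC_IRQ_STAT, __softirq_active);	/* expects 0xC48 */
	CHECK_LC_OFFSET(__LC_IPLDEV, ipl_device);		/* expects 0xC7C */
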
diff --git a/include/asm-s390/softirq.h b/include/asm-s390/softirq.h
index b0ad1dc56..ce1254eba 100644
--- a/include/asm-s390/softirq.h
+++ b/include/asm-s390/softirq.h
@@ -17,13 +17,13 @@
#include <asm/hardirq.h>
#include <asm/lowcore.h>
-#define cpu_bh_disable(cpu) do { atomic_inc(&S390_lowcore.local_bh_count); barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); atomic_dec(&S390_lowcore.local_bh_count); } while (0)
+#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define in_softirq() (atomic_read(&S390_lowcore.local_bh_count) != 0)
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#endif /* __ASM_SOFTIRQ_H */
diff --git a/include/asm-sh/hardirq.h b/include/asm-sh/hardirq.h
index 592b31d95..cfb53de78 100644
--- a/include/asm-sh/hardirq.h
+++ b/include/asm-sh/hardirq.h
@@ -4,28 +4,33 @@
#include <linux/config.h>
#include <linux/threads.h>
-extern unsigned int __local_irq_count[NR_CPUS];
-extern unsigned int __local_bh_count[NR_CPUS];
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
-#define local_irq_count(cpu) (__local_irq_count[(cpu)])
-#define local_bh_count(cpu) (__local_bh_count[(cpu)])
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (__local_irq_count[__cpu] + __local_bh_count[__cpu] != 0); })
+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-#define in_irq() (__local_irq_count[smp_processor_id()] != 0)
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
#ifndef CONFIG_SMP
-#define hardirq_trylock(cpu) (__local_irq_count[cpu] == 0)
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define irq_enter(cpu, irq) (__local_irq_count[cpu]++)
-#define irq_exit(cpu, irq) (__local_irq_count[cpu]--)
+#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
+#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
diff --git a/include/asm-sh/softirq.h b/include/asm-sh/softirq.h
index 44f4e0423..5e539b0ae 100644
--- a/include/asm-sh/softirq.h
+++ b/include/asm-sh/softirq.h
@@ -4,12 +4,12 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
-#define cpu_bh_disable(cpu) do { __local_bh_count[(cpu)]++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); __local_bh_count[(cpu)]--; } while (0)
+#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define in_softirq() (__local_bh_count[smp_processor_id()] != 0)
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#endif /* __ASM_SH_SOFTIRQ_H */
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index 8b7b095ad..cfee071dc 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -10,26 +10,37 @@
#include <linux/config.h>
#include <linux/threads.h>
-#ifndef CONFIG_SMP
-extern unsigned int __local_irq_count;
-#define local_irq_count(cpu) __local_irq_count
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
+ unsigned int __local_irq_count;
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
-#define in_interrupt() ((__local_irq_count + __local_bh_count) != 0)
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+
+#define in_irq() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count(__cpu) != 0); })
-#define hardirq_trylock(cpu) ((void)(cpu), __local_irq_count == 0)
+#ifndef CONFIG_SMP
+
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
-#define hardirq_enter(cpu) (__local_irq_count++)
-#define hardirq_exit(cpu) (__local_irq_count--)
+#define hardirq_enter(cpu) (++local_irq_count(cpu))
+#define hardirq_exit(cpu) (--local_irq_count(cpu))
#define synchronize_irq() barrier()
-#define in_irq() (__local_irq_count != 0)
-
#else
#include <asm/atomic.h>
@@ -37,22 +48,10 @@ extern unsigned int __local_irq_count;
#include <asm/system.h>
#include <asm/smp.h>
-extern unsigned int __local_irq_count[NR_CPUS];
-#define local_irq_count(cpu) __local_irq_count[cpu]
extern unsigned char global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;
-/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
- */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) != 0); })
-
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
diff --git a/include/asm-sparc/highmem.h b/include/asm-sparc/highmem.h
new file mode 100644
index 000000000..ede2167e1
--- /dev/null
+++ b/include/asm-sparc/highmem.h
@@ -0,0 +1,149 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/vaddrs.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+/* undef for production */
+#define HIGHMEM_DEBUG 1
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+extern void kmap_init(void) __init;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP 1024
+
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern unsigned long kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern inline unsigned long kmap(struct page *page)
+{
+ if (in_interrupt())
+ BUG();
+ if (page < highmem_start_page)
+ return (unsigned long) page_address(page);
+ return kmap_high(page);
+}
+
+extern inline void kunmap(struct page *page)
+{
+ if (in_interrupt())
+ BUG();
+ if (page < highmem_start_page)
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+extern inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+{
+ unsigned long idx;
+ unsigned long vaddr;
+
+ if (page < highmem_start_page)
+ return (unsigned long) page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;
+
+/* XXX Fix - Anton */
+#if 0
+ __flush_cache_one(vaddr);
+#else
+ flush_cache_all();
+#endif
+
+#if HIGHMEM_DEBUG
+ if (!pte_none(*(kmap_pte+idx)))
+ BUG();
+#endif
+ set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
+/* XXX Fix - Anton */
+#if 0
+ __flush_tlb_one(vaddr);
+#else
+ flush_tlb_all();
+#endif
+
+ return vaddr;
+}
+
+extern inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
+{
+#if HIGHMEM_DEBUG
+ unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+
+#if 0
+ if (vaddr < FIXADDR_START) // FIXME
+ return;
+#endif
+
+ if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
+ BUG();
+
+/* XXX Fix - Anton */
+#if 0
+ __flush_cache_one(vaddr);
+#else
+ flush_cache_all();
+#endif
+
+ /*
+ * force other mappings to Oops if they try to access
+ * this pte without first remapping it
+ */
+ pte_clear(kmap_pte+idx);
+/* XXX Fix - Anton */
+#if 0
+ __flush_tlb_one(vaddr);
+#else
+ flush_tlb_all();
+#endif
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
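
A hedged usage sketch for the kmap interface this new header declares (the function
name and the memset are illustrative, not from the patch). Note that on sparc kmap()
returns an unsigned long rather than a pointer:

	#include <linux/mm.h>		/* struct page */
	#include <linux/string.h>	/* memset */
	#include <asm/highmem.h>

	/* Zero a page that may live in high memory.  kmap() may sleep, so this
	 * must not run in interrupt context; kmap_atomic() with one of the
	 * KM_* slots is the IRQ-safe variant. */
	static void zero_possibly_high_page(struct page *page)
	{
		unsigned long vaddr = kmap(page);

		memset((void *) vaddr, 0, PAGE_SIZE);
		kunmap(page);
	}
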
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index 5859d82e9..4b6dd0dae 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -23,16 +23,14 @@ BTFIXUPDEF_CALL(char *, __irq_itoa, unsigned int)
/* IRQ handler dispatch entry and exit. */
#ifdef CONFIG_SMP
-extern unsigned int __local_irq_count[NR_CPUS];
#define irq_enter(cpu, irq) \
do { hardirq_enter(cpu); \
spin_unlock_wait(&global_irq_lock); \
} while(0)
#define irq_exit(cpu, irq) hardirq_exit(cpu)
#else
-extern unsigned int __local_irq_count;
-#define irq_enter(cpu, irq) (__local_irq_count++)
-#define irq_exit(cpu, irq) (__local_irq_count--)
+#define irq_enter(cpu, irq) (++local_irq_count(cpu))
+#define irq_exit(cpu, irq) (--local_irq_count(cpu))
#endif
/* Dave Redman (djhr@tadpole.co.uk)
diff --git a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h
new file mode 100644
index 000000000..d92d81b20
--- /dev/null
+++ b/include/asm-sparc/kmap_types.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_BOUNCE_WRITE,
+ KM_TYPE_NR
+};
+
+#endif
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index 20e9d805e..180709c85 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -1,4 +1,4 @@
-/* $Id: pgalloc.h,v 1.6 2000/07/10 20:56:53 anton Exp $ */
+/* $Id: pgalloc.h,v 1.9 2000/08/01 04:53:58 anton Exp $ */
#ifndef _SPARC_PGALLOC_H
#define _SPARC_PGALLOC_H
@@ -85,7 +85,9 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
-#define flush_page_to_ram(page) __flush_page_to_ram(page_address(page))
+extern void flush_page_to_ram(struct page *page);
+
+#define flush_dcache_page(page) do { } while (0)
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h
index f35407dc8..100f25a22 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -14,26 +14,9 @@
#include <asm/smp.h>
#include <asm/hardirq.h>
-
-#ifdef CONFIG_SMP
-extern unsigned int __local_bh_count[NR_CPUS];
-#define local_bh_count(cpu) __local_bh_count[cpu]
-
#define local_bh_disable() (local_bh_count(smp_processor_id())++)
#define local_bh_enable() (local_bh_count(smp_processor_id())--)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-#else
-
-extern unsigned int __local_bh_count;
-#define local_bh_count(cpu) __local_bh_count
-
-#define local_bh_disable() (__local_bh_count++)
-#define local_bh_enable() (__local_bh_count--)
-
-#define in_softirq() (__local_bh_count != 0)
-
-#endif /* SMP */
-
#endif /* __SPARC_SOFTIRQ_H */
diff --git a/include/asm-sparc/stat.h b/include/asm-sparc/stat.h
index a70d4df3c..7be8e7092 100644
--- a/include/asm-sparc/stat.h
+++ b/include/asm-sparc/stat.h
@@ -1,4 +1,4 @@
-/* $Id: stat.h,v 1.11 2000/01/16 15:22:53 jj Exp $ */
+/* $Id: stat.h,v 1.12 2000/08/04 05:35:55 davem Exp $ */
#ifndef _SPARC_STAT_H
#define _SPARC_STAT_H
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 4174294ca..0a101cacd 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -1,4 +1,4 @@
-/* $Id: system.h,v 1.82 2000/05/09 17:40:15 davem Exp $ */
+/* $Id: system.h,v 1.83 2000/08/04 05:35:55 davem Exp $ */
#include <linux/config.h>
#ifndef __SPARC_SYSTEM_H
diff --git a/include/asm-sparc/vaddrs.h b/include/asm-sparc/vaddrs.h
index 5cbec8210..704f79003 100644
--- a/include/asm-sparc/vaddrs.h
+++ b/include/asm-sparc/vaddrs.h
@@ -1,4 +1,4 @@
-/* $Id: vaddrs.h,v 1.25 2000/06/05 06:08:46 anton Exp $ */
+/* $Id: vaddrs.h,v 1.26 2000/08/01 04:53:58 anton Exp $ */
#ifndef _SPARC_VADDRS_H
#define _SPARC_VADDRS_H
@@ -12,6 +12,20 @@
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
*/
+#define SRMMU_MAXMEM 0x0c000000
+
+#define SRMMU_NOCACHE_VADDR 0xfc000000 /* KERNBASE + SRMMU_MAXMEM */
+/* XXX Make this dynamic based on ram size - Anton */
+#define SRMMU_NOCACHE_NPAGES 256
+#define SRMMU_NOCACHE_SIZE (SRMMU_NOCACHE_NPAGES * PAGE_SIZE)
+#define SRMMU_NOCACHE_END (SRMMU_NOCACHE_VADDR + SRMMU_NOCACHE_SIZE)
+
+#define FIX_KMAP_BEGIN 0xfc100000
+#define FIX_KMAP_END (FIX_KMAP_BEGIN + ((KM_TYPE_NR*NR_CPUS)-1)*PAGE_SIZE)
+
+#define PKMAP_BASE 0xfc140000
+#define PKMAP_BASE_END (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE)
+
#define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */
#define IOBASE_VADDR 0xfe000000
#define IOBASE_END 0xfe300000
diff --git a/include/asm-sparc64/display7seg.h b/include/asm-sparc64/display7seg.h
new file mode 100644
index 000000000..955a35022
--- /dev/null
+++ b/include/asm-sparc64/display7seg.h
@@ -0,0 +1,79 @@
+/* $Id: display7seg.h,v 1.2 2000/08/02 06:22:35 davem Exp $
+ *
+ * display7seg - Driver interface for the 7-segment display
+ * present on Sun Microsystems CP1400 and CP1500
+ *
+ * Copyright (c) 2000 Eric Brower <ebrower@usa.net>
+ *
+ */
+
+#ifndef __display7seg_h__
+#define __display7seg_h__
+
+#define D7S_IOC 'p'
+
+#define D7SIOCRD _IOR(D7S_IOC, 0x45, int) /* Read device state */
+#define D7SIOCWR _IOW(D7S_IOC, 0x46, int) /* Write device state */
+#define D7SIOCTM _IO (D7S_IOC, 0x47) /* Translate mode (FLIP)*/
+
+/*
+ * ioctl flag definitions
+ *
+ * POINT - Toggle decimal point (0=absent 1=present)
+ * ALARM - Toggle alarm LED (0=green 1=red)
+ * FLIP - Toggle inverted mode (0=normal 1=flipped)
+ * bits 0-4 - Character displayed (see definitions below)
+ *
+ * Display segments are defined as follows,
+ * subject to D7S_FLIP register state:
+ *
+ * a
+ * ---
+ * f| |b
+ * -g-
+ * e| |c
+ * ---
+ * d
+ */
+
+#define D7S_POINT (1 << 7) /* Decimal point*/
+#define D7S_ALARM (1 << 6) /* Alarm LED */
+#define D7S_FLIP (1 << 5) /* Flip display */
+
+#define D7S_0 0x00 /* Numerals 0-9 */
+#define D7S_1 0x01
+#define D7S_2 0x02
+#define D7S_3 0x03
+#define D7S_4 0x04
+#define D7S_5 0x05
+#define D7S_6 0x06
+#define D7S_7 0x07
+#define D7S_8 0x08
+#define D7S_9 0x09
+#define D7S_A 0x0A /* Letters A-F, H, L, P */
+#define D7S_B 0x0B
+#define D7S_C 0x0C
+#define D7S_D 0x0D
+#define D7S_E 0x0E
+#define D7S_F 0x0F
+#define D7S_H 0x10
+#define D7S_E2 0x11
+#define D7S_L 0x12
+#define D7S_P 0x13
+#define D7S_SEGA 0x14 /* Individual segments */
+#define D7S_SEGB 0x15
+#define D7S_SEGC 0x16
+#define D7S_SEGD 0x17
+#define D7S_SEGE 0x18
+#define D7S_SEGF 0x19
+#define D7S_SEGG 0x1A
+#define D7S_SEGABFG 0x1B /* Segment groupings */
+#define D7S_SEGCDEG 0x1C
+#define D7S_SEGBCEF 0x1D
+#define D7S_SEGADG 0x1E
+#define D7S_BLANK 0x1F /* Clear all segments */
+
+#define D7S_MIN_VAL 0x0
+#define D7S_MAX_VAL 0x1F
+
+#endif /* ifndef __display7seg_h__ */
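
A hedged userspace sketch of the ioctl interface defined above; the /dev/d7s device
node name is an assumption, not something this header specifies:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/display7seg.h>

	/* Show the digit '5' with the decimal point lit, then read the state back. */
	int d7s_demo(void)
	{
		int val = D7S_5 | D7S_POINT;
		int fd = open("/dev/d7s", O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, D7SIOCWR, &val) < 0 ||	/* write device state */
		    ioctl(fd, D7SIOCRD, &val) < 0) {	/* read it back */
			close(fd);
			return -1;
		}
		close(fd);
		return val;
	}
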
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index 090239035..e4007b644 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -11,12 +11,27 @@
#include <linux/brlock.h>
#include <linux/spinlock.h>
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_active;
+ unsigned int __softirq_mask;
#ifndef CONFIG_SMP
-extern unsigned int __local_irq_count;
-#define local_irq_count(cpu) __local_irq_count
-#define irq_enter(cpu, irq) (__local_irq_count++)
-#define irq_exit(cpu, irq) (__local_irq_count--)
+ unsigned int __local_irq_count;
#else
+ unsigned int __unused_on_SMP; /* DaveM says use brlock for SMP irq. KAO */
+#endif
+ unsigned int __local_bh_count;
+ unsigned int __syscall_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+/* Note that local_irq_count() is replaced by a sparc64-specific version for SMP */
+
+#ifndef CONFIG_SMP
+#define irq_enter(cpu, irq) ((void)(irq), local_irq_count(cpu)++)
+#define irq_exit(cpu, irq) ((void)(irq), local_irq_count(cpu)--)
+#else
+#undef local_irq_count
#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu, irq) br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu, irq) br_read_unlock(BR_GLOBALIRQ_LOCK)
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index c70b95dad..0927fe5cc 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -34,7 +34,7 @@ extern struct prom_cpuinfo linux_cpus[64];
/* Keep this a multiple of 64-bytes for cache reasons. */
struct cpuinfo_sparc {
/* Dcache line 1 */
- unsigned int bh_count;
+ unsigned int __pad0; /* bh_count moved to irq_stat for consistency. KAO */
unsigned int multiplier;
unsigned int counter;
unsigned int idle_volume;
diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h
index b224a279a..730da1b30 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -11,13 +11,6 @@
#include <asm/hardirq.h>
#include <asm/system.h> /* for membar() */
-#ifndef CONFIG_SMP
-extern unsigned int __local_bh_count;
-#define local_bh_count(cpu) __local_bh_count
-#else
-#define local_bh_count(cpu) (cpu_data[cpu].bh_count)
-#endif
-
#define local_bh_disable() (local_bh_count(smp_processor_id())++)
#define local_bh_enable() (local_bh_count(smp_processor_id())--)
diff --git a/include/asm-sparc64/stat.h b/include/asm-sparc64/stat.h
index 4a4fabac0..48ae70dc9 100644
--- a/include/asm-sparc64/stat.h
+++ b/include/asm-sparc64/stat.h
@@ -1,4 +1,4 @@
-/* $Id: stat.h,v 1.6 1999/12/21 14:09:48 jj Exp $ */
+/* $Id: stat.h,v 1.7 2000/08/04 05:35:55 davem Exp $ */
#ifndef _SPARC64_STAT_H
#define _SPARC64_STAT_H
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index cf5cd0b02..803a15a51 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -1,4 +1,4 @@
-/* $Id: system.h,v 1.60 2000/05/29 05:34:02 davem Exp $ */
+/* $Id: system.h,v 1.61 2000/08/04 05:35:55 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 698a6f262..a35094450 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -50,6 +50,7 @@ enum chipset_type {
VIA_VP3,
VIA_MVP3,
VIA_MVP4,
+ VIA_APOLLO_SUPER,
VIA_APOLLO_PRO,
SIS_GENERIC,
AMD_GENERIC,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8eb171810..9d214fadc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,20 +61,9 @@ enum
TASKLET_SOFTIRQ
};
-#if SMP_CACHE_BYTES <= 32
-/* It is trick to make assembly easier. */
-#define SOFTIRQ_STATE_PAD 32
-#else
-#define SOFTIRQ_STATE_PAD SMP_CACHE_BYTES
-#endif
-
-struct softirq_state
-{
- __u32 active;
- __u32 mask;
-} __attribute__ ((__aligned__(SOFTIRQ_STATE_PAD)));
-
-extern struct softirq_state softirq_state[NR_CPUS];
+/* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+ */
struct softirq_action
{
@@ -87,7 +76,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *d
static inline void __cpu_raise_softirq(int cpu, int nr)
{
- softirq_state[cpu].active |= (1<<nr);
+ softirq_active(cpu) |= (1<<nr);
}
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
new file mode 100644
index 000000000..98cf96ff4
--- /dev/null
+++ b/include/linux/irq_cpustat.h
@@ -0,0 +1,35 @@
+#ifndef __irq_cpustat_h
+#define __irq_cpustat_h
+
+/*
+ * Contains default mappings for irq_cpustat_t, used by almost every
+ * architecture. Some architectures (like s390) have per-cpu hardware pages and
+ * they define their own mappings for irq_stat.
+ *
+ * Keith Owens <kaos@ocs.com.au> July 2000.
+ */
+
+/*
+ * Simple wrappers reducing source bloat. Define all irq_stat fields
+ * here, even ones that are arch dependent. That way we get common
+ * definitions instead of differing sets for each arch.
+ */
+
+extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
+
+#ifdef CONFIG_SMP
+#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+#else
+#define __IRQ_STAT(cpu, member) ((void)(cpu), irq_stat[0].member)
+#endif
+
+ /* arch independent irq_stat fields */
+#define softirq_active(cpu) __IRQ_STAT((cpu), __softirq_active)
+#define softirq_mask(cpu) __IRQ_STAT((cpu), __softirq_mask)
+#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
+#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count)
+#define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count)
+ /* arch dependent irq_stat fields */
+#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */
+
+#endif /* __irq_cpustat_h */
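
With these mappings in place, arch-independent code can touch the per-cpu softirq
and irq bookkeeping without knowing whether it lives in an irq_stat[] array (most
arches) or in S390_lowcore; __cpu_raise_softirq() in linux/interrupt.h above is the
in-tree example, now writing softirq_active(cpu) |= (1<<nr) instead of touching the
old softirq_state[] array. A small illustration (the helper name is made up, not
from the patch):

	/* Any pending softirqs on this cpu?  On i386 this expands to
	 * irq_stat[cpu].__softirq_active & irq_stat[cpu].__softirq_mask. */
	static inline int softirqs_pending(int cpu)
	{
		return softirq_active(cpu) & softirq_mask(cpu);
	}
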
diff --git a/include/net/ipip.h b/include/net/ipip.h
index b522397f8..398660ca8 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -9,7 +9,7 @@
struct ip_tunnel
{
struct ip_tunnel *next;
- struct net_device *dev;
+ struct net_device *dev;
struct net_device_stats stat;
int recursion; /* Depth of hard_start_xmit recursion */
@@ -25,6 +25,25 @@ struct ip_tunnel
struct ip_tunnel_parm parms;
};
+#define IPTUNNEL_XMIT() do { \
+ int err; \
+ int pkt_len = skb->len; \
+ \
+ iph->tot_len = htons(skb->len); \
+ ip_select_ident(iph, &rt->u.dst); \
+ ip_send_check(iph); \
+ \
+ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, do_ip_send); \
+ if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { \
+ stats->tx_bytes += pkt_len; \
+ stats->tx_packets++; \
+ } else { \
+ stats->tx_errors++; \
+ stats->tx_aborted_errors++; \
+ } \
+} while (0)
+
+
extern int ipip_init(void);
extern int ipgre_init(void);
extern int sit_init(void);
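
IPTUNNEL_XMIT() is a statement macro, so it relies on the surrounding transmit
routine having skb, iph, rt and stats in scope with the right types. A heavily
hedged sketch of that calling convention (the function name and the elided setup
are illustrative, not taken from the patch):

	#include <linux/netdevice.h>
	#include <linux/ip.h>
	#include <net/route.h>
	#include <net/ipip.h>

	static int example_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct ip_tunnel *tunnel = (struct ip_tunnel *) dev->priv;
		struct net_device_stats *stats = &tunnel->stat;
		struct rtable *rt = NULL;	/* route to the tunnel endpoint */
		struct iphdr *iph = NULL;	/* outer header, built into skb before this point */

		/* ... route lookup, headroom checks, outer header construction ... */

		IPTUNNEL_XMIT();	/* sets tot_len/id/checksum, runs NF_IP_LOCAL_OUT, updates stats */
		return 0;
	}
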