Diffstat (limited to 'include/asm-i386')
 include/asm-i386/apic.h (renamed from include/asm-i386/i82489.h) |  18
 include/asm-i386/atomic.h                                        |  11
 include/asm-i386/bigmem.h                                        |  69
 include/asm-i386/bugs.h                                          |  43
 include/asm-i386/cache.h                                         |   8
 include/asm-i386/desc.h                                          | 104
 include/asm-i386/dma.h                                           |   5
 include/asm-i386/e820.h                                          |  39
 include/asm-i386/fixmap.h                                        |  14
 include/asm-i386/hardirq.h                                       |   4
 include/asm-i386/hw_irq.h                                        | 223
 include/asm-i386/ide.h                                           |   8
 include/asm-i386/init.h                                          |  18
 include/asm-i386/io.h                                            |  16
 include/asm-i386/kmap_types.h                                    |  10
 include/asm-i386/ldt.h                                           |   2
 include/asm-i386/mmu_context.h                                   |  30
 include/asm-i386/page.h                                          |   1
 include/asm-i386/parport.h                                       |  59
 include/asm-i386/pci.h                                           |  11
 include/asm-i386/pgtable.h                                       | 113
 include/asm-i386/processor.h                                     | 120
 include/asm-i386/resource.h                                      |   4
 include/asm-i386/semaphore-helper.h                              |  94
 include/asm-i386/semaphore.h                                     |  43
 include/asm-i386/serial.h                                        |   3
 include/asm-i386/signal.h                                        |   6
 include/asm-i386/smp.h                                           |  14
 include/asm-i386/smplock.h                                       |   2
 include/asm-i386/softirq.h                                       |  56
 include/asm-i386/spinlock.h                                      | 121
 include/asm-i386/system.h                                        |  28
 include/asm-i386/uaccess.h                                       |   2
 include/asm-i386/unistd.h                                        |   3
 34 files changed, 811 insertions(+), 491 deletions(-)
diff --git a/include/asm-i386/i82489.h b/include/asm-i386/apic.h
index 76f580bde..7b035d9a3 100644
--- a/include/asm-i386/i82489.h
+++ b/include/asm-i386/apic.h
@@ -1,10 +1,10 @@
-#ifndef __ASM_I82489_H
-#define __ASM_I82489_H
+#ifndef __ASM_APIC_H
+#define __ASM_APIC_H
/*
- * Offsets for programming the 82489 and Pentium integrated APIC
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
- * Alan Cox <Alan.Cox@linux.org>, 1995.
+ * Alan Cox <Alan.Cox@linux.org>, 1995.
*/
#define APIC_PHYS_BASE 0xfee00000 /* IA s/w dev Vol 3, Section 7.4 */
@@ -90,14 +90,6 @@
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-extern __inline void apic_write(unsigned long reg, unsigned long v)
-{
- *((volatile unsigned long *)(APIC_BASE+reg))=v;
-}
-
-extern __inline unsigned long apic_read(unsigned long reg)
-{
- return *((volatile unsigned long *)(APIC_BASE+reg));
-}
+#define MAX_IO_APICS 8
#endif
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index fd1d75beb..6346f91d1 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -73,6 +73,17 @@ static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
return c != 0;
}
+extern __inline__ int atomic_add_negative(int i, volatile atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "addl %2,%0; sets %1"
+ :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+ :"ir" (i), "m" (__atomic_fool_gcc(v)));
+ return c;
+}
+
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
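For reference, a minimal userspace sketch (not part of the patch) of the addl+sets pattern the new atomic_add_negative() relies on: the addition and the sign-flag capture happen as one locked instruction pair, so the result never has to be re-read.

#include <stdio.h>

/* Sketch of atomic_add_negative(): add i to *v and report whether
 * the result went negative, read straight from the sign flag. */
static int add_negative(int i, volatile int *v)
{
        unsigned char c;

        __asm__ __volatile__(
                "lock; addl %2,%0; sets %1"
                : "=m" (*v), "=qm" (c)
                : "ir" (i), "m" (*v));
        return c;
}

int main(void)
{
        int counter = 1;

        printf("%d\n", add_negative(-1, &counter));  /* 0: result is 0  */
        printf("%d\n", add_negative(-1, &counter));  /* 1: result is -1 */
        return 0;
}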
diff --git a/include/asm-i386/bigmem.h b/include/asm-i386/bigmem.h
new file mode 100644
index 000000000..1c5c4cf4b
--- /dev/null
+++ b/include/asm-i386/bigmem.h
@@ -0,0 +1,69 @@
+/*
+ * bigmem.h: virtual kernel memory mappings for big memory
+ *
+ * Used in CONFIG_BIGMEM systems for memory pages which are not
+ * addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ */
+
+#ifndef _ASM_BIGMEM_H
+#define _ASM_BIGMEM_H
+
+#include <linux/init.h>
+
+#define BIGMEM_DEBUG /* undef for production */
+
+/* declarations for bigmem.c */
+extern unsigned long bigmem_start, bigmem_end;
+extern int nr_free_bigpages;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+
+extern void kmap_init(void) __init;
+
+/* kmap helper functions necessary to access the bigmem pages in kernel */
+#include <asm/pgtable.h>
+#include <asm/kmap_types.h>
+
+extern inline unsigned long kmap(unsigned long kaddr, enum km_type type)
+{
+ if (__pa(kaddr) < bigmem_start)
+ return kaddr;
+ {
+ enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
+ unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
+
+#ifdef BIGMEM_DEBUG
+ if (!pte_none(*(kmap_pte-idx)))
+ {
+ __label__ here;
+ here:
+ printk(KERN_ERR "not null pte on CPU %d from %p\n",
+ smp_processor_id(), &&here);
+ }
+#endif
+ set_pte(kmap_pte-idx, mk_pte(kaddr & PAGE_MASK, kmap_prot));
+ __flush_tlb_one(vaddr);
+
+ return vaddr | (kaddr & ~PAGE_MASK);
+ }
+}
+
+extern inline void kunmap(unsigned long vaddr, enum km_type type)
+{
+#ifdef BIGMEM_DEBUG
+ enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
+ if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ {
+ /* force other mappings to Oops if they try to access
+ this pte without first remapping it */
+ pte_clear(kmap_pte-idx);
+ __flush_tlb_one(vaddr);
+ }
+#endif
+}
+
+#endif /* _ASM_BIGMEM_H */
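The kmap() above gives every (cpu, type) pair its own fixmap pte, which is why no locking is needed around temporary mappings. A standalone sketch of the slot arithmetic follows; the KM_TYPE_NR, NR_CPUS, PAGE_SHIFT and FIXADDR_TOP values are illustrative, and the fixmap is assumed to grow downward one page per index, as on i386.

#include <stdio.h>

#define KM_TYPE_NR   2          /* KM_READ, KM_WRITE */
#define NR_CPUS      4
#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xffffe000UL

/* fixmap indices grow downward from FIXADDR_TOP, one page each */
static unsigned long fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
        int cpu, type;

        /* every (cpu, type) pair owns a private pte slot, so
         * kmap()/kunmap() need no lock */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (type = 0; type < KM_TYPE_NR; type++) {
                        unsigned int idx = type + KM_TYPE_NR * cpu;
                        printf("cpu %d type %d -> slot %u, vaddr %#lx\n",
                               cpu, type, idx, fix_to_virt(idx));
                }
        return 0;
}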
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 046fd5278..1914385eb 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -23,28 +23,37 @@
#define CONFIG_BUGi386
-__initfunc(static void no_halt(char *s, int *ints))
+static int __init no_halt(char *s)
{
boot_cpu_data.hlt_works_ok = 0;
+ return 1;
}
-__initfunc(static void mca_pentium(char *s, int *ints))
+__setup("no-hlt", no_halt);
+
+static int __init mca_pentium(char *s)
{
mca_pentium_flag = 1;
+ return 1;
}
-__initfunc(static void no_387(char *s, int *ints))
+__setup("mca-pentium", mca_pentium);
+
+static int __init no_387(char *s)
{
boot_cpu_data.hard_math = 0;
write_cr0(0xE | read_cr0());
+ return 1;
}
+__setup("no387", no_387);
+
static char __initdata fpu_error = 0;
-__initfunc(static void copro_timeout(void))
+static void __init copro_timeout(void)
{
fpu_error = 1;
- timer_table[COPRO_TIMER].expires = jiffies+100;
+ timer_table[COPRO_TIMER].expires = jiffies+HZ;
timer_active |= 1<<COPRO_TIMER;
printk(KERN_ERR "387 failed: trying to reset\n");
send_sig(SIGFPE, current, 1);
@@ -55,7 +64,7 @@ __initfunc(static void copro_timeout(void))
static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;
-__initfunc(static void check_fpu(void))
+static void __init check_fpu(void)
{
unsigned short control_word;
@@ -102,7 +111,7 @@ __initfunc(static void check_fpu(void))
* should get there first..
*/
printk(KERN_INFO "Checking 386/387 coupling... ");
- timer_table[COPRO_TIMER].expires = jiffies+50;
+ timer_table[COPRO_TIMER].expires = jiffies+HZ/2;
timer_table[COPRO_TIMER].fn = copro_timeout;
timer_active |= 1<<COPRO_TIMER;
__asm__("clts ; fninit ; fnstcw %0 ; fwait":"=m" (*&control_word));
@@ -134,7 +143,7 @@ __initfunc(static void check_fpu(void))
printk("Hmm, FPU using exception 16 error reporting with FDIV bug.\n");
}
-__initfunc(static void check_hlt(void))
+static void __init check_hlt(void)
{
printk(KERN_INFO "Checking 'hlt' instruction... ");
if (!boot_cpu_data.hlt_works_ok) {
@@ -150,7 +159,7 @@ __initfunc(static void check_hlt(void))
* machine even from user space.
*/
-__initfunc(static void check_popad(void))
+static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
int res, inp = (int) &res;
@@ -183,7 +192,7 @@ __initfunc(static void check_popad(void))
extern void vide(void);
__asm__(".align 4\nvide: ret");
-__initfunc(static void check_amd_k6(void))
+static void __init check_amd_k6(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86_model == 6 &&
@@ -229,7 +238,7 @@ __initfunc(static void check_amd_k6(void))
extern void trap_init_f00f_bug(void);
-__initfunc(static void check_pentium_f00f(void))
+static void __init check_pentium_f00f(void)
{
/*
* Pentium and Pentium MMX
@@ -271,7 +280,7 @@ static inline int test_cyrix_52div(void)
extern unsigned char Cx86_dir0_msb; /* exported HACK from cyrix_model() */
-__initfunc(static void check_cx686_cpuid(void))
+static void __init check_cx686_cpuid(void)
{
if (boot_cpu_data.cpuid_level == -1 &&
((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) {
@@ -311,7 +320,7 @@ __initfunc(static void check_cx686_cpuid(void))
extern void calibrate_delay(void) __init;
-__initfunc(static void check_cx686_slop(void))
+static void __init check_cx686_slop(void)
{
if (Cx86_dir0_msb == 3) {
unsigned char ccr3, ccr5;
@@ -339,7 +348,7 @@ __initfunc(static void check_cx686_slop(void))
* PII and PPro exhibit this behavior too, but they have cpuid available.
*/
-__initfunc(static void check_cyrix_cpu(void))
+static void __init check_cyrix_cpu(void)
{
if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
&& test_cyrix_52div()) {
@@ -354,7 +363,7 @@ __initfunc(static void check_cyrix_cpu(void))
* enable the workaround for it.
*/
-__initfunc(static void check_cyrix_coma(void))
+static void __init check_cyrix_coma(void)
{
}
@@ -371,7 +380,7 @@ __initfunc(static void check_cyrix_coma(void))
* reads from the APIC.
*/
-__initfunc(static void check_config(void))
+static void __init check_config(void)
{
/*
* We'd better not be a i386 if we're configured to use some
@@ -400,7 +409,7 @@ __initfunc(static void check_config(void))
#endif
}
-__initfunc(static void check_bugs(void))
+static void __init check_bugs(void)
{
check_cyrix_cpu();
identify_cpu(&boot_cpu_data);
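This hunk converts the old __initfunc(... char *s, int *ints) boot handlers to __setup() registration. A minimal userspace model of what __setup() amounts to, a (string, handler) table consulted while parsing the command line; the table layout here is an illustration, not the kernel's actual .setup.init section or its prefix-matching parser.

#include <stdio.h>
#include <string.h>

struct setup_entry {
        const char *str;
        int (*setup_func)(char *);
};

static int hlt_works_ok = 1;

static int no_halt(char *s) { (void)s; hlt_works_ok = 0; return 1; }

static struct setup_entry setup_table[] = {
        { "no-hlt", no_halt },
};

static void parse_option(char *opt)
{
        size_t i;

        for (i = 0; i < sizeof(setup_table)/sizeof(setup_table[0]); i++)
                if (strcmp(opt, setup_table[i].str) == 0)
                        setup_table[i].setup_func(opt);
}

int main(void)
{
        parse_option("no-hlt");
        printf("hlt_works_ok = %d\n", hlt_works_ok);
        return 0;
}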
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index cea6c8540..379568491 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -15,4 +15,12 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#ifdef MODULE
+#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
+#else
+#define __cacheline_aligned \
+ __attribute__((__aligned__(L1_CACHE_BYTES), \
+ __section__(".data.cacheline_aligned")))
+#endif
+
#endif
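__cacheline_aligned exists so hot data items don't falsely share a cacheline between CPUs. A small standalone sketch of the effect (the L1_CACHE_BYTES value is illustrative):

#include <stdio.h>

#define L1_CACHE_BYTES 32
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))

static int counter_a __cacheline_aligned;
static int counter_b __cacheline_aligned;

int main(void)
{
        printf("a at %p, b at %p\n", (void *)&counter_a, (void *)&counter_b);
        printf("alignment ok: %d\n",
               ((unsigned long)&counter_a % L1_CACHE_BYTES) == 0 &&
               ((unsigned long)&counter_b % L1_CACHE_BYTES) == 0);
        return 0;
}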
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index e91580e04..667ad58f3 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -1,6 +1,46 @@
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H
+#include <asm/ldt.h>
+
+/*
+ * The layout of the GDT under Linux:
+ *
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment <-- new cacheline
+ * 5 - user data segment
+ * 6 - not used
+ * 7 - not used
+ * 8 - APM BIOS support <-- new cacheline
+ * 9 - APM BIOS support
+ * 10 - APM BIOS support
+ * 11 - APM BIOS support
+ *
+ * The TSS+LDT descriptors are spread out a bit so that every CPU
+ * has an exclusive cacheline for the per-CPU TSS and LDT:
+ *
+ * 12 - CPU#0 TSS <-- new cacheline
+ * 13 - CPU#0 LDT
+ * 14 - not used
+ * 15 - not used
+ * 16 - CPU#1 TSS <-- new cacheline
+ * 17 - CPU#1 LDT
+ * 18 - not used
+ * 19 - not used
+ * ... NR_CPUS per-CPU TSS+LDT's if on SMP
+ *
+ * Entry into gdt where to find first TSS.
+ */
+#define __FIRST_TSS_ENTRY 12
+#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1)
+
+#define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY)
+#define __LDT(n) (((n)<<2) + __FIRST_LDT_ENTRY)
+
+#ifndef __ASSEMBLY__
struct desc_struct {
unsigned long a,b;
};
@@ -16,46 +56,44 @@ struct Xgt_desc_struct {
#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
#define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
+#define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3))
+
+#define __load_LDT(n) __asm__ __volatile__("lldt %%ax"::"a" (__LDT(n)<<3))
+
/*
- * Entry into gdt where to find first TSS. GDT layout:
- * 0 - null
- * 1 - not used
- * 2 - kernel code segment
- * 3 - kernel data segment
- * 4 - user code segment
- * 5 - user data segment
- * 6 - not used
- * 7 - not used
- * 8 - APM BIOS support
- * 9 - APM BIOS support
- * 10 - APM BIOS support
- * 11 - APM BIOS support
- * 12 - TSS #0
- * 13 - LDT #0
- * 14 - TSS #1
- * 15 - LDT #1
+ * This is the ldt that every process will get unless we need
+ * something other than this.
*/
-#define FIRST_TSS_ENTRY 12
-#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
-#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
-#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
-#define load_TR(n) __asm__ __volatile__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
-#define load_ldt(n) __asm__ __volatile__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
-#define store_TR(n) \
-__asm__("str %%ax\n\t" \
- "subl %2,%%eax\n\t" \
- "shrl $4,%%eax" \
- :"=a" (n) \
- :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
-
+extern struct desc_struct default_ldt[];
extern void set_intr_gate(unsigned int irq, void * addr);
extern void set_ldt_desc(unsigned int n, void *addr, unsigned int size);
extern void set_tss_desc(unsigned int n, void *addr);
+extern inline void clear_LDT(void)
+{
+ int cpu = smp_processor_id();
+ set_ldt_desc(cpu, &default_ldt[0], 5);
+ __load_LDT(cpu);
+}
+
/*
- * This is the ldt that every process will get unless we need
- * something other than this.
+ * load one particular LDT into the current CPU
*/
-extern struct desc_struct default_ldt;
+extern inline void load_LDT (struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+ void *segments = mm->segments;
+ int count = LDT_ENTRIES;
+
+ if (!segments) {
+ segments = &default_ldt[0];
+ count = 5;
+ }
+
+ set_ldt_desc(cpu, segments, count);
+ __load_LDT(cpu);
+}
+
+#endif /* !__ASSEMBLY__ */
#endif
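With 8-byte descriptors and a 32-byte cacheline, stepping the TSS/LDT entries in groups of four ((n)<<2) puts each CPU's pair on its own line, matching the layout comment above. A standalone check of the arithmetic (not part of the patch):

#include <stdio.h>

#define __FIRST_TSS_ENTRY 12
#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1)
#define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY)
#define __LDT(n) (((n)<<2) + __FIRST_LDT_ENTRY)

int main(void)
{
        int cpu;

        /* byte offset = entry * 8; every CPU's TSS starts a fresh
         * 32-byte cacheline (96, 128, 160, ...) */
        for (cpu = 0; cpu < 4; cpu++)
                printf("cpu %d: TSS entry %2d (byte %3d), LDT entry %2d, "
                       "TSS selector %#x\n",
                       cpu, __TSS(cpu), __TSS(cpu) * 8,
                       __LDT(cpu), __TSS(cpu) << 3);
        return 0;
}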
diff --git a/include/asm-i386/dma.h b/include/asm-i386/dma.h
index 12e02b1d0..1bc9899b2 100644
--- a/include/asm-i386/dma.h
+++ b/include/asm-i386/dma.h
@@ -9,8 +9,8 @@
#define _ASM_DMA_H
#include <linux/config.h>
+#include <linux/spinlock.h> /* And spinlocks */
#include <asm/io.h> /* need byte IO */
-#include <asm/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
@@ -289,9 +289,10 @@ extern void free_dma(unsigned int dmanr); /* release it again */
/* From PCI */
-#ifdef CONFIG_PCI_QUIRKS
+#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
+
#endif /* _ASM_DMA_H */
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
new file mode 100644
index 000000000..50b708d91
--- /dev/null
+++ b/include/asm-i386/e820.h
@@ -0,0 +1,39 @@
+/*
+ * structures and definitions for the int 15, ax=e820 memory map
+ * scheme.
+ *
+ * In a nutshell, arch/i386/boot/setup.S populates a scratch table
+ * in the empty_zero_block that contains a list of usable address/size
+ * pairs. In arch/i386/kernel/setup.c, this information is
+ * transferred into the e820map, and in arch/i386/mm/init.c, that
+ * new information is used to mark pages reserved or not.
+ *
+ */
+#ifndef __E820_HEADER
+#define __E820_HEADER
+
+#define E820MAP 0x2d0 /* our map */
+#define E820MAX 32 /* number of entries in E820MAP */
+#define E820NR 0x1e8 /* # entries in E820MAP */
+
+#define E820_RAM 1
+#define E820_RESERVED 2
+#define E820_ACPI 3
+
+#define HIGH_MEMORY (1024*1024)
+
+#ifndef __ASSEMBLY__
+
+struct e820map {
+ int nr_map;
+ struct {
+ long long addr; /* start of memory segment */
+ long long size; /* size of memory segment */
+ long type; /* type of memory segment */
+ } map[E820MAX];
+};
+
+extern struct e820map e820;
+#endif/*!__ASSEMBLY__*/
+
+#endif/*__E820_HEADER*/
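A standalone copy of the e820map layout above, filled with a made-up two-entry map to show how setup.c-style code would walk it; the sample addresses are illustrative only.

#include <stdio.h>

#define E820MAX       32
#define E820_RAM       1
#define E820_RESERVED  2

struct e820map {
        int nr_map;
        struct {
                long long addr;  /* start of memory segment */
                long long size;  /* size of memory segment */
                long type;       /* type of memory segment */
        } map[E820MAX];
};

int main(void)
{
        struct e820map e820 = {
                .nr_map = 2,
                .map = {
                        { 0x00000000, 0x0009fc00, E820_RAM },
                        { 0x000f0000, 0x00010000, E820_RESERVED },
                },
        };
        int i;

        for (i = 0; i < e820.nr_map; i++)
                printf("e820: size %016llx @ %016llx type %ld\n",
                       e820.map[i].size, e820.map[i].addr,
                       e820.map[i].type);
        return 0;
}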
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index c259a45ee..34c82dbe0 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -6,6 +6,8 @@
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
@@ -13,7 +15,12 @@
#include <linux/config.h>
#include <linux/kernel.h>
+#include <asm/apic.h>
#include <asm/page.h>
+#ifdef CONFIG_BIGMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -45,7 +52,8 @@ enum fixed_addresses {
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
- FIX_IO_APIC_BASE,
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
FIX_CO_CPU, /* Cobalt timer */
@@ -53,6 +61,10 @@ enum fixed_addresses {
FIX_LI_PCIA, /* Lithium PCI Bridge A */
FIX_LI_PCIB, /* Lithium PCI Bridge B */
#endif
+#ifdef CONFIG_BIGMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
__end_of_fixed_addresses
};
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index 533961343..f96faa806 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/tasks.h>
+#include <linux/threads.h>
extern unsigned int local_irq_count[NR_CPUS];
@@ -53,7 +53,7 @@ static inline void hardirq_exit(int cpu)
static inline int hardirq_trylock(int cpu)
{
- return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
+ return !local_irq_count[cpu] && !test_bit(0,&global_irq_lock);
}
#define hardirq_endlock(cpu) do { } while (0)
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
new file mode 100644
index 000000000..8cce40151
--- /dev/null
+++ b/include/asm-i386/hw_irq.h
@@ -0,0 +1,223 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ * linux/include/asm/hw_irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * moved some of the old arch/i386/kernel/irq.h to here. VY
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <asm/irq.h>
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+#define SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture:
+ *
+ * (some of the following vectors are 'rare', they are merged
+ * into a single vector (FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.)
+ */
+#define RESCHEDULE_VECTOR 0x30
+#define INVALIDATE_TLB_VECTOR 0x31
+#define STOP_CPU_VECTOR 0x40
+#define LOCAL_TIMER_VECTOR 0x41
+#define CALL_FUNCTION_VECTOR 0x50
+
+/*
+ * First APIC vector available to drivers: (vectors 0x51-0xfe)
+ */
+#define IRQ0_TRAP_VECTOR 0x51
+
+/*
+ * This IRQ should never happen, but we print a message nevertheless.
+ */
+#define SPURIOUS_APIC_VECTOR 0xff
+
+extern int irq_vector[NR_IRQS];
+#define IO_APIC_VECTOR(irq) irq_vector[irq]
+
+extern void init_IRQ_SMP(void);
+
+/*
+ * Various low-level irq details needed by irq.c, process.c,
+ * time.c, io_apic.c and smp.c
+ *
+ * Interrupt entry/exit code at both C and assembly level
+ */
+
+extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern void mask_irq(unsigned int irq);
+extern void unmask_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+extern int i8259A_irq_pending(unsigned int irq);
+extern void ack_APIC_irq(void);
+extern void FASTCALL(send_IPI_self(int vector));
+extern void init_VISWS_APIC_irqs(void);
+extern void setup_IO_APIC(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
+extern void make_8259A_irq(unsigned int irq);
+extern void send_IPI(int dest, int vector);
+extern void init_pic_mode(void);
+extern void print_IO_APIC(void);
+
+extern unsigned long io_apic_irqs;
+
+extern char _stext, _etext;
+
+#define MAX_IRQ_SOURCES 128
+#define MAX_MP_BUSSES 32
+enum mp_bustype {
+ MP_BUS_ISA,
+ MP_BUS_EISA,
+ MP_BUS_PCI
+};
+extern int mp_bus_id_to_type [MAX_MP_BUSSES];
+extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
+
+
+#ifdef __SMP__
+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+
+#else
+
+#define IO_APIC_IRQ(x) (0)
+
+#endif
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define SAVE_ALL \
+ "cld\n\t" \
+ "pushl %es\n\t" \
+ "pushl %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %ebp\n\t" \
+ "pushl %edi\n\t" \
+ "pushl %esi\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %ebx\n\t" \
+ "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
+ "movl %dx,%ds\n\t" \
+ "movl %dx,%es\n\t"
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+
+#define GET_CURRENT \
+ "movl %esp, %ebx\n\t" \
+ "andl $-8192, %ebx\n\t"
+
+#ifdef __SMP__
+
+/*
+ * SMP has a few special interrupts for IPI messages
+ */
+
+ /* there is a second layer of macros just to get the symbolic
+ name for the vector evaluated. This change is for RTLinux */
+#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
+#define XBUILD_SMP_INTERRUPT(x,v)\
+asmlinkage void x(void); \
+asmlinkage void call_##x(void); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(x) ":\n\t" \
+ "pushl $"#v"\n\t" \
+ SAVE_ALL \
+ SYMBOL_NAME_STR(call_##x)":\n\t" \
+ "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
+ "jmp ret_from_intr\n");
+
+#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
+#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
+asmlinkage void x(struct pt_regs * regs); \
+asmlinkage void call_##x(void); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(x) ":\n\t" \
+ "pushl $"#v"\n\t" \
+ SAVE_ALL \
+ "movl %esp,%eax\n\t" \
+ "pushl %eax\n\t" \
+ SYMBOL_NAME_STR(call_##x)":\n\t" \
+ "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "jmp ret_from_intr\n");
+
+#endif /* __SMP__ */
+
+#define BUILD_COMMON_IRQ() \
+asmlinkage void call_do_IRQ(void); \
+__asm__( \
+ "\n" __ALIGN_STR"\n" \
+ "common_interrupt:\n\t" \
+ SAVE_ALL \
+ "pushl $ret_from_intr\n\t" \
+ SYMBOL_NAME_STR(call_do_IRQ)":\n\t" \
+ "jmp "SYMBOL_NAME_STR(do_IRQ));
+
+/*
+ * subtle. orig_eax is used by the signal code to distinguish between
+ * system calls and interrupted 'random user-space'. Thus we have
+ * to put a negative value into orig_eax here. (the problem is that
+ * both system calls and IRQs want to have small integer numbers in
+ * orig_eax, and the syscall code has won the optimization conflict ;)
+ *
+ * Subtle as a pigs ear. VY
+ */
+
+#define BUILD_IRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $"#nr"-256\n\t" \
+ "jmp common_interrupt");
+
+/*
+ * x86 profiling function, SMP safe. We might want to do this
+ * entirely in assembly?
+ */
+static inline void x86_do_profile (unsigned long eip)
+{
+ if (prof_buffer && current->pid) {
+ eip -= (unsigned long) &_stext;
+ eip >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds EIP values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (eip > prof_len-1)
+ eip = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[eip]);
+ }
+}
+
+#ifdef __SMP__ /*more of this file should probably be ifdefed SMP */
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
+ send_IPI_self(IO_APIC_VECTOR(i));
+}
+#else
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+#endif
+
+#endif /* _ASM_HW_IRQ_H */
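A userspace model of x86_do_profile() above: the EIP is rebased against _stext, scaled into a histogram bucket, and out-of-range hits are clamped into the last slot so they show up as a visible spike instead of vanishing. The prof_shift/prof_len values are illustrative.

#include <stdio.h>

#define PROF_SHIFT 4
#define PROF_LEN   16

static unsigned int prof_buffer[PROF_LEN];

static void do_profile(unsigned long eip, unsigned long stext)
{
        eip -= stext;
        eip >>= PROF_SHIFT;
        if (eip > PROF_LEN - 1)
                eip = PROF_LEN - 1;     /* don't drop out-of-bounds hits */
        prof_buffer[eip]++;
}

int main(void)
{
        do_profile(0x1000 + 0x10, 0x1000);    /* lands in bucket 1      */
        do_profile(0x1000 + 0x9000, 0x1000);  /* clamped into bucket 15 */
        printf("bucket 1 = %u, bucket %d = %u\n",
               prof_buffer[1], PROF_LEN - 1, prof_buffer[PROF_LEN - 1]);
        return 0;
}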
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 6876f9f60..1947b62c0 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -13,8 +13,10 @@
#ifdef __KERNEL__
+#include <linux/config.h>
+
#ifndef MAX_HWIFS
-#define MAX_HWIFS 8
+#define MAX_HWIFS 10
#endif
#define ide__sti() __sti()
@@ -67,7 +69,7 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
static __inline__ void ide_init_default_hwifs(void)
{
-#ifdef __DO_I_NEED_THIS
+#ifndef CONFIG_BLK_DEV_IDEPCI
hw_regs_t hw;
int index;
@@ -76,7 +78,7 @@ static __inline__ void ide_init_default_hwifs(void)
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw, NULL);
}
-#endif /* __DO_I_NEED_THIS */
+#endif /* CONFIG_BLK_DEV_IDEPCI */
}
typedef union {
diff --git a/include/asm-i386/init.h b/include/asm-i386/init.h
index 7618c0054..17d215574 100644
--- a/include/asm-i386/init.h
+++ b/include/asm-i386/init.h
@@ -1,17 +1 @@
-#ifndef _I386_INIT_H
-#define _I386_INIT_H
-
-#define __init __attribute__ ((__section__ (".text.init")))
-#define __initdata __attribute__ ((__section__ (".data.init")))
-#define __initfunc(__arginit) \
- __arginit __init; \
- __arginit
-/* For assembly routines */
-#define __INIT .section ".text.init",#alloc,#execinstr
-#define __FINIT .previous
-#define __INITDATA .section ".data.init",#alloc,#write
-
-#define __cacheline_aligned __attribute__ \
- ((__section__ (".data.cacheline_aligned")))
-
-#endif
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 93fd0c1b5..906fca475 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -27,6 +27,7 @@
/*
* Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
*/
#ifdef SLOW_IO_BY_JUMPING
@@ -98,6 +99,7 @@ __OUTS(l)
#ifdef __KERNEL__
+#include <linux/config.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
@@ -109,12 +111,20 @@ __OUTS(l)
*/
extern inline unsigned long virt_to_phys(volatile void * address)
{
+#ifdef CONFIG_BIGMEM
+ return __pa(address);
+#else
return __io_phys(address);
+#endif
}
extern inline void * phys_to_virt(unsigned long address)
{
+#ifdef CONFIG_BIGMEM
+ return __va(address);
+#else
return __io_virt(address);
+#endif
}
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
@@ -152,10 +162,16 @@ extern void iounmap(void *addr);
#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
new file mode 100644
index 000000000..b3f16e29b
--- /dev/null
+++ b/include/asm-i386/kmap_types.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ KM_READ,
+ KM_WRITE,
+ KM_TYPE_NR,
+};
+
+#endif
diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h
index 55b75ca39..9d1110f98 100644
--- a/include/asm-i386/ldt.h
+++ b/include/asm-i386/ldt.h
@@ -11,6 +11,7 @@
/* The size of each LDT entry. */
#define LDT_ENTRY_SIZE 8
+#ifndef __ASSEMBLY__
struct modify_ldt_ldt_s {
unsigned int entry_number;
unsigned long base_addr;
@@ -27,4 +28,5 @@ struct modify_ldt_ldt_s {
#define MODIFY_LDT_CONTENTS_STACK 1
#define MODIFY_LDT_CONTENTS_CODE 2
+#endif /* !__ASSEMBLY__ */
#endif
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index e8b812e2f..cd142e995 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -1,13 +1,33 @@
#ifndef __I386_MMU_CONTEXT_H
#define __I386_MMU_CONTEXT_H
+#include <asm/desc.h>
+#include <asm/atomic.h>
+
/*
- * get a new mmu context.. x86's don't know about contexts.
+ * possibly do the LDT unload here?
*/
-#define get_mmu_context(x) do { } while (0)
+#define destroy_context(mm) do { } while(0)
+#define init_new_context(tsk,mm) do { } while (0)
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+{
+
+ if (prev != next) {
+ /*
+ * Re-load LDT if necessary
+ */
+ if (prev->segments != next->segments)
+ load_LDT(next);
+
+ /* Re-load page tables */
+ asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
+ clear_bit(cpu, &prev->cpu_vm_mask);
+ }
+ set_bit(cpu, &next->cpu_vm_mask);
+}
-#define init_new_context(mm) do { } while(0)
-#define destroy_context(mm) do { } while(0)
-#define activate_context(tsk) do { } while(0)
+#define activate_mm(prev, next) \
+ switch_mm((prev),(next),NULL,smp_processor_id())
#endif
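The cpu_vm_mask bits maintained by switch_mm() above record which CPUs currently have an mm loaded, so a cross-CPU TLB shootdown only needs to IPI those CPUs. A minimal model of the bookkeeping, with the page-table reload and all locking omitted:

#include <stdio.h>

struct mm { unsigned long cpu_vm_mask; };

static void switch_mm(struct mm *prev, struct mm *next, unsigned cpu)
{
        if (prev != next) {
                /* the %cr3 reload would happen here */
                prev->cpu_vm_mask &= ~(1UL << cpu);
        }
        next->cpu_vm_mask |= 1UL << cpu;
}

int main(void)
{
        struct mm a = { 0 }, b = { 0 };

        switch_mm(&a, &b, 0);   /* cpu0 switches from a to b */
        switch_mm(&b, &b, 1);   /* cpu1 re-enters b: bit set, no reload */
        printf("a mask %#lx, b mask %#lx\n", a.cpu_vm_mask, b.cpu_vm_mask);
        return 0;
}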
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 847dffbd0..2e5006f4a 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -101,6 +101,7 @@ typedef unsigned long pgprot_t;
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
+#define PHYSMAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/parport.h b/include/asm-i386/parport.h
new file mode 100644
index 000000000..c08ee4686
--- /dev/null
+++ b/include/asm-i386/parport.h
@@ -0,0 +1,59 @@
+/*
+ * parport.h: ia32-specific parport initialisation
+ *
+ * Copyright (C) 1999 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_I386_PARPORT_H
+#define _ASM_I386_PARPORT_H 1
+
+#include <linux/config.h>
+
+/* Maximum number of ports to support. It is useless to set this greater
+ than PARPORT_MAX (in <linux/parport.h>). */
+#define PARPORT_PC_MAX_PORTS 8
+
+/* If parport_cs (PCMCIA) is managing ports for us, we'll need the
+ * probing routines forever; otherwise we can lose them at boot time. */
+#ifdef CONFIG_PARPORT_PC_PCMCIA
+#define __maybe_initdata
+#define __maybe_init
+#else
+#define __maybe_initdata __initdata
+#define __maybe_init __init
+#endif
+
+static int __maybe_init parport_pc_init_pci(int irq, int dma);
+
+static int user_specified __maybe_initdata = 0;
+int __init
+parport_pc_init(int *io, int *io_hi, int *irq, int *dma)
+{
+ int count = 0, i = 0;
+
+ if (io && *io) {
+ /* Only probe the ports we were given. */
+ user_specified = 1;
+ do {
+ if (!*io_hi) *io_hi = 0x400 + *io;
+ if (parport_pc_probe_port(*(io++), *(io_hi++),
+ *(irq++), *(dma++)))
+ count++;
+ } while (*io && (++i < PARPORT_PC_MAX_PORTS));
+ } else {
+ /* Probe all the likely ports. */
+ if (parport_pc_probe_port(0x3bc, 0x7bc, irq[0], dma[0]))
+ count++;
+ if (parport_pc_probe_port(0x378, 0x778, irq[0], dma[0]))
+ count++;
+ if (parport_pc_probe_port(0x278, 0x678, irq[0], dma[0]))
+ count++;
+ count += parport_pc_init_pci (irq[0], dma[0]);
+ }
+
+ return count;
+}
+
+#endif /* !(_ASM_I386_PARPORT_H) */
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
new file mode 100644
index 000000000..d7c59f21b
--- /dev/null
+++ b/include/asm-i386/pci.h
@@ -0,0 +1,11 @@
+#ifndef __i386_PCI_H
+#define __i386_PCI_H
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses() 0
+
+#endif /* __i386_PCI_H */
+
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index b4c8d0e99..36303437b 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,7 +15,9 @@
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
-#include <linux/tasks.h>
+#include <linux/threads.h>
+
+extern pgd_t swapper_pg_dir[1024];
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
@@ -56,21 +58,21 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- if (mm == current->mm)
+ if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- if (vma->vm_mm == current->mm)
+ if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- if (mm == current->mm)
+ if (mm == current->active_mm)
__flush_tlb();
}
@@ -86,79 +88,19 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define local_flush_tlb() \
__flush_tlb()
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-#define CLEVER_SMP_INVALIDATE
-#ifdef CLEVER_SMP_INVALIDATE
-
-/*
- * Smarter SMP flushing macros.
- * c/o Linus Torvalds.
- *
- * These mean you can really definitely utterly forget about
- * writing to user space from interrupts. (Its not allowed anyway).
- */
-
-static inline void flush_tlb_current_task(void)
-{
- /* just one copy of this mm? */
- if (atomic_read(&current->mm->count) == 1)
- local_flush_tlb(); /* and that's us, so.. */
- else
- smp_flush_tlb();
-}
-
-#define flush_tlb() flush_tlb_current_task()
-
-#define flush_tlb_all() smp_flush_tlb()
+#define flush_tlb() flush_tlb_current_task()
-static inline void flush_tlb_mm(struct mm_struct * mm)
-{
- if (mm == current->mm && atomic_read(&mm->count) == 1)
- local_flush_tlb();
- else
- smp_flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct * vma,
- unsigned long va)
-{
- if (vma->vm_mm == current->mm && atomic_read(&current->mm->count) == 1)
- __flush_tlb_one(va);
- else
- smp_flush_tlb();
-}
-
-static inline void flush_tlb_range(struct mm_struct * mm,
- unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
flush_tlb_mm(mm);
}
-#else
-
-#define flush_tlb() \
- smp_flush_tlb()
-
-#define flush_tlb_all() flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- flush_tlb();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- flush_tlb();
-}
-#endif
#endif
#endif /* !__ASSEMBLY__ */
@@ -302,15 +244,6 @@ extern pte_t * __bad_pagetable(void);
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-/* to set the page-dir */
-#define SET_PAGE_DIR(tsk,pgdir) \
-do { \
- unsigned long __pgdir = __pa(pgdir); \
- (tsk)->tss.cr3 = __pgdir; \
- if ((tsk) == current) \
- __asm__ __volatile__("movl %0,%%cr3": :"r" (__pgdir)); \
-} while (0)
-
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
@@ -401,13 +334,11 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
- init = pgd_offset(&init_mm, 0);
- memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
@@ -416,9 +347,9 @@ extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
- if((ret = pgd_quicklist) != NULL) {
+ if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
+ ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
@@ -481,9 +412,9 @@ extern __inline__ void free_pmd_slow(pmd_t *pmd)
extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pte_free_kernel(pte) free_pte_slow(pte)
+#define pte_free(pte) free_pte_slow(pte)
+#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
@@ -572,8 +503,6 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
#endif
}
-extern pgd_t swapper_pg_dir[1024];
-
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
@@ -596,4 +525,6 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
+#define io_remap_page_range remap_page_range
+
#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index dde35a87c..99b291d40 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -12,6 +12,7 @@
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
+#include <linux/threads.h>
/*
* Default implementation of macro that returns current
@@ -95,6 +96,7 @@ struct cpuinfo_x86 {
#define X86_FEATURE_AMD3D 0x80000000
extern struct cpuinfo_x86 boot_cpu_data;
+extern struct tss_struct init_tss[NR_CPUS];
#ifdef __SMP__
extern struct cpuinfo_x86 cpu_data[];
@@ -124,6 +126,48 @@ extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
: "cc");
}
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ __asm__("movl %%cr4,%%eax\n\t"
+ "orl %0,%%eax\n\t"
+ "movl %%eax,%%cr4\n"
+ : : "irg" (mask)
+ :"ax");
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features &= ~mask;
+ __asm__("movl %%cr4,%%eax\n\t"
+ "andl %0,%%eax\n\t"
+ "movl %%eax,%%cr4\n"
+ : : "irg" (~mask)
+ :"ax");
+}
+
/*
* Cyrix CPU configuration register indexes
*/
@@ -177,6 +221,8 @@ extern unsigned int mca_pentium_flag;
* Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
*/
#define IO_BITMAP_SIZE 32
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
struct i387_hard_struct {
long cwd;
@@ -213,7 +259,7 @@ typedef struct {
unsigned long seg;
} mm_segment_t;
-struct thread_struct {
+struct tss_struct {
unsigned short back_link,__blh;
unsigned long esp0;
unsigned short ss0,__ss0h;
@@ -221,7 +267,7 @@ struct thread_struct {
unsigned short ss1,__ss1h;
unsigned long esp2;
unsigned short ss2,__ss2h;
- unsigned long cr3;
+ unsigned long __cr3;
unsigned long eip;
unsigned long eflags;
unsigned long eax,ecx,edx,ebx;
@@ -238,19 +284,43 @@ struct thread_struct {
unsigned short ldt, __ldth;
unsigned short trace, bitmap;
unsigned long io_bitmap[IO_BITMAP_SIZE+1];
- unsigned long tr;
+ /*
+ * pads the TSS to be cacheline-aligned (size is 0x100)
+ */
+ unsigned long __cacheline_filler[5];
+};
+
+struct thread_struct {
+ unsigned long esp0;
+ unsigned long eip;
+ unsigned long esp;
+ unsigned long fs;
+ unsigned long gs;
+/* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+/* fault info */
unsigned long cr2, trap_no, error_code;
- mm_segment_t segment;
-/* debug registers */
- long debugreg[8]; /* Hardware debugging registers */
/* floating point info */
- union i387_union i387;
+ union i387_union i387;
/* virtual 86 mode info */
- struct vm86_struct * vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags, v86mask, v86mode, saved_esp0;
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, v86mode, saved_esp0;
+/* IO permissions */
+ int ioperm;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
};
+#define INIT_THREAD { \
+ 0, \
+ 0, 0, 0, 0, \
+ { [0 ... 7] = 0 }, /* debugging registers */ \
+ 0, 0, 0, \
+ { { 0, }, }, /* 387 state */ \
+ 0,0,0,0,0,0, \
+ 0,{~0,} /* io permissions */ \
+}
+
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
@@ -259,19 +329,15 @@ struct thread_struct {
sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
__KERNEL_DS, 0, /* ss0 */ \
0,0,0,0,0,0, /* stack1, stack2 */ \
- (long) &swapper_pg_dir - PAGE_OFFSET, /* cr3 */ \
+ 0, /* cr3 */ \
0,0, /* eip,eflags */ \
0,0,0,0, /* eax,ecx,edx,ebx */ \
0,0,0,0, /* esp,ebp,esi,edi */ \
0,0,0,0,0,0, /* es,cs,ss */ \
0,0,0,0,0,0, /* ds,fs,gs */ \
- _LDT(0),0, /* ldt */ \
- 0, 0x8000, /* tace, bitmap */ \
- {~0, }, /* ioperm */ \
- _TSS(0), 0, 0, 0, (mm_segment_t) { 0 }, /* obsolete */ \
- { 0, }, \
- { { 0, }, }, /* 387 state */ \
- NULL, 0, 0, 0, 0, 0, /* vm86_info */ \
+ __LDT(0),0, /* ldt */ \
+ 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
+ {~0, } /* ioperm */ \
}
#define start_thread(regs, new_eip, new_esp) do { \
@@ -291,10 +357,13 @@ struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
+/*
+ * create a kernel thread without removing it from tasklists
+ */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */
-extern void copy_segments(int nr, struct task_struct *p, struct mm_struct * mm);
+extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
extern void release_segments(struct mm_struct * mm);
extern void forget_segments(void);
@@ -302,7 +371,7 @@ extern void forget_segments(void);
* FPU lazy state save handling..
*/
#define save_fpu(tsk) do { \
- asm volatile("fnsave %0\n\tfwait":"=m" (tsk->tss.i387)); \
+ asm volatile("fnsave %0\n\tfwait":"=m" (tsk->thread.i387)); \
tsk->flags &= ~PF_USEDFPU; \
stts(); \
} while (0)
@@ -327,20 +396,11 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
return ((unsigned long *)t->esp)[3];
}
+#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct * alloc_task_struct(void);
extern void free_task_struct(struct task_struct *);
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
-/*
- * Return_address is a replacement for __builtin_return_address(count)
- * which on certain architectures cannot reasonably be implemented in GCC
- * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386).
- * Note that __builtin_return_address(x>=1) is forbidden because the GCC
- * aborts compilation on some CPUs. It's simply not possible to unwind
- * some CPU's stackframes.
- */
-#define return_address() __builtin_return_address(0)
-
#endif /* __ASM_I386_PROCESSOR_H */
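After the thread_struct/tss_struct split above, the hardware TSS's bitmap field either points at the real I/O bitmap (IO_BITMAP_OFFSET, an offsetof into the TSS) or at INVALID_IO_BITMAP_OFFSET, which lies past the segment limit so any port access faults. A sketch of the idea with a stand-in struct; this is not the full tss_struct, so the printed offset is illustrative.

#include <stdio.h>
#include <stddef.h>

struct tss_like {
        unsigned short back_link, __blh;
        unsigned long  esp0;
        /* ... remaining hardware fields elided ... */
        unsigned short trace, bitmap;
        unsigned long  io_bitmap[33];
};

#define INVALID_IO_BITMAP_OFFSET 0x8000

int main(void)
{
        /* a valid bitmap pointer is just an offset within the TSS */
        printf("io_bitmap at offset %zu in this stand-in\n",
               offsetof(struct tss_like, io_bitmap));
        /* the sentinel points past the TSS limit, so ioperm faults */
        printf("invalid offset sentinel: %#x\n", INVALID_IO_BITMAP_OFFSET);
        return 0;
}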
diff --git a/include/asm-i386/resource.h b/include/asm-i386/resource.h
index e7e2d1159..0f4089694 100644
--- a/include/asm-i386/resource.h
+++ b/include/asm-i386/resource.h
@@ -28,8 +28,8 @@
{ _STK_LIM, LONG_MAX }, \
{ 0, LONG_MAX }, \
{ LONG_MAX, LONG_MAX }, \
- { MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, \
- { NR_OPEN, NR_OPEN }, \
+ { 0, 0 }, \
+ { INR_OPEN, INR_OPEN }, \
{ LONG_MAX, LONG_MAX }, \
{ LONG_MAX, LONG_MAX }, \
}
diff --git a/include/asm-i386/semaphore-helper.h b/include/asm-i386/semaphore-helper.h
deleted file mode 100644
index c8636da0c..000000000
--- a/include/asm-i386/semaphore-helper.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef _I386_SEMAPHORE_HELPER_H
-#define _I386_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * but on the x86 we need an external synchronizer.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->count) <= 0)
- sem->waking++;
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
- atomic_inc(&sem->count);
- else {
- sem->waking--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 3298aeb13..3997b2aae 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -17,6 +17,10 @@
* potential and subtle race discovered by Ulrich Schmid
* in down_interruptible(). Since I started to play here I
* also implemented the `trylock' semaphore operation.
+ * 1999-07-02 Artur Skawina <skawina@geocities.com>
+ * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
+ * do this). Changed calling sequences from push/jmp to
+ * traditional call/ret.
*
* If you would like to see an analysis of this implementation, please
* ftp to gcom.com and download the file
@@ -26,12 +30,12 @@
#include <asm/system.h>
#include <asm/atomic.h>
-#include <asm/spinlock.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
struct semaphore {
atomic_t count;
- int waking;
+ int sleepers;
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
@@ -58,7 +62,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void __sema_init (struct semaphore *sem, int val)
+extern inline void sema_init (struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -67,19 +71,12 @@ extern inline void __sema_init (struct semaphore *sem, int val)
* GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
*/
atomic_set(&sem->count, val);
- sem->waking = 0;
+ sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
#endif
}
-#define sema_init(sem,val) \
- do { \
- struct semaphore *__sem = (sem); \
- printk("sema_init called at %s, %d for semaphore 0x%08lx\n", \
- __FILE__, __LINE__, (unsigned long) __sem); \
- __sema_init(__sem, (val)); \
- while(1);
static inline void init_MUTEX (struct semaphore *sem)
{
@@ -119,12 +116,12 @@ extern inline void down(struct semaphore * sem)
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%0)\n\t"
+ "decl (%0)\n\t" /* --sem->count */
"js 2f\n"
"1:\n"
".section .text.lock,\"ax\"\n"
- "2:\tpushl $1b\n\t"
- "jmp __down_failed\n"
+ "2:\tcall __down_failed\n\t"
+ "jmp 1b\n"
".previous"
:/* no outputs */
:"c" (sem)
@@ -144,13 +141,13 @@ extern inline int down_interruptible(struct semaphore * sem)
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%1)\n\t"
+ "decl (%1)\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
".section .text.lock,\"ax\"\n"
- "2:\tpushl $1b\n\t"
- "jmp __down_failed_interruptible\n"
+ "2:\tcall __down_failed_interruptible\n\t"
+ "jmp 1b\n"
".previous"
:"=a" (result)
:"c" (sem)
@@ -171,13 +168,13 @@ extern inline int down_trylock(struct semaphore * sem)
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%1)\n\t"
+ "decl (%1)\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
".section .text.lock,\"ax\"\n"
- "2:\tpushl $1b\n\t"
- "jmp __down_failed_trylock\n"
+ "2:\tcall __down_failed_trylock\n\t"
+ "jmp 1b\n"
".previous"
:"=a" (result)
:"c" (sem)
@@ -201,12 +198,12 @@ extern inline void up(struct semaphore * sem)
#ifdef __SMP__
"lock ; "
#endif
- "incl 0(%0)\n\t"
+ "incl (%0)\n\t" /* ++sem->count */
"jle 2f\n"
"1:\n"
".section .text.lock,\"ax\"\n"
- "2:\tpushl $1b\n\t"
- "jmp __up_wakeup\n"
+ "2:\tcall __up_wakeup\n\t"
+ "jmp 1b\n"
".previous"
:/* no outputs */
:"c" (sem)
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
index f61901120..33cd30fc9 100644
--- a/include/asm-i386/serial.h
+++ b/include/asm-i386/serial.h
@@ -27,6 +27,9 @@
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0
#define HUB6_FLAGS 0
+#define RS_TABLE_SIZE 64
+#else
+#define RS_TABLE_SIZE
#endif
/*
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 030a19b7b..b6823738b 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -180,12 +180,12 @@ typedef struct sigaltstack {
extern __inline__ void sigaddset(sigset_t *set, int _sig)
{
- __asm__("btsl %1,%0" : "=m"(*set) : "ir"(_sig - 1) : "cc");
+ __asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
}
extern __inline__ void sigdelset(sigset_t *set, int _sig)
{
- __asm__("btrl %1,%0" : "=m"(*set) : "ir"(_sig - 1) : "cc");
+ __asm__("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
}
extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
@@ -198,7 +198,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
{
int ret;
__asm__("btl %2,%1\n\tsbbl %0,%0"
- : "=r"(ret) : "m"(*set), "ir"(_sig-1) : "cc");
+ : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
return ret;
}
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index ec24476ae..91199de7f 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
#ifndef ASSEMBLY
#include <asm/fixmap.h>
-#include <asm/i82489.h>
+#include <asm/apic.h>
#include <asm/bitops.h>
#endif
#endif
@@ -16,7 +16,7 @@
#ifdef __SMP__
#ifndef ASSEMBLY
-#include <linux/tasks.h>
+#include <linux/threads.h>
#include <linux/ptrace.h>
/*
@@ -186,6 +186,16 @@ extern inline int cpu_logical_map(int cpu)
return __cpu_logical_map[cpu];
}
+extern __inline void apic_write(unsigned long reg, unsigned long v)
+{
+ *((volatile unsigned long *)(APIC_BASE+reg))=v;
+}
+
+extern __inline unsigned long apic_read(unsigned long reg)
+{
+ return *((volatile unsigned long *)(APIC_BASE+reg));
+}
+
/*
* General functions that each host system must provide.
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
index 4d44a2919..152c1a9fa 100644
--- a/include/asm-i386/smplock.h
+++ b/include/asm-i386/smplock.h
@@ -4,7 +4,7 @@
* i386 SMP lock implementation
*/
#include <linux/interrupt.h>
-#include <asm/spinlock.h>
+#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index b8f33d7af..6eb68524a 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -18,24 +18,7 @@ extern unsigned int local_bh_count[NR_CPUS];
#define get_active_bhs() (bh_mask & bh_active)
#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
+extern spinlock_t i386_bh_lock;
#ifdef __SMP__
@@ -101,21 +84,58 @@ extern inline void end_bh_atomic(void)
#endif /* SMP */
+extern inline void init_bh(int nr, void (*routine)(void))
+{
+ unsigned long flags;
+
+ bh_base[nr] = routine;
+ atomic_set(&bh_mask_count[nr], 0);
+
+ spin_lock_irqsave(&i386_bh_lock, flags);
+ bh_mask |= 1 << nr;
+ spin_unlock_irqrestore(&i386_bh_lock, flags);
+}
+
+extern inline void remove_bh(int nr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i386_bh_lock, flags);
+ bh_mask &= ~(1 << nr);
+ spin_unlock_irqrestore(&i386_bh_lock, flags);
+
+ synchronize_bh();
+ bh_base[nr] = NULL;
+}
+
+extern inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
/*
* These use a mask count to correctly handle
* nested disable/enable calls
*/
extern inline void disable_bh(int nr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i386_bh_lock, flags);
bh_mask &= ~(1 << nr);
atomic_inc(&bh_mask_count[nr]);
+ spin_unlock_irqrestore(&i386_bh_lock, flags);
synchronize_bh();
}
extern inline void enable_bh(int nr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i386_bh_lock, flags);
if (atomic_dec_and_test(&bh_mask_count[nr]))
bh_mask |= 1 << nr;
+ spin_unlock_irqrestore(&i386_bh_lock, flags);
}
#endif /* __ASM_SOFTIRQ_H */
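disable_bh()/enable_bh() above nest through a per-bh mask count, and the bh only re-enters bh_mask once the count drops back to zero; the new i386_bh_lock makes the mask update atomic. A lock-free userspace model of just the nesting logic (the kernel uses atomic_inc/atomic_dec_and_test under the spinlock):

#include <stdio.h>

#define NR_BHS 32

static unsigned long bh_mask = ~0UL;
static int bh_mask_count[NR_BHS];

static void disable_bh(int nr)
{
        bh_mask &= ~(1UL << nr);
        bh_mask_count[nr]++;
}

static void enable_bh(int nr)
{
        if (--bh_mask_count[nr] == 0)
                bh_mask |= 1UL << nr;
}

int main(void)
{
        disable_bh(3);
        disable_bh(3);          /* nested disable */
        enable_bh(3);           /* still disabled */
        printf("bit 3 after one enable: %lu\n", (bh_mask >> 3) & 1);
        enable_bh(3);           /* count hits zero: re-enabled */
        printf("bit 3 after two:        %lu\n", (bh_mask >> 3) & 1);
        return 0;
}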
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d3e9fc744..a10ed9c5c 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -2,122 +2,7 @@
#define __ASM_SPINLOCK_H
/*
- * These are the generic versions of the spinlocks
- * and read-write locks.. We should actually do a
- * <linux/spinlock.h> with all of this. Oh, well.
- */
-#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
-#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
-
-#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
-#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
-
-#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
-#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
-
-#define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
-#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
-
-#define read_unlock_irqrestore(lock, flags) do { read_unlock(lock); local_irq_restore(flags); } while (0)
-#define read_unlock_irq(lock) do { read_unlock(lock); local_irq_enable(); } while (0)
-#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
-
-#define write_unlock_irqrestore(lock, flags) do { write_unlock(lock); local_irq_restore(flags); } while (0)
-#define write_unlock_irq(lock) do { write_unlock(lock); local_irq_enable(); } while (0)
-#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
-
-#ifndef __SMP__
-
-#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
-
-#if (DEBUG_SPINLOCKS < 1)
-
-/*
- * Your basic spinlocks, allowing only a single CPU anywhere
- *
- * Gcc-2.7.x has a nasty bug with empty initializers.
- */
-#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
- typedef struct { } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
-#else
- typedef struct { int gcc_is_buggy; } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-#endif
-
-#define spin_lock_init(lock) do { } while(0)
-#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
-#define spin_trylock(lock) (1)
-#define spin_unlock_wait(lock) do { } while(0)
-#define spin_unlock(lock) do { } while(0)
-
-#elif (DEBUG_SPINLOCKS < 2)
-
-typedef struct {
- volatile unsigned int lock;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x) do { (x)->lock = 1; } while (0)
-#define spin_unlock_wait(x) do { } while (0)
-#define spin_unlock(x) do { (x)->lock = 0; } while (0)
-
-#else /* (DEBUG_SPINLOCKS >= 2) */
-
-typedef struct {
- volatile unsigned int lock;
- volatile unsigned int babble;
- const char *module;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
-
-#include <linux/kernel.h>
-
-#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
-#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
-#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
-
-#endif /* DEBUG_SPINLOCKS */
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * Gcc-2.7.x has a nasty bug with empty initializers.
- */
-#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
- typedef struct { } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { }
-#else
- typedef struct { int gcc_is_buggy; } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#endif
-
-#define read_lock(lock) (void)(lock) /* Not "unused variable". */
-#define read_unlock(lock) do { } while(0)
-#define write_lock(lock) (void)(lock) /* Not "unused variable". */
-#define write_unlock(lock) do { } while(0)
-
-#else /* __SMP__ */
-
-/*
- * Your basic spinlocks, allowing only a single CPU anywhere
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
@@ -177,10 +62,9 @@ __asm__ __volatile__( \
*/
typedef struct {
volatile unsigned int lock;
- unsigned long previous;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
/*
* On x86, we implement read-write locks as a 32-bit counter
@@ -221,5 +105,4 @@ typedef struct {
#define write_unlock(rw) \
asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))
-#endif /* __SMP__ */
#endif /* __ASM_SPINLOCK_H */
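The slimmed-down rwlock above is a 32-bit counter: readers account in the low bits while a writer owns bit 31, which write_unlock clears with btrl $31. A toy model of the encoding only, with no atomicity or spinning:

#include <stdio.h>

#define WRITER_BIT (1UL << 31)

int main(void)
{
        unsigned long lock = 0;

        lock += 1;                      /* read_lock: first reader  */
        lock += 1;                      /* read_lock: second reader */
        printf("two readers: %#lx\n", lock);
        lock -= 2;                      /* both read_unlock */

        lock |= WRITER_BIT;             /* write_lock claims bit 31 */
        printf("writer held: %#lx\n", lock);
        lock &= ~WRITER_BIT;            /* write_unlock: btrl $31   */
        printf("released:    %#lx\n", lock);
        return 0;
}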
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 147ad6da4..f2ccae723 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -9,6 +9,7 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
@@ -22,9 +23,9 @@ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *n
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
- :"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
"=b" (last) \
- :"m" (next->tss.esp),"m" (next->tss.eip), \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
"a" (prev), "d" (next), \
"b" (prev)); \
} while (0)
@@ -129,24 +130,26 @@ struct __xchg_dummy { unsigned long a[100]; };
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
*/
-static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
- __asm__("xchgb %b0,%1"
+ __asm__ __volatile__("xchgb %b0,%1"
:"=q" (x)
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
case 2:
- __asm__("xchgw %w0,%1"
+ __asm__ __volatile__("xchgw %w0,%1"
:"=r" (x)
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
case 4:
- __asm__("xchgl %0,%1"
+ __asm__ __volatile__("xchgl %0,%1"
:"=r" (x)
:"m" (*__xg(ptr)), "0" (x)
:"memory");
@@ -172,14 +175,15 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
+#define set_rmb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) set_rmb(var, value)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
-#define __sti() __asm__ __volatile__ ("sti": : :"memory")
-#define __cli() __asm__ __volatile__ ("cli": : :"memory")
-#define __save_flags(x) \
-__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
-#define __restore_flags(x) \
-__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
+#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+#define __cli() __asm__ __volatile__("cli": : :"memory")
+#define __sti() __asm__ __volatile__("sti": : :"memory")
/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
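xchg with a memory operand is implicitly locked on x86, which is why the new set_mb() can be built on it as a store plus full barrier. A userspace sketch of the 4-byte __xchg() case (x86-only inline asm, not part of the patch):

#include <stdio.h>

static inline unsigned int xchg32(volatile unsigned int *ptr, unsigned int x)
{
        /* no "lock" prefix needed: xchg always implies lock */
        __asm__ __volatile__("xchgl %0,%1"
                             : "=r" (x), "+m" (*ptr)
                             : "0" (x)
                             : "memory");
        return x;
}

int main(void)
{
        unsigned int var = 1;
        unsigned int old = xchg32(&var, 2);  /* atomically: old=1, var=2 */

        printf("old=%u var=%u\n", old, var);
        return 0;
}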
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index a44ca6c78..2b1b3d7f4 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -42,7 +42,7 @@ extern int __verify_write(const void *, unsigned long);
unsigned long flag,sum; \
asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
:"=&r" (flag), "=r" (sum) \
- :"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
+ :"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
flag; })
#ifdef CONFIG_X86_WP_WORKS_OK
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index c961fdd2e..9cfc401fa 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -196,7 +196,7 @@
#define __NR_putpmsg 189 /* some people actually want streams */
#define __NR_vfork 190
-/* user-visible error numbers are in the range -1 - -122: see <asm-i386/errno.h> */
+/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
#define __syscall_return(type, res) \
do { \
@@ -287,7 +287,6 @@ __syscall_return(type,__res); \
* some others too.
*/
#define __NR__exit __NR_exit
-static inline _syscall0(int,idle)
static inline _syscall0(int,pause)
static inline _syscall0(int,sync)
static inline _syscall0(pid_t,setsid)