author     Ralf Baechle <ralf@linux-mips.org>   1998-08-25 09:12:35 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1998-08-25 09:12:35 +0000
commit     c7fc24dc4420057f103afe8fc64524ebc25c5d37
tree       3682407a599b8f9f03fc096298134cafba1c9b2f /include/asm-i386
parent     1d793fade8b063fde3cf275bf1a5c2d381292cd9
o Merge with Linux 2.1.116.
o New Newport console code.
o New G364 console code.
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/bitops.h       |  30
-rw-r--r--  include/asm-i386/bugs.h         | 122
-rw-r--r--  include/asm-i386/current.h      |  17
-rw-r--r--  include/asm-i386/debugreg.h     |  64
-rw-r--r--  include/asm-i386/elf.h          |   2
-rw-r--r--  include/asm-i386/fixmap.h       |  82
-rw-r--r--  include/asm-i386/hardirq.h      |   7
-rw-r--r--  include/asm-i386/ioctls.h       |   2
-rw-r--r--  include/asm-i386/irq.h          |  13
-rw-r--r--  include/asm-i386/linux_logo.h   |  47
-rw-r--r--  include/asm-i386/math_emu.h     |   4
-rw-r--r--  include/asm-i386/md.h           |  13
-rw-r--r--  include/asm-i386/pgtable.h      | 213
-rw-r--r--  include/asm-i386/posix_types.h  |   4
-rw-r--r--  include/asm-i386/processor.h    | 138
-rw-r--r--  include/asm-i386/siginfo.h      |   1
-rw-r--r--  include/asm-i386/signal.h       |  11
-rw-r--r--  include/asm-i386/smp.h          |  20
-rw-r--r--  include/asm-i386/smp_lock.h     |  91
-rw-r--r--  include/asm-i386/smplock.h      |  59
-rw-r--r--  include/asm-i386/softirq.h      |   2
-rw-r--r--  include/asm-i386/spinlock.h     |  39
-rw-r--r--  include/asm-i386/string.h       |   2
-rw-r--r--  include/asm-i386/system.h       | 138
-rw-r--r--  include/asm-i386/termios.h      |   1
-rw-r--r--  include/asm-i386/timex.h        |  15
-rw-r--r--  include/asm-i386/unistd.h       |   8
-rw-r--r--  include/asm-i386/user.h         |   2
-rw-r--r--  include/asm-i386/vga.h          |  20
-rw-r--r--  include/asm-i386/vm86.h         |   4
30 files changed, 835 insertions(+), 336 deletions(-)
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 3b31d4f57..00dd9dcc8 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -189,6 +189,36 @@ extern __inline__ unsigned long ffz(unsigned long word)
#ifdef __KERNEL__
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+extern __inline__ int ffs(int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r+1;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+
#define ext2_set_bit test_and_set_bit
#define ext2_clear_bit test_and_clear_bit
#define ext2_test_bit test_bit
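A note on the new ffs(): it follows the libc convention, so bit numbers are 1-based and ffs(0) returns 0 (bsfl sets ZF for a zero input, so the movl $-1 path makes r+1 come out as 0). For comparison with the bsfl version, a minimal generic-C sketch of the same contract, illustrative only and not part of the patch:

    int generic_ffs(int x)
    {
            int r = 1;

            if (!x)
                    return 0;       /* no bits set */
            while (!(x & 1)) {
                    x >>= 1;
                    r++;
            }
            return r;               /* 1-based index of lowest set bit */
    }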
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index af144354f..ba3a969ae 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -2,6 +2,11 @@
* include/asm-i386/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
+ *
+ * Cyrix stuff, June 1998 by:
+ * - Rafael R. Reilova (moved everything from head.S),
+ * - Channing Corn (tests & fixes),
+ * - Andrew D. Balsa (code cleanup).
*/
/*
@@ -14,10 +19,6 @@
#include <linux/config.h>
#include <asm/processor.h>
-#ifdef CONFIG_MTRR
-# include <asm/mtrr.h>
-#endif
-
#define CONFIG_BUGi386
__initfunc(static void no_halt(char *s, int *ints))
@@ -41,7 +42,7 @@ __initfunc(static void copro_timeout(void))
timer_table[COPRO_TIMER].expires = jiffies+100;
timer_active |= 1<<COPRO_TIMER;
printk(KERN_ERR "387 failed: trying to reset\n");
- send_sig(SIGFPE, last_task_used_math, 1);
+ send_sig(SIGFPE, current, 1);
outb_p(0,0xf1);
outb_p(0,0xf0);
}
@@ -83,7 +84,7 @@ __initfunc(static void check_fpu(void))
if (fpu_error)
return;
if (!ignore_irq13) {
- printk("Ok, fpu using old IRQ13 error reporting\n");
+ printk("OK, FPU using old IRQ 13 error reporting\n");
return;
}
__asm__("fninit\n\t"
@@ -98,9 +99,9 @@ __initfunc(static void check_fpu(void))
: "=m" (*&boot_cpu_data.fdiv_bug)
: "m" (*&x), "m" (*&y));
if (!boot_cpu_data.fdiv_bug)
- printk("Ok, fpu using exception 16 error reporting.\n");
+ printk("OK, FPU using exception 16 error reporting.\n");
else
- printk("Hmm, fpu using exception 16 error reporting with FDIV bug.\n");
+ printk("Hmm, FPU using exception 16 error reporting with FDIV bug.\n");
}
__initfunc(static void check_hlt(void))
@@ -111,7 +112,7 @@ __initfunc(static void check_hlt(void))
return;
}
__asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
- printk("Ok.\n");
+ printk("OK.\n");
}
__initfunc(static void check_tlb(void))
@@ -145,9 +146,9 @@ __initfunc(static void check_popad(void))
: "=eax" (res)
: "edx" (inp)
: "eax", "ecx", "edx", "edi" );
- /* If this fails, it means that any user program may lock CPU hard. Too bad. */
+ /* If this fails, it means that any user program may lock the CPU hard. Too bad. */
if (res != 12345678) printk( "Buggy.\n" );
- else printk( "Ok.\n" );
+ else printk( "OK.\n" );
#endif
}
@@ -156,7 +157,7 @@ __initfunc(static void check_popad(void))
* misexecution of code under Linux. Owners of such processors should
* contact AMD for precise details and a CPU swap.
*
- * See http://www.chorus.com/~poulot/k6bug.html
+ * See http://www.mygale.com/~poulot/k6bug.html
* http://www.amd.com/K6/k6docs/revgd.html
*
* The following test is erm.. interesting. AMD neglected to up
@@ -202,7 +203,7 @@ __initfunc(static void check_amd_k6(void))
printk("system stability may be impaired when more than 32 MB are used.\n");
else
printk("probably OK (after B9730xxxx).\n");
- printk(KERN_INFO "Please see http://www.chorus.com/poulot/k6bug.html\n");
+ printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
}
}
@@ -226,13 +227,102 @@ __initfunc(static void check_pentium_f00f(void))
}
}
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ */
+
+static inline int test_cyrix_52div(void)
+{
+ unsigned int test;
+
+ __asm__ __volatile__(
+ "sahf\n\t" /* clear flags (%eax = 0x0005) */
+ "div %b2\n\t" /* divide 5 by 2 */
+ "lahf" /* store flags into %ah */
+ : "=a" (test)
+ : "0" (5), "q" (2)
+ : "cc");
+
+ /* AH is 0x02 on Cyrix after the divide.. */
+ return (unsigned char) (test >> 8) == 0x02;
+}
+
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+
+__initfunc(static void check_cyrix_cpu(void))
+{
+ if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
+ && test_cyrix_52div()) {
+
+ /* default to an unknown Cx486 (we will differentiate later) */
+ /* NOTE: using 0xff since 0x00 is a valid DIR0 value */
+ strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
+ boot_cpu_data.x86_model = 0xff;
+ boot_cpu_data.x86_mask = 0;
+ }
+}
+
+/*
+ * Fix two problems with the Cyrix 6x86 and 6x86L:
+ * -- the cpuid is disabled on power up, enable it, use it.
+ * -- the SLOP bit needs resetting on some motherboards due to old BIOS,
+ * so that the udelay loop calibration works well. Recalibrate.
+ */
+
+extern void calibrate_delay(void) __init;
+
+__initfunc(static void check_cx686_cpuid_slop(void))
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX &&
+ (boot_cpu_data.x86_model & 0xf0) == 0x30) { /* 6x86(L) */
+ int dummy;
+ unsigned char ccr3, ccr4, ccr5;
+
+ cli();
+ ccr3 = getCx86(CX86_CCR3);
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ ccr4 = getCx86(CX86_CCR4);
+ setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
+ ccr5 = getCx86(CX86_CCR5);
+ if (ccr5 & 2) /* reset SLOP if needed, old BIOS do this wrong */
+ setCx86(CX86_CCR5, ccr5 & 0xfd);
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ sti();
+
+ boot_cpu_data.cpuid_level = 1; /* should cover all 6x86(L) */
+ boot_cpu_data.x86 = 5;
+
+ /* we know we have level 1 available on the 6x86(L) */
+ cpuid(1, &dummy, &dummy, &dummy,
+ &boot_cpu_data.x86_capability);
+ /*
+ * DON'T use the x86_mask and x86_model from cpuid, these are
+ * not as accurate (or the same) as those from the DIR regs,
+ * which are already in place after cyrix_model() in setup.c.
+ */
+
+ if (ccr5 & 2) { /* possible wrong calibration done */
+ printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+ calibrate_delay();
+ boot_cpu_data.loops_per_sec = loops_per_sec;
+ }
+ }
+}
+
__initfunc(static void check_bugs(void))
{
+ check_cyrix_cpu();
identify_cpu(&boot_cpu_data);
#ifndef __SMP__
printk("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
+ check_cx686_cpuid_slop();
check_tlb();
check_fpu();
check_hlt();
@@ -240,10 +330,4 @@ __initfunc(static void check_bugs(void))
check_amd_k6();
check_pentium_f00f();
system_utsname.machine[1] = '0' + boot_cpu_data.x86;
-#if !defined(__SMP__) && defined(CONFIG_MTRR)
- /* Must be done after other processors booted: at this point we are
- called before SMP initialisation, so this is for the non-SMP case
- only. The SMP case is handled in arch/i386/kernel/smp.c */
- mtrr_init ();
-#endif
}
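Why AH == 0x02 identifies a Cyrix in test_cyrix_52div(): with %eax = 5 the AH byte is zero, so sahf clears SF, ZF, AF, PF and CF, leaving the low flag byte at 0x02 (bit 1 of EFLAGS always reads as 1). A genuine Intel or AMD 486 clobbers those flags during div; a Cyrix preserves them, so lahf reads back exactly 0x02. A hedged usage sketch (not part of the patch; the printk text is invented for illustration):

    if (boot_cpu_data.cpuid_level == -1 && test_cyrix_52div())
            printk("Cyrix: div preserved the flags (AH == 0x02)\n");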
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 976320d75..bc1496a2c 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,14 +1,15 @@
#ifndef _I386_CURRENT_H
#define _I386_CURRENT_H
-static inline unsigned long get_esp(void)
-{
- unsigned long esp;
- __asm__("movl %%esp,%0":"=r" (esp));
- return esp;
-}
-
-#define current ((struct task_struct *)(get_esp() & ~8191UL))
+struct task_struct;
+static inline struct task_struct * get_current(void)
+{
+ struct task_struct *current;
+ __asm__("andl %%esp,%0; ":"=r" (current) : "0" (~8191UL));
+ return current;
+ }
+
+#define current get_current()
#endif /* !(_I386_CURRENT_H) */
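The andl trick works because each task's task_struct and its 8 KB kernel stack share one 8 KB-aligned allocation, with the struct at the bottom, so masking any in-stack %esp with ~8191 lands on the struct. A sketch of the layout this depends on (the task_union from <linux/sched.h>, reproduced here only for illustration):

    union task_union {
            struct task_struct task;        /* at the base of the 8 KB block */
            unsigned long stack[2048];      /* 2048 * 4 = 8192 bytes */
    };

    /* invariant: (esp & ~8191UL) == (unsigned long) &u.task */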
diff --git a/include/asm-i386/debugreg.h b/include/asm-i386/debugreg.h
new file mode 100644
index 000000000..f0b2b06ae
--- /dev/null
+++ b/include/asm-i386/debugreg.h
@@ -0,0 +1,64 @@
+#ifndef _I386_DEBUGREG_H
+#define _I386_DEBUGREG_H
+
+
+/* Indicate the register numbers for a number of the specific
+ debug registers. Registers 0-3 contain the addresses we wish to trap on */
+#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
+#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
+
+#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
+#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
+
+/* Define a few things for the status register. We can use this to determine
+ which debugging register was responsible for the trap. The other bits
+ are either reserved or not of interest to us. */
+
+#define DR_TRAP0 (0x1) /* db0 */
+#define DR_TRAP1 (0x2) /* db1 */
+#define DR_TRAP2 (0x4) /* db2 */
+#define DR_TRAP3 (0x8) /* db3 */
+
+#define DR_STEP (0x4000) /* single-step */
+#define DR_SWITCH (0x8000) /* task switch */
+
+/* Now define a bunch of things for manipulating the control register.
+ The top two bytes of the control register consist of 4 fields of 4
+ bits - each field corresponds to one of the four debug registers,
+ and indicates what types of access we trap on, and how large the data
+ field is that we are looking at */
+
+#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
+#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
+
+#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
+#define DR_RW_WRITE (0x1)
+#define DR_RW_READ (0x3)
+
+#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
+#define DR_LEN_2 (0x4)
+#define DR_LEN_4 (0xC)
+
+/* The low byte to the control register determine which registers are
+ enabled. There are 4 fields of two bits. One bit is "local", meaning
+ that the processor will reset the bit after a task switch and the other
+ is global meaning that we have to explicitly reset the bit. With linux,
+ you can use either one, since we explicitly zero the register when we enter
+ kernel mode. */
+
+#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
+#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
+#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
+
+#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
+#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
+
+/* The second byte to the control register has a few special things.
+ We can slow the instruction pipeline for instructions coming via the
+ gdt or the ldt if we want to. I am not sure why this is an advantage */
+
+#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
+#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
+#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
+
+#endif
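A hedged example of composing a DR7 value from these masks, say to arm debug register 0 as a locally-enabled 4-byte write watchpoint (illustrative sketch, not code from the patch):

    unsigned long dr7 = 0;

    /* local enable bit for register 0 */
    dr7 |= 1 << (DR_LOCAL_ENABLE_SHIFT + 0 * DR_ENABLE_SIZE);
    /* trap on 4-byte writes */
    dr7 |= (DR_RW_WRITE | DR_LEN_4) << (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);
    /* the watched address itself goes in u_debugreg[DR_FIRSTADDR] */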
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 700bb8233..65a74c5cb 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -71,7 +71,7 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[16] = regs->xss;
/* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. This could be done in userspace,
+ instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.x86_capability)
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
new file mode 100644
index 000000000..c56966f64
--- /dev/null
+++ b/include/asm-i386/fixmap.h
@@ -0,0 +1,82 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/page.h>
+#include <linux/kernel.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * bigger than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#if __SMP__
+ FIX_APIC_BASE,
+ FIX_IO_APIC_BASE,
+#endif
+ __end_of_fixed_addresses
+};
+
+extern void set_fixmap (enum fixed_addresses idx, unsigned long phys);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+#define FIXADDR_TOP (0xffffe000UL)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+extern inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way. (such as mixing up address types or using
+ * out-of-range indices)
+ */
+ if (idx >= __end_of_fixed_addresses)
+ panic("illegal fixaddr index!");
+
+ return FIXADDR_TOP - (idx << PAGE_SHIFT);
+}
+
+#endif
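Usage sketch, assuming the local APIC's conventional physical base (this mirrors what the smp.h hunk below does with FIX_APIC_BASE; illustrative only):

    set_fixmap(FIX_APIC_BASE, 0xFEE00000);  /* boot time: wire up the mapping */

    /* everywhere else: a compile-time constant after inlining */
    unsigned long apic = fix_to_virt(FIX_APIC_BASE);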
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index f679516bb..bfc535137 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -5,6 +5,13 @@
extern unsigned int local_irq_count[NR_CPUS];
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
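A sketch of the intended caller of in_interrupt() (illustrative; BUF_SIZE is a made-up name): code that may run in either process or interrupt context can pick a non-sleeping allocation mode when the predicate is true:

    int pri = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
    char *buf = kmalloc(BUF_SIZE, pri);     /* never sleeps in irq context */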
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
index 1b8af73a4..9fc340a8a 100644
--- a/include/asm-i386/ioctls.h
+++ b/include/asm-i386/ioctls.h
@@ -65,6 +65,8 @@
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
/* Used for packet mode */
#define TIOCPKT_DATA 0
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 67b1a59e5..3d9a7c46c 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -10,14 +10,15 @@
* <tomsoft@informatik.tu-chemnitz.de>
*/
-#ifndef __SMP__
-#define NR_IRQS 16
-#else
-#define NR_IRQS 24
-#endif
-
#define TIMER_IRQ 0
+/*
+ * 16 XT IRQs, 8 potential APIC interrupt sources.
+ * Right now the APIC is only used for SMP, but this
+ * may change.
+ */
+#define NR_IRQS 64
+
static __inline__ int irq_cannonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
diff --git a/include/asm-i386/linux_logo.h b/include/asm-i386/linux_logo.h
new file mode 100644
index 000000000..6773bef77
--- /dev/null
+++ b/include/asm-i386/linux_logo.h
@@ -0,0 +1,47 @@
+/* $Id: linux_logo.h,v 1.6 1998/07/07 13:34:56 jj Exp $
+ * include/asm-i386/linux_logo.h: This is a linux logo
+ * to be displayed on boot.
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * You can put anything here, but:
+ * LINUX_LOGO_COLORS has to be less than 224
+ * image size has to be 80x80
+ * values have to start from 0x20
+ * (i.e. RGB(linux_logo_red[0],
+ * linux_logo_green[0],
+ * linux_logo_blue[0]) is color 0x20)
+ * BW image has to be 80x80 as well, with MS bit
+ * on the left
+ * Serial_console ascii image can be any size,
+ * but should contain %s to display the version
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+
+#define linux_logo_banner "Linux/ia32 version " UTS_RELEASE
+
+#define LINUX_LOGO_COLORS 221
+
+#ifdef INCLUDE_LINUX_LOGO_DATA
+
+#define INCLUDE_LINUX_LOGO16
+
+#include <linux/linux_logo.h>
+
+#else
+
+/* prototypes only */
+extern unsigned char linux_logo_red[];
+extern unsigned char linux_logo_green[];
+extern unsigned char linux_logo_blue[];
+extern unsigned char linux_logo[];
+extern unsigned char linux_logo_bw[];
+extern unsigned char linux_logo16_red[];
+extern unsigned char linux_logo16_green[];
+extern unsigned char linux_logo16_blue[];
+extern unsigned char linux_logo16[];
+
+#endif
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 7284939fe..a5e7361e4 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -3,8 +3,8 @@
#include <asm/sigcontext.h>
-void restore_i387_soft(void *s387, struct _fpstate *buf);
-struct _fpstate * save_i387_soft(void *s387, struct _fpstate * buf);
+int restore_i387_soft(void *s387, struct _fpstate *buf);
+int save_i387_soft(void *s387, struct _fpstate * buf);
/* This structure matches the layout of the data saved to the stack
following a device-not-present interrupt, part of it saved
diff --git a/include/asm-i386/md.h b/include/asm-i386/md.h
new file mode 100644
index 000000000..0a2c5dd01
--- /dev/null
+++ b/include/asm-i386/md.h
@@ -0,0 +1,13 @@
+/* $Id: md.h,v 1.1 1997/12/15 15:11:57 jj Exp $
+ * md.h: High speed xor_block operation for RAID4/5
+ *
+ */
+
+#ifndef __ASM_MD_H
+#define __ASM_MD_H
+
+/* #define HAVE_ARCH_XORBLOCK */
+
+#define MD_XORBLOCK_ALIGNMENT sizeof(long)
+
+#endif /* __ASM_MD_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 47e1d2cfc..4a8e92c54 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -12,8 +12,11 @@
* This file contains the functions and defines necessary to modify and use
* the i386 page table tree.
*/
-
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/tasks.h>
+
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
@@ -38,7 +41,7 @@
#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
-#if defined(CONFIG_M386) || defined(CONFIG_AMD_K5_INVBUG)
+#ifdef CONFIG_M386
#define __flush_tlb_one(addr) flush_tlb()
#else
#define __flush_tlb_one(addr) \
@@ -97,7 +100,8 @@ static inline void flush_tlb_range(struct mm_struct *mm,
static inline void flush_tlb_current_task(void)
{
- if (current->mm->count == 1) /* just one copy of this mm */
+ /* just one copy of this mm? */
+ if (atomic_read(&current->mm->count) == 1)
local_flush_tlb(); /* and that's us, so.. */
else
smp_flush_tlb();
@@ -109,7 +113,7 @@ static inline void flush_tlb_current_task(void)
static inline void flush_tlb_mm(struct mm_struct * mm)
{
- if (mm == current->mm && mm->count == 1)
+ if (mm == current->mm && atomic_read(&mm->count) == 1)
local_flush_tlb();
else
smp_flush_tlb();
@@ -118,7 +122,7 @@ static inline void flush_tlb_mm(struct mm_struct * mm)
static inline void flush_tlb_page(struct vm_area_struct * vma,
unsigned long va)
{
- if (vma->vm_mm == current->mm && current->mm->count == 1)
+ if (vma->vm_mm == current->mm && atomic_read(&current->mm->count) == 1)
__flush_tlb_one(va);
else
smp_flush_tlb();
@@ -159,7 +163,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#endif /* !__ASSEMBLY__ */
-/* Certain architectures need to do special things when pte's
+/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
@@ -182,6 +186,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
/*
* pgd entries used up by user/kernel:
@@ -203,6 +208,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END (FIXADDR_START)
/*
* The 4MB page is guessing.. Detailed in the infamous "Chapter H"
@@ -222,6 +228,9 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define _PAGE_4M 0x080 /* 4 MB page, Pentium+.. */
#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+#define _PAGE_READABLE (_PAGE_PRESENT)
+#define _PAGE_WRITABLE (_PAGE_PRESENT | _PAGE_RW)
+
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -327,23 +336,27 @@ extern inline void pgd_clear(pgd_t * pgdp) { }
* Undefined behaviour if not..
*/
extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/*
+ * These are harder, as writability is two bits, not one..
+ */
+extern inline int pte_write(pte_t pte) { return (pte_val(pte) & _PAGE_WRITABLE) == _PAGE_WRITABLE; }
+extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~((pte_val(pte) & _PAGE_PRESENT) << 1); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+
+/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
@@ -385,85 +398,137 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-extern inline void pte_free_kernel(pte_t * pte)
+
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+extern __inline__ pgd_t *get_pgd_slow(void)
{
- free_page((unsigned long) pte);
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+
+ if (ret) {
+ init = pgd_offset(&init_mm, 0);
+ memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
}
-extern const char bad_pmd_string[];
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
- return page + address;
- }
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
- }
- if (pmd_bad(*pmd)) {
- printk(bad_pmd_string, pmd_val(*pmd));
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
- return NULL;
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- return (pte_t *) pmd_page(*pmd) + address;
+ return (pte_t *)ret;
}
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free_kernel(pmd_t * pmd)
+extern __inline__ void free_pte_fast(pte_t *pte)
{
- pmd_val(*pmd) = 0;
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
}
-extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+/* We don't use pmd cache, so these are dummy routines */
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
- return (pmd_t *) pgd;
}
-extern inline void pte_free(pte_t * pte)
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
- free_page((unsigned long) pte);
+}
+
+extern void __bad_pte(pmd_t *pmd);
+extern void __bad_pte_kernel(pmd_t *pmd);
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t * page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __bad_pte_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
}
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);
-repeat:
if (pmd_none(*pmd))
goto getnew;
if (pmd_bad(*pmd))
goto fix;
return (pte_t *) (pmd_page(*pmd) + address);
-
getnew:
{
- unsigned long page = __get_free_page(GFP_KERNEL);
- if (!pmd_none(*pmd))
- goto freenew;
+ unsigned long page = (unsigned long) get_pte_fast();
+
if (!page)
- goto oom;
- memset((void *) page, 0, PAGE_SIZE);
+ return get_pte_slow(pmd, address);
pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
return (pte_t *) (page + address);
-freenew:
- free_page(page);
- goto repeat;
}
-
fix:
- printk(bad_pmd_string, pmd_val(*pmd));
-oom:
- pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+ __bad_pte(pmd);
return NULL;
}
@@ -473,7 +538,6 @@ oom:
*/
extern inline void pmd_free(pmd_t * pmd)
{
- pmd_val(*pmd) = 0;
}
extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
@@ -481,14 +545,36 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
return (pmd_t *) pgd;
}
-extern inline void pgd_free(pgd_t * pgd)
-{
- free_page((unsigned long) pgd);
-}
+#define pmd_free_kernel pmd_free
+#define pmd_alloc_kernel pmd_alloc
-extern inline pgd_t * pgd_alloc(void)
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- return (pgd_t *) get_free_page(GFP_KERNEL);
+ struct task_struct * p;
+ pgd_t *pgd;
+#ifdef __SMP__
+ int i;
+#endif
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+#ifndef __SMP__
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+#else
+ /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
+ modify pgd caches of other CPUs as well. -jj */
+ for (i = 0; i < NR_CPUS; i++)
+ for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+#endif
}
extern pgd_t swapper_pg_dir[1024];
@@ -511,4 +597,7 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
#endif /* !__ASSEMBLY__ */
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+
#endif /* _I386_PAGE_H */
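One detail worth spelling out in the "writability is two bits" hunk: _PAGE_RW (0x002) is exactly _PAGE_PRESENT (0x001) shifted left by one, so pte_wrprotect clears RW only when PRESENT is set and leaves a not-present entry (such as a swap entry) bit-for-bit intact. Worked out:

    /* present pte:  (val & 0x001) << 1 == 0x002  ->  _PAGE_RW cleared  */
    /* swapped pte:  (val & 0x001) << 1 == 0x000  ->  mask ~0, no change */
    pte_val(pte) &= ~((pte_val(pte) & _PAGE_PRESENT) << 1);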
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
index aed0fd6fd..6413683c2 100644
--- a/include/asm-i386/posix_types.h
+++ b/include/asm-i386/posix_types.h
@@ -37,6 +37,8 @@ typedef struct {
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
@@ -62,4 +64,6 @@ typedef struct {
:"a" (0), "c" (__FDSET_LONGS), \
"D" ((__kernel_fd_set *) (fdsetp)) :"cx","di")
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
#endif
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 14706b18a..7caccbdc0 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -34,6 +34,9 @@ struct cpuinfo_x86 {
int fdiv_bug;
int f00f_bug;
unsigned long loops_per_sec;
+ unsigned long *pgd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
};
#define X86_VENDOR_INTEL 0
@@ -44,6 +47,43 @@ struct cpuinfo_x86 {
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_UNKNOWN 0xff
+/*
+ * capabilities of CPUs
+ */
+
+#define X86_FEATURE_FPU 0x00000001 /* onboard FPU */
+#define X86_FEATURE_VME 0x00000002 /* Virtual Mode Extensions */
+#define X86_FEATURE_DE 0x00000004 /* Debugging Extensions */
+#define X86_FEATURE_PSE 0x00000008 /* Page Size Extensions */
+#define X86_FEATURE_TSC 0x00000010 /* Time Stamp Counter */
+#define X86_FEATURE_MSR 0x00000020 /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE 0x00000040 /* Physical Address Extensions */
+#define X86_FEATURE_MCE 0x00000080 /* Machine Check Exceptions */
+#define X86_FEATURE_CX8 0x00000100 /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC 0x00000200 /* onboard APIC */
+#define X86_FEATURE_10 0x00000400
+#define X86_FEATURE_SEP 0x00000800 /* Fast System Call */
+#define X86_FEATURE_MTRR 0x00001000 /* Memory Type Range Registers */
+#define X86_FEATURE_PGE 0x00002000 /* Page Global Enable */
+#define X86_FEATURE_MCA 0x00004000 /* Machine Check Architecture */
+#define X86_FEATURE_CMOV 0x00008000 /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_PAT 0x00010000 /* Page Attribute Table */
+#define X86_FEATURE_PSE36 0x00020000 /* 36-bit PSEs */
+#define X86_FEATURE_18 0x00040000
+#define X86_FEATURE_19 0x00080000
+#define X86_FEATURE_20 0x00100000
+#define X86_FEATURE_21 0x00200000
+#define X86_FEATURE_22 0x00400000
+#define X86_FEATURE_MMX 0x00800000 /* multimedia extensions */
+#define X86_FEATURE_FXSR 0x01000000 /* FXSAVE and FXRSTOR instructions (fast save and restore of FPU context), and CR4.OSFXSR (OS uses these instructions) available */
+#define X86_FEATURE_25 0x02000000
+#define X86_FEATURE_26 0x04000000
+#define X86_FEATURE_27 0x08000000
+#define X86_FEATURE_28 0x10000000
+#define X86_FEATURE_29 0x20000000
+#define X86_FEATURE_30 0x40000000
+#define X86_FEATURE_AMD3D 0x80000000
+
extern struct cpuinfo_x86 boot_cpu_data;
#ifdef __SMP__
@@ -60,6 +100,41 @@ extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
/*
+ * Generic CPUID function
+ */
+extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "a" (op)
+ : "cc");
+}
+
+/*
+ * Cyrix CPU configuration register indexes
+ */
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+
+/*
+ * Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+/*
* Bus types (default is ISA, but people can check others with these..)
*/
extern int EISA_bus;
@@ -149,6 +224,8 @@ struct thread_struct {
unsigned long tr;
unsigned long cr2, trap_no, error_code;
mm_segment_t segment;
+/* debug registers */
+ long debugreg[8]; /* Hardware debugging registers */
/* floating point info */
union i387_union i387;
/* virtual 86 mode info */
@@ -160,33 +237,35 @@ struct thread_struct {
#define INIT_MMAP \
{ &init_mm, 0, 0, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, NULL, &init_mm.mmap }
-#define INIT_TSS { \
- 0,0, \
- sizeof(init_stack) + (long) &init_stack, \
- __KERNEL_DS, 0, \
- 0,0,0,0,0,0, \
- (long) &swapper_pg_dir - PAGE_OFFSET, \
- 0,0,0,0,0,0,0,0,0,0, \
- __USER_DS,0,__USER_DS,0,__USER_DS,0, \
- __USER_DS,0,__USER_DS,0,__USER_DS,0, \
- _LDT(0),0, \
- 0, 0x8000, \
- {~0, }, /* ioperm */ \
- _TSS(0), 0, 0, 0, (mm_segment_t) { 0 } /* obsolete */ , \
- { { 0, }, }, /* 387 state */ \
- NULL, 0, 0, 0, 0, 0 /* vm86_info */, \
+#define INIT_TSS { \
+ 0,0, /* back_link, __blh */ \
+ sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
+ __KERNEL_DS, 0, /* ss0 */ \
+ 0,0,0,0,0,0, /* stack1, stack2 */ \
+ (long) &swapper_pg_dir - PAGE_OFFSET, /* cr3 */ \
+ 0,0, /* eip,eflags */ \
+ 0,0,0,0, /* eax,ecx,edx,ebx */ \
+ 0,0,0,0, /* esp,ebp,esi,edi */ \
+ 0,0,0,0,0,0, /* es,cs,ss */ \
+ 0,0,0,0,0,0, /* ds,fs,gs */ \
+ _LDT(0),0, /* ldt */ \
+ 0, 0x8000, /* trace, bitmap */ \
+ {~0, }, /* ioperm */ \
+ _TSS(0), 0, 0, 0, (mm_segment_t) { 0 }, /* obsolete */ \
+ { 0, }, \
+ { { 0, }, }, /* 387 state */ \
+ NULL, 0, 0, 0, 0, 0, /* vm86_info */ \
}
-#define start_thread(regs, new_eip, new_esp) do {\
- unsigned long seg = __USER_DS; \
- __asm__("movl %w0,%%fs ; movl %w0,%%gs":"=r" (seg) :"0" (seg)); \
- set_fs(USER_DS); \
- regs->xds = seg; \
- regs->xes = seg; \
- regs->xss = seg; \
- regs->xcs = __USER_CS; \
- regs->eip = new_eip; \
- regs->esp = new_esp; \
+#define start_thread(regs, new_eip, new_esp) do { \
+ __asm__("movl %w0,%%fs ; movl %w0,%%gs": :"r" (0)); \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+ regs->xss = __USER_DS; \
+ regs->xcs = __USER_CS; \
+ regs->eip = new_eip; \
+ regs->esp = new_esp; \
} while (0)
/* Forward declaration, a strange C thing */
@@ -207,13 +286,8 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
return ((unsigned long *)t->esp)[3];
}
-/* Allocation and freeing of basic task resources. */
-/*
- * NOTE! The task struct and the stack go together
- */
-#define alloc_task_struct() \
- ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
-#define free_task_struct(p) free_pages((unsigned long)(p),1)
+extern struct task_struct * alloc_task_struct(void);
+extern void free_task_struct(struct task_struct *);
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
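A hedged sketch of the new helpers working together (the printk string is invented for illustration): query CPUID leaf 1 and test a capability bit, then read a Cyrix directory register through the indexed 0x22/0x23 port pair:

    int eax, ebx, ecx, edx;
    unsigned char dir0;

    cpuid(1, &eax, &ebx, &ecx, &edx);
    if (edx & X86_FEATURE_TSC)
            printk("rdtsc available\n");

    dir0 = getCx86(CX86_DIR0);              /* Cyrix device ID byte */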
diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h
index 99101420b..01ef4d08a 100644
--- a/include/asm-i386/siginfo.h
+++ b/include/asm-i386/siginfo.h
@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 598a21008..030a19b7b 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -72,7 +72,7 @@ typedef unsigned long sigset_t;
/*
* SA_FLAGS values:
*
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
+ * SA_ONSTACK indicates that a registered stack_t will be used.
* SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
@@ -97,6 +97,15 @@ typedef unsigned long sigset_t;
#define SA_RESTORER 0x04000000
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
#ifdef __KERNEL__
/*
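A user-space sketch of what the new constants are for (assumed userland code, not from the patch): register an alternate stack, after which handlers installed with SA_ONSTACK run on it:

    #include <signal.h>
    #include <stdlib.h>

    void setup_altstack(void)
    {
            stack_t ss;

            ss.ss_sp = malloc(SIGSTKSZ);
            ss.ss_size = SIGSTKSZ;
            ss.ss_flags = 0;                /* SS_DISABLE would turn it off */
            sigaltstack(&ss, NULL);
    }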
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 96c2d2764..33dbab5bf 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -6,6 +6,8 @@
#include <asm/i82489.h>
#include <asm/bitops.h>
+#include <asm/fixmap.h>
+
#include <linux/tasks.h>
#include <linux/ptrace.h>
@@ -160,11 +162,10 @@ extern unsigned long cpu_present_map;
extern volatile int cpu_number_map[NR_CPUS];
extern volatile unsigned long smp_invalidate_needed;
extern void smp_flush_tlb(void);
-extern volatile unsigned long kernel_flag, kernel_counter;
+
extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern volatile unsigned char active_kernel_processor;
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void smp_reschedule_irq(int cpl, struct pt_regs *regs);
+extern void smp_send_reschedule(int cpu);
extern unsigned long ipi_count;
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void smp_local_timer_interrupt(struct pt_regs * regs);
@@ -183,7 +184,8 @@ extern inline int cpu_logical_map(int cpu)
extern void smp_callin(void);
extern void smp_boot_cpus(void);
-extern void smp_store_cpu_info(int id); /* Store per cpu info (like the initial udelay numbers */
+extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial udelay numbers) */
+extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
extern volatile unsigned long smp_proc_in_lock[NR_CPUS]; /* for computing process time */
extern volatile int smp_process_available;
@@ -195,7 +197,7 @@ extern volatile int smp_process_available;
* "Back to Back Assertions of HOLD May Cause Lost APIC Write Cycle"
*/
-#define APIC_BASE ((char *)0xFEE00000)
+#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
extern __inline void apic_write(unsigned long reg, unsigned long v)
{
@@ -239,13 +241,5 @@ extern __inline int hard_smp_processor_id(void)
#define SMP_FROM_INT 1
#define SMP_FROM_SYSCALL 2
-
-#else
-#ifndef ASSEMBLY
-extern inline int cpu_logical_map(int cpu)
-{
- return cpu;
-}
-#endif
#endif
#endif
diff --git a/include/asm-i386/smp_lock.h b/include/asm-i386/smp_lock.h
deleted file mode 100644
index fc7eb94de..000000000
--- a/include/asm-i386/smp_lock.h
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef __I386_SMPLOCK_H
-#define __I386_SMPLOCK_H
-
-#define __STR(x) #x
-
-#ifndef __SMP__
-
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
-#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
-#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
-
-#else
-
-#include <asm/hardirq.h>
-
-/* Release global kernel lock and global interrupt lock */
-#define release_kernel_lock(task, cpu, depth) \
-do { \
- if ((depth = (task)->lock_depth) != 0) { \
- __cli(); \
- (task)->lock_depth = 0; \
- active_kernel_processor = NO_PROC_ID; \
- clear_bit(0,&kernel_flag); \
- } \
- release_irqlock(cpu); \
- __sti(); \
-} while (0)
-
-/* Re-acquire the kernel lock */
-#define reacquire_kernel_lock(task, cpu, depth) \
-do { if (depth) __asm__ __volatile__( \
- "cli\n\t" \
- "call __lock_kernel\n\t" \
- "movl %2,%0\n\t" \
- "sti" \
- : "=m" (task->lock_depth) \
- : "d" (cpu), "c" (depth)); \
-} while (0)
-
-
-extern const char lk_lockmsg[];
-
-/* Locking the kernel */
-extern __inline__ void lock_kernel(void)
-{
- int cpu = smp_processor_id();
-
- if (local_irq_count[cpu]) {
- __label__ l1;
-l1: printk(lk_lockmsg, &&l1);
- }
- if (cpu == global_irq_holder) {
- __label__ l2;
-l2: printk("Ugh at %p\n", &&l2);
- sti();
- }
-
- __asm__ __volatile__("
- pushfl
- cli
- cmpl $0, %0
- jne 0f
- call __lock_kernel
-0: incl %0
- popfl
-" :
- : "m" (current->lock_depth), "d" (cpu)
- : "memory");
-}
-
-extern __inline__ void unlock_kernel(void)
-{
- __asm__ __volatile__("
- pushfl
- cli
- decl %0
- jnz 1f
- movb %1, " __STR(active_kernel_processor) "
- lock
- btrl $0, " __STR(kernel_flag) "
-1:
- popfl
-" : /* no outputs */
- : "m" (current->lock_depth), "i" (NO_PROC_ID)
- : "ax", "memory");
-}
-
-#endif /* __SMP__ */
-
-#endif /* __I386_SMPLOCK_H */
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
new file mode 100644
index 000000000..3bb933e42
--- /dev/null
+++ b/include/asm-i386/smplock.h
@@ -0,0 +1,59 @@
+/*
+ * <asm/smplock.h>
+ *
+ * i386 SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPUs.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ __asm__ __volatile__(
+ "incl %1\n\t"
+ "jne 9f"
+ spin_lock_string
+ "\n9:"
+ :"=m" (__dummy_lock(&kernel_flag)),
+ "=m" (current->lock_depth));
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ __asm__ __volatile__(
+ "decl %1\n\t"
+ "jns 9f\n"
+ spin_unlock_string
+ "\n9:"
+ :"=m" (__dummy_lock(&kernel_flag)),
+ "=m" (current->lock_depth));
+}
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index 008edf305..f77cce80a 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -50,7 +50,7 @@ static inline void end_bh_atomic(void)
atomic_dec(&global_bh_lock);
}
-/* These are for the irq's testing the lock */
+/* These are for the IRQs testing the lock */
static inline int softirq_trylock(int cpu)
{
if (!test_and_set_bit(0,&global_bh_count)) {
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index e6a42b227..e6fdf42f1 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -9,9 +9,16 @@
/*
* Your basic spinlocks, allowing only a single CPU anywhere
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
*/
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED { }
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED { }
+#else
+ typedef struct { int gcc_is_buggy; } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED { 0 }
+#endif
#define spin_lock_init(lock) do { } while(0)
#define spin_lock(lock) do { } while(0)
@@ -61,14 +68,14 @@ typedef struct {
#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
-#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
-#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
-#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s: spin_unlock(%s:%p) not locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
-#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
-#define spin_unlock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; sti();} while (0)
+#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
+#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
+#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irq(x) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)
-#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
-#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
+#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
#endif /* DEBUG_SPINLOCKS */
@@ -128,8 +135,7 @@ typedef struct {
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-#define spin_lock(lock) \
-__asm__ __volatile__( \
+#define spin_lock_string \
"\n1:\t" \
"lock ; btsl $0,%0\n\t" \
"jc 2f\n" \
@@ -138,12 +144,19 @@ __asm__ __volatile__( \
"testb $1,%0\n\t" \
"jne 2b\n\t" \
"jmp 1b\n" \
- ".previous" \
+ ".previous"
+
+#define spin_unlock_string \
+ "lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ spin_lock_string \
:"=m" (__dummy_lock(lock)))
#define spin_unlock(lock) \
__asm__ __volatile__( \
- "lock ; btrl $0,%0" \
+ spin_unlock_string \
:"=m" (__dummy_lock(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index df02f7809..65d72fabf 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -403,7 +403,7 @@ extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
case 4:
*(unsigned long *)to = *(const unsigned long *)from;
return to;
- case 6: /* for ethernet addresses */
+ case 6: /* for Ethernet addresses */
*(unsigned long *)to = *(const unsigned long *)from;
*(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
return to;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 88c8ebc99..91b98d5e8 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H
+#include <linux/kernel.h>
#include <asm/segment.h>
/*
@@ -35,84 +36,37 @@ __asm__("str %%ax\n\t" \
:"=a" (n) \
:"0" (0),"i" (FIRST_TSS_ENTRY<<3))
-/* This special macro can be used to load a debugging register */
-
-#define loaddebug(tsk,register) \
- __asm__("movl %0,%%db" #register \
- : /* no output */ \
- :"r" (tsk->debugreg[register]))
+#ifdef __KERNEL__
+struct task_struct; /* one of the stranger aspects of C forward declarations.. */
+extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- * This also clears the TS-flag if the task we switched to has used
- * the math co-processor latest.
- *
- * It also reloads the debug regs if necessary..
+ * We do most of the task switching in C, but we need
+ * to do the EIP/ESP switch in assembly..
*/
-
-
-#ifdef __SMP__
- /*
- * Keep the lock depth straight. If we switch on an interrupt from
- * kernel->user task we need to lose a depth, and if we switch the
- * other way we need to gain a depth. Same layer switches come out
- * the same.
- *
- * We spot a switch in user mode because the kernel counter is the
- * same as the interrupt counter depth. (We never switch during the
- * message/invalidate IPI).
- *
- * We fsave/fwait so that an exception goes off at the right time
- * (as a call from the fsave or fwait in effect) rather than to
- * the wrong process.
- */
-
-#define switch_to(prev,next) do { \
- if(prev->flags&PF_USEDFPU) \
- { \
- __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
- __asm__ __volatile__("fwait"); \
- prev->flags&=~PF_USEDFPU; \
- } \
-__asm__("ljmp %0\n\t" \
- : /* no output */ \
- :"m" (*(((char *)&next->tss.tr)-4)), \
- "c" (next)); \
- /* Now maybe reload the debug registers */ \
- if(prev->debugreg[7]){ \
- loaddebug(prev,0); \
- loaddebug(prev,1); \
- loaddebug(prev,2); \
- loaddebug(prev,3); \
- loaddebug(prev,6); \
- loaddebug(prev,7); \
- } \
+#define switch_to(prev,next) do { \
+ unsigned long eax, edx, ecx; \
+ asm volatile("pushl %%ebx\n\t" \
+ "pushl %%esi\n\t" \
+ "pushl %%edi\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %5,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %6\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popl %%edi\n\t" \
+ "popl %%esi\n\t" \
+ "popl %%ebx" \
+ :"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
+ "=a" (eax), "=d" (edx), "=c" (ecx) \
+ :"m" (next->tss.esp),"m" (next->tss.eip), \
+ "a" (prev), "d" (next)); \
} while (0)
-#else
-#define switch_to(prev,next) do { \
-__asm__("ljmp %0\n\t" \
- "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
- "jne 1f\n\t" \
- "clts\n" \
- "1:" \
- : /* no outputs */ \
- :"m" (*(((char *)&next->tss.tr)-4)), \
- "r" (prev), "r" (next)); \
- /* Now maybe reload the debug registers */ \
- if(prev->debugreg[7]){ \
- loaddebug(prev,0); \
- loaddebug(prev,1); \
- loaddebug(prev,2); \
- loaddebug(prev,3); \
- loaddebug(prev,6); \
- loaddebug(prev,7); \
- } \
-} while (0)
-#endif
-
#define _set_base(addr,base) \
__asm__("movw %%dx,%0\n\t" \
"rorl $16,%%edx\n\t" \
@@ -157,15 +111,26 @@ static inline unsigned long _get_base(char * addr)
#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
/*
* Clear and set 'TS' bit respectively
@@ -180,6 +145,17 @@ __asm__ __volatile__ ( \
: /* no inputs */ \
:"ax")
+#endif /* __KERNEL__ */
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
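How the loadsegment() fixup works: the mov at label 1 is paired with landing pad 3 through the __ex_table section, so if the selector is stale and the load raises #GP, the trap handler finds the faulting EIP in the table and resumes at the fixup, which loads the null selector and jumps back. An assumed call site, for illustration only:

    loadsegment(fs, next->tss.fs);  /* a bad value degrades to the null selector */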
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
index 6deebbb03..b2b77853f 100644
--- a/include/asm-i386/termios.h
+++ b/include/asm-i386/termios.h
@@ -47,6 +47,7 @@ struct termio {
#define N_AX25 5
#define N_X25 6 /* X.25 async */
#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
#ifdef __KERNEL__
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
new file mode 100644
index 000000000..c492e1b93
--- /dev/null
+++ b/include/asm-i386/timex.h
@@ -0,0 +1,15 @@
+/*
+ * linux/include/asm-i386/timex.h
+ *
+ * i386 architecture timex specifications
+ */
+#ifndef _ASMi386_TIMEX_H
+#define _ASMi386_TIMEX_H
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+ (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+ << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+#endif
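Worked numbers for FINETUNE, assuming HZ = 100 and the usual LATCH of (CLOCK_TICK_RATE + HZ/2) / HZ = 11932 (both assumptions; neither is defined in this file):

    LATCH * HZ - CLOCK_TICK_RATE = 11932 * 100 - 1193180 = 20

so the timer tick runs 20 PIT counts per second fast, and FINETUNE scales exactly that residue into fixed-point correction units, dividing through by CLOCK_TICK_FACTOR = 20 first so the intermediate products stay in range.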
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 018f6f0f5..1b23625c6 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -27,7 +27,7 @@
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
-#define __NR_umount 22
+#define __NR_oldumount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
@@ -57,7 +57,7 @@
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
-#define __NR_phys 52
+#define __NR_umount 52
#define __NR_lock 53
#define __NR_ioctl 54
#define __NR_fcntl 55
@@ -191,6 +191,10 @@
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_streams1 188 /* some people actually want it */
+#define __NR_streams2 189 /* some people actually want it */
/* user-visible error numbers are in the range -1 - -122: see <asm-i386/errno.h> */
diff --git a/include/asm-i386/user.h b/include/asm-i386/user.h
index b5acf0e74..b0d067e7a 100644
--- a/include/asm-i386/user.h
+++ b/include/asm-i386/user.h
@@ -79,7 +79,7 @@ struct user{
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct pt_regs * u_ar0; /* Used by gdb to help find the values for */
+ struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h
new file mode 100644
index 000000000..ef0c0e50c
--- /dev/null
+++ b/include/asm-i386/vga.h
@@ -0,0 +1,20 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 88d0bf510..40ec82c69 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -60,7 +60,7 @@
#define VM86_GET_AND_RESET_IRQ 6
/*
- * This is the stack-layout seen by the user space programm when we have
+ * This is the stack-layout seen by the user space program when we have
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
* is 'kernel_vm86_regs' (see below).
*/
@@ -193,7 +193,7 @@ struct kernel_vm86_struct {
* this way. In front of 'return-eip' may be some data, depending on
* compilation, so we don't rely on this and save the pointer to 'oldregs'
* in 'regs32' above.
- * However, with GCC-2.7.2 and the the current CFLAGS you see exactly this:
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
long return-eip; from call to vm86()
struct pt_regs oldregs; user space registers as saved by syscall