Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/atomic.h         30
-rw-r--r--   include/asm-i386/bitops.h         40
-rw-r--r--   include/asm-i386/boot.h           15
-rw-r--r--   include/asm-i386/cache.h          16
-rw-r--r--   include/asm-i386/checksum.h       43
-rw-r--r--   include/asm-i386/current.h        12
-rw-r--r--   include/asm-i386/errno.h           3
-rw-r--r--   include/asm-i386/hardirq.h        76
-rw-r--r--   include/asm-i386/ide.h             2
-rw-r--r--   include/asm-i386/init.h           14
-rw-r--r--   include/asm-i386/io.h             19
-rw-r--r--   include/asm-i386/irq.h           385
-rw-r--r--   include/asm-i386/ldt.h             1
-rw-r--r--   include/asm-i386/mmu_context.h     3
-rw-r--r--   include/asm-i386/namei.h          21
-rw-r--r--   include/asm-i386/pgtable.h        15
-rw-r--r--   include/asm-i386/poll.h           25
-rw-r--r--   include/asm-i386/processor.h      26
-rw-r--r--   include/asm-i386/scatterlist.h    13
-rw-r--r--   include/asm-i386/semaphore.h     104
-rw-r--r--   include/asm-i386/smp.h             9
-rw-r--r--   include/asm-i386/smp_lock.h      139
-rw-r--r--   include/asm-i386/socket.h         14
-rw-r--r--   include/asm-i386/softirq.h        99
-rw-r--r--   include/asm-i386/spinlock.h      209
-rw-r--r--   include/asm-i386/string-486.h      4
-rw-r--r--   include/asm-i386/string.h          8
-rw-r--r--   include/asm-i386/system.h         49
-rw-r--r--   include/asm-i386/termbits.h        1
-rw-r--r--   include/asm-i386/termios.h        66
-rw-r--r--   include/asm-i386/uaccess.h       119
-rw-r--r--   include/asm-i386/unistd.h          6
-rw-r--r--   include/asm-i386/vm86.h          119
33 files changed, 1133 insertions, 572 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 1b9d99f76..5690c46e2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -19,9 +19,18 @@
*/
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-typedef int atomic_t;
+#ifdef __SMP__
+typedef struct { volatile int counter; } atomic_t;
+#else
+typedef struct { int counter; } atomic_t;
+#endif
+
+#define ATOMIC_INIT(i) { (i) }
-static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic_add(int i, volatile atomic_t *v)
{
__asm__ __volatile__(
LOCK "addl %1,%0"
@@ -29,7 +38,7 @@ static __inline__ void atomic_add(atomic_t i, atomic_t *v)
:"ir" (i), "m" (__atomic_fool_gcc(v)));
}
-static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+static __inline__ void atomic_sub(int i, volatile atomic_t *v)
{
__asm__ __volatile__(
LOCK "subl %1,%0"
@@ -37,7 +46,7 @@ static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
:"ir" (i), "m" (__atomic_fool_gcc(v)));
}
-static __inline__ void atomic_inc(atomic_t *v)
+static __inline__ void atomic_inc(volatile atomic_t *v)
{
__asm__ __volatile__(
LOCK "incl %0"
@@ -45,7 +54,7 @@ static __inline__ void atomic_inc(atomic_t *v)
:"m" (__atomic_fool_gcc(v)));
}
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ void atomic_dec(volatile atomic_t *v)
{
__asm__ __volatile__(
LOCK "decl %0"
@@ -53,7 +62,7 @@ static __inline__ void atomic_dec(atomic_t *v)
:"m" (__atomic_fool_gcc(v)));
}
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
unsigned char c;
@@ -64,4 +73,13 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
return c != 0;
}
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+__asm__ __volatile__(LOCK "andl %0,%1" \
+: : "r" (~(mask)),"m" (__atomic_fool_gcc(addr)) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+__asm__ __volatile__(LOCK "orl %0,%1" \
+: : "r" (mask),"m" (__atomic_fool_gcc(addr)) : "memory")
+
#endif
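With this change atomic_t becomes an opaque struct rather than a plain int, so counters must go through ATOMIC_INIT, atomic_read/atomic_set and the helper functions. A minimal usage sketch against the interface above (the refcount name is hypothetical, not part of the patch):

#include <asm/atomic.h>

static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	atomic_inc(&example_refcount);
}

static int example_put(void)
{
	/* atomic_dec_and_test() returns non-zero once the count reaches zero */
	return atomic_dec_and_test(&example_refcount);
}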
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index d220f6f51..8e6d484e4 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -15,10 +15,8 @@
#ifdef __SMP__
#define LOCK_PREFIX "lock ; "
-#define SMPVOL volatile
#else
#define LOCK_PREFIX ""
-#define SMPVOL
#endif
/*
@@ -28,33 +26,33 @@ struct __dummy { unsigned long a[100]; };
#define ADDR (*(struct __dummy *) addr)
#define CONST_ADDR (*(const struct __dummy *) addr)
-extern __inline__ int set_bit(int nr, SMPVOL void * addr)
+extern __inline__ int set_bit(int nr, volatile void * addr)
{
int oldbit;
- __asm__ __volatile__(LOCK_PREFIX
+ __asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"ir" (nr));
return oldbit;
}
-extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
+extern __inline__ int clear_bit(int nr, volatile void * addr)
{
int oldbit;
- __asm__ __volatile__(LOCK_PREFIX
+ __asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"ir" (nr));
return oldbit;
}
-extern __inline__ int change_bit(int nr, SMPVOL void * addr)
+extern __inline__ int change_bit(int nr, volatile void * addr)
{
int oldbit;
- __asm__ __volatile__(LOCK_PREFIX
+ __asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"ir" (nr));
@@ -64,11 +62,27 @@ extern __inline__ int change_bit(int nr, SMPVOL void * addr)
/*
* This routine doesn't need to be atomic.
*/
-extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
+extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
- return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
+extern __inline__ int __test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"ir" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)))
+
/*
* Find-bit routines..
*/
@@ -142,6 +156,12 @@ extern __inline__ unsigned long ffz(unsigned long word)
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit
+/* Bitmap functions for the minix filesystem. */
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_clear_bit(nr,addr) clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
#endif /* __KERNEL__ */
#endif /* _I386_BITOPS_H */
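test_bit() now dispatches at compile time: a constant bit number takes the plain C __constant_test_bit() path, a variable one takes the btl-based __test_bit() path. A hedged sketch of how that looks to callers (the flag word is hypothetical):

#include <asm/bitops.h>

static unsigned long example_flags;

static int example_check(int nr)
{
	set_bit(0, &example_flags);		/* LOCK-prefixed on SMP builds */
	if (test_bit(0, &example_flags))	/* constant nr -> __constant_test_bit() */
		return 1;
	return test_bit(nr, &example_flags);	/* variable nr -> __test_bit() */
}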
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
new file mode 100644
index 000000000..96b228e6e
--- /dev/null
+++ b/include/asm-i386/boot.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_BOOT_H
+#define _LINUX_BOOT_H
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG 0x9000
+#define DEF_SYSSEG 0x1000
+#define DEF_SETUPSEG 0x9020
+#define DEF_SYSSIZE 0x7F00
+
+/* Internal svga startup constants */
+#define NORMAL_VGA 0xffff /* 80x25 mode */
+#define EXTENDED_VGA 0xfffe /* 80x50 mode */
+#define ASK_VGA 0xfffd /* ask for it at bootup */
+
+#endif
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
new file mode 100644
index 000000000..50c1dbe8f
--- /dev/null
+++ b/include/asm-i386/cache.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-i386/cache.h
+ */
+#ifndef __ARCH_I386_CACHE_H
+#define __ARCH_I386_CACHE_H
+
+/* bytes per L1 cache line */
+#if CPU==586 || CPU==686
+#define L1_CACHE_BYTES 32
+#else
+#define L1_CACHE_BYTES 16
+#endif
+
+#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+
+#endif
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h
index 72ba38307..284a30ee4 100644
--- a/include/asm-i386/checksum.h
+++ b/include/asm-i386/checksum.h
@@ -17,20 +17,55 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
/*
* the same as csum_partial, but copies from src while it
- * checksums
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
+unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+extern __inline__
+unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
+ int len, int sum)
+{
+ int *src_err_ptr=NULL, *dst_err_ptr=NULL;
+
+ return csum_partial_copy_generic ( src, dst, len, sum, src_err_ptr, dst_err_ptr);
+}
+
+extern __inline__
+unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
+ int len, int sum, int *err_ptr)
+{
+ int *dst_err_ptr=NULL;
+
+ return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, dst_err_ptr);
+}
+/*
+ * This combination is currently not used, but possible:
+ */
+
+extern __inline__
+unsigned int csum_partial_copy_to_user ( const char *src, char *dst,
+ int len, int sum, int *err_ptr)
+{
+ int *src_err_ptr=NULL;
+
+ return csum_partial_copy_generic ( src, dst, len, sum, src_err_ptr, err_ptr);
+}
/*
- * the same as csum_partial, but copies from user space (but on the x86
- * we have just one address space, so this is identical to the above)
+ * These are the old (and unsafe) way of doing checksums, a warning message will be
+ * printed if they are used and an exeption occurs.
+ *
+ * these functions should go away after some time.
*/
+
#define csum_partial_copy_fromuser csum_partial_copy
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
/*
* This is a version of ip_compute_csum() optimized for IP headers,
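The generic routine takes separate source and destination error pointers; the _from_user wrapper passes the caller's err_ptr on the source side so faults on the user buffer are reported instead of crashing. A sketch of a caller, with hypothetical names:

#include <asm/checksum.h>

static unsigned int example_csum_from_user(const char *usrc, char *kdst,
					   int len, unsigned int sum)
{
	int err = 0;

	sum = csum_partial_copy_from_user(usrc, kdst, len, sum, &err);
	if (err)
		sum = 0;	/* a fault occurred while reading the user buffer */
	return sum;
}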
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
new file mode 100644
index 000000000..01ba3e9a0
--- /dev/null
+++ b/include/asm-i386/current.h
@@ -0,0 +1,12 @@
+#ifndef _I386_CURRENT_H
+#define _I386_CURRENT_H
+
+/* Some architectures may want to do something "clever" here since
+ * this is the most frequently accessed piece of data in the entire
+ * kernel. For an example, see the Sparc implementation where an
+ * entire register is hard locked to contain the value of current.
+ */
+extern struct task_struct *current_set[NR_CPUS];
+#define current (current_set[smp_processor_id()]) /* Current on this processor */
+
+#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h
index 1936cb745..7cf599f4d 100644
--- a/include/asm-i386/errno.h
+++ b/include/asm-i386/errno.h
@@ -126,4 +126,7 @@
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
#endif
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
new file mode 100644
index 000000000..bdaad9b35
--- /dev/null
+++ b/include/asm-i386/hardirq.h
@@ -0,0 +1,76 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/tasks.h>
+
+extern unsigned int local_irq_count[NR_CPUS];
+#define in_interrupt() (local_irq_count[smp_processor_id()] != 0)
+
+#ifndef __SMP__
+
+#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define hardirq_enter(cpu) (local_irq_count[cpu]++)
+#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+
+#define synchronize_irq() do { } while (0)
+
+#else
+
+#include <asm/atomic.h>
+
+extern unsigned char global_irq_holder;
+extern unsigned volatile int global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ global_irq_lock = 0;
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ unsigned long flags;
+
+ __save_flags(flags);
+ __cli();
+ atomic_inc(&global_irq_count);
+ if (atomic_read(&global_irq_count) != 1 || test_bit(0,&global_irq_lock)) {
+ atomic_dec(&global_irq_count);
+ __restore_flags(flags);
+ return 0;
+ }
+ ++local_irq_count[cpu];
+ __sti();
+ return 1;
+}
+
+static inline void hardirq_endlock(int cpu)
+{
+ __cli();
+ hardirq_exit(cpu);
+ __sti();
+}
+
+extern void synchronize_irq(void);
+
+#endif /* __SMP__ */
+
+#endif /* __ASM_HARDIRQ_H */
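hardirq_enter()/hardirq_exit() bracket a handler so that in_interrupt() (and, on SMP, the global irq count) stays accurate, while hardirq_trylock() is the non-blocking entry used by the irq path itself. A rough sketch of the calling pattern this implies (the dispatch function is hypothetical):

#include <linux/smp.h>
#include <asm/hardirq.h>

static void example_dispatch_irq(void (*handler)(void))
{
	int cpu = smp_processor_id();

	hardirq_enter(cpu);
	/* in_interrupt() is now true on this cpu */
	handler();
	hardirq_exit(cpu);
}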
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index a2d797037..b1c21cc62 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -97,7 +97,7 @@ static __inline__ void ide_release_region (ide_ioreg_t from, unsigned int extent
/*
* The following are not needed for the non-m68k ports
*/
-static __inline__ int ide_ack_intr (ide_ioreg_t base_port, ide_ioreg_t irq_port)
+static __inline__ int ide_ack_intr (ide_ioreg_t status_port, ide_ioreg_t irq_port)
{
return(1);
}
diff --git a/include/asm-i386/init.h b/include/asm-i386/init.h
new file mode 100644
index 000000000..83215545f
--- /dev/null
+++ b/include/asm-i386/init.h
@@ -0,0 +1,14 @@
+#ifndef _I386_INIT_H
+#define _I386_INIT_H
+
+#define __init __attribute__ ((__section__ (".text.init")))
+#define __initdata __attribute__ ((__section__ (".data.init")))
+#define __initfunc(__arginit) \
+ __arginit __init; \
+ __arginit
+/* For assembly routines */
+#define __INIT .section ".text.init",#alloc,#execinstr
+#define __FINIT .previous
+#define __INITDATA .section ".data.init",#alloc,#write
+
+#endif
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 142662e94..d5dba06d7 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -166,6 +166,7 @@ __OUTS(l)
#ifdef __KERNEL__
+#include <linux/vmalloc.h>
#include <asm/page.h>
#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x)))
@@ -184,7 +185,23 @@ extern inline void * phys_to_virt(unsigned long address)
return __io_virt(address);
}
-extern void * ioremap(unsigned long offset, unsigned long size);
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+extern inline void * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/*
+ * This one maps high address device memory and turns off caching for that area.
+ * it's useful if some control registers are in such an area and write combining
+ * or read caching is not desirable:
+ */
+extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, _PAGE_PCD);
+}
+
extern void iounmap(void *addr);
/*
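ioremap() keeps its old behaviour via __ioremap(..., 0), while ioremap_nocache() sets _PAGE_PCD so the mapping is uncached, which is what memory-mapped control registers usually need. Sketch, with a hypothetical physical address and size:

#include <asm/io.h>

static void *example_regs;

static int example_map_device(void)
{
	example_regs = ioremap_nocache(0xfebf0000, 0x1000);
	if (!example_regs)
		return -1;
	/* ... access the registers through readl()/writel() on example_regs ... */
	iounmap(example_regs);
	return 0;
}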
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index ed2287ebb..d764ef164 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -19,387 +19,4 @@
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-#define GET_CURRENT \
- "movl " SYMBOL_NAME_STR(current_set) ",%ebx\n\t"
-
-#define SAVE_ALL \
- "cld\n\t" \
- "push %es\n\t" \
- "push %ds\n\t" \
- "pushl %eax\n\t" \
- "pushl %ebp\n\t" \
- "pushl %edi\n\t" \
- "pushl %esi\n\t" \
- "pushl %edx\n\t" \
- "pushl %ecx\n\t" \
- "pushl %ebx\n\t" \
- "movl $" STR(KERNEL_DS) ",%edx\n\t" \
- "mov %dx,%ds\n\t" \
- "mov %dx,%es\n\t"
-
-/*
- * SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers,
- * installed by using the SA_INTERRUPT flag. These kinds of IRQ's don't
- * call the routines that do signal handling etc on return, and can have
- * more relaxed register-saving etc. They are also atomic, and are thus
- * suited for small, fast interrupts like the serial lines or the harddisk
- * drivers, which don't actually need signal handling etc.
- *
- * Also note that we actually save only those registers that are used in
- * C subroutines (%eax, %edx and %ecx), so if you do something weird,
- * you're on your own. The only segments that are saved (not counting the
- * automatic stack and code segment handling) are %ds and %es, and they
- * point to kernel space. No messing around with %fs here.
- */
-#define SAVE_MOST \
- "cld\n\t" \
- "push %es\n\t" \
- "push %ds\n\t" \
- "pushl %eax\n\t" \
- "pushl %edx\n\t" \
- "pushl %ecx\n\t" \
- "movl $" STR(KERNEL_DS) ",%edx\n\t" \
- "mov %dx,%ds\n\t" \
- "mov %dx,%es\n\t"
-
-#define RESTORE_MOST \
- "popl %ecx\n\t" \
- "popl %edx\n\t" \
- "popl %eax\n\t" \
- "pop %ds\n\t" \
- "pop %es\n\t" \
- "iret"
-
-/*
- * The "inb" instructions are not needed, but seem to change the timings
- * a bit - without them it seems that the harddisk driver won't work on
- * all hardware. Arghh.
- */
-#define ACK_FIRST(mask,nr) \
- "inb $0x21,%al\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_21)"\n\t" \
- "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
- "outb %al,$0x21\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\tmovb $0x20,%al\n\t" \
- "outb %al,$0x20\n\t"
-
-#define ACK_SECOND(mask,nr) \
- "inb $0xA1,%al\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_A1)"\n\t" \
- "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
- "outb %al,$0xA1\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\tmovb $0x20,%al\n\t" \
- "outb %al,$0xA0\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\toutb %al,$0x20\n\t"
-
-#define UNBLK_FIRST(mask) \
- "inb $0x21,%al\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_21)"\n\t" \
- "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
- "outb %al,$0x21\n\t"
-
-#define UNBLK_SECOND(mask) \
- "inb $0xA1,%al\n\t" \
- "jmp 1f\n" \
- "1:\tjmp 1f\n" \
- "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_A1)"\n\t" \
- "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
- "outb %al,$0xA1\n\t"
-
-#define IRQ_NAME2(nr) nr##_interrupt(void)
-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)
-#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
-
-#ifdef __SMP__
-
-#ifndef __SMP_PROF__
-#define SMP_PROF_INT_SPINS
-#define SMP_PROF_IPI_CNT
-#else
-#define SMP_PROF_INT_SPINS "incl "SYMBOL_NAME_STR(smp_spins)"(,%eax,4)\n\t"
-#define SMP_PROF_IPI_CNT "incl "SYMBOL_NAME_STR(ipi_count)"\n\t"
-#endif
-
-#define GET_PROCESSOR_ID \
- "movl "SYMBOL_NAME_STR(apic_reg)", %edx\n\t" \
- "movl 32(%edx), %eax\n\t" \
- "shrl $24,%eax\n\t" \
- "andb $0x0F,%al\n\t"
-
-#define GET_CURRENT \
- "movl " SYMBOL_NAME_STR(current_set) "(,%eax,4),%ebx\n\t"
-
-#define ENTER_KERNEL \
- "pushl %eax\n\t" \
- "pushl %edx\n\t" \
- "pushfl\n\t" \
- "cli\n\t" \
- GET_PROCESSOR_ID \
- GET_CURRENT \
- "btsl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
- "1: " \
- "lock\n\t" \
- "btsl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
- "jnc 3f\n\t" \
- "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
- "je 4f\n\t" \
- "2: " \
- SMP_PROF_INT_SPINS \
- "btl %al, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
- "jnc 5f\n\t" \
- "lock\n\t" \
- "btrl %al, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
- "jnc 5f\n\t" \
- "movl %cr3,%edx\n\t" \
- "movl %edx,%cr3\n" \
- "5: btl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
- "jc 2b\n\t" \
- "jmp 1b\n\t" \
- "3: " \
- "movb %al, "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
- "4: " \
- "incl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
- "popfl\n\t" \
- "popl %edx\n\t" \
- "popl %eax\n\t"
-
-#define LEAVE_KERNEL \
- GET_PROCESSOR_ID \
- "btrl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
- "pushfl\n\t" \
- "cli\n\t" \
- "decl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
- "jnz 1f\n\t" \
- "movb $" STR (NO_PROC_ID) ", "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
- "lock\n\t" \
- "btrl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
- "1: " \
- "popfl\n\t"
-
-
-/*
- * the syscall count inc is a gross hack because ret_from_syscall is used by both irq and
- * syscall return paths (urghh).
- */
-
-#define BUILD_IRQ(chip,nr,mask) \
-asmlinkage void IRQ_NAME(nr); \
-asmlinkage void FAST_IRQ_NAME(nr); \
-asmlinkage void BAD_IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ENTER_KERNEL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "sti\n\t" \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
- "jmp ret_from_sys_call\n" \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ENTER_KERNEL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
- "addl $4,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- LEAVE_KERNEL \
- RESTORE_MOST \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ENTER_KERNEL \
- ACK_##chip(mask,(nr&7)) \
- LEAVE_KERNEL \
- RESTORE_MOST);
-
-
-#define BUILD_TIMER_IRQ(chip,nr,mask) \
-asmlinkage void IRQ_NAME(nr); \
-asmlinkage void FAST_IRQ_NAME(nr); \
-asmlinkage void BAD_IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
-SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ENTER_KERNEL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
- "jmp ret_from_sys_call\n");
-
-
-/*
- * Message pass must be a fast IRQ..
- */
-
-#define BUILD_MSGIRQ(chip,nr,mask) \
-asmlinkage void IRQ_NAME(nr); \
-asmlinkage void FAST_IRQ_NAME(nr); \
-asmlinkage void BAD_IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ENTER_KERNEL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "sti\n\t" \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- GET_PROCESSOR_ID \
- "btrl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
- "jmp ret_from_sys_call\n" \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ACK_##chip(mask,(nr&7)) \
- SMP_PROF_IPI_CNT \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
- "addl $4,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- RESTORE_MOST \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ACK_##chip(mask,(nr&7)) \
- RESTORE_MOST);
-
-#define BUILD_RESCHEDIRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ENTER_KERNEL \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "sti\n\t" \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(smp_reschedule_irq)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
- "jmp ret_from_sys_call\n");
-#else
-
-#define BUILD_IRQ(chip,nr,mask) \
-asmlinkage void IRQ_NAME(nr); \
-asmlinkage void FAST_IRQ_NAME(nr); \
-asmlinkage void BAD_IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "sti\n\t" \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- GET_CURRENT \
- "jmp ret_from_sys_call\n" \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
- "addl $4,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- RESTORE_MOST \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
- SAVE_MOST \
- ACK_##chip(mask,(nr&7)) \
- RESTORE_MOST);
-
-#define BUILD_TIMER_IRQ(chip,nr,mask) \
-asmlinkage void IRQ_NAME(nr); \
-asmlinkage void FAST_IRQ_NAME(nr); \
-asmlinkage void BAD_IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
-SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $-"#nr"-2\n\t" \
- SAVE_ALL \
- ACK_##chip(mask,(nr&7)) \
- "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "pushl $" #nr "\n\t" \
- "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
- "addl $8,%esp\n\t" \
- "cli\n\t" \
- UNBLK_##chip(mask) \
- "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
- GET_CURRENT \
- "jmp ret_from_sys_call\n");
-
-#endif
-#endif
+#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h
index 84440bf54..55b75ca39 100644
--- a/include/asm-i386/ldt.h
+++ b/include/asm-i386/ldt.h
@@ -20,6 +20,7 @@ struct modify_ldt_ldt_s {
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
+ unsigned int useable:1;
};
#define MODIFY_LDT_CONTENTS_DATA 0
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 1f4751cc2..01b8bfcba 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -6,4 +6,7 @@
*/
#define get_mmu_context(x) do { } while (0)
+#define init_new_context(mm) do { } while(0)
+#define destroy_context(mm) do { } while(0)
+
#endif
diff --git a/include/asm-i386/namei.h b/include/asm-i386/namei.h
new file mode 100644
index 000000000..c0dc3b3f8
--- /dev/null
+++ b/include/asm-i386/namei.h
@@ -0,0 +1,21 @@
+/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
+ * linux/include/asm-i386/namei.h
+ *
+ * Included from linux/fs/namei.c
+ */
+
+#ifndef __I386_NAMEI_H
+#define __I386_NAMEI_H
+
+/* These dummy routines maybe changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define translate_namei(pathname, base, follow_links, res_inode) \
+ do { } while (0)
+
+#define translate_open_namei(pathname, flag, mode, res_inode, base) \
+ do { } while (0)
+
+#endif /* __I386_NAMEI_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 58990eb38..7d973530c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -82,7 +82,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
__flush_tlb()
-#undef CLEVER_SMP_INVALIDATE
+#define CLEVER_SMP_INVALIDATE
#ifdef CLEVER_SMP_INVALIDATE
/*
@@ -91,9 +91,6 @@ static inline void flush_tlb_range(struct mm_struct *mm,
*
* These mean you can really definitely utterly forget about
* writing to user space from interrupts. (Its not allowed anyway).
- *
- * Doesn't currently work as Linus makes flush tlb calls before
- * stuff like current/current->mm are setup properly
*/
static inline void flush_tlb_current_task(void)
@@ -208,6 +205,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_4M 0x080 /* 4 MB page, Pentium+.. */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -354,6 +352,9 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
@@ -374,6 +375,8 @@ extern inline void pte_free_kernel(pte_t * pte)
free_page((unsigned long) pte);
}
+extern const char bad_pmd_string[];
+
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
@@ -390,7 +393,7 @@ extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
free_page((unsigned long) page);
}
if (pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ printk(bad_pmd_string, pmd_val(*pmd));
pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
return NULL;
}
@@ -443,7 +446,7 @@ freenew:
}
fix:
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ printk(bad_pmd_string, pmd_val(*pmd));
oom:
pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
return NULL;
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
new file mode 100644
index 000000000..e5feda71b
--- /dev/null
+++ b/include/asm-i386/poll.h
@@ -0,0 +1,25 @@
+#ifndef __i386_POLL_H
+#define __i386_POLL_H
+
+/* These are specified by iBCS2 */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM 0x0100
+#define POLLWRBAND 0x0200
+#define POLLMSG 0x0400
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index de0611190..9c6830f68 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -29,10 +29,15 @@ extern int have_cpuid; /* We have a CPUID */
/*
* Bus types (default is ISA, but people can check others with these..)
- * MCA_bus hardcoded to 0 for now.
*/
extern int EISA_bus;
-#define MCA_bus 0
+extern int MCA_bus;
+
+/* from system description table in BIOS. Mostly for MCA use, but
+others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
/*
* User space process size: 3GB. This is hardcoded into a few places,
@@ -40,6 +45,11 @@ extern int EISA_bus;
*/
#define TASK_SIZE (0xC0000000UL)
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
/*
* Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
*/
@@ -130,9 +140,6 @@ struct thread_struct {
NULL, 0, 0, 0, 0 /* vm86_info */, \
}
-#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
-#define free_kernel_stack(page) free_page((page))
-
#define start_thread(regs, new_eip, new_esp) do {\
unsigned long seg = USER_DS; \
__asm__("mov %w0,%%fs ; mov %w0,%%gs":"=r" (seg) :"0" (seg)); \
@@ -145,6 +152,9 @@ struct thread_struct {
regs->esp = new_esp; \
} while (0)
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
/*
* Return saved PC of a blocked thread.
*/
@@ -153,6 +163,12 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
return ((unsigned long *)t->esp)[3];
}
+/* Allocation and freeing of basic task resources. */
+#define alloc_task_struct() kmalloc(sizeof(struct task_struct), GFP_KERNEL)
+#define alloc_kernel_stack(p) __get_free_page(GFP_KERNEL)
+#define free_task_struct(p) kfree(p)
+#define free_kernel_stack(page) free_page((page))
+
/*
* Return_address is a replacement for __builtin_return_address(count)
* which on certain architectures cannot reasonably be implemented in GCC
diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h
new file mode 100644
index 000000000..6551ac94a
--- /dev/null
+++ b/include/asm-i386/scatterlist.h
@@ -0,0 +1,13 @@
+#ifndef _I386_SCATTERLIST_H
+#define _I386_SCATTERLIST_H
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of actual if address is a
+ * dma indirect buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0x00ffffff)
+
+#endif /* !(_I386_SCATTERLIST_H) */
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 930b39edb..4395dfce0 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -7,42 +7,120 @@
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
+ *
+ * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
+ * the original code and to make semaphore waits
+ * interruptible so that processes waiting on
+ * semaphores can be killed.
+ *
+ * If you would like to see an analysis of this implementation, please
+ * ftp to gcom.com and download the file
+ * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
+ *
*/
+#include <asm/system.h>
+#include <asm/atomic.h>
+
struct semaphore {
- int count;
- int waiting;
+ atomic_t count;
+ int waking;
struct wait_queue * wait;
};
-#define MUTEX ((struct semaphore) { 1, 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { 0, 0, NULL })
+#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
extern void __down(struct semaphore * sem);
extern void __up(struct semaphore * sem);
+#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * but on the x86 we need an external synchronizer.
+ * Currently this is just the global interrupt lock,
+ * bah. Go for a smaller spinlock some day.
+ *
+ * (On the other hand this shouldn't be in any critical
+ * path, so..)
+ */
+static inline void wake_one_more(struct semaphore * sem)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ sem->waking++;
+ restore_flags(flags);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ save_flags(flags);
+ cli();
+ if (sem->waking > 0) {
+ sem->waking--;
+ ret = 1;
+ }
+ restore_flags(flags);
+ return ret;
+}
+
/*
* This is ugly, but we want the default case to fall through.
* "down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/i386/lib/semaphore.S
*/
-extern __inline__ void down(struct semaphore * sem)
+extern inline void down(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic down operation\n\t"
+ "movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "decl %0\n\t"
- "movl $1f,%%eax\n\t"
+ "decl 0(%0)\n\t"
"js " SYMBOL_NAME_STR(__down_failed)
"\n1:"
:/* no outputs */
- :"m" (sem->count), "c" (sem)
- :"ax", "memory");
+ :"c" (sem)
+ :"ax","memory");
+}
+
+/*
+ * This version waits in interruptible state so that the waiting
+ * process can be killed. The down_failed_interruptible routine
+ * returns negative for signalled and zero for semaphore acquired.
+ */
+extern inline int down_interruptible(struct semaphore * sem)
+{
+ int ret;
+
+ __asm__ __volatile__(
+ "# atomic interruptible down operation\n\t"
+ "movl $1f,%0\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl 0(%1)\n\t"
+ "js " SYMBOL_NAME_STR(__down_failed_interruptible) "\n\t"
+ "xorl %0,%0"
+ "\n1:"
+ :"=a" (ret)
+ :"c" (sem)
+ :"memory");
+
+ return ret;
}
/*
@@ -51,19 +129,19 @@ extern __inline__ void down(struct semaphore * sem)
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
-extern __inline__ void up(struct semaphore * sem)
+extern inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
+ "movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "incl %0\n\t"
- "movl $1f,%%eax\n\t"
+ "incl 0(%0)\n\t"
"jle " SYMBOL_NAME_STR(__up_wakeup)
"\n1:"
:/* no outputs */
- :"m" (sem->count), "c" (sem)
+ :"c" (sem)
:"ax", "memory");
}
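Besides switching the count to an atomic_t, the patch adds down_interruptible(), which returns non-zero when the sleep is broken by a signal instead of returning with the semaphore held, so callers must check the result. A hedged sketch (the semaphore name is hypothetical):

#include <asm/semaphore.h>
#include <asm/errno.h>

static struct semaphore example_sem = MUTEX;

static int example_locked_op(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;		/* signalled: the semaphore was not acquired */
	/* ... critical section ... */
	up(&example_sem);
	return 0;
}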
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index ff0caafe0..f1d977f22 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -192,8 +192,6 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_reschedule_irq(int cpl, struct pt_regs *regs);
extern unsigned long ipi_count;
extern void smp_invalidate_rcv(void); /* Process an NMI */
-extern volatile unsigned long kernel_counter;
-extern volatile unsigned long syscall_count;
/*
* General functions that each host system must provide.
@@ -204,7 +202,7 @@ extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id); /* Store per cpu info (like the initial udelay numbers */
extern volatile unsigned long smp_proc_in_lock[NR_CPUS]; /* for computing process time */
-extern volatile unsigned long smp_process_available;
+extern volatile int smp_process_available;
/*
* APIC handlers: Note according to the Intel specification update
@@ -229,10 +227,11 @@ extern __inline unsigned long apic_read(unsigned long reg)
* cpu id from the config and set up a fake apic_reg pointer so that before we activate
* the apic we get the right answer). Hopefully other processors are more sensible 8)
*/
-
+
extern __inline int smp_processor_id(void)
{
- return GET_APIC_ID(apic_read(APIC_ID));
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned long *)(apic_reg+APIC_ID));
}
#endif /* !ASSEMBLY */
diff --git a/include/asm-i386/smp_lock.h b/include/asm-i386/smp_lock.h
index a736f0a6c..160e3562d 100644
--- a/include/asm-i386/smp_lock.h
+++ b/include/asm-i386/smp_lock.h
@@ -1,69 +1,94 @@
#ifndef __I386_SMPLOCK_H
#define __I386_SMPLOCK_H
-#ifdef __SMP__
+#define __STR(x) #x
-/*
- * Locking the kernel
- */
-
-extern __inline void lock_kernel(void)
+#ifndef __SMP__
+
+#define lock_kernel() do { } while(0)
+#define unlock_kernel() do { } while(0)
+#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
+#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
+
+#else
+
+#include <asm/hardirq.h>
+
+/* Release global kernel lock and global interrupt lock */
+#define release_kernel_lock(task, cpu, depth) \
+do { \
+ if ((depth = (task)->lock_depth) != 0) { \
+ __cli(); \
+ (task)->lock_depth = 0; \
+ active_kernel_processor = NO_PROC_ID; \
+ clear_bit(0,&kernel_flag); \
+ } \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/* Re-acquire the kernel lock */
+#define reacquire_kernel_lock(task, cpu, depth) \
+do { if (depth) __asm__ __volatile__( \
+ "cli\n\t" \
+ "movl $0f,%%eax\n\t" \
+ "jmp __lock_kernel\n" \
+ "0:\t" \
+ "movl %2,%0\n\t" \
+ "sti" \
+ : "=m" (task->lock_depth) \
+ : "d" (cpu), "c" (depth) \
+ : "ax"); \
+} while (0)
+
+
+/* Locking the kernel */
+extern __inline__ void lock_kernel(void)
{
- unsigned long flags;
- int proc = smp_processor_id();
+ int cpu = smp_processor_id();
- save_flags(flags);
- cli();
- /* set_bit works atomic in SMP machines */
- while(set_bit(0, (void *)&kernel_flag))
- {
- /*
- * We just start another level if we have the lock
- */
- if (proc == active_kernel_processor)
- break;
- do
- {
-#ifdef __SMP_PROF__
- smp_spins[smp_processor_id()]++;
-#endif
- /*
- * Doing test_bit here doesn't lock the bus
- */
- if (test_bit(proc, (void *)&smp_invalidate_needed))
- if (clear_bit(proc, (void *)&smp_invalidate_needed))
- local_flush_tlb();
- }
- while(test_bit(0, (void *)&kernel_flag));
+ if (local_irq_count[cpu]) {
+ __label__ l1;
+l1: printk("lock from interrupt context at %p\n", &&l1);
+ }
+ if (cpu == global_irq_holder) {
+ __label__ l2;
+l2: printk("Ugh at %p\n", &&l2);
+ sti();
}
- /*
- * We got the lock, so tell the world we are here and increment
- * the level counter
- */
- active_kernel_processor = proc;
- kernel_counter++;
- restore_flags(flags);
+
+ __asm__ __volatile__("
+ pushfl
+ cli
+ cmpl $0, %0
+ jne 0f
+ movl $0f, %%eax
+ jmp __lock_kernel
+0:
+ incl %0
+ popfl
+" :
+ : "m" (current_set[cpu]->lock_depth), "d" (cpu)
+ : "ax", "memory");
}
-extern __inline void unlock_kernel(void)
+extern __inline__ void unlock_kernel(void)
{
- unsigned long flags;
- save_flags(flags);
- cli();
- /*
- * If it's the last level we have in the kernel, then
- * free the lock
- */
- if (kernel_counter == 0)
- panic("Kernel counter wrong.\n"); /* FIXME: Why is kernel_counter sometimes 0 here? */
-
- if(! --kernel_counter)
- {
- active_kernel_processor = NO_PROC_ID;
- clear_bit(0, (void *)&kernel_flag);
- }
- restore_flags(flags);
+ __asm__ __volatile__("
+ pushfl
+ cli
+ decl %0
+ jnz 1f
+ movb %1, " __STR(active_kernel_processor) "
+ lock
+ btrl $0, " __STR(kernel_flag) "
+1:
+ popfl
+" : /* no outputs */
+ : "m" (current->lock_depth), "i" (NO_PROC_ID)
+ : "ax", "memory");
}
-#endif
-#endif
+#endif /* __SMP__ */
+
+#endif /* __I386_SMPLOCK_H */
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
index dedf3bfc5..a6d3a5fa7 100644
--- a/include/asm-i386/socket.h
+++ b/include/asm-i386/socket.h
@@ -21,11 +21,17 @@
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
/* To add :#define SO_REUSEPORT 15 */
-#define SO_RCVLOWAT 16
-#define SO_SNDLOWAT 17
-#define SO_RCVTIMEO 18
-#define SO_SNDTIMEO 19
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
/* Socket types. */
#define SOCK_STREAM 1 /* stream (connection) socket */
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
new file mode 100644
index 000000000..07a678435
--- /dev/null
+++ b/include/asm-i386/softirq.h
@@ -0,0 +1,99 @@
+#ifndef __ASM_SOFTIRQ_H
+#define __ASM_SOFTIRQ_H
+
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
+#define get_active_bhs() (bh_mask & bh_active)
+#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
+
+extern inline void init_bh(int nr, void (*routine)(void))
+{
+ bh_base[nr] = routine;
+ bh_mask_count[nr] = 0;
+ bh_mask |= 1 << nr;
+}
+
+extern inline void remove_bh(int nr)
+{
+ bh_base[nr] = NULL;
+ bh_mask &= ~(1 << nr);
+}
+
+extern inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+ bh_mask &= ~(1 << nr);
+ bh_mask_count[nr]++;
+}
+
+extern inline void enable_bh(int nr)
+{
+ if (!--bh_mask_count[nr])
+ bh_mask |= 1 << nr;
+}
+
+#ifdef __SMP__
+
+/*
+ * The locking mechanism for base handlers, to prevent re-entrancy,
+ * is entirely private to an implementation, it should not be
+ * referenced at all outside of this file.
+ */
+extern atomic_t __intel_bh_counter;
+
+extern inline void start_bh_atomic(void)
+{
+ atomic_inc(&__intel_bh_counter);
+ synchronize_irq();
+}
+
+extern inline void end_bh_atomic(void)
+{
+ atomic_dec(&__intel_bh_counter);
+}
+
+/* These are for the irq's testing the lock */
+static inline int softirq_trylock(void)
+{
+ atomic_inc(&__intel_bh_counter);
+ if (atomic_read(&__intel_bh_counter) != 1) {
+ atomic_dec(&__intel_bh_counter);
+ return 0;
+ }
+ return 1;
+}
+
+#define softirq_endlock() atomic_dec(&__intel_bh_counter)
+
+#else
+
+extern int __intel_bh_counter;
+
+extern inline void start_bh_atomic(void)
+{
+ __intel_bh_counter++;
+ barrier();
+}
+
+extern inline void end_bh_atomic(void)
+{
+ barrier();
+ __intel_bh_counter--;
+}
+
+/* These are for the irq's testing the lock */
+#define softirq_trylock() (__intel_bh_counter ? 0 : (__intel_bh_counter=1))
+#define softirq_endlock() (__intel_bh_counter = 0)
+
+#endif /* SMP */
+
+#endif /* __ASM_SOFTIRQ_H */
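The bottom-half interface above works slot by slot: init_bh() registers a handler, mark_bh() flags it from interrupt context, and disable_bh()/enable_bh() nest through the mask count. A sketch of the usual pattern (the slot number and handler names are hypothetical):

#include <linux/interrupt.h>

#define EXAMPLE_BH 31			/* hypothetical free bottom-half slot */

static void example_bottom_half(void)
{
	/* deferred work, runs with interrupts enabled after the hard irq */
}

static void example_init(void)
{
	init_bh(EXAMPLE_BH, example_bottom_half);
}

static void example_irq_handler(void)
{
	mark_bh(EXAMPLE_BH);		/* ask for example_bottom_half() to run */
}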
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
new file mode 100644
index 000000000..27630b21d
--- /dev/null
+++ b/include/asm-i386/spinlock.h
@@ -0,0 +1,209 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifndef __SMP__
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ */
+typedef struct { } spinlock_t;
+#define SPIN_LOCK_UNLOCKED { }
+
+#define spin_lock_init(lock) do { } while(0)
+#define spin_lock(lock) do { } while(0)
+#define spin_trylock(lock) do { } while(0)
+#define spin_unlock_wait(lock) do { } while(0)
+#define spin_unlock(lock) do { } while(0)
+#define spin_lock_irq(lock) cli()
+#define spin_unlock_irq(lock) sti()
+
+#define spin_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define spin_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct { } rwlock_t;
+#define RW_LOCK_UNLOCKED { }
+
+#define read_lock(lock) do { } while(0)
+#define read_unlock(lock) do { } while(0)
+#define write_lock(lock) do { } while(0)
+#define write_unlock(lock) do { } while(0)
+#define read_lock_irq(lock) cli()
+#define read_unlock_irq(lock) sti()
+#define write_lock_irq(lock) cli()
+#define write_unlock_irq(lock) sti()
+
+#define read_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+#define write_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#else
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+typedef struct {
+ volatile unsigned int lock;
+ unsigned long previous;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED { 0, 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; (x)->previous = 0; } while(0)
+#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ "jmp 2f\n" \
+ "1:\t" \
+ "testb $1,%0\n\t" \
+ "jne 1b\n" \
+ "2:\t" \
+ "lock ; btsl $0,%0\n\t" \
+ "jc 1b" \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_unlock(lock) \
+__asm__ __volatile__( \
+ "lock ; btrl $0,%0" \
+ :"=m" (__dummy_lock(lock)))
+
+#undef spin_lock
+static inline void spin_lock(spinlock_t * lock)
+{
+ __label__ l1;
+ int stuck = 10000000;
+l1:
+ __asm__ __volatile__(
+ "jmp 2f\n"
+ "1:\t"
+ "decl %1\n\t"
+ "je 3f\n\t"
+ "testb $1,%0\n\t"
+ "jne 1b\n"
+ "2:\t"
+ "lock ; btsl $0,%0\n\t"
+ "jc 1b\n"
+ "3:"
+ :"=m" (__dummy_lock(lock)),
+ "=r" (stuck)
+ :"1" (stuck));
+ if (!stuck) {
+ printk("spinlock stuck at %p (%lx)\n",&&l1,lock->previous);
+ } else
+ lock->previous = (unsigned long) &&l1;
+}
+
+#define spin_trylock(lock) (!set_bit(0,(lock)))
+
+#define spin_lock_irq(lock) \
+ do { __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irq(lock) \
+ do { spin_unlock(lock); __sti(); } while (0)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); __restore_flags(flags); } while (0)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned int lock;
+ unsigned long previous;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED { 0, 0 }
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "write" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ */
+#define read_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; incl %0\n\t" \
+ "js 2f\n" \
+ ".text 2\n" \
+ "2:\tlock ; decl %0\n" \
+ "3:\tcmpl $0,%0\n\t" \
+ "js 3b\n\t" \
+ "jmp 1b\n" \
+ ".text" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_unlock(rw) \
+ asm volatile("lock ; decl %0" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; btsl $31,%0\n\t" \
+ "jc 3f\n\t" \
+ "testl $0x7fffffff,%0\n\t" \
+ "jne 4f\n" \
+ "2:\n" \
+ ".text 2\n" \
+ "3:\ttestl $-1,%0\n\t" \
+ "js 3b\n\t" \
+ "lock ; btsl $31,%0\n\t" \
+ "jc 3b\n" \
+ "4:\ttestl $0x7fffffff,%0\n\t" \
+ "jne 4b\n\t" \
+ "jmp 2b\n" \
+ ".text" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_unlock(rw) \
+ asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
+
+#endif /* SMP */
+#endif /* __ASM_SPINLOCK_H */
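On uniprocessor builds all of these collapse to nothing (or to plain cli()/sti()), so the irqsave variants are cheap to use unconditionally; on SMP they become the btsl spin loop above. A sketch of the intended usage (the lock and counter are hypothetical):

#include <asm/spinlock.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static int example_count;

static void example_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* also masks local interrupts */
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);
}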
diff --git a/include/asm-i386/string-486.h b/include/asm-i386/string-486.h
index 50657fb35..34f5c16b1 100644
--- a/include/asm-i386/string-486.h
+++ b/include/asm-i386/string-486.h
@@ -339,7 +339,9 @@ __asm__ __volatile__(
"cmpl $-1,%2\n\t"
"jne 1b\n"
"3:\tsubl %1,%0"
- :"=a" (__res):"c" (s),"d" (count));
+ :"=a" (__res)
+ :"c" (s),"d" (count)
+ :"dx");
return __res;
}
/* end of additional stuff */
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index a7611c735..941916118 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -402,6 +402,10 @@ extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
case 4:
*(unsigned long *)to = *(const unsigned long *)from;
return to;
+ case 6: /* for ethernet addresses */
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+ return to;
case 8:
*(unsigned long *)to = *(const unsigned long *)from;
*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
@@ -546,7 +550,9 @@ __asm__ __volatile__(
"cmpl $-1,%2\n\t"
"jne 1b\n"
"3:\tsubl %1,%0"
- :"=a" (__res):"c" (s),"d" (count));
+ :"=a" (__res)
+ :"c" (s),"d" (count)
+ :"dx");
return __res;
}
/* end of additional stuff */
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index cef63f6fe..a3daf450d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -67,25 +67,14 @@ __asm__("str %%ax\n\t" \
*/
#define switch_to(prev,next) do { \
- cli();\
if(prev->flags&PF_USEDFPU) \
{ \
__asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
__asm__ __volatile__("fwait"); \
prev->flags&=~PF_USEDFPU; \
} \
- prev->lock_depth=syscall_count; \
- kernel_counter+=next->lock_depth-prev->lock_depth; \
- syscall_count=next->lock_depth; \
-__asm__("pushl %%edx\n\t" \
- "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
- "movl 0x20(%%edx), %%edx\n\t" \
- "shrl $22,%%edx\n\t" \
- "and $0x3C,%%edx\n\t" \
- "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
- "popl %%edx\n\t" \
- "ljmp %0\n\t" \
- "sti\n\t" \
+ current_set[this_cpu] = next; \
+__asm__("ljmp %0\n\t" \
: /* no output */ \
:"m" (*(((char *)&next->tss.tr)-4)), \
"c" (next)); \
@@ -197,6 +186,9 @@ __asm__ __volatile__ ( \
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
switch (size) {
@@ -223,16 +215,35 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
}
#define mb() __asm__ __volatile__ ("" : : :"memory")
-#define sti() __asm__ __volatile__ ("sti": : :"memory")
-#define cli() __asm__ __volatile__ ("cli": : :"memory")
-#define save_flags(x) \
+/* interrupt control.. */
+#define __sti() __asm__ __volatile__ ("sti": : :"memory")
+#define __cli() __asm__ __volatile__ ("cli": : :"memory")
+#define __save_flags(x) \
__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
-
-#define restore_flags(x) \
+#define __restore_flags(x) \
__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
-#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#ifdef __SMP__
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+
+#endif
#define _set_gate(gate_addr,type,dpl,addr) \
__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h
index c40e6f052..b89188b8c 100644
--- a/include/asm-i386/termbits.h
+++ b/include/asm-i386/termbits.h
@@ -122,6 +122,7 @@ struct termios {
#define B230400 0010003
#define B460800 0010004
#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
/* c_lflag bits */
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
index 76551dea9..c1176d6f8 100644
--- a/include/asm-i386/termios.h
+++ b/include/asm-i386/termios.h
@@ -21,16 +21,6 @@ struct termio {
unsigned char c_cc[NCC]; /* control characters */
};
-#ifdef __KERNEL__
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-#endif
-
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
@@ -43,6 +33,8 @@ struct termio {
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
@@ -53,39 +45,51 @@ struct termio {
#define N_PPP 3
#define N_STRIP 4
#define N_AX25 5
+#define N_X25 6 /* X.25 async */
#ifdef __KERNEL__
-#include <linux/string.h>
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
-extern inline void trans_from_termio(struct termio * termio,
- struct termios * termios)
-{
-#define SET_LOW_BITS(x,y) (*(unsigned short *)(&x) = (y))
- SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
- SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
- SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
- SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
-#undef SET_LOW_BITS
- memcpy(termios->c_cc, termio->c_cc, NCC);
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
}
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
-extern inline void trans_to_termio(struct termios * termios,
- struct termio * termio)
-{
- termio->c_iflag = termios->c_iflag;
- termio->c_oflag = termios->c_oflag;
- termio->c_cflag = termios->c_cflag;
- termio->c_lflag = termios->c_lflag;
- termio->c_line = termios->c_line;
- memcpy(termio->c_cc, termios->c_cc, NCC);
-}
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
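
A hedged sketch of how a tty-layer ioctl path would use the translation macros above when servicing the legacy TCGETA/TCSETA requests. The function names and error handling are illustrative, not part of this patch; only the two macros come from this header.

#include <linux/errno.h>
#include <linux/termios.h>
#include <asm/uaccess.h>

/* Illustrative only: "kern" is the kernel's current termios state and
 * "u_termio" is a user-space pointer.  The statement-expression macros
 * above evaluate to the result of their final user copy, so a non-zero
 * value indicates a faulting user pointer. */
static int example_tcgeta(struct termios *kern, struct termio *u_termio)
{
	if (kernel_termios_to_user_termio(u_termio, kern))
		return -EFAULT;
	return 0;
}

static int example_tcseta(struct termios *kern, struct termio *u_termio)
{
	user_termio_to_kernel_termios(kern, u_termio);	/* merges the low 16 bits + c_cc */
	return 0;
}
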
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 2c81e938e..d8b03e17c 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -108,6 +108,25 @@ extern unsigned long search_exception_table(unsigned long);
#define __put_user(x,ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+/*
+ * The "xxx_ret" versions return constant specified in third argument, if
+ * something bad happens. These macros can be optimized for the
+ * case of just returning from the function xxx_ret is used.
+ */
+
+#define put_user_ret(x,ptr,ret) ({ \
+if (put_user(x,ptr)) return ret; })
+
+#define get_user_ret(x,ptr,ret) ({ \
+if (get_user(x,ptr)) return ret; })
+
+#define __put_user_ret(x,ptr,ret) ({ \
+if (__put_user(x,ptr)) return ret; })
+
+#define __get_user_ret(x,ptr,ret) ({ \
+if (__get_user(x,ptr)) return ret; })
+
+
extern long __put_user_bad(void);
@@ -153,10 +172,11 @@ struct __large_struct { unsigned long buf[100]; };
".section .fixup,\"ax\"\n" \
"3: movl %3,%0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,3b\n" \
- ".text" \
+ ".previous" \
: "=r"(err) \
: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
@@ -200,10 +220,11 @@ do { \
"3: movl %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,3b\n" \
- ".text" \
+ ".previous" \
: "=r"(err), ltype (x) \
: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
@@ -222,11 +243,12 @@ do { \
".section .fixup,\"ax\"\n" \
"3: lea 0(%1,%0,4),%0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
: "di", "si", "memory")
@@ -242,10 +264,11 @@ do { \
".section .fixup,\"ax\"\n" \
"2: shl $2,%0\n" \
" jmp 1b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "S"(from), "D"(to), "0"(size/4) \
: "di", "si", "memory"); \
@@ -259,11 +282,12 @@ do { \
"3: shl $2,%0\n" \
"4: incl %0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "S"(from), "D"(to), "0"(size/4) \
: "di", "si", "memory"); \
@@ -277,11 +301,12 @@ do { \
"3: shl $2,%0\n" \
"4: addl $2,%0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "S"(from), "D"(to), "0"(size/4) \
: "di", "si", "memory"); \
@@ -297,12 +322,13 @@ do { \
"5: addl $2,%0\n" \
"6: incl %0\n" \
" jmp 3b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,4b\n" \
" .long 1b,5b\n" \
" .long 2b,6b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "S"(from), "D"(to), "0"(size/4) \
: "di", "si", "memory"); \
@@ -342,6 +368,33 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
return n;
}
+static inline unsigned long
+__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __constant_copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __constant_copy_user(to,from,n);
+ return n;
+}
#define copy_to_user(to,from,n) \
(__builtin_constant_p(n) ? \
@@ -353,12 +406,32 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
__constant_copy_from_user((to),(from),(n)) : \
__generic_copy_from_user((to),(from),(n)))
+#define copy_to_user_ret(to,from,n,retval) ({ \
+if (copy_to_user(to,from,n)) \
+ return retval; \
+})
+
+#define copy_from_user_ret(to,from,n,retval) ({ \
+if (copy_from_user(to,from,n)) \
+ return retval; \
+})
+
+#define __copy_to_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user_nocheck((to),(from),(n)) : \
+ __generic_copy_to_user_nocheck((to),(from),(n)))
+
+#define __copy_from_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_from_user_nocheck((to),(from),(n)) : \
+ __generic_copy_from_user_nocheck((to),(from),(n)))
+
/*
* Zero Userspace
*/
-#define __clear_user(addr,size) \
+#define __do_clear_user(addr,size) \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %1,%0\n" \
@@ -367,11 +440,12 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
".section .fixup,\"ax\"\n" \
"3: lea 0(%1,%0,4),%0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
- ".text" \
+ ".previous" \
: "=c"(size) \
: "r"(size & 3), "0"(size / 4), "D"(addr), "a"(0) \
: "di")
@@ -380,7 +454,14 @@ static inline unsigned long
clear_user(void *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- __clear_user(to, n);
+ __do_clear_user(to, n);
+ return n;
+}
+
+static inline unsigned long
+__clear_user(void *to, unsigned long n)
+{
+ __do_clear_user(to, n);
return n;
}
@@ -389,7 +470,7 @@ clear_user(void *to, unsigned long n)
* Copy a null terminated string from userspace.
*/
-#define __strncpy_from_user(dst,src,count,res) \
+#define __do_strncpy_from_user(dst,src,count,res) \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -404,20 +485,29 @@ clear_user(void *to, unsigned long n)
".section .fixup,\"ax\"\n" \
"3: movl %2,%0\n" \
" jmp 2b\n" \
+ ".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
- ".text" \
+ ".previous" \
: "=d"(res), "=c"(count) \
: "i"(-EFAULT), "0"(count), "1"(count), "S"(src), "D"(dst) \
: "si", "di", "ax", "memory")
static inline long
+__strncpy_from_user(char *dst, const char *src, long count)
+{
+ long res;
+ __do_strncpy_from_user(dst, src, count, res);
+ return res;
+}
+
+static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
- __strncpy_from_user(dst, src, count, res);
+ __do_strncpy_from_user(dst, src, count, res);
return res;
}
@@ -438,10 +528,11 @@ extern inline long strlen_user(const char *s)
".section .fixup,\"ax\"\n"
"2: xorl %0,%0\n"
" jmp 1b\n"
+ ".previous\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,2b\n"
- ".text"
+ ".previous"
:"=c" (res), "=D" (s)
:"1" (s), "a" (0), "0" (-__addr_ok(s)));
return res & -__addr_ok(s);
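
A hedged sketch tying the uaccess additions together: copy_from_user_ret() makes the calling function return -EFAULT on a bad pointer, after which the unchecked __put_user_ret() variant is sufficient because the same range has already passed access_ok(). Everything below except the uaccess helpers is illustrative.

#include <linux/errno.h>
#include <asm/uaccess.h>

struct example_req {			/* hypothetical request layout */
	int cmd;
	int status;
};

static int example_handler(struct example_req *uptr)
{
	struct example_req req;

	/* checked copy: returns -EFAULT from example_handler() on a fault */
	copy_from_user_ret(&req, uptr, sizeof(req), -EFAULT);

	/* ... act on req.cmd ... */

	/* the range was verified by the copy above, so the unchecked
	 * (no access_ok) form is enough for the single-word reply */
	__put_user_ret(0, &uptr->status, -EFAULT);
	return 0;
}
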
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 403f481a3..a560044cd 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -118,7 +118,7 @@
#define __NR_iopl 110
#define __NR_vhangup 111
#define __NR_idle 112
-#define __NR_vm86 113
+#define __NR_vm86old 113
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
@@ -171,6 +171,10 @@
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
+#define __NR_vm86 166
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
/* user-visible error numbers are in the range -1 - -122: see <asm-i386/errno.h> */
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index ac45e0909..88d0bf510 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -43,12 +43,26 @@
#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
/*
- * This is the stack-layout when we have done a "SAVE_ALL" from vm86
- * mode - the main change is that the old segment descriptors aren't
- * useful any more and are forced to be zero by the kernel (and the
- * hardware when a trap occurs), and the real segment descriptors are
- * at the end of the structure. Look at ptrace.h to see the "normal"
- * setup.
+ * Additional return values when invoking new vm86()
+ */
+#define VM86_PICRETURN 4 /* return due to pending PIC request */
+#define VM86_TRAP 6 /* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define VM86_PLUS_INSTALL_CHECK 0
+#define VM86_ENTER 1
+#define VM86_ENTER_NO_BYPASS 2
+#define VM86_REQUEST_IRQ 3
+#define VM86_FREE_IRQ 4
+#define VM86_GET_IRQ_BITS 5
+#define VM86_GET_AND_RESET_IRQ 6
+
+/*
+ * This is the stack-layout seen by the user space program when we have
+ * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
+ * is 'kernel_vm86_regs' (see below).
*/
struct vm86_regs {
@@ -64,6 +78,8 @@ struct vm86_regs {
long eax;
long __null_ds;
long __null_es;
+ long __null_fs;
+ long __null_gs;
long orig_eax;
long eip;
unsigned short cs, __csh;
@@ -97,11 +113,96 @@ struct vm86_struct {
*/
#define VM86_SCREEN_BITMAP 0x0001
+struct vm86plus_info_struct {
+ unsigned long force_return_for_pic:1;
+ unsigned long vm86dbg_active:1; /* for debugger */
+ unsigned long vm86dbg_TFpendig:1; /* for debugger */
+ unsigned long unused:28;
+ unsigned long is_vm86pus:1; /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+
+struct vm86plus_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+};
+
#ifdef __KERNEL__
+/*
+ * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup. For user space layout see 'struct vm86_regs' above.
+ */
+
+struct kernel_vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct kernel_vm86_struct {
+ struct kernel_vm86_regs regs;
+/*
+ * the below part remains on the kernel stack while we are in VM86 mode.
+ * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
+ * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
+ * 'struct kernel_vm86_regs' with the then actual values.
+ * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
+ * in kernelspace, hence we need not reget the data from userspace.
+ */
+#define VM86_TSS_ESP0 flags
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+ struct pt_regs *regs32; /* here we save the pointer to the old regs */
+/*
+ * The below is not part of the structure, but the stack layout continues
+ * this way. In front of 'return-eip' may be some data, depending on
+ * compilation, so we don't rely on this and save the pointer to 'oldregs'
+ * in 'regs32' above.
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
-void handle_vm86_fault(struct vm86_regs *, long);
-void handle_vm86_debug(struct vm86_regs *, long);
+ long return-eip; from call to vm86()
+ struct pt_regs oldregs; user space registers as saved by syscall
+ */
+};
-#endif
+void handle_vm86_fault(struct kernel_vm86_regs *, long);
+int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
+
+#endif /* __KERNEL__ */
#endif
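
Finally, a hedged user-space sketch of the new two-argument vm86() interface enumerated above (__NR_vm86 = 166): probe with VM86_PLUS_INSTALL_CHECK, then enter vm86 mode with a vm86plus_struct. The raw syscall() wrapper and the register values are purely illustrative; real users such as dosemu drive this through their own wrappers.

#include <string.h>
#include <unistd.h>
#include <asm/vm86.h>

/* Hedged sketch: 166 is __NR_vm86 as added in unistd.h above; the glibc
 * syscall() helper is assumed to be available. */
static long example_vm86(unsigned long fn, struct vm86plus_struct *info)
{
	return syscall(166, fn, info);
}

int example_enter_v86(void)
{
	struct vm86plus_struct v86;

	/* probe whether the extended (vm86plus) interface is present */
	if (example_vm86(VM86_PLUS_INSTALL_CHECK, NULL) != 0)
		return -1;

	memset(&v86, 0, sizeof(v86));
	v86.regs.cs  = 0x0000;		/* illustrative real-mode CS:IP */
	v86.regs.eip = 0x7c00;

	/* returns with one of the VM86_* status codes defined above */
	return (int) example_vm86(VM86_ENTER, &v86);
}
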