Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/bugs.h        75
-rw-r--r--  include/asm-i386/elf.h         20
-rw-r--r--  include/asm-i386/floppy.h     194
-rw-r--r--  include/asm-i386/hardirq.h     23
-rw-r--r--  include/asm-i386/ioctls.h       2
-rw-r--r--  include/asm-i386/irq.h          9
-rw-r--r--  include/asm-i386/pgtable.h     11
-rw-r--r--  include/asm-i386/processor.h   60
-rw-r--r--  include/asm-i386/semaphore.h   13
-rw-r--r--  include/asm-i386/signal.h       9
-rw-r--r--  include/asm-i386/smp.h         35
-rw-r--r--  include/asm-i386/socket.h       4
-rw-r--r--  include/asm-i386/softirq.h     81
-rw-r--r--  include/asm-i386/spinlock.h     5
-rw-r--r--  include/asm-i386/system.h      17
-rw-r--r--  include/asm-i386/types.h        2
-rw-r--r--  include/asm-i386/uaccess.h    335
-rw-r--r--  include/asm-i386/unistd.h       3
18 files changed, 440 insertions, 458 deletions
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index b7f2e1507..0a5c60bf5 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -18,12 +18,12 @@
__initfunc(static void no_halt(char *s, int *ints))
{
- hlt_works_ok = 0;
+ boot_cpu_data.hlt_works_ok = 0;
}
__initfunc(static void no_387(char *s, int *ints))
{
- hard_math = 0;
+ boot_cpu_data.hard_math = 0;
__asm__("movl %%cr0,%%eax\n\t"
"orl $0xE,%%eax\n\t"
"movl %%eax,%%cr0\n\t" : : : "ax");
@@ -49,7 +49,7 @@ __initfunc(static void check_fpu(void))
{
unsigned short control_word;
- if (!hard_math) {
+ if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
printk(KERN_EMERG "Giving up.\n");
@@ -91,9 +91,9 @@ __initfunc(static void check_fpu(void))
"fistpl %0\n\t"
"fwait\n\t"
"fninit"
- : "=m" (*&fdiv_bug)
+ : "=m" (*&boot_cpu_data.fdiv_bug)
: "m" (*&x), "m" (*&y));
- if (!fdiv_bug)
+ if (!boot_cpu_data.fdiv_bug)
printk("Ok, fpu using exception 16 error reporting.\n");
else
printk("Hmm, fpu using exception 16 error reporting with FDIV bug.\n");
@@ -102,7 +102,7 @@ __initfunc(static void check_fpu(void))
__initfunc(static void check_hlt(void))
{
printk(KERN_INFO "Checking 'hlt' instruction... ");
- if (!hlt_works_ok) {
+ if (!boot_cpu_data.hlt_works_ok) {
printk("disabled\n");
return;
}
@@ -117,7 +117,7 @@ __initfunc(static void check_tlb(void))
* The 386 chips don't support TLB finegrained invalidation.
* They will fault when they hit an invlpg instruction.
*/
- if (x86 == 3) {
+ if (boot_cpu_data.x86 == 3) {
printk(KERN_EMERG "CPU is a 386 and this kernel was compiled for 486 or better.\n");
printk("Giving up.\n");
for (;;) ;
@@ -152,17 +152,53 @@ __initfunc(static void check_popad(void))
* misexecution of code under Linux. Owners of such processors should
* contact AMD for precise details and a CPU swap.
*
- * See http://www.creaweb.fr/bpc/k6bug_faq.html
+ * See http://www.chorus.com/~poulot/k6bug.html
* http://www.amd.com/K6/k6docs/revgd.html
+ *
+ * The following test is erm.. interesting. AMD neglected to bump
+ * the chip stepping when fixing the bug, but they also tweaked some
+ * performance at the same time..
*/
+extern void vide(void);
+__asm__(".align 4\nvide: ret");
+
__initfunc(static void check_amd_k6(void))
{
- /* B Step AMD K6 */
- if(x86_model==6 && x86_mask==1 && memcmp(x86_vendor_id, "AuthenticAMD", 12)==0)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86_model == 6 &&
+ boot_cpu_data.x86_mask == 1)
{
- printk(KERN_INFO "AMD K6 stepping B detected - system stability may be impaired. Please see.\n");
- printk(KERN_INFO "http://www.creaweb.fr/bpc/k6bug_faq.html");
+ int n;
+ void (*f_vide)(void);
+ unsigned long d, d2;
+
+ printk(KERN_INFO "AMD K6 stepping B detected - ");
+
+#define K6_BUG_LOOP 1000000
+
+ /*
+ * It looks like AMD fixed the 2.6.2 bug and improved indirect
+ * calls at the same time.
+ */
+
+ n = K6_BUG_LOOP;
+ f_vide = vide;
+ __asm__ ("rdtsc" : "=a" (d));
+ while (n--)
+ f_vide();
+ __asm__ ("rdtsc" : "=a" (d2));
+ d = d2-d;
+
+ /* Knock these two lines out if it debugs out ok */
+ printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
+ printk(KERN_INFO "AMD K6 stepping B detected - ");
+ /* -- cut here -- */
+ if (d > 20*K6_BUG_LOOP)
+ printk(KERN_INFO "system stability may be impaired when more than 32 MB are used.\n");
+ else
+ printk(KERN_INFO "probably OK (after B9730xxxx).\n");
+ printk(KERN_INFO "Please see http://www.chorus.com/bpc/k6bug.html\n");
}
}
@@ -171,30 +207,33 @@ __initfunc(static void check_amd_k6(void))
* have the F0 0F bug, which lets nonprivileged users lock up the system:
*/
-extern int pentium_f00f_bug;
extern void trap_init_f00f_bug(void);
-
__initfunc(static void check_pentium_f00f(void))
{
/*
* Pentium and Pentium MMX
*/
- pentium_f00f_bug = 0;
- if (x86==5 && !memcmp(x86_vendor_id, "GenuineIntel", 12)) {
+ boot_cpu_data.f00f_bug = 0;
+ if (boot_cpu_data.x86 == 5 && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
printk(KERN_INFO "Intel Pentium with F0 0F bug - workaround enabled.\n");
- pentium_f00f_bug = 1;
+ boot_cpu_data.f00f_bug = 1;
trap_init_f00f_bug();
}
}
__initfunc(static void check_bugs(void))
{
+ identify_cpu(&boot_cpu_data);
+#ifndef __SMP__
+ printk("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+#endif
check_tlb();
check_fpu();
check_hlt();
check_popad();
check_amd_k6();
check_pentium_f00f();
- system_utsname.machine[1] = '0' + x86;
+ system_utsname.machine[1] = '0' + boot_cpu_data.x86;
}
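
The timing probe above works because the fixed K6 steppings also execute
indirect calls faster, so a cycle count over a large loop separates the
two. A minimal standalone sketch of the same RDTSC measurement pattern
(function name and the ~20-cycles-per-call threshold are taken from the
patch; this is an illustration, not the kernel's code):

    #define K6_BUG_LOOP 1000000

    static void vide(void) { }          /* empty target for the calls */

    static int k6_looks_buggy(void)
    {
            void (*f)(void) = vide;
            unsigned long start, end;
            int n = K6_BUG_LOOP;

            __asm__ __volatile__("rdtsc" : "=a" (start) : : "dx");
            while (n--)
                    f();                /* one indirect call per loop */
            __asm__ __volatile__("rdtsc" : "=a" (end) : : "dx");

            /* fixed parts stay well under ~20 cycles per call */
            return (end - start) > 20 * K6_BUG_LOOP;
    }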
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 1eae23267..700bb8233 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -70,4 +70,24 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[15] = regs->esp; \
pr_reg[16] = regs->xss;
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (boot_cpu_data.x86_capability)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, we have only optimizations for the Intel generations,
+ but that could change... */
+
+#define ELF_PLATFORM ("i386\0i486\0i586\0i686"+((boot_cpu_data.x86-3)*5))
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) \
+ current->personality = (ibcs2 ? PER_SVR4 : PER_LINUX)
+#endif
+
#endif
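
The ELF_PLATFORM define relies on the string literal being four
five-byte, NUL-terminated names laid end to end, so the CPU family
number indexes straight into it. A worked example:

    /* "i386\0i486\0i586\0i686" occupies 20 bytes; entry i starts at i*5 */
    const char *names = "i386\0i486\0i586\0i686";
    const char *platform = names + (5 /* x86 family */ - 3) * 5;
    /* platform points at "i586" for a Pentium (boot_cpu_data.x86 == 5) */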
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
index eb7272ef6..2b548bb1c 100644
--- a/include/asm-i386/floppy.h
+++ b/include/asm-i386/floppy.h
@@ -3,33 +3,36 @@
#include <linux/vmalloc.h>
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+
+
#define SW fd_routine[use_virtual_dma&1]
+#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) inb_p(port)
#define fd_outb(port,value) outb_p(value,port)
-
-#define fd_enable_dma(channel) SW._enable_dma(channel)
-#define fd_disable_dma(channel) SW._disable_dma(channel)
-#define fd_request_dma(channel) SW._request_dma(channel, "floppy")
-#define fd_free_dma(channel) SW._free_dma(channel)
-#define fd_clear_dma_ff(channel) SW._clear_dma_ff(channel)
-#define fd_set_dma_mode(channel,mode) SW._set_dma_mode(channel, mode)
-#define fd_set_dma_addr(channel,addr) SW._set_dma_addr(channel, addr)
-#define fd_set_dma_count(channel,count) SW._set_dma_count(channel ,count)
+#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq(irq) enable_irq(irq)
#define fd_disable_irq(irq) disable_irq(irq)
-#define fd_cacheflush(addr,size) /* nothing */
-#define fd_request_irq(irq) SW._request_irq(irq, \
- floppy_interrupt, \
- SA_INTERRUPT \
- | SA_SAMPLE_RANDOM, \
- "floppy", NULL)
#define fd_free_irq(irq) free_irq(irq, NULL)
#define fd_get_dma_residue(channel) SW._get_dma_residue(channel)
-
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
-#define fd_dma_mem_free(addr,size) SW._dma_mem_free(addr,size)
+#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
+
+#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
@@ -42,7 +45,7 @@ static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
register unsigned char st;
#undef TRACE_FLPY_INT
-#undef NO_FLOPPY_ASSEMBLER
+#define NO_FLOPPY_ASSEMBLER
#ifdef TRACE_FLPY_INT
static int calls=0;
@@ -105,10 +108,10 @@ static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
outb_p(*lptr, virtual_dma_port+5);
else
*lptr = inb_p(virtual_dma_port+5);
- st = inb(virtual_dma_port+4);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port+4);
}
#endif
@@ -137,16 +140,13 @@ static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
#endif
}
-static void vdma_enable_dma(unsigned int dummy)
-{
- doing_pdma = 1;
-}
-
-static void vdma_disable_dma(unsigned int dummy)
+static void fd_disable_dma(void)
{
+ if(! (can_use_virtual_dma & 1))
+ disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
+ virtual_dma_count=0;
}
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
@@ -158,26 +158,6 @@ static void vdma_nop(unsigned int dummy)
{
}
-static void vdma_set_dma_mode(unsigned int dummy,char mode)
-{
- virtual_dma_mode = (mode == DMA_MODE_WRITE);
-}
-
-static void hset_dma_addr(unsigned int no, char *addr)
-{
- set_dma_addr(no, virt_to_bus(addr));
-}
-
-static void vdma_set_dma_addr(unsigned int dummy, char *addr)
-{
- virtual_dma_addr = addr;
-}
-
-static void vdma_set_dma_count(unsigned int dummy,unsigned int count)
-{
- virtual_dma_count = count;
- virtual_dma_residue = 0;
-}
static int vdma_get_dma_residue(unsigned int dummy)
{
@@ -185,13 +165,15 @@ static int vdma_get_dma_residue(unsigned int dummy)
}
-static int vdma_request_irq(unsigned int irq,
- void (*handler)(int, void *, struct pt_regs *),
- unsigned long flags,
- const char *device,
- void *dev_id)
+static int fd_request_irq(void)
{
- return request_irq(irq, floppy_hardint,SA_INTERRUPT,device, dev_id);
+ if(can_use_virtual_dma)
+ return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
+ "floppy", NULL);
+ else
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ SA_INTERRUPT|SA_SAMPLE_RANDOM,
+ "floppy", NULL);
}
@@ -214,73 +196,94 @@ static unsigned long dma_mem_alloc(unsigned long size)
return __get_dma_pages(GFP_KERNEL,__get_order(size));
}
-static void dma_mem_free(unsigned long addr, unsigned long size)
-{
- free_pages(addr, __get_order(size));
-}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long) vmalloc(size);
+
}
-static void vdma_mem_free(unsigned long addr, unsigned long size)
+#define nodma_mem_alloc(size) vdma_mem_alloc(size)
+
+static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+ if((unsigned int) addr >= (unsigned int) high_memory)
+ return vfree((void *)addr);
+ else
+ free_pages(addr, __get_order(size));
+}
+
+#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
+
+static void _fd_chose_dma_mode(char *addr, unsigned long size)
+{
+ if(can_use_virtual_dma == 2) {
+ if((unsigned int) addr >= (unsigned int) high_memory ||
+ virt_to_bus(addr) >= 0x1000000 ||
+ _CROSS_64KB(addr, size, 0))
+ use_virtual_dma = 1;
+ else
+ use_virtual_dma = 0;
+ } else {
+ use_virtual_dma = can_use_virtual_dma & 1;
+ }
+}
+
+#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
+
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
- return vfree((void *)addr);
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ if (CROSS_64KB(addr, size)) {
+ printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
+ return -1;
+ }
+#endif
+ /* actual, physical DMA */
+ doing_pdma = 0;
+ clear_dma_ff(FLOPPY_DMA);
+ set_dma_mode(FLOPPY_DMA,mode);
+ set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
+ set_dma_count(FLOPPY_DMA,size);
+ enable_dma(FLOPPY_DMA);
+ return 0;
}
struct fd_routine_l {
- void (*_enable_dma)(unsigned int dummy);
- void (*_disable_dma)(unsigned int dummy);
int (*_request_dma)(unsigned int dmanr, const char * device_id);
void (*_free_dma)(unsigned int dmanr);
- void (*_clear_dma_ff)(unsigned int dummy);
- void (*_set_dma_mode)(unsigned int dummy, char mode);
- void (*_set_dma_addr)(unsigned int dummy, char *addr);
- void (*_set_dma_count)(unsigned int dummy, unsigned int count);
int (*_get_dma_residue)(unsigned int dummy);
- int (*_request_irq)(unsigned int irq,
- void (*handler)(int, void *, struct pt_regs *),
- unsigned long flags,
- const char *device,
- void *dev_id);
unsigned long (*_dma_mem_alloc) (unsigned long size);
- void (*_dma_mem_free)(unsigned long addr, unsigned long size);
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
- enable_dma,
- disable_dma,
request_dma,
free_dma,
- clear_dma_ff,
- set_dma_mode,
- hset_dma_addr,
- set_dma_count,
get_dma_residue,
- request_irq,
dma_mem_alloc,
- dma_mem_free
+ hard_dma_setup
},
{
- vdma_enable_dma,
- vdma_disable_dma,
vdma_request_dma,
vdma_nop,
- vdma_nop,
- vdma_set_dma_mode,
- vdma_set_dma_addr,
- vdma_set_dma_count,
vdma_get_dma_residue,
- vdma_request_irq,
vdma_mem_alloc,
- vdma_mem_free
+ vdma_dma_setup
}
};
-__inline__ void virtual_dma_init(void)
-{
- /* Nothing to do on an i386 */
-}
static int FDC1 = 0x3f0;
static int FDC2 = -1;
@@ -293,14 +296,7 @@ static int FDC2 = -1;
#define FLOPPY_MOTOR_MASK 0xf0
-/*
- * The DMA channel used by the floppy controller cannot access data at
- * addresses >= 16MB
- *
- * Went back to the 1MB limit, as some people had problems with the floppy
- * driver otherwise. It doesn't matter much for performance anyway, as most
- * floppy accesses go through the track buffer.
- */
-#define CROSS_64KB(a,s) (((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64) && ! (use_virtual_dma & 1))
+#define AUTO_DMA
+
#endif /* __ASM_I386_FLOPPY_H */
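
The CROSS_64KB test matters because the ISA DMA controller has a 16-bit
address counter that wraps within a 64 KB page. The same check in plain
C (the function name is illustrative):

    #define K_64 0x10000

    static int crosses_64kb(unsigned long addr, unsigned long size)
    {
            /* true when first and last byte sit in different 64K pages */
            return addr / K_64 != (addr + size - 1) / K_64;
    }

    /* crosses_64kb(0xFE00, 1024) == 1: a transfer ending at 0x101FF
     * straddles 0x10000, so hard_dma_setup() rejects the buffer and
     * the driver must fall back to virtual DMA. */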
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index 0ceef9108..f679516bb 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -4,7 +4,6 @@
#include <linux/tasks.h>
extern unsigned int local_irq_count[NR_CPUS];
-#define in_interrupt() (local_irq_count[smp_processor_id()] != 0)
#ifndef __SMP__
@@ -29,7 +28,7 @@ static inline void release_irqlock(int cpu)
/* if we didn't own the irq lock, just ignore.. */
if (global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
- global_irq_lock = 0;
+ clear_bit(0,&global_irq_lock);
}
}
@@ -47,26 +46,10 @@ static inline void hardirq_exit(int cpu)
static inline int hardirq_trylock(int cpu)
{
- unsigned long flags;
-
- __save_flags(flags);
- __cli();
- atomic_inc(&global_irq_count);
- if (atomic_read(&global_irq_count) != 1 || test_bit(0,&global_irq_lock)) {
- atomic_dec(&global_irq_count);
- __restore_flags(flags);
- return 0;
- }
- ++local_irq_count[cpu];
- return 1;
+ return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
}
-static inline void hardirq_endlock(int cpu)
-{
- __cli();
- hardirq_exit(cpu);
- __sti();
-}
+#define hardirq_endlock(cpu) do { } while (0)
extern void synchronize_irq(void);
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
index 7930defbe..1b8af73a4 100644
--- a/include/asm-i386/ioctls.h
+++ b/include/asm-i386/ioctls.h
@@ -47,6 +47,8 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
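
The two new ioctls form the Unix98 pty interface: TIOCGPTN reads the
slave index back from a pty master, and TIOCSPTLCK clears the slave
lock. A hedged userspace usage sketch, assuming the numbers above are
visible through <sys/ioctl.h> (error handling omitted):

    #include <sys/ioctl.h>

    int unlock_pty(int master_fd, unsigned int *ptn)
    {
            int unlock = 0;

            if (ioctl(master_fd, TIOCGPTN, ptn) < 0)
                    return -1;      /* slave will be /dev/pts/<*ptn> */
            return ioctl(master_fd, TIOCSPTLCK, &unlock);
    }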
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index b96288a5c..aa9b5be37 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -4,12 +4,17 @@
/*
* linux/include/asm/irq.h
*
- * (C) 1992, 1993 Linus Torvalds
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
*
- * IRQ/IPI changes taken from work by Thomas Radke <tomsoft@informatik.tu-chemnitz.de>
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
*/
+#ifndef __SMP__
#define NR_IRQS 16
+#else
+#define NR_IRQS 24
+#endif
#define TIMER_IRQ 0
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 61d94b8b9..fe0864913 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -200,7 +200,8 @@ static inline void flush_tlb_range(struct mm_struct *mm,
* memory.
*/
#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
+#define _PAGE_PROTNONE 0x002 /* If not present */
+#define _PAGE_RW 0x002 /* If present */
#define _PAGE_USER 0x004
#define _PAGE_WT 0x008
#define _PAGE_PCD 0x010
@@ -213,7 +214,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
@@ -291,7 +292,7 @@ do { \
} while (0)
#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
#define pmd_none(x) (!pmd_val(x))
@@ -489,9 +490,9 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
{
}
-#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
+#define SWP_TYPE(entry) (((entry) >> 2) & 0x3f)
#define SWP_OFFSET(entry) ((entry) >> 8)
-#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
+#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 8))
#define module_map vmalloc
#define module_unmap vfree
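
The new _PAGE_PROTNONE bit explains the swap-entry renumbering: a
PROT_NONE page is encoded as not-present (bit 0 clear) with bit 1 set,
so swap entries can no longer keep their type field at bit 1. A sketch
of the resulting not-present pte layout (the bit map is an
interpretation of the macros above):

    /* not-present pte:
     *   bit 0      _PAGE_PRESENT  (clear)
     *   bit 1      _PAGE_PROTNONE (set only for PROT_NONE mappings)
     *   bits 2..7  swap type      (6 bits; was 7 bits at bit 1)
     *   bits 8..31 swap offset
     */
    #define SWP_ENTRY(type, offset) (((type) << 2) | ((offset) << 8))

    /* which is also why pte_present() must accept either encoding: */
    #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))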
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 9e4ca405d..778466bbe 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -12,21 +12,51 @@
#include <asm/segment.h>
/*
- * System setup and hardware bug flags..
- * [Note we don't test the 386 multiply bug or popad bug]
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
*/
-extern char hard_math;
-extern char x86; /* lower 4 bits */
-extern char x86_vendor_id[13];
-extern char x86_model; /* lower 4 bits */
-extern char x86_mask; /* lower 4 bits */
-extern int x86_capability; /* field of flags */
-extern int fdiv_bug;
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ char wp_works_ok; /* It doesn't on 386's */
+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
+ char hard_math;
+ char rfu;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability;
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int fdiv_bug;
+ int f00f_bug;
+ unsigned long loops_per_sec;
+};
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_UNKNOWN 0xff
+
+extern struct cpuinfo_x86 boot_cpu_data;
+
+#ifdef __SMP__
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data &boot_cpu_data
+#define current_cpu_data boot_cpu_data
+#endif
+
extern char ignore_irq13;
-extern char wp_works_ok; /* doesn't work on a 386 */
-extern char hlt_works_ok; /* problems on some 486Dx4's and old 386's */
-extern int have_cpuid; /* We have a CPUID */
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
/*
* Bus types (default is ISA, but people can check others with these..)
@@ -142,7 +172,7 @@ struct thread_struct {
_LDT(0),0, \
0, 0x8000, \
{~0, }, /* ioperm */ \
- _TSS(0), 0, 0, 0, KERNEL_DS, \
+ _TSS(0), 0, 0, 0, (mm_segment_t) { 0 } /* obsolete */ , \
{ { 0, }, }, /* 387 state */ \
NULL, 0, 0, 0, 0, 0 /* vm86_info */, \
}
@@ -150,7 +180,7 @@ struct thread_struct {
#define start_thread(regs, new_eip, new_esp) do {\
unsigned long seg = __USER_DS; \
__asm__("mov %w0,%%fs ; mov %w0,%%gs":"=r" (seg) :"0" (seg)); \
- set_fs(MAKE_MM_SEG(seg)); \
+ set_fs(USER_DS); \
regs->xds = seg; \
regs->xes = seg; \
regs->xss = seg; \
@@ -175,7 +205,7 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
* NOTE! The task struct and the stack go together
*/
#define alloc_task_struct() \
- ((struct task_struct *) __get_free_pages(GFP_KERNEL,1,0))
+ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
#define init_task (init_task_union.task)
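
Consumers reach the new per-CPU structure through current_cpu_data,
which collapses to boot_cpu_data on uniprocessor builds. A usage sketch
(init_fpu() is a hypothetical caller, not part of this patch):

    /* old: if (hard_math) ...   new: */
    if (current_cpu_data.hard_math)
            init_fpu();             /* illustrative callee */
    printk("CPU family %d, vendor id %s\n",
           boot_cpu_data.x86, boot_cpu_data.x86_vendor_id);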
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 3ba3f8af5..a68b23fc6 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -21,6 +21,7 @@
#include <asm/system.h>
#include <asm/atomic.h>
+#include <asm/spinlock.h>
struct semaphore {
atomic_t count;
@@ -38,6 +39,8 @@ asmlinkage void __up_wakeup(void /* special register calling convention */);
extern void __down(struct semaphore * sem);
extern void __up(struct semaphore * sem);
+extern spinlock_t semaphore_wake_lock;
+
#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
/*
@@ -55,10 +58,9 @@ static inline void wake_one_more(struct semaphore * sem)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
sem->waking++;
- restore_flags(flags);
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
static inline int waking_non_zero(struct semaphore *sem)
@@ -66,13 +68,12 @@ static inline int waking_non_zero(struct semaphore *sem)
unsigned long flags;
int ret = 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->waking > 0) {
sem->waking--;
ret = 1;
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 5da9ff0ed..598a21008 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -144,11 +144,18 @@ struct k_sigaction {
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
- __sighandler_t sa_handler;
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
#endif /* __KERNEL__ */
typedef struct sigaltstack {
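
Because sa_handler and sa_sigaction are now accessors into the same
union, existing code keeps compiling unchanged. A minimal sketch
(my_handler is a placeholder):

    extern void my_handler(int sig, struct siginfo *info, void *ctx);
    struct sigaction sa;

    sa.sa_handler = SIG_IGN;        /* expands to sa._u._sa_handler */
    sa.sa_sigaction = my_handler;   /* expands to sa._u._sa_sigaction;
                                     * both name the same storage */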
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0f7ae1224..6fb2e2541 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -149,28 +149,6 @@ struct mpc_config_intlocal
*/
/*
- * Per process x86 parameters
- */
-
-struct cpuinfo_x86
-{
- char hard_math;
- char x86;
- char x86_model;
- char x86_mask;
- char x86_vendor_id[16];
- int x86_capability;
- int fdiv_bug;
- int have_cpuid;
- char wp_works_ok;
- char hlt_works_ok;
- unsigned long udelay_val;
-};
-
-
-extern struct cpuinfo_x86 cpu_data[NR_CPUS];
-
-/*
* Private routines/data
*/
@@ -181,7 +159,6 @@ extern unsigned char *apic_reg;
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
extern volatile int cpu_number_map[NR_CPUS];
-extern volatile int cpu_logical_map[NR_CPUS];
extern volatile unsigned long smp_invalidate_needed;
extern void smp_flush_tlb(void);
extern volatile unsigned long kernel_flag, kernel_counter;
@@ -193,6 +170,11 @@ extern unsigned long ipi_count;
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void smp_local_timer_interrupt(struct pt_regs * regs);
extern void setup_APIC_clock (void);
+extern volatile int __cpu_logical_map[NR_CPUS];
+extern inline int cpu_logical_map(int cpu)
+{
+ return __cpu_logical_map[cpu];
+}
/*
@@ -257,5 +239,12 @@ extern __inline int hard_smp_processor_id(void)
#define SMP_FROM_INT 1
#define SMP_FROM_SYSCALL 2
+#else
+#ifndef ASSEMBLY
+extern inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+#endif
#endif
#endif
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
index e09eaea21..1a2ae578f 100644
--- a/include/asm-i386/socket.h
+++ b/include/asm-i386/socket.h
@@ -35,6 +35,10 @@
#define SO_BINDTODEVICE 25
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
/* Socket types. */
#define SOCK_STREAM 1 /* stream (connection) socket */
#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
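
SO_ATTACH_FILTER takes a classic BPF program through setsockopt(). A
hedged sketch that attaches an accept-everything filter, assuming the
struct definitions from <linux/filter.h>:

    #include <linux/filter.h>
    #include <sys/socket.h>

    int attach_accept_all(int fd)
    {
            static struct sock_filter insns[] = {
                    /* BPF_RET|BPF_K: accept up to 0xFFFFFFFF bytes */
                    { BPF_RET | BPF_K, 0, 0, 0xFFFFFFFF },
            };
            struct sock_fprog prog = { 1, insns };

            return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                              &prog, sizeof(prog));
    }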
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index 07a678435..008edf305 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -4,6 +4,8 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
+extern unsigned int local_bh_count[NR_CPUS];
+
#define get_active_bhs() (bh_mask & bh_active)
#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
@@ -25,22 +27,6 @@ extern inline void mark_bh(int nr)
set_bit(nr, &bh_active);
}
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- bh_mask_count[nr]++;
-}
-
-extern inline void enable_bh(int nr)
-{
- if (!--bh_mask_count[nr])
- bh_mask |= 1 << nr;
-}
-
#ifdef __SMP__
/*
@@ -48,52 +34,77 @@ extern inline void enable_bh(int nr)
* is entirely private to an implementation, it should not be
* referenced at all outside of this file.
*/
-extern atomic_t __intel_bh_counter;
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
-extern inline void start_bh_atomic(void)
+extern void synchronize_bh(void);
+
+static inline void start_bh_atomic(void)
{
- atomic_inc(&__intel_bh_counter);
- synchronize_irq();
+ atomic_inc(&global_bh_lock);
+ synchronize_bh();
}
-extern inline void end_bh_atomic(void)
+static inline void end_bh_atomic(void)
{
- atomic_dec(&__intel_bh_counter);
+ atomic_dec(&global_bh_lock);
}
/* These are for the irq's testing the lock */
-static inline int softirq_trylock(void)
+static inline int softirq_trylock(int cpu)
{
- atomic_inc(&__intel_bh_counter);
- if (atomic_read(&__intel_bh_counter) != 1) {
- atomic_dec(&__intel_bh_counter);
- return 0;
+ if (!test_and_set_bit(0,&global_bh_count)) {
+ if (atomic_read(&global_bh_lock) == 0) {
+ ++local_bh_count[cpu];
+ return 1;
+ }
+ clear_bit(0,&global_bh_count);
}
- return 1;
+ return 0;
}
-#define softirq_endlock() atomic_dec(&__intel_bh_counter)
+static inline void softirq_endlock(int cpu)
+{
+ local_bh_count[cpu]--;
+ clear_bit(0,&global_bh_count);
+}
#else
-extern int __intel_bh_counter;
-
extern inline void start_bh_atomic(void)
{
- __intel_bh_counter++;
+ local_bh_count[smp_processor_id()]++;
barrier();
}
extern inline void end_bh_atomic(void)
{
barrier();
- __intel_bh_counter--;
+ local_bh_count[smp_processor_id()]--;
}
/* These are for the irq's testing the lock */
-#define softirq_trylock() (__intel_bh_counter ? 0 : (__intel_bh_counter=1))
-#define softirq_endlock() (__intel_bh_counter = 0)
+#define softirq_trylock(cpu) (local_bh_count[cpu] ? 0 : (local_bh_count[cpu]=1))
+#define softirq_endlock(cpu) (local_bh_count[cpu] = 0)
+#define synchronize_bh() do { } while (0)
#endif /* SMP */
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+ bh_mask &= ~(1 << nr);
+ bh_mask_count[nr]++;
+ synchronize_bh();
+}
+
+extern inline void enable_bh(int nr)
+{
+ if (!--bh_mask_count[nr])
+ bh_mask |= 1 << nr;
+}
+
#endif /* __ASM_SOFTIRQ_H */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index af6cf8c9c..48c586d6c 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -64,12 +64,11 @@ typedef struct { } rwlock_t;
typedef struct {
volatile unsigned int lock;
- unsigned long previous;
} spinlock_t;
-#define SPIN_LOCK_UNLOCKED { 0, 0 }
+#define SPIN_LOCK_UNLOCKED { 0 }
-#define spin_lock_init(x) do { (x)->lock = 0; (x)->previous = 0; } while(0)
+#define spin_lock_init(x) do { (x)->lock = 0; } while(0)
#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
typedef struct { unsigned long a[100]; } __dummy_lock_t;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 738268052..e940c7c6f 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -11,13 +11,18 @@
* 3 - kernel data segment
* 4 - user code segment
* 5 - user data segment
- * ...
- * 8 - TSS #0
- * 9 - LDT #0
- * 10 - TSS #1
- * 11 - LDT #1
+ * 6 - not used
+ * 7 - not used
+ * 8 - APM BIOS support
+ * 9 - APM BIOS support
+ * 10 - APM BIOS support
+ * 11 - APM BIOS support
+ * 12 - TSS #0
+ * 13 - LDT #0
+ * 14 - TSS #1
+ * 15 - LDT #1
*/
-#define FIRST_TSS_ENTRY 8
+#define FIRST_TSS_ENTRY 12
#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
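
With eight-byte descriptors and a TSS+LDT pair per task, the selector
arithmetic works out as follows (a worked example of the macros above,
not new code):

    #define FIRST_TSS_ENTRY 12
    #define _TSS(n) ((((unsigned long) n) << 4) + (FIRST_TSS_ENTRY << 3))

    /* _TSS(0) == 96  == 0x60 -> GDT entry 12 (TSS #0)
     * _TSS(1) == 112 == 0x70 -> GDT entry 14 (TSS #1),
     * skipping entry 13, which holds LDT #0. */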
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
index 71309ac82..d792546f9 100644
--- a/include/asm-i386/types.h
+++ b/include/asm-i386/types.h
@@ -39,6 +39,8 @@ typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
+#define BITS_PER_LONG 32
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 0ac6380fd..ef08ac510 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -18,49 +18,43 @@
*/
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define KERNEL_DS MAKE_MM_SEG(0)
-#define USER_DS MAKE_MM_SEG(3)
+
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS MAKE_MM_SEG(0xC0000000)
#define get_ds() (KERNEL_DS)
-#define get_fs() (current->tss.segment)
-#define set_fs(x) (current->tss.segment = (x))
+#define get_fs() (current->addr_limit)
+#define set_fs(x) (current->addr_limit = (x))
#define segment_eq(a,b) ((a).seg == (b).seg)
+extern int __verify_write(const void *, unsigned long);
+
+#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
/*
- * Address Ok:
- *
- * segment
- * 00 (kernel) 11 (user)
- *
- * high 00 1 1
- * two 01 1 1
- * bits of 10 1 1
- * address 11 1 0
+ * Uhhuh, this needs 33-bit arithmetic. We have a carry..
*/
-#define __addr_ok(x) \
- ((((unsigned long)(x)>>30)&get_fs().seg) != 3)
+#define __range_ok(addr,size) ({ \
+ unsigned long flag,sum; \
+ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
+ :"=&r" (flag), "=r" (sum) \
+ :"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
+ flag; })
-#define __user_ok(addr,size) \
- ((size <= 0xC0000000UL) && (addr <= 0xC0000000UL - size))
-#define __kernel_ok \
- (!get_fs().seg)
+#if CPU > 386
-extern int __verify_write(const void *, unsigned long);
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-#if CPU > 386
-#define __access_ok(type,addr,size) \
- (__kernel_ok || __user_ok(addr,size))
#else
-#define __access_ok(type,addr,size) \
- (__kernel_ok || (__user_ok(addr,size) && \
- ((type) == VERIFY_READ || wp_works_ok || \
- __verify_write((void *)(addr),(size)))))
-#endif /* CPU */
-#define access_ok(type,addr,size) \
- __access_ok((type),(unsigned long)(addr),(size))
+#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
+ ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
+ segment_eq(get_fs(),KERNEL_DS) || \
+ __verify_write((void *)(addr),(size))))
+
+#endif /* CPU */
extern inline int verify_area(int type, const void * addr, unsigned long size)
{
@@ -104,38 +98,57 @@ extern unsigned long search_exception_table(unsigned long);
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
-#define get_user(x,ptr) \
- __get_user_check((x),(ptr),sizeof(*(ptr)))
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+extern void __get_user_1(void);
+extern void __get_user_2(void);
+extern void __get_user_4(void);
+
+#define __get_user_x(size,ret,x,ptr) \
+ __asm__ __volatile__("call __get_user_" #size \
+ :"=a" (ret),"=d" (x) \
+ :"0" (ptr))
+
+/* Careful: we have to cast the result to the type of the pointer for sign reasons */
+#define get_user(x,ptr) \
+({ int __ret_gu,__val_gu; \
+ switch(sizeof (*(ptr))) { \
+ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
+ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
+ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
+ default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
+ } \
+ (x) = (__typeof__(*(ptr)))__val_gu; \
+ __ret_gu; \
+})
+
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+
+extern void __put_user_bad(void);
+
+#define __put_user_x(size,ret,x,ptr) \
+ __asm__ __volatile__("call __put_user_" #size \
+ :"=a" (ret) \
+ :"0" (ptr),"d" (x) \
+ :"cx")
+
+#define put_user(x,ptr) \
+({ int __ret_pu; \
+ switch(sizeof (*(ptr))) { \
+ case 1: __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+ case 2: __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+ case 4: __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+ default: __put_user_x(X,__ret_pu,x,ptr); break; \
+ } \
+ __ret_pu; \
+})
#define __get_user(x,ptr) \
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-/*
- * The "xxx_ret" versions return constant specified in third argument, if
- * something bad happens. These macros can be optimized for the
- * case of just returning from the function xxx_ret is used.
- */
-
-#define put_user_ret(x,ptr,ret) ({ \
-if (put_user(x,ptr)) return ret; })
-
-#define get_user_ret(x,ptr,ret) ({ \
-if (get_user(x,ptr)) return ret; })
-
-#define __put_user_ret(x,ptr,ret) ({ \
-if (__put_user(x,ptr)) return ret; })
-
-#define __get_user_ret(x,ptr,ret) ({ \
-if (__get_user(x,ptr)) return ret; })
-
-
-
-extern long __put_user_bad(void);
-
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
@@ -143,15 +156,6 @@ extern long __put_user_bad(void);
__pu_err; \
})
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err); \
- __pu_err; \
-})
-
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
@@ -195,16 +199,6 @@ struct __large_struct { unsigned long buf[100]; };
__gu_err; \
})
-#define __get_user_check(x,ptr,size) \
-({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ,__gu_addr,size)) \
- __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \
@@ -234,6 +228,20 @@ do { \
: "=r"(err), ltype (x) \
: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
+/*
+ * The "xxx_ret" versions return constant specified in third argument, if
+ * something bad happens. These macros can be optimized for the
+ * case of just returning from the function xxx_ret is used.
+ */
+
+#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })
+
+#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })
+
+#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })
+
+#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
+
/*
* Copy To/From Userspace
@@ -255,10 +263,28 @@ do { \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
".previous" \
- : "=c"(size) \
+ : "=&c"(size) \
: "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
: "di", "si", "memory")
+/* We let the __ versions of copy_from/to_user inline, because they're often
+ * used in fast paths and have only a small space overhead.
+ */
+static inline unsigned long
+__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user(to,from,n);
+ return n;
+}
+
+
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
@@ -342,13 +368,8 @@ do { \
} \
} while (0)
-static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to,from,n);
- return n;
-}
+unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
+unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
@@ -359,14 +380,6 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
}
static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- __copy_user(to,from,n);
- return n;
-}
-
-static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -375,13 +388,6 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
}
static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __copy_user(to,from,n);
- return n;
-}
-
-static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user(to,from,n);
@@ -389,13 +395,6 @@ __constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
}
static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __copy_user(to,from,n);
- return n;
-}
-
-static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user(to,from,n);
@@ -412,15 +411,9 @@ __constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
__constant_copy_from_user((to),(from),(n)) : \
__generic_copy_from_user((to),(from),(n)))
-#define copy_to_user_ret(to,from,n,retval) ({ \
-if (copy_to_user(to,from,n)) \
- return retval; \
-})
+#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-#define copy_from_user_ret(to,from,n,retval) ({ \
-if (copy_from_user(to,from,n)) \
- return retval; \
-})
+#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
#define __copy_to_user(to,from,n) \
(__builtin_constant_p(n) ? \
@@ -432,116 +425,10 @@ if (copy_from_user(to,from,n)) \
__constant_copy_from_user_nocheck((to),(from),(n)) : \
__generic_copy_from_user_nocheck((to),(from),(n)))
-
-/*
- * Zero Userspace
- */
-
-#define __do_clear_user(addr,size) \
- __asm__ __volatile__( \
- "0: rep; stosl\n" \
- " movl %1,%0\n" \
- "1: rep; stosb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%1,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=c"(size) \
- : "r"(size & 3), "0"(size / 4), "D"(addr), "a"(0) \
- : "di")
-
-static inline unsigned long
-clear_user(void *to, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __do_clear_user(to, n);
- return n;
-}
-
-static inline unsigned long
-__clear_user(void *to, unsigned long n)
-{
- __do_clear_user(to, n);
- return n;
-}
-
-
-/*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res) \
- __asm__ __volatile__( \
- " testl %1,%1\n" \
- " jz 2f\n" \
- "0: lodsb\n" \
- " stosb\n" \
- " testb %%al,%%al\n" \
- " jz 1f\n" \
- " decl %1\n" \
- " jnz 0b\n" \
- "1: subl %1,%0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %2,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- ".previous" \
- : "=d"(res), "=c"(count) \
- : "i"(-EFAULT), "0"(count), "1"(count), "S"(src), "D"(dst) \
- : "si", "di", "ax", "memory")
-
-static inline long
-__strncpy_from_user(char *dst, const char *src, long count)
-{
- long res;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-static inline long
-strncpy_from_user(char *dst, const char *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern inline long strlen_user(const char *s)
-{
- unsigned long res;
-
- __asm__ __volatile__(
- "0: repne; scasb\n"
- " notl %0\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- "2: xorl %0,%0\n"
- " jmp 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,2b\n"
- ".previous"
- :"=c" (res), "=D" (s)
- :"1" (s), "a" (0), "0" (-__addr_ok(s)));
- return res & -__addr_ok(s);
-}
+long strncpy_from_user(char *dst, const char *src, long count);
+long __strncpy_from_user(char *dst, const char *src, long count);
+long strlen_user(const char *str);
+unsigned long clear_user(void *mem, unsigned long len);
+unsigned long __clear_user(void *mem, unsigned long len);
#endif /* __i386_UACCESS_H */
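
The "33-bit arithmetic" comment in __range_ok refers to the carry out
of addr + size; the addl/sbbl sequence folds that carry into the
verdict. The same check in plain C (a sketch; like __range_ok, it
returns zero when the range is acceptable):

    static inline unsigned long range_ok_flag(unsigned long addr,
                                              unsigned long size,
                                              unsigned long limit)
    {
            unsigned long sum = addr + size;

            if (sum < addr)                 /* addl carried: the 33rd bit */
                    return -1UL;            /* sbbl %0,%0 sets all ones */
            return limit < sum ? -1UL : 0;  /* the cmpl/sbbl $0 part */
    }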
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 1a45b75c3..53766f701 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -21,7 +21,7 @@
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
-#define __NR_chown 16
+#define __NR_lchown 16
#define __NR_break 17
#define __NR_oldstat 18
#define __NR_lseek 19
@@ -187,6 +187,7 @@
#define __NR_rt_sigsuspend 179
#define __NR_pread 180
#define __NR_pwrite 181
+#define __NR_chown 182
/* user-visible error numbers are in the range -1 - -122: see <asm-i386/errno.h> */