Diffstat (limited to 'include/asm-ppc')
34 files changed, 1425 insertions, 1151 deletions
diff --git a/include/asm-ppc/atomic.h b/include/asm-ppc/atomic.h index c34bc36ef..6070ea738 100644 --- a/include/asm-ppc/atomic.h +++ b/include/asm-ppc/atomic.h @@ -5,23 +5,116 @@ #ifndef _ASM_PPC_ATOMIC_H_ #define _ASM_PPC_ATOMIC_H_ +#ifdef __SMP__ +typedef struct { volatile int counter; } atomic_t; +#else typedef struct { int counter; } atomic_t; -#define ATOMIC_INIT(i) { (i) } +#endif -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. - */ -#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x) +#define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) -#define atomic_set(v) (((v)->counter) = i) +#define atomic_set(v,i) (((v)->counter) = (i)) -#define atomic_dec_return(v) ({atomic_sub(1,(v));(v);}) -#define atomic_inc_return(v) ({atomic_add(1,(v));(v);}) +extern void atomic_add(int a, atomic_t *v); +extern void atomic_sub(int a, atomic_t *v); +extern void atomic_inc(atomic_t *v); +extern int atomic_inc_return(atomic_t *v); +extern void atomic_dec(atomic_t *v); +extern int atomic_dec_return(atomic_t *v); +extern int atomic_dec_and_test(atomic_t *v); -#define atomic_inc(v) atomic_add(1,(v)) -#define atomic_dec(v) atomic_sub(1,(v)) -#endif +extern void atomic_clear_mask(unsigned long mask, unsigned long *addr); +extern void atomic_set_mask(unsigned long mask, unsigned long *addr); + +#if 0 /* for now */ +extern __inline__ void atomic_add(atomic_t a, atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%3\n\ + add %0,%2,%0\n\ + stwcx. %0,0,%3\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (a), "r" (v) + : "cc"); +} + +extern __inline__ void atomic_sub(atomic_t a, atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%3\n\ + subf %0,%2,%0\n\ + stwcx. %0,0,%3\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (a), "r" (v) + : "cc"); +} + +extern __inline__ int atomic_sub_and_test(atomic_t a, atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%3\n\ + subf %0,%2,%0\n\ + stwcx. %0,0,%3\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (a), "r" (v) + : "cc"); + + return t == 0; +} + +extern __inline__ void atomic_inc(atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%2\n\ + addic %0,%0,1\n\ + stwcx. %0,0,%2\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (v) + : "cc"); +} + +extern __inline__ void atomic_dec(atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%2\n\ + addic %0,%0,-1\n\ + stwcx. %0,0,%2\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (v) + : "cc"); +} + +extern __inline__ int atomic_dec_and_test(atomic_t *v) +{ + atomic_t t; + + __asm__ __volatile__("\n\ +1: lwarx %0,0,%2\n\ + addic %0,%0,-1\n\ + stwcx. %0,0,%2\n\ + bne 1b" + : "=&r" (t), "=m" (*v) + : "r" (v) + : "cc"); + + return t == 0; +} +#endif /* 0 */ +#endif /* _ASM_PPC_ATOMIC_H_ */ diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h index 959f2b302..3b8a24575 100644 --- a/include/asm-ppc/bitops.h +++ b/include/asm-ppc/bitops.h @@ -3,67 +3,155 @@ #include <asm/system.h> #include <asm/byteorder.h> +#include <linux/kernel.h> /* for printk */ #define BIT(n) 1<<(n&0x1F) typedef unsigned long BITFIELD; -/* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' - * is in the highest of the four bytes and bit '31' is the high bit - * within the first byte. powerpc is BIG-Endian. 
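/*
 * Illustrative sketch, not part of this diff: the load-reserve/
 * store-conditional loop that the extern atomic_* routines declared
 * above are expected to use (the #if 0 block shows the same idea).
 * The function name is hypothetical.
 */
static inline int example_atomic_add_return(int a, volatile int *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"	/* load word and reserve */
"	add	%0,%1,%0\n"	/* apply the increment */
"	stwcx.	%0,0,%2\n"	/* store only if still reserved */
"	bne-	1b"		/* reservation lost -- retry */
	: "=&r" (t)
	: "r" (a), "r" (v)
	: "cc", "memory");
	return t;
}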
Unless noted otherwise - * all bit-ops return 0 if bit was previously clear and != 0 otherwise. +/* + * These are ifdef'd out here because using : "cc" as a constraing + * results in errors from gcc. -- Cort */ -extern __inline__ int set_bit(int nr, void * add) +#if 0 +extern __inline__ int set_bit(int nr, void * addr) { - BITFIELD *addr = add; - long mask,oldbit; -#ifdef __KERNEL__ - int s = _disable_interrupts(); -#endif - addr += nr >> 5; - mask = BIT(nr); - oldbit = (mask & *addr) != 0; - *addr |= mask; -#ifdef __KERNEL__ - _enable_interrupts(s); -#endif - return oldbit; + unsigned long old, t; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *p = ((unsigned long *)addr) + (nr >> 5); + + if ((unsigned long)addr & 3) + printk("set_bit(%lx, %p)\n", nr, addr); + + __asm__ __volatile__( + "1:lwarx %0,0,%3 \n\t" + "or %1,%0,%2 \n\t" + "stwcx. %1,0,%3 \n\t" + "bne 1b \n\t" + : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/ + : "r" (mask), "r" (p) + /*: "cc" */); + +n return (old & mask) != 0; } -extern __inline__ int change_bit(int nr, void *add) +extern __inline__ unsigned long clear_bit(unsigned long nr, void *addr) { - BITFIELD *addr = add; - int mask, retval; -#ifdef __KERNEL__ - int s = _disable_interrupts(); -#endif - addr += nr >> 5; - mask = BIT(nr); - retval = (mask & *addr) != 0; - *addr ^= mask; -#ifdef __KERNEL__ - _enable_interrupts(s); -#endif - return retval; + unsigned long old, t; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *p = ((unsigned long *)addr) + (nr >> 5); + + if ((unsigned long)addr & 3) + printk("clear_bit(%lx, %p)\n", nr, addr); + __asm__ __volatile__("\n\ +1: lwarx %0,0,%3 + andc %1,%0,%2 + stwcx. %1,0,%3 + bne 1b" + : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/ + : "r" (mask), "r" (p) + /*: "cc"*/); + + return (old & mask) != 0; } -extern __inline__ int clear_bit(int nr, void *add) +extern __inline__ unsigned long change_bit(unsigned long nr, void *addr) { - BITFIELD *addr = add; - int mask, retval; -#ifdef __KERNEL__ - int s = _disable_interrupts(); -#endif - addr += nr >> 5; - mask = BIT(nr); - retval = (mask & *addr) != 0; - *addr &= ~mask; -#ifdef __KERNEL__ - _enable_interrupts(s); + unsigned long old, t; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *p = ((unsigned long *)addr) + (nr >> 5); + + if ((unsigned long)addr & 3) + printk("change_bit(%lx, %p)\n", nr, addr); + __asm__ __volatile__("\n\ +1: lwarx %0,0,%3 + xor %1,%0,%2 + stwcx. %1,0,%3 + bne 1b" + : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/ + : "r" (mask), "r" (p) + /*: "cc"*/); + + return (old & mask) != 0; +} #endif - return retval; + +extern __inline__ int ffz(unsigned int x) +{ + int n; + + x = ~x & (x+1); /* set LS zero to 1, other bits to 0 */ + __asm__ ("cntlzw %0,%1" : "=r" (n) : "r" (x)); + return 31 - n; } +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. + */ + +extern __inline__ unsigned long find_first_zero_bit(void * addr, unsigned long size) +{ + unsigned int * p = ((unsigned int *) addr); + unsigned int result = 0; + unsigned int tmp; + + if (size == 0) + return 0; + while (size & ~31UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + tmp |= ~0UL << size; +found_middle: + return result + ffz(tmp); +} + +/* + * Find next zero bit in a bitmap reasonably efficiently.. 
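/*
 * Illustrative sketch, not part of this diff: a portable restatement of
 * the ffz() trick above.  ~x & (x+1) keeps only the lowest clear bit of
 * x; the ppc version then finds its index with cntlzw, the loop below
 * computes the same index.  Undefined when x has no zero bit.
 */
static inline int example_ffz(unsigned int x)
{
	unsigned int lowest_zero = ~x & (x + 1);
	int n = 0;

	while (lowest_zero >>= 1)
		++n;
	return n;	/* e.g. example_ffz(0x0f) == 4, matching 31 - cntlzw(0x10) */
}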
+ */ +extern __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size, + unsigned long offset) +{ + unsigned int * p = ((unsigned int *) addr) + (offset >> 5); + unsigned int result = offset & ~31UL; + unsigned int tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (32-offset); + if (size < 32) + goto found_first; + if (~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size & ~31UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp |= ~0UL << size; +found_middle: + return result + ffz(tmp); +} + + #define _EXT2_HAVE_ASM_BITOPS_ #define ext2_find_first_zero_bit(addr, size) \ ext2_find_next_zero_bit((addr), (size), 0) diff --git a/include/asm-ppc/bugs.h b/include/asm-ppc/bugs.h index 202f9ab06..8dce1e290 100644 --- a/include/asm-ppc/bugs.h +++ b/include/asm-ppc/bugs.h @@ -2,7 +2,5 @@ * This file is included by 'init/main.c' */ -void -check_bugs(void) -{ -} +extern void +check_bugs(void); diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h index bbb257941..eab03c752 100644 --- a/include/asm-ppc/byteorder.h +++ b/include/asm-ppc/byteorder.h @@ -1,6 +1,8 @@ #ifndef _PPC_BYTEORDER_H #define _PPC_BYTEORDER_H +#include <asm/types.h> + #ifndef __BIG_ENDIAN #define __BIG_ENDIAN #endif @@ -16,21 +18,50 @@ #define __htonl(x) ntohl(x) #define __htons(x) ntohs(x) + +#define __constant_ntohs(x) ntohs(x) +#define __constant_ntohl(x) ntohl(x) #define __constant_htonl(x) ntohl(x) #define __constant_htons(x) ntohs(x) #ifdef __KERNEL__ +/* + * 16 and 32 bit little-endian loads and stores. + */ +extern inline unsigned ld_le16(volatile unsigned short *addr) +{ + unsigned val; -/* Convert from CPU byte order, to specified byte order. */ -extern __inline__ __u16 cpu_to_le16(__u16 value) + asm volatile("lhbrx %0,0,%1" : "=r" (val) : "r" (addr)); + return val; +} + +extern inline void st_le16(volatile unsigned short *addr, unsigned val) +{ + asm volatile("sthbrx %0,0,%1" : : "r" (val), "r" (addr) : "memory"); +} + +extern inline unsigned ld_le32(volatile unsigned *addr) +{ + unsigned val; + + asm volatile("lwbrx %0,0,%1" : "=r" (val) : "r" (addr)); + return val; +} + +extern inline void st_le32(volatile unsigned *addr, unsigned val) { - return (value >> 8) | (value << 8); + asm volatile("stwbrx %0,0,%1" : : "r" (val), "r" (addr) : "memory"); } + +extern __inline__ __u16 cpu_to_le16(__u16 value) +{ + return ld_le16(&value); +} extern __inline__ __u32 cpu_to_le32(__u32 value) { - return((value>>24) | ((value>>8)&0xff00) | - ((value<<8)&0xff0000) | (value<<24)); + return ld_le32(&value); } #define cpu_to_be16(x) (x) #define cpu_to_be32(x) (x) @@ -38,33 +69,33 @@ extern __inline__ __u32 cpu_to_le32(__u32 value) /* The same, but returns converted value from the location pointer by addr. */ extern __inline__ __u16 cpu_to_le16p(__u16 *addr) { - return cpu_to_le16(*addr); + return ld_le16(addr); } extern __inline__ __u32 cpu_to_le32p(__u32 *addr) { - return cpu_to_le32(*addr); + return ld_le32(addr); } extern __inline__ __u16 cpu_to_be16p(__u16 *addr) { - return cpu_to_be16(*addr); + return *addr; } extern __inline__ __u32 cpu_to_be32p(__u32 *addr) { - return cpu_to_be32(*addr); + return *addr; } /* The same, but do the conversion in situ, ie. put the value back to addr. 
*/ extern __inline__ void cpu_to_le16s(__u16 *addr) { - *addr = cpu_to_le16(*addr); + st_le16(addr,*addr); } extern __inline__ void cpu_to_le32s(__u32 *addr) { - *addr = cpu_to_le32(*addr); + st_le32(addr,*addr); } #define cpu_to_be16s(x) do { } while (0) @@ -86,5 +117,12 @@ extern __inline__ void cpu_to_le32s(__u32 *addr) #define be16_to_cpus(x) cpu_to_be16s(x) #define be32_to_cpus(x) cpu_to_be32s(x) + #endif /* __KERNEL__ */ #endif /* !(_PPC_BYTEORDER_H) */ + + + + + + diff --git a/include/asm-ppc/cache.h b/include/asm-ppc/cache.h index da609f271..8bdffcdfb 100644 --- a/include/asm-ppc/cache.h +++ b/include/asm-ppc/cache.h @@ -5,7 +5,8 @@ #define __ARCH_PPC_CACHE_H /* bytes per L1 cache line */ -#define L1_CACHE_BYTES 32 /* a guess */ +/* a guess */ /* a correct one -- Cort */ +#define L1_CACHE_BYTES 32 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) diff --git a/include/asm-ppc/checksum.h b/include/asm-ppc/checksum.h index 74e943792..7b55f0032 100644 --- a/include/asm-ppc/checksum.h +++ b/include/asm-ppc/checksum.h @@ -47,6 +47,20 @@ unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum); */ #define csum_partial_copy_fromuser csum_partial_copy +/* + * this is a new version of the above that records errors it finds in *errp, + * but continues and zeros the rest of the buffer. + * + * right now - it just calls csum_partial_copy() + * -- Cort + */ +extern __inline__ +unsigned int csum_partial_copy_from_user ( const char *src, char *dst, + int len, int sum, int *err_ptr) +{ + int *dst_err_ptr=NULL; + return csum_partial_copy( src, dst, len, sum); +} /* * this routine is used for miscellaneous IP-like checksums, mainly diff --git a/include/asm-ppc/current.h b/include/asm-ppc/current.h index d815aaad3..d7a0a9215 100644 --- a/include/asm-ppc/current.h +++ b/include/asm-ppc/current.h @@ -1,12 +1,10 @@ #ifndef _PPC_CURRENT_H #define _PPC_CURRENT_H -/* Some architectures may want to do something "clever" here since - * this is the most frequently accessed piece of data in the entire - * kernel. For an example, see the Sparc implementation where an - * entire register is hard locked to contain the value of current. - */ -extern struct task_struct *current_set[NR_CPUS]; -#define current (current_set[smp_processor_id()]) /* Current on this processor */ +#include <linux/config.h> + +extern struct task_struct *current_set[1]; + +register struct task_struct *current asm("r2"); #endif /* !(_PPC_CURRENT_H) */ diff --git a/include/asm-ppc/delay.h b/include/asm-ppc/delay.h index 68f1a4da7..9da227167 100644 --- a/include/asm-ppc/delay.h +++ b/include/asm-ppc/delay.h @@ -1,14 +1,31 @@ #ifndef _PPC_DELAY_H #define _PPC_DELAY_H +/* + * Copyright 1996, Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
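/*
 * Illustrative sketch, not part of this diff: portable equivalents of
 * the lhbrx/lwbrx based ld_le32()/st_le32() accessors above, useful for
 * checking their semantics -- a little-endian value in memory comes back
 * in host (big-endian) order and vice versa.  Names are hypothetical.
 */
static inline unsigned example_ld_le32(const unsigned char *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((unsigned)p[3] << 24);
}

static inline void example_st_le32(unsigned char *p, unsigned val)
{
	p[0] = val;
	p[1] = val >> 8;
	p[2] = val >> 16;
	p[3] = val >> 24;
}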
+ */ -extern __inline__ void __delay(unsigned long ); -extern __inline__ void __udelay(unsigned long ); - +extern __inline__ void __delay(unsigned int loops) +{ + if (loops != 0) + __asm__ __volatile__("mtctr %0; 1: bdnz 1b" : : + "r" (loops) : "ctr"); +} -extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c) +extern __inline__ void udelay(unsigned long usecs) { - return (a*b)/c; + unsigned long loops; + + /* compute (usecs * 2^32 / 10^6) * loops_per_sec / 2^32 */ + usecs *= 0x10c6; /* 2^32 / 10^6 */ + __asm__("mulhwu %0,%1,%2" : "=r" (loops) : + "r" (usecs), "r" (loops_per_sec)); + __delay(loops); } #endif /* defined(_PPC_DELAY_H) */ diff --git a/include/asm-ppc/dma.h b/include/asm-ppc/dma.h index ca609062e..7becf0190 100644 --- a/include/asm-ppc/dma.h +++ b/include/asm-ppc/dma.h @@ -1,16 +1,21 @@ -/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ +/* $Id: dma.h,v 1.3 1997/03/16 06:20:39 cort Exp $ * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen * and John Boyd, Nov. 1992. */ +#include <linux/config.h> + /* * Note: Adapted for PowerPC by Gary Thomas * Modified by Cort Dougan <cort@cs.nmt.edu> * + * None of this really applies for Power Macintoshes. There is + * basically just enough here to get kernel/dma.c to compile. + * * There may be some comments or restrictions made here which are - * not valid for the PowerPC (PreP) platform. Take what you read + * not valid for the PReP platform. Take what you read * with a grain of salt. */ @@ -18,6 +23,7 @@ #ifndef _ASM_DMA_H #define _ASM_DMA_H +#ifdef CONFIG_PREP #include <asm/io.h> /* need byte IO */ @@ -295,5 +301,6 @@ static __inline__ int get_dma_residue(unsigned int dmanr) /* These are in kernel/dma.c: */ extern void free_dma(unsigned int dmanr); /* release it again */ +#endif /* CONFIG_PREP */ #endif /* _ASM_DMA_H */ diff --git a/include/asm-ppc/errno.h b/include/asm-ppc/errno.h index 8c47b73e7..ff364b820 100644 --- a/include/asm-ppc/errno.h +++ b/include/asm-ppc/errno.h @@ -133,4 +133,6 @@ #define ERESTARTNOHAND 514 /* restart if no handler.. 
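/*
 * Illustrative sketch, not part of this diff: the fixed-point math in
 * udelay() above written out with 64-bit arithmetic.  0x10c6 is roughly
 * 2^32 / 10^6, so scaling by it and then taking the high word of the
 * product with loops_per_sec gives usecs * loops_per_sec / 10^6 without
 * a division (mulhwu returns exactly that high word on the ppc).
 */
static inline unsigned int example_udelay_loops(unsigned int usecs,
						unsigned int loops_per_sec)
{
	unsigned int scaled = usecs * 0x10c6u;	/* usecs * 2^32/10^6, mod 2^32 */

	return (unsigned int)(((unsigned long long)scaled * loops_per_sec) >> 32);
}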
*/ #define ENOIOCTLCMD 515 /* No ioctl command */ +#define _LAST_ERRNO 515 + #endif diff --git a/include/asm-ppc/ide.h b/include/asm-ppc/ide.h index 69838b8b1..bc16288a8 100644 --- a/include/asm-ppc/ide.h +++ b/include/asm-ppc/ide.h @@ -1,5 +1,5 @@ /* - * linux/include/asm-i386/ide.h + * linux/include/asm-ppc/ide.h * * Copyright (C) 1994-1996 Linus Torvalds & authors */ @@ -13,7 +13,7 @@ #ifdef __KERNEL__ -typedef unsigned short ide_ioreg_t; +#include <linux/config.h> #ifndef MAX_HWIFS #define MAX_HWIFS 4 @@ -21,11 +21,15 @@ typedef unsigned short ide_ioreg_t; #define ide_sti() sti() +#ifdef CONFIG_PREP + +typedef unsigned short ide_ioreg_t; + static __inline__ int ide_default_irq(ide_ioreg_t base) { switch (base) { - case 0x1f0: return 14; - case 0x170: return 15; + case 0x1f0: return 13; + case 0x170: return 13; case 0x1e8: return 11; case 0x168: return 10; default: @@ -66,18 +70,7 @@ typedef union { unsigned lba : 1; /* using LBA instead of CHS */ unsigned bit7 : 1; /* always 1 */ } b; - } select_t; - -static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), - unsigned long flags, const char *device, void *dev_id) -{ - return request_irq(irq, handler, flags, device, dev_id); -} - -static __inline__ void ide_free_irq(unsigned int irq, void *dev_id) -{ - free_irq(irq, dev_id); -} +} select_t; static __inline__ int ide_check_region (ide_ioreg_t from, unsigned int extent) { @@ -94,26 +87,92 @@ static __inline__ void ide_release_region (ide_ioreg_t from, unsigned int extent release_region(from, extent); } -/* - * The following are not needed for the non-m68k ports - */ -static __inline__ int ide_ack_intr (ide_ioreg_t base_port, ide_ioreg_t irq_port) +#define ide_fix_driveid(id) do {} while (0) + +#endif + +#ifdef CONFIG_PMAC + +#include <asm/io.h> /* so we can redefine insw/outsw */ + +typedef unsigned long ide_ioreg_t; + +static __inline__ int ide_default_irq(ide_ioreg_t base) +{ + return 0; +} + +extern __inline__ ide_ioreg_t ide_default_io_base(int index) +{ + return index; +} + +extern void ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base, int *irq); + +typedef union { + unsigned all : 8; /* all of the bits together */ + struct { + unsigned bit7 : 1; /* always 1 */ + unsigned lba : 1; /* using LBA instead of CHS */ + unsigned bit5 : 1; /* always 1 */ + unsigned unit : 1; /* drive select number, 0/1 */ + unsigned head : 4; /* always zeros here */ + } b; +} select_t; + +#undef SUPPORT_SLOW_DATA_PORTS +#define SUPPORT_SLOW_DATA_PORTS 0 +#undef SUPPORT_VLB_SYNC +#define SUPPORT_VLB_SYNC 0 + +static __inline__ int ide_check_region (ide_ioreg_t from, unsigned int extent) { - return(1); + return 0; } -static __inline__ void ide_fix_driveid(struct hd_driveid *id) +static __inline__ void ide_request_region (ide_ioreg_t from, unsigned int extent, const char *name) { } -static __inline__ void ide_release_lock (int *ide_lock) +static __inline__ void ide_release_region (ide_ioreg_t from, unsigned int extent) { } -static __inline__ void ide_get_lock (int *ide_lock, void (*handler)(int, void *, struct pt_regs *), void *data) +#undef insw +#undef outsw +#define insw(port, buf, ns) ide_insw((port), (buf), (ns)) +#define outsw(port, buf, ns) ide_outsw((port), (buf), (ns)) + +void ide_insw(ide_ioreg_t port, void *buf, int ns); +void ide_outsw(ide_ioreg_t port, void *buf, int ns); + +#define ide_fix_driveid(id) do { \ + int nh; \ + unsigned short *p = (unsigned short *) id; \ + for (nh = SECTOR_WORDS * 2; nh != 0; --nh, ++p) \ + *p = (*p << 8) + (*p >> 8); 
\ +} while (0) + +#endif + +static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), + unsigned long flags, const char *device, void *dev_id) { + return request_irq(irq, handler, flags, device, dev_id); +} + +static __inline__ void ide_free_irq(unsigned int irq, void *dev_id) +{ + free_irq(irq, dev_id); } +/* + * The following are not needed for the non-m68k ports + */ +#define ide_ack_intr(base, irq) (1) +#define ide_release_lock(lock) do {} while (0) +#define ide_get_lock(lock, hdlr, data) do {} while (0) + #endif /* __KERNEL__ */ #endif /* __ASMPPC_IDE_H */ diff --git a/include/asm-ppc/init.h b/include/asm-ppc/init.h index 82ce44ce8..09b38d899 100644 --- a/include/asm-ppc/init.h +++ b/include/asm-ppc/init.h @@ -1,14 +1,23 @@ #ifndef _PPC_INIT_H #define _PPC_INIT_H -/* Throwing the initialization code and data out is not supported yet... */ - -#define __init +#define __init #define __initdata -#define __initfunc(__arginit) __arginit -/* For assembly routines */ +#define __initfunc(x) x +/* +#define __init __attribute__ ((__section__ (".text.init"))) +#define __initdata __attribute__ ((__section__ (".data.init"))) +#define __initfunc(__arginit) \ + __arginit __init; \ + __arginit +*/ + /* For assembly routines */ #define __INIT #define __FINIT #define __INITDATA - +/* +#define __INIT .section ".text.init",#alloc,#execinstr +#define __FINIT .previous +#define __INITDATA .section ".data.init",#alloc,#write +*/ #endif diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 581d51643..e2dfcc49a 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -1,8 +1,11 @@ #ifndef _PPC_IO_H #define _PPC_IO_H +#include <linux/config.h> #include <asm/page.h> +#include <asm/byteorder.h> +#ifdef CONFIG_PREP /* from the Carolina Technical Spec -- Cort */ #define IBM_ACORN 0x82A #define SIO_CONFIG_RA 0x398 @@ -14,15 +17,67 @@ #define IBM_L2_INVALIDATE 0x814 #define IBM_SYS_CTL 0x81c - - -/* Define the particulars of outb/outw/outl "instructions" */ #define SLOW_DOWN_IO #ifndef PCI_DRAM_OFFSET #define PCI_DRAM_OFFSET 0x80000000 #endif +#define readb(addr) (*(volatile unsigned char *) (addr)) +#define readw(addr) (*(volatile unsigned short *) (addr)) +#define readl(addr) (*(volatile unsigned int *) (addr)) +#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b)) +#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b)) +#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b)) + +void outsl(int port, long *ptr, int len); + +__inline__ unsigned char outb(unsigned char val, int port); +__inline__ unsigned short outw(unsigned short val, int port); +__inline__ unsigned long outl(unsigned long val, int port); +__inline__ unsigned char inb(int port); +__inline__ unsigned short inw(int port); +__inline__ unsigned long inl(int port); + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + +#endif /* CONFIG_PREP */ + +#ifdef CONFIG_PMAC +/* + * Read and write the non-volatile RAM. 
+ */ +extern int nvram_readb(int addr); +extern void nvram_writeb(int addr, int val); + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +#define inb(port) in_8((unsigned char *)(port)) +#define outb(val, port) out_8((unsigned char *)(port), (val)) +#define inw(port) in_le16((unsigned short *)(port)) +#define outw(val, port) out_le16((unsigned short *)(port), (val)) +#define inl(port) in_le32((unsigned long *)(port)) +#define outl(val, port) out_le32((unsigned long *)(port), (val)) + +#define inb_p(port) in_8((unsigned char *)(port)) +#define outb_p(val, port) out_8((unsigned char *)(port), (val)) +#define inw_p(port) in_le16((unsigned short *)(port)) +#define outw_p(val, port) out_le16((unsigned short *)(port), (val)) +#define inl_p(port) in_le32(((unsigned long *)port)) +#define outl_p(val, port) out_le32((unsigned long *)(port), (val)) + +#define insw(port, buf, ns) _insw((unsigned short *)(port), (buf), (ns)) +#define outsw(port, buf, ns) _outsw((unsigned short *)(port), (buf), (ns)) +#define insl(port, buf, nl) _insl((unsigned long *)(port), (buf), (nl)) +#define outsl(port, buf, nl) _outsl((unsigned long *)(port), (buf), (nl)) +#endif /* CONFIG_PMAC */ /* * The PCI bus is inherently Little-Endian. The PowerPC is being @@ -42,19 +97,12 @@ extern inline void * bus_to_virt(unsigned long address) if (address == 0) return 0; return ((void *)(address - PCI_DRAM_OFFSET + KERNELBASE)); } -/* #define virt_to_bus(a) ((unsigned long)(((char *)a==(char *) 0) ? ((char *)0) \ - : ((char *)((long)a - KERNELBASE + PCI_DRAM_OFFSET)))) -#define bus_to_virt(a) ((void *) (((char *)a==(char *)0) ? ((char *)0) \ - : ((char *)((long)a - PCI_DRAM_OFFSET + KERNELBASE)))) -*/ - -#define readb(addr) (*(volatile unsigned char *) (addr)) -#define readw(addr) (*(volatile unsigned short *) (addr)) -#define readl(addr) (*(volatile unsigned int *) (addr)) -#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b)) -#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b)) -#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b)) +/* + * Map in an area of physical address space, for accessing + * I/O devices etc. + */ +extern void *ioremap(unsigned long address, unsigned long size); /* * Change virtual addresses to physical addresses and vv. @@ -72,27 +120,101 @@ extern inline void * phys_to_virt(unsigned long address) return (void *) address; } -/* from arch/ppc/kernel/port_io.c - * -- Cort +#define _IO_BASE ((unsigned long)0x80000000) + +/* + * These are much more useful le/be io functions from Paul + * than leXX_to_cpu() style functions since the ppc has + * load/store byte reverse instructions + * -- Cort */ -unsigned char inb(int port); -unsigned short inw(int port); -unsigned long inl(int port); -unsigned char outb(unsigned char val,int port); -unsigned short outw(unsigned short val,int port); -unsigned long outl(unsigned long val,int port); -void outsl(int port, long *ptr, int len); -static inline unsigned char inb_p(int port) {return (inb(port)); } -static inline unsigned short inw_p(int port) {return (inw(port)); } -static inline unsigned long inl_p(int port) {return (inl(port)); } +/* + * Enforce In-order Execution of I/O: + * Acts as a barrier to ensure all previous I/O accesses have + * completed before any further ones are issued. + */ +extern inline void eieio(void) +{ + asm volatile ("eieio" : :); +} + +/* + * 8, 16 and 32 bit, big and little endian I/O operations, with barrier. 
+ */ +extern inline int in_8(volatile unsigned char *addr) +{ + int ret; + + ret = *addr; + eieio(); + return ret; +} + +extern inline void out_8(volatile unsigned char *addr, int val) +{ + *addr = val; + eieio(); +} + +extern inline int in_le16(volatile unsigned short *addr) +{ + int ret; + + ret = ld_le16(addr); + eieio(); + return ret; +} + +extern inline int in_be16(volatile unsigned short *addr) +{ + int ret; + + ret = *addr; + eieio(); + return ret; +} +extern inline void out_le16(volatile unsigned short *addr, int val) +{ + st_le16(addr, val); + eieio(); +} +extern inline void out_be16(volatile unsigned short *addr, int val) +{ + *addr = val; + eieio(); +} -static inline unsigned char outb_p(unsigned char val,int port) { return (outb(val,port)); } -static inline unsigned short outw_p(unsigned short val,int port) { return (outw(val,port)); } -static inline unsigned long outl_p(unsigned long val,int port) { return (outl(val,port)); } +extern inline unsigned in_le32(volatile unsigned *addr) +{ + unsigned ret; + ret = ld_le32(addr); + eieio(); + return ret; +} +extern inline int in_be32(volatile unsigned *addr) +{ + int ret; + + ret = *addr; + eieio(); + return ret; +} + +extern inline void out_le32(volatile unsigned *addr, int val) +{ + st_le32(addr, val); + eieio(); +} + +extern inline void out_be32(volatile unsigned *addr, int val) +{ + *addr = val; + eieio(); +} #endif diff --git a/include/asm-ppc/ioctls.h b/include/asm-ppc/ioctls.h index f56e53db7..2039f4954 100644 --- a/include/asm-ppc/ioctls.h +++ b/include/asm-ppc/ioctls.h @@ -83,8 +83,8 @@ #define TIOCGETD 0x5424 #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ #define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h index f457f82c3..ebffe2bcb 100644 --- a/include/asm-ppc/irq.h +++ b/include/asm-ppc/irq.h @@ -1,7 +1,13 @@ #ifndef _ASM_IRQ_H #define _ASM_IRQ_H +#include <linux/config.h> + +#ifdef CONFIG_PMAC #define NR_IRQS 32 +#else +#define NR_IRQS 16 +#endif extern void disable_irq(unsigned int); extern void enable_irq(unsigned int); diff --git a/include/asm-ppc/keyboard.h b/include/asm-ppc/keyboard.h index 9ac4139b9..0c7240d14 100644 --- a/include/asm-ppc/keyboard.h +++ b/include/asm-ppc/keyboard.h @@ -2,12 +2,14 @@ * linux/include/asm-ppc/keyboard.h * * Created 3 Nov 1996 by Geert Uytterhoeven + * Modified for Power Macintosh by Paul Mackerras * - * $Id: keyboard.h,v 1.3 1997/07/22 23:18:19 ralf Exp $ + * $Id: keyboard.h,v 1.3 1997/07/24 01:55:57 ralf Exp $ */ /* - * This file contains the ppc architecture specific keyboard definitions + * This file contains the ppc architecture specific keyboard definitions - + * like the intel pc for prep systems, different for power macs. 
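/*
 * Usage sketch, not part of this diff: how a driver might use the
 * in_le32()/out_le32() accessors above on a little-endian device,
 * relying on the eieio() inside them to keep the accesses ordered on
 * the bus.  The register layout and names here are hypothetical.
 */
static inline void example_program_device(volatile unsigned *addr_reg,
					  volatile unsigned *ctrl_reg,
					  unsigned bus_addr)
{
	out_le32(addr_reg, bus_addr);	/* device sees little-endian */
	out_le32(ctrl_reg, 1);		/* hypothetical 'go' bit */
	while (in_le32(ctrl_reg) & 1)
		;			/* wait for the device to clear it */
}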
*/ #ifndef __ASMPPC_KEYBOARD_H @@ -17,6 +19,29 @@ #include <asm/io.h> +#include <linux/config.h> + +#ifdef CONFIG_MAC_KEYBOARD + +extern int mackbd_setkeycode(unsigned int scancode, unsigned int keycode); +extern int mackbd_getkeycode(unsigned int scancode); +extern int mackbd_pretranslate(unsigned char scancode, char raw_mode); +extern int mackbd_translate(unsigned char scancode, unsigned char *keycode, + char raw_mode); +extern int mackbd_unexpected_up(unsigned char keycode); +extern void mackbd_leds(unsigned char leds); +extern void mackbd_init_hw(void); + +#define kbd_setkeycode mackbd_setkeycode +#define kbd_getkeycode mackbd_getkeycode +#define kbd_pretranslate mackbd_pretranslate +#define kbd_translate mackbd_translate +#define kbd_unexpected_up mackbd_unexpected_up +#define kbd_leds mackbd_leds +#define kbd_init_hw mackbd_init_hw + +#else /* CONFIG_MAC_KEYBOARD */ + #define KEYBOARD_IRQ 1 #define DISABLE_KBD_DURING_INTERRUPTS 0 @@ -47,6 +72,7 @@ extern void pckbd_init_hw(void); #define kbd_pause() do { } while(0) #define INIT_KBD +#endif /* CONFIG_MAC_KEYBOARD */ #define keyboard_setup() \ request_region(0x60, 16, "keyboard") diff --git a/include/asm-ppc/mc146818rtc.h b/include/asm-ppc/mc146818rtc.h index 91f93f598..e69de29bb 100644 --- a/include/asm-ppc/mc146818rtc.h +++ b/include/asm-ppc/mc146818rtc.h @@ -1,128 +0,0 @@ -/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM - * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993 - * derived from Data Sheet, Copyright Motorola 1984 (!). - * It was written to be part of the Linux operating system. - */ -/* permission is hereby granted to copy, modify and redistribute this code - * in terms of the GNU Library General Public License, Version 2 or later, - * at your option. - */ - -#ifndef _MC146818RTC_H -#define _MC146818RTC_H - -#include <asm/io.h> - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... 
- */ -#define CMOS_READ(addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -inb_p(RTC_PORT(1)); \ -}) -#define CMOS_WRITE(val, addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -outb_p((val),RTC_PORT(1)); \ -}) - -#ifndef MCRTC_PORT -#define MCRTC_PORT(x) (0x70 + (x)) -#define MCRTC_ALWAYS_BCD 1 -#endif - -#define CMOS_MCRTC_READ(addr) ({ \ -outb_p((addr),MCRTC_PORT(0)); \ -inb_p(MCRTC_PORT(1)); \ -}) -#define CMOS_MCRTC_WRITE(val, addr) ({ \ -outb_p((addr),MCRTC_PORT(0)); \ -outb_p((val),MCRTC_PORT(1)); \ -}) - -/********************************************************************** - * register summary - **********************************************************************/ -#define MCRTC_SECONDS 0 -#define MCRTC_SECONDS_ALARM 1 -#define MCRTC_MINUTES 2 -#define MCRTC_MINUTES_ALARM 3 -#define MCRTC_HOURS 4 -#define MCRTC_HOURS_ALARM 5 -/* RTC_*_alarm is always true if 2 MSBs are set */ -# define MCRTC_ALARM_DONT_CARE 0xC0 - -#define MCRTC_DAY_OF_WEEK 6 -#define MCRTC_DAY_OF_MONTH 7 -#define MCRTC_MONTH 8 -#define MCRTC_YEAR 9 - -/* control registers - Moto names - */ -#define MCRTC_REG_A 10 -#define MCRTC_REG_B 11 -#define MCRTC_REG_C 12 -#define MCRTC_REG_D 13 - -/********************************************************************** - * register details - **********************************************************************/ -#define MCRTC_FREQ_SELECT MCRTC_REG_A - -/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, - * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, - * totalling to a max high interval of 2.228 ms. - */ -# define MCRTC_UIP 0x80 -# define MCRTC_DIV_CTL 0x70 - /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ -# define MCRTC_REF_CLCK_4MHZ 0x00 -# define MCRTC_REF_CLCK_1MHZ 0x10 -# define MCRTC_REF_CLCK_32KHZ 0x20 - /* 2 values for divider stage reset, others for "testing purposes only" */ -# define MCRTC_DIV_RESET1 0x60 -# define MCRTC_DIV_RESET2 0x70 - /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ -# define MCRTC_RATE_SELECT 0x0F - -/**********************************************************************/ -#define MCRTC_CONTROL MCRTC_REG_B -# define MCRTC_SET 0x80 /* disable updates for clock setting */ -# define MCRTC_PIE 0x40 /* periodic interrupt enable */ -# define MCRTC_AIE 0x20 /* alarm interrupt enable */ -# define MCRTC_UIE 0x10 /* update-finished interrupt enable */ -# define MCRTC_SQWE 0x08 /* enable square-wave output */ -# define MCRTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ -# define MCRTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ -# define MCRTC_DST_EN 0x01 /* auto switch DST - works f. 
USA only */ - -/**********************************************************************/ -#define MCRTC_INTR_FLAGS MCRTC_REG_C -/* caution - cleared by read */ -# define MCRTC_IRQF 0x80 /* any of the following 3 is active */ -# define MCRTC_PF 0x40 -# define MCRTC_AF 0x20 -# define MCRTC_UF 0x10 - -/**********************************************************************/ -#define MCRTC_VALID MCRTC_REG_D -# define MCRTC_VRT 0x80 /* valid RAM and time */ -/**********************************************************************/ - -/* example: !(CMOS_READ(MCRTC_CONTROL) & MCRTC_DM_BINARY) - * determines if the following two #defines are needed - */ -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - -#endif /* _MC146818RTC_H */ diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h index 1b3217cfd..c6c835229 100644 --- a/include/asm-ppc/mmu.h +++ b/include/asm-ppc/mmu.h @@ -32,42 +32,68 @@ typedef struct _PTE /* Segment Register */ typedef struct _SEGREG - { - unsigned long t:1; /* Normal or I/O type */ - unsigned long ks:1; /* Supervisor 'key' (normally 0) */ - unsigned long kp:1; /* User 'key' (normally 1) */ - unsigned long n:1; /* No-execute */ - unsigned long :4; /* Unused */ - unsigned long vsid:24; /* Virtual Segment Identifier */ - } SEGREG; +{ + unsigned long t:1; /* Normal or I/O type */ + unsigned long ks:1; /* Supervisor 'key' (normally 0) */ + unsigned long kp:1; /* User 'key' (normally 1) */ + unsigned long n:1; /* No-execute */ + unsigned long :4; /* Unused */ + unsigned long vsid:24; /* Virtual Segment Identifier */ +} SEGREG; /* Block Address Translation (BAT) Registers */ +typedef struct _P601_BATU +{ + unsigned long bepi:15; /* Effective page index (virtual address) */ + unsigned long :8; /* unused */ + unsigned long w:1; + unsigned long i:1; /* Cache inhibit */ + unsigned long m:1; /* Memory coherence */ + unsigned long vs:1; /* Supervisor valid */ + unsigned long vp:1; /* User valid */ + unsigned long pp:2; /* Page access protections */ +} P601_BATU; + typedef struct _BATU /* Upper part of BAT */ - { - unsigned long bepi:15; /* Effective page index (virtual address) */ - unsigned long :4; /* Unused */ - unsigned long bl:11; /* Block size mask */ - unsigned long vs:1; /* Supervisor valid */ - unsigned long vp:1; /* User valid */ - } BATU; +{ + unsigned long bepi:15; /* Effective page index (virtual address) */ + unsigned long :4; /* Unused */ + unsigned long bl:11; /* Block size mask */ + unsigned long vs:1; /* Supervisor valid */ + unsigned long vp:1; /* User valid */ +} BATU; + +typedef struct _P601_BATL +{ + unsigned long brpn:15; /* Real page index (physical address) */ + unsigned long :10; /* Unused */ + unsigned long v:1; /* valid/invalid */ + unsigned long bl:6; /* Block size mask */ +} P601_BATL; typedef struct _BATL /* Lower part of BAT */ - { - unsigned long brpn:15; /* Real page index (physical address) */ - unsigned long :10; /* Unused */ - unsigned long w:1; /* Write-thru cache */ - unsigned long i:1; /* Cache inhibit */ - unsigned long m:1; /* Memory coherence */ - unsigned long g:1; /* Guarded (MBZ) */ - unsigned long :1; /* Unused */ - unsigned long pp:2; /* Page access protections */ - } BATL; +{ + unsigned long brpn:15; /* Real page index (physical address) */ + unsigned long :10; /* Unused */ + unsigned long w:1; /* Write-thru cache */ + unsigned long i:1; /* Cache inhibit */ + unsigned long m:1; /* Memory coherence */ + 
unsigned long g:1; /* Guarded (MBZ) */ + unsigned long :1; /* Unused */ + unsigned long pp:2; /* Page access protections */ +} BATL; typedef struct _BAT - { - BATU batu; /* Upper register */ - BATL batl; /* Lower register */ - } BAT; +{ + BATU batu; /* Upper register */ + BATL batl; /* Lower register */ +} BAT; + +typedef struct _P601_BAT +{ + P601_BATU batu; /* Upper register */ + P601_BATL batl; /* Lower register */ +} P601_BAT; /* Block size masks */ #define BL_128K 0x000 @@ -118,15 +144,6 @@ typedef struct _MMU_context pte **pmap; /* Two-level page-map structure */ } MMU_context; -#if 0 -BAT ibat[4]; /* Instruction BAT images */ -BAT dbat[4]; /* Data BAT images */ -PTE *hash_table; /* Hardware hashed page table */ -int hash_table_size; -int hash_table_mask; -unsigned long sdr; /* Hardware image of SDR */ -#endif - /* Used to set up SDR register */ #define HASH_TABLE_SIZE_64K 0x00010000 #define HASH_TABLE_SIZE_128K 0x00020000 @@ -143,6 +160,4 @@ unsigned long sdr; /* Hardware image of SDR */ #define HASH_TABLE_MASK_2M 0x01F #define HASH_TABLE_MASK_4M 0x03F -extern inline int MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg); - #endif diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h index f4bd1e84f..89a649bb3 100644 --- a/include/asm-ppc/mmu_context.h +++ b/include/asm-ppc/mmu_context.h @@ -1,13 +1,59 @@ #ifndef __PPC_MMU_CONTEXT_H #define __PPC_MMU_CONTEXT_H +/* the way contexts are handled on the ppc they are vsid's and + don't need any special treatment right now. + perhaps I can defer flushing the tlb by keeping a list of + zombie vsid/context's and handling that through destroy_context + later -- Cort + */ + +#define NO_CONTEXT 0 +#define LAST_CONTEXT 0xfffff + +extern int next_mmu_context; +extern void mmu_context_overflow(void); +extern void set_context(int context); + /* - * get a new mmu context.. PowerPC's don't know about contexts [yet] + * Allocating context numbers this way tends to spread out + * the entries in the hash table better than a simple linear + * allocation. */ -#define get_mmu_context(x) do { } while (0) +#define MUNGE_CONTEXT(n) (((n) * 897) & LAST_CONTEXT) -#define init_new_context(mm) do { } while(0) -#define destroy_context(mm) do { } while(0) +/* + * Get a new mmu context for task tsk if necessary. + */ +#define get_mmu_context(tsk) \ +do { \ + struct mm_struct *mm = (tsk)->mm; \ + if (mm->context == NO_CONTEXT) { \ + int i; \ + if (next_mmu_context == LAST_CONTEXT) \ + mmu_context_overflow(); \ + mm->context = MUNGE_CONTEXT(++next_mmu_context);\ + if ( tsk == current ) \ + set_context(mm->context); \ + } \ +} while (0) -#endif +/* + * Set up the context for a new address space. + */ +#define init_new_context(mm) ((mm)->context = NO_CONTEXT) + +/* + * We're finished using the context for an address space. + */ +#define destroy_context(mm) do { } while (0) +/* + * compute the vsid from the context and segment + * segments > 7 are kernel segments and their + * vsid is the segment -- Cort + */ +#define VSID_FROM_CONTEXT(segment,context) \ + ((segment < 8) ? 
((segment) | (context)<<4) : (segment)) + +#endif diff --git a/include/asm-ppc/nvram.h b/include/asm-ppc/nvram.h index 1d704ff6a..665bc76af 100644 --- a/include/asm-ppc/nvram.h +++ b/include/asm-ppc/nvram.h @@ -9,6 +9,7 @@ #define NVRAM_AS1 0x75 #define NVRAM_DATA 0x77 + /* RTC Offsets */ #define RTC_SECONDS 0x1FF9 @@ -18,6 +19,8 @@ #define RTC_DAY_OF_MONTH 0x1FFD #define RTC_MONTH 0x1FFE #define RTC_YEAR 0x1FFF +#define RTC_CONTROLA 0x1FF8 +#define RTC_CONTROLB 0x1FF9 #ifndef BCD_TO_BIN #define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h index 273d31fd0..a18d5e324 100644 --- a/include/asm-ppc/page.h +++ b/include/asm-ppc/page.h @@ -1,11 +1,32 @@ #ifndef _PPC_PAGE_H #define _PPC_PAGE_H +#include <linux/config.h> + /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +/* This handles the memory map.. */ + +/* + * these virtual mappings for prep and pmac + * on the prep machine the io areas are at different physical locations + * than their virtual address. On the pmac the io areas + * are mapped 1-1 virtual/physical. + * -- Cort + */ +#ifdef CONFIG_PREP +#define KERNELBASE 0x90000000 +#endif +#ifdef CONFIG_PMAC +#define KERNELBASE 0xc0000000 +#endif +#define PAGE_OFFSET KERNELBASE + + +#ifndef __ASSEMBLY__ #ifdef __KERNEL__ #define STRICT_MM_TYPECHECKS @@ -50,13 +71,13 @@ typedef unsigned long pgprot_t; #endif + +/* align addr on a size boundry - adjust address up if needed -- Cort */ +#define _ALIGN(addr,size) (((addr)+size-1)&(~(size-1))) + /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) -/* This handles the memory map.. */ - -#define KERNELBASE 0x90000000 -#define PAGE_OFFSET KERNELBASE #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) @@ -67,7 +88,8 @@ typedef unsigned long pgprot_t; #define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT) #define MAP_PAGE_RESERVED (1<<15) +extern __inline__ unsigned long get_prezerod_page(void); #endif /* __KERNEL__ */ - +#endif /* __ASSEMBLY__ */ #endif /* _PPC_PAGE_H */ diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index e9c400345..fd52a64c7 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -1,22 +1,31 @@ -/* * Last edited: Nov 7 23:44 1995 (cort) */ #ifndef _PPC_PGTABLE_H #define _PPC_PGTABLE_H +#include <linux/config.h> #include <asm/page.h> #include <asm/mmu.h> -inline void flush_tlb(void); -inline void flush_tlb_all(void); -inline void flush_tlb_mm(struct mm_struct *mm); -inline void flush_tlb_page(struct vm_area_struct *vma, long vmaddr); -inline void flush_tlb_range(struct mm_struct *mm, long start, long end); -inline void flush_page_to_ram(unsigned long); -inline void really_flush_cache_all(void); +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern void flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end); +extern void flush_tlb(void); + +/* Caches aren't brain-dead on the ppc. */ +#define flush_cache_all() +#define flush_cache_mm(mm) +#define flush_cache_range(mm, start, end) +#define flush_cache_page(vma, vmaddr) +/* + * For the page specified, write modified lines in the data cache + * out to memory, and invalidate lines in the instruction cache. 
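/*
 * Illustrative sketch, not part of this diff: the get_mmu_context()
 * macro above unrolled into a function for readability (assumes the
 * task/mm declarations from <linux/sched.h>).  An mm is given a context
 * the first time one of its tasks needs it; MUNGE_CONTEXT() scatters
 * consecutive allocations across the 20-bit context space so hash-table
 * entries spread out better than with linear allocation.
 */
static inline void example_get_mmu_context(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	if (mm->context == NO_CONTEXT) {
		if (next_mmu_context == LAST_CONTEXT)
			mmu_context_overflow();
		mm->context = MUNGE_CONTEXT(++next_mmu_context);
		if (tsk == current)
			set_context(mm->context);
	}
}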
+ */ +extern void flush_page_to_ram(unsigned long); -/* only called from asm in head.S, so why bother? */ -/*void MMU_init(void);*/ +extern unsigned long va_to_phys(unsigned long address); -/* PMD_SHIFT determines the size of the area a second-level page table can map */ +/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */ #define PMD_SHIFT 22 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) @@ -27,8 +36,8 @@ inline void really_flush_cache_all(void); #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* - * entries per page directory level: the i386 is two-level, so - * we don't really have any PMD directory physically. + * entries per page directory level: our page-table tree is two-level, so + * we don't really have any PMD directory. */ #define PTRS_PER_PTE 1024 #define PTRS_PER_PMD 1 @@ -41,41 +50,42 @@ inline void really_flush_cache_all(void); * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ -/* this must be a decent size since the ppc bat's can map only certain sizes - but these can be different from the physical ram size configured. - bat mapping must map at least physical ram size and vmalloc start addr - must beging AFTER the area mapped by the bat. - 32 works for now, but may need to be changed with larger differences. - offset = next greatest bat mapping to ramsize - ramsize - (ie would be 0 if batmapping = ramsize) - -- Cort 10/6/96 - */ -#define VMALLOC_OFFSET (32*1024*1024) +#define VMALLOC_OFFSET (0x2000000) /* 32M */ #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) #define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#define _PAGE_PRESENT 0x001 -#define _PAGE_RW 0x002 -#define _PAGE_USER 0x004 -#define _PAGE_PCD 0x010 -#define _PAGE_ACCESSED 0x020 -#define _PAGE_DIRTY 0x040 -#define _PAGE_COW 0x200 /* implemented in software (one of the AVL bits) */ -#define _PAGE_NO_CACHE 0x400 +/* + * Bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PowerPC PTE as closely as possible. 
+ */ +#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ +#define _PAGE_USER 0x002 /* matches one of the PP bits */ +#define _PAGE_RW 0x004 /* software: user write access allowed */ +#define _PAGE_GUARDED 0x008 +#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ +#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ +#define _PAGE_DIRTY 0x080 /* C: page changed */ +#define _PAGE_ACCESSED 0x100 /* R: page referenced */ +#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */ -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_COW) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) -#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_NO_CACHE | _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ + _PAGE_HWWRITE | _PAGE_ACCESSED) +#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_NO_CACHE | _PAGE_RW | \ + _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_ACCESSED) /* - * The i386 can't do page protection for execute, and considers that the same are read. - * Also, write permissions imply read permissions. This is the closest we can get.. + * The PowerPC can only do execute protection on a segment (256MB) basis, + * not on a page basis. So we consider execute permission the same as read. + * Also, write permissions imply read permissions. + * This is the closest we can get.. */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY @@ -96,18 +106,6 @@ inline void really_flush_cache_all(void); #define __S111 PAGE_SHARED /* - * Define this if things work differently on a i386 and a i486: - * it will (on a i486) warn about kernel memory accesses that are - * done without a 'verify_area(VERIFY_WRITE,..)' - */ -#undef CONFIG_TEST_VERIFY_AREA - -#if 0 -/* page table for 0-4MB for everybody */ -extern unsigned long pg0[1024]; -#endif - -/* * BAD_PAGETABLE is used when we need a bogus page-table, while * BAD_PAGE is used for a bogus page. * @@ -119,49 +117,36 @@ extern pte_t * __bad_pagetable(void); extern unsigned long empty_zero_page[1024]; -#define BAD_PAGETABLE __bad_pagetable() -#define BAD_PAGE __bad_page() -#define ZERO_PAGE ((unsigned long) empty_zero_page) +#define BAD_PAGETABLE __bad_pagetable() +#define BAD_PAGE __bad_page() +#define ZERO_PAGE ((unsigned long) empty_zero_page) /* number of bits that fit into a memory pointer */ -#define BITS_PER_PTR (8*sizeof(unsigned long)) +#define BITS_PER_PTR (8*sizeof(unsigned long)) /* to align the pointer to a pointer address */ -#define PTR_MASK (~(sizeof(void*)-1)) +#define PTR_MASK (~(sizeof(void*)-1)) -/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ +/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */ /* 64-bit machines, beware! SRB. 
*/ -#define SIZEOF_PTR_LOG2 2 - -/* to find an entry in a page-table */ -#define PAGE_PTR(address) \ -((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) +#define SIZEOF_PTR_LOG2 2 /* to set the page-dir */ /* tsk is a task_struct and pgdir is a pte_t */ -#define SET_PAGE_DIR(tsk,pgdir) \ -do { \ - (tsk)->tss.pg_tables = (unsigned long *)(pgdir); \ - if ((tsk) == current) \ - { \ -/*_printk("Change page tables = %x\n", pgdir);*/ \ - } \ -} while (0) - -/* comes from include/linux/mm.h now -- Cort */ -/*extern void *high_memory;*/ +#define SET_PAGE_DIR(tsk,pgdir) ({ \ + ((tsk)->tss.pg_tables = (unsigned long *)(pgdir)); \ +}) extern inline int pte_none(pte_t pte) { return !pte_val(pte); } extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; } extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } -extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE; } -extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_PRESENT; } -extern inline int pmd_inuse(pmd_t *pmdp) { return 0; } +extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~PAGE_MASK) != 0; } +extern inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) & PAGE_MASK) != 0; } extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; } -extern inline void pmd_reuse(pmd_t * pmdp) { } + /* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the pgd is never bad, and a pmd always exists (as it's folded @@ -172,7 +157,6 @@ extern inline int pgd_bad(pgd_t pgd) { return 0; } extern inline int pgd_present(pgd_t pgd) { return 1; } extern inline void pgd_clear(pgd_t * pgdp) { } - /* * The following only work if pte_present() is true. * Undefined behaviour if not.. @@ -182,48 +166,82 @@ extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -extern inline int pte_cow(pte_t pte) { return pte_val(pte) & _PAGE_COW; } - -extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; } -extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; } -extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; } -extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } -extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } -extern inline pte_t pte_uncow(pte_t pte) { pte_val(pte) &= ~_PAGE_COW; return pte; } -extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } -extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; } -extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; } -extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } -extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } -extern inline pte_t pte_mkcow(pte_t pte) { pte_val(pte) |= _PAGE_COW; return pte; } -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. 
- */ +extern inline int pte_uncache(pte_t pte) { return pte_val(pte) |= _PAGE_NO_CACHE; } +extern inline int pte_cache(pte_t pte) { return pte_val(pte) &= ~_PAGE_NO_CACHE; } + +extern inline pte_t pte_rdprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_USER; return pte; } +extern inline pte_t pte_exprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_USER; return pte; } +extern inline pte_t pte_wrprotect(pte_t pte) { + pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } +extern inline pte_t pte_mkclean(pte_t pte) { + pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } +extern inline pte_t pte_mkold(pte_t pte) { + pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } + +extern inline pte_t pte_mkread(pte_t pte) { + pte_val(pte) |= _PAGE_USER; return pte; } +extern inline pte_t pte_mkexec(pte_t pte) { + pte_val(pte) |= _PAGE_USER; return pte; } +extern inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_RW; + if (pte_val(pte) & _PAGE_DIRTY) + pte_val(pte) |= _PAGE_HWWRITE; + return pte; +} +extern inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_DIRTY; + if (pte_val(pte) & _PAGE_RW) + pte_val(pte) |= _PAGE_HWWRITE; + return pte; +} +extern inline pte_t pte_mkyoung(pte_t pte) { + pte_val(pte) |= _PAGE_ACCESSED; return pte; } /* Certain architectures need to do special things when pte's * within a page table are directly modified. Thus, the following * hook is made available. */ -#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#if 1 +#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#else +extern inline void set_pte(pte_t *pteptr, pte_t pteval) +{ + unsigned long val = pte_val(pteval); + extern void xmon(void *); + + if ((val & _PAGE_PRESENT) && ((val < 0x111000 || (val & 0x800) + || ((val & _PAGE_HWWRITE) && (~val & (_PAGE_RW|_PAGE_DIRTY)))) { + printk("bad pte val %lx ptr=%p\n", val, pteptr); + xmon(0); + } + *pteptr = pteval; +} +#endif -static pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot) +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ + +static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot) { pte_t pte; pte_val(pte) = (page) | pgprot_val(pgprot); return pte; } -/*#define mk_pte_phys(physpage, pgprot) \ -({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })*/ extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot) -{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; } +{ pte_t pte; pte_val(pte) = __pa(page) | pgprot_val(pgprot); return pte; } extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } extern inline unsigned long pte_page(pte_t pte) -{ return pte_val(pte) & PAGE_MASK; } +{ return (pte_val(pte) & PAGE_MASK) + KERNELBASE; } extern inline unsigned long pmd_page(pmd_t pmd) -{ return pmd_val(pmd) & PAGE_MASK; } +{ return pmd_val(pmd); } /* to find an entry in a kernel page-table-directory */ @@ -250,13 +268,14 @@ extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address) /* * Allocate and free page tables. The xxx_kernel() versions are - * used to allocate a kernel page table - this turns on ASN bits - * if any, and marks the page tables reserved. + * used to allocate a kernel page table, but are actually identical + * to the xxx() versions. 
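/*
 * Illustrative sketch, not part of this diff: the invariant the pte_mk*()
 * helpers above maintain.  _PAGE_HWWRITE is set exactly when both
 * _PAGE_RW and _PAGE_DIRTY are set (as its definition notes), so a clean
 * or write-protected page still faults on the first store and the dirty
 * state can be tracked.  The helper name is hypothetical.
 */
static inline int example_pte_flags_consistent(unsigned long val)
{
	int hw = (val & _PAGE_HWWRITE) != 0;
	int sw = (val & _PAGE_RW) && (val & _PAGE_DIRTY);

	return hw == sw;
}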
*/ extern inline void pte_free_kernel(pte_t * pte) { free_page((unsigned long) pte); } + extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address) { address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); @@ -264,20 +283,17 @@ extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address) pte_t * page = (pte_t *) get_free_page(GFP_KERNEL); if (pmd_none(*pmd)) { if (page) { -/* pmd_set(pmd,page);*/ - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page; + pmd_val(*pmd) = (unsigned long) page; return page + address; } -/* pmd_set(pmd, BAD_PAGETABLE);*/ - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE; + pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE; return NULL; } free_page((unsigned long) page); } if (pmd_bad(*pmd)) { printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); -/* pmd_set(pmd, (pte_t *) BAD_PAGETABLE); */ - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE; + pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE; return NULL; } return (pte_t *) pmd_page(*pmd) + address; @@ -308,17 +324,17 @@ extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address) pte_t * page = (pte_t *) get_free_page(GFP_KERNEL); if (pmd_none(*pmd)) { if (page) { - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page; + pmd_val(*pmd) = (unsigned long) page; return page + address; } - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE; + pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE; return NULL; } free_page((unsigned long) page); } if (pmd_bad(*pmd)) { printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); - pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE; + pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE; return NULL; } return (pte_t *) pmd_page(*pmd) + address; @@ -350,18 +366,17 @@ extern inline pgd_t * pgd_alloc(void) extern pgd_t swapper_pg_dir[1024]; /* - * Software maintained MMU tables may have changed -- update the - * hardware [aka cache] + * Page tables may have changed. We don't need to do anything here + * as entries are faulted into the hash table by the low-level + * data/instruction access exception handlers. 
*/ -extern inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t _pte); +#define update_mmu_cache(vma,address,pte) while(0){} #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8)) -#define module_map vmalloc -#define module_unmap vfree + #endif /* _PPC_PAGE_H */ diff --git a/include/asm-ppc/ppc_machine.h b/include/asm-ppc/ppc_machine.h index 1cf9a7930..e69de29bb 100644 --- a/include/asm-ppc/ppc_machine.h +++ b/include/asm-ppc/ppc_machine.h @@ -1,56 +0,0 @@ -/* - * PowerPC machine specifics - */ - -#ifndef _PPC_MACHINE_H_ -#define _PPC_MACHINE_H_ - -#define KERNEL_STACK_SIZE (4096) /* usable stack -- not buffers at either end */ -#define KERNEL_STACK_MASK (~(KERNEL_STACK_SIZE-1)) - -/* Bit encodings for Machine State Register (MSR) */ -#define MSR_POW (1<<18) /* Enable Power Management */ -#define MSR_TGPR (1<<17) /* TLB Update registers in use */ -#define MSR_ILE (1<<16) /* Interrupt Little-Endian enable */ -#define MSR_EE (1<<15) /* External Interrupt enable */ -#define MSR_PR (1<<14) /* Supervisor/User privilege */ -#define MSR_FP (1<<13) /* Floating Point enable */ -#define MSR_ME (1<<12) /* Machine Check enable */ -#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */ -#define MSR_SE (1<<10) /* Single Step */ -#define MSR_BE (1<<9) /* Branch Trace */ -#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */ -#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */ -#define MSR_IR (1<<5) /* Instruction MMU enable */ -#define MSR_DR (1<<4) /* Data MMU enable */ -#define MSR_RI (1<<1) /* Recoverable Exception */ -#define MSR_LE (1<<0) /* Little-Endian enable */ - -#define MSR_ MSR_FE0|MSR_FE1|MSR_ME|MSR_FP -#define MSR_USER MSR_FE0|MSR_FE1|MSR_ME|MSR_PR|MSR_EE|MSR_IR|MSR_DR - -/* Bit encodings for Hardware Implementation Register (HID0) */ -#define HID0_EMCP (1<<31) /* Enable Machine Check pin */ -#define HID0_EBA (1<<29) /* Enable Bus Address Parity */ -#define HID0_EBD (1<<28) /* Enable Bus Data Parity */ -#define HID0_SBCLK (1<<27) -#define HID0_EICE (1<<26) -#define HID0_ECLK (1<<25) -#define HID0_PAR (1<<24) -#define HID0_DOZE (1<<23) -#define HID0_NAP (1<<22) -#define HID0_SLEEP (1<<21) -#define HID0_DPM (1<<20) -#define HID0_ICE (1<<15) /* Instruction Cache Enable */ -#define HID0_DCE (1<<14) /* Data Cache Enable */ -#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ -#define HID0_DLOCK (1<<12) /* Data Cache Lock */ -#define HID0_ICFI (1<<11) /* Instruction Cache Flash Invalidate */ -#define HID0_DCI (1<<10) /* Data Cache Invalidate */ -#define HID0_SIED (1<<7) /* Serial Instruction Execution [Disable] */ -#define HID0_BHTE (1<<2) /* Branch History Table Enable */ - -/* fpscr settings */ -#define FPSCR_FX (1<<31) -#define FPSCR_FEX (1<<30) -#endif diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h index e3c715c0d..1e6baf0a6 100644 --- a/include/asm-ppc/processor.h +++ b/include/asm-ppc/processor.h @@ -1,12 +1,8 @@ #ifndef __ASM_PPC_PROCESSOR_H #define __ASM_PPC_PROCESSOR_H -/* - * PowerPC machine specifics - */ +#include <linux/config.h> -#define KERNEL_STACK_SIZE (4096) /* usable stack -- not buffers at either end */ -#define KERNEL_STACK_MASK (~(KERNEL_STACK_SIZE-1)) /* Bit encodings for Machine State Register (MSR) */ #define MSR_POW (1<<18) /* Enable Power Management */ @@ -26,7 +22,8 @@ #define MSR_RI (1<<1) /* Recoverable Exception */ #define MSR_LE (1<<0) /* Little-Endian enable */ -#define MSR_ 
MSR_FE0|MSR_FE1|MSR_ME|MSR_FP +#define MSR_ MSR_FE0|MSR_FE1|MSR_ME +#define MSR_KERNEL MSR_|MSR_IR|MSR_DR #define MSR_USER MSR_FE0|MSR_FE1|MSR_ME|MSR_PR|MSR_EE|MSR_IR|MSR_DR /* Bit encodings for Hardware Implementation Register (HID0) */ @@ -49,20 +46,16 @@ #define HID0_DCI (1<<10) /* Data Cache Invalidate */ #define HID0_SIED (1<<7) /* Serial Instruction Execution [Disable] */ #define HID0_BHTE (1<<2) /* Branch History Table Enable */ - /* fpscr settings */ #define FPSCR_FX (1<<31) #define FPSCR_FEX (1<<30) - - #ifndef __ASSEMBLY__ /* * PowerPC machine specifics */ extern inline void start_thread(struct pt_regs *, unsigned long, unsigned long ); - /* * Bus types */ @@ -77,82 +70,86 @@ extern inline void start_thread(struct pt_regs *, unsigned long, unsigned long ) #define wp_works_ok 1 #define wp_works_ok__is_a_macro /* for versions in ksyms.c */ -/* - * User space process size: 2GB. This is hardcoded into a few places, - * so don't change it unless you know what you are doing. - * - * "this is gonna have to change to 1gig for the sparc" - David S. Miller - */ #define TASK_SIZE (0x80000000UL) - /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) - struct thread_struct { - unsigned long ksp; /* Kernel stack pointer */ - unsigned long *pg_tables; /* MMU information */ - unsigned long segs[16]; /* MMU Segment registers */ - unsigned long last_pc; /* PC when last entered system */ - unsigned long user_stack; /* [User] Stack when entered kernel */ - double fpr[32]; /* Complete floating point set */ - unsigned long wchan; /* Event task is sleeping on */ - unsigned long *regs; /* Pointer to saved register state */ - unsigned long fp_used; /* number of quantums fp was used */ - unsigned long fs; /* for get_fs() validation */ - unsigned long expc; /* exception handler addr (see fault.c) */ - unsigned long excount; /* exception handler count */ + unsigned long ksp; /* Kernel stack pointer */ + unsigned long *pg_tables; /* MMU information */ +#ifdef CONFIG_PMAC + unsigned long last_pc; /* PC when last entered system */ + unsigned long user_stack; /* [User] Stack when entered kernel */ +#endif + unsigned long fpscr_pad; /* (so we can save fpscr with stfd) */ + unsigned long fpscr; /* fp status reg */ + double fpr[32]; /* Complete floating point set */ + unsigned long fp_used; + unsigned long wchan; /* Event task is sleeping on */ + struct pt_regs *regs; /* Pointer to saved register state */ + unsigned long fs; /* for get_fs() validation */ + signed long last_syscall; + unsigned long pad[2]; /* pad to 16-byte boundry */ }; +/* Points to the thread_struct of the thread (if any) which + currently owns the FPU. */ +#define fpu_tss (&(last_task_used_math->tss)) + +#ifdef CONFIG_PMAC +#define LAZY_TSS_FPR_INIT 0,0,0,0,{0}, +#endif +#ifdef CONFIG_PREP +#define LAZY_TSS_FPR_INIT 0,0,{0}, +#endif #define INIT_TSS { \ - sizeof(init_kernel_stack) + (long) &init_kernel_stack,\ - (long *)swapper_pg_dir, {0}, \ - 0, 0, {0}, \ - 0, 0, 0, \ - KERNEL_DS, 0, 0 \ + sizeof(init_stack) + (long) &init_stack, /* ksp */ \ + (long *)swapper_pg_dir, /* pg_tables */ \ + LAZY_TSS_FPR_INIT \ + 0, /*fp_used*/ 0, /*wchan*/ \ + sizeof(init_stack) + (long)&init_stack - \ + sizeof(struct pt_regs), /* regs */ \ + KERNEL_DS /*fs*/, 0 /*last_syscall*/ \ } -#define INIT_MMAP { &init_mm, 0, 0x40000000, \ - PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, NULL, &init_mm.mmap } - -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); +#define INIT_MMAP { &init_mm, KERNELBASE/*0*/, 0xffffffff/*0x40000000*/, \ + PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC } /* * Return saved PC of a blocked thread. For now, this is the "user" PC */ static inline unsigned long thread_saved_pc(struct thread_struct *t) { - return (t->last_pc); + return (t->regs) ? t->regs->nip : 0; + /*return (t->last_pc);*/ } -#define _PROC_Motorola 0 -#define _PROC_IBM 1 -#define _PROC_Be 2 - -int _Processor; +extern int _machine; +#define _MACH_Motorola 0 +#define _MACH_IBM 1 +#define _MACH_Be 2 +#define _MACH_Pmac 3 -/* Allocation and freeing of basic task resources. */ -#define alloc_task_struct() kmalloc(sizeof(struct task_struct), GFP_KERNEL) -#define free_task_struct(p) kfree(p) - -#ifdef KERNEL_STACK_BUFFER -/* give a 1 page buffer below the stack - if change then change ppc_machine.h */ -#define alloc_kernel_stack() \ - (memset((void *)__get_free_pages(GFP_KERNEL,1,0),0,KERNEL_STACK_SIZE+PAGE_SIZE)+PAGE_SIZE) -#define free_kernel_stack(page) free_pages((page)-PAGE_SIZE,1) -#else -#define alloc_kernel_stack() get_free_page(GFP_KERNEL) -#define free_kernel_stack(page) free_page((page)) -#endif +/* + * NOTE! The task struct and the stack go together + */ +#define alloc_task_struct() \ + ((struct task_struct *) __get_free_pages(GFP_KERNEL,1,0)) +#define free_task_struct(p) free_pages((unsigned long)(p),1) -#endif /* ASSEMBLY*/ +/* in process.c - for early bootup debug -- Cort */ +int ll_printk(const char *, ...); +void ll_puts(const char *); -/* +#endif /* ndef ASSEMBLY*/ * Return_address is a replacement for __builtin_return_address(count) +#define init_task (init_task_union.task) +#define init_stack (init_task_union.stack) + +#endif /* __ASM_PPC_PROCESSOR_H */ * which on certain architectures cannot reasonably be implemented in GCC * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386). * Note that __builtin_return_address(x>=1) is forbidden because the GCC diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h index 5bad3b9da..13b526172 100644 --- a/include/asm-ppc/ptrace.h +++ b/include/asm-ppc/ptrace.h @@ -2,54 +2,61 @@ #define _PPC_PTRACE_H /* - * This struct defines the way the registers are stored on the - * kernel stack during a system call or other kernel entry. - * Note: the "_overhead" and "_underhead" spaces are stack locations - * used by called routines. Because of the way the PowerPC ABI - * specifies the function prologue/epilogue, registers can be - * saved in stack locations which are below the current stack - * pointer (_underhead). If an interrupt occurs during this - * [albeit] small time interval, registers which were saved on - * the stack could be trashed by the interrupt save code. The - * "_underhead" leaves a hole just in case this happens. It also - * wastes 80 bytes of stack if it doesn't! Similarly, the called - * routine stores some information "above" the stack pointer before - * if gets adjusted. This is covered by the "_overhead" field - * and [thankfully] is not totally wasted. + * this should only contain volatile regs + * since we can keep non-volatile in the tss + * should set this up when only volatiles are saved + * by intr code. * + * I can't find any reference to the above comment (from Gary Thomas) + * about _underhead/_overhead in the sys V abi for the ppc + * dated july 25, 1994. + * + * the stack must be kept to a size that is a multiple of 16 + * so this includes the stack frame overhead + * -- Cort. 
+ */ + +/* + * GCC sometimes accesses words at negative offsets from the stack + * pointer, although the SysV ABI says it shouldn't. To cope with + * this, we leave this much untouched space on the stack on exception + * entry. */ +#define STACK_FRAME_OVERHEAD 16 +#define STACK_UNDERHEAD 64 +#ifndef __ASSEMBLY__ struct pt_regs { - unsigned long _overhead[14]; /* Callee's SP,LR,params */ - unsigned long gpr[32]; - unsigned long nip; - unsigned long msr; - unsigned long ctr; - unsigned long link; - unsigned long ccr; - unsigned long xer; - unsigned long dar; /* Fault registers */ - unsigned long dsisr; - unsigned long srr1; - unsigned long srr0; - unsigned long hash1, hash2; - unsigned long imiss, dmiss; - unsigned long icmp, dcmp; - unsigned long orig_gpr3; /* Used for restarting system calls */ - unsigned long result; /* Result of a system call */ - double fpcsr; - unsigned long trap; /* Reason for being here */ - unsigned long marker; /* Should have DEADDEAD */ - /*unsigned long _underhead[20]; *//* Callee's register save area */ - unsigned long edx; /* for binfmt_elf.c which wants edx */ + unsigned long gpr[32]; + unsigned long nip; + unsigned long msr; + unsigned long ctr; + unsigned long link; + unsigned long ccr; + unsigned long xer; + unsigned long dar; /* Fault registers */ + unsigned long dsisr; +#if 0 + unsigned long srr1; + unsigned long srr0; + unsigned long hash1, hash2; + unsigned long imiss, dmiss; + unsigned long icmp, dcmp; +#endif + unsigned long orig_gpr3; /* Used for restarting system calls */ + unsigned long result; /* Result of a system call */ + unsigned long trap; /* Reason for being here */ + unsigned long marker; /* Should have DEADDEAD */ }; + #define instruction_pointer(regs) ((regs)->nip) #define user_mode(regs) ((regs)->msr & 0x4000) #ifdef KERNEL extern void show_regs(struct pt_regs *); #endif +/* should include and generate these in ppc_defs.h -- Cort */ /* Offsets used by 'ptrace' system call interface */ /* Note: these should correspond to gpr[x] */ #define PT_R0 0 @@ -94,6 +101,7 @@ extern void show_regs(struct pt_regs *); #define PT_CCR 38 #define PT_FPR0 48 +#endif /* __ASSEMBLY__ */ -#endif +#endif /* _PPC_PTRACE_H */ diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h index 4d1f91372..a4dfa0312 100644 --- a/include/asm-ppc/semaphore.h +++ b/include/asm-ppc/semaphore.h @@ -1,11 +1,18 @@ #ifndef _PPC_SEMAPHORE_H #define _PPC_SEMAPHORE_H +/* + * SMP- and interrupt-safe semaphores.. + * + * (C) Copyright 1996 Linus Torvalds + * Adapted for PowerPC by Gary Thomas and Paul Mackerras + */ + #include <asm/atomic.h> struct semaphore { atomic_t count; - atomic_t waiting; + atomic_t waking; struct wait_queue * wait; }; @@ -13,44 +20,61 @@ struct semaphore { #define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), ATOMIC_INIT(0), NULL }) extern void __down(struct semaphore * sem); +extern int __down_interruptible(struct semaphore * sem); extern void __up(struct semaphore * sem); -extern void atomic_add(int c, int *v); -extern void atomic_sub(int c, int *v); +#define sema_init(sem, val) atomic_set(&((sem)->count), (val)) + +/* + * These two _must_ execute atomically wrt each other. + * + * This is trivially done with load_locked/store_cond, + * i.e. load with reservation and store conditional on the ppc. 
+ */ -#define sema_init(sem, val) atomic_set(&((sem)->count), val) +static inline void wake_one_more(struct semaphore * sem) +{ + atomic_inc(&sem->waking); +} static inline int waking_non_zero(struct semaphore *sem) { - unsigned long flags; - int ret = 0; + int ret, tmp; + + __asm__ __volatile__( + "1: lwarx %1,0,%2\n" + " cmpwi 0,%1,0\n" + " addi %1,%1,-1\n" + " ble- 2f\n" + " stwcx. %1,0,%2\n" + " bne- 1b\n" + " mr %0,%1\n" + "2:" + : "=r" (ret), "=r" (tmp) + : "r" (&sem->waking), "0" (0) + : "cr0", "memory"); - save_flags(flags); - cli(); - if (atomic_read(&sem->waking) > 0) { - atomic_dec(&sem->waking); - ret = 1; - } - restore_flags(flags); return ret; } extern inline void down(struct semaphore * sem) { - for (;;) - { - atomic_dec_return(&sem->count); - if ( sem->count >= 0) - break; - __down(sem); - } + if (atomic_dec_return(&sem->count) < 0) + __down(sem); +} + +extern inline int down_interruptible(struct semaphore * sem) +{ + int ret = 0; + if (atomic_dec_return(&sem->count) < 0) + ret = __down_interruptible(sem); + return ret; } extern inline void up(struct semaphore * sem) { - atomic_inc_return(&sem->count); - if ( sem->count <= 0) - __up(sem); + if (atomic_inc_return(&sem->count) <= 0) + __up(sem); } #endif /* !(_PPC_SEMAPHORE_H) */ diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index 00a1e7c9a..7f54dd779 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h @@ -19,7 +19,7 @@ struct cpuinfo_PPC { }; extern struct cpuinfo_PPC cpu_data[NR_CPUS]; - +#endif /* __ASSEMBLY__ */ #endif /* !(__SMP__) */ diff --git a/include/asm-ppc/socket.h b/include/asm-ppc/socket.h index d09474696..632717509 100644 --- a/include/asm-ppc/socket.h +++ b/include/asm-ppc/socket.h @@ -26,15 +26,12 @@ #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 14 */ +/* To add :#define SO_REUSEPORT 15 */ #define SO_RCVLOWAT 16 #define SO_SNDLOWAT 17 #define SO_RCVTIMEO 18 #define SO_SNDTIMEO 19 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 20 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 21 -#define SO_SECURITY_ENCRYPTION_NETWORK 22 +#define SO_PASSCRED 20 +#define SO_PEERCRED 21 #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-ppc/string.h b/include/asm-ppc/string.h index 66243582e..207ab3689 100644 --- a/include/asm-ppc/string.h +++ b/include/asm-ppc/string.h @@ -1,26 +1,27 @@ #ifndef _PPC_STRING_H_ #define _PPC_STRING_H_ - - -/* - * keep things happy, the compile became unhappy since memset is - * in include/linux/string.h and lib/string.c with different prototypes - * -- Cort - */ -#if 1 -#define __HAVE_ARCH_MEMSET -extern __inline__ void * memset(void * s,int c,__kernel_size_t count) +#define __HAVE_ARCH_STRCPY +#define __HAVE_ARCH_STRNCPY +#define __HAVE_ARCH_STRLEN +#define __HAVE_ARCH_STRCMP +#define __HAVE_ARCH_STRCAT +#define __HAVE_ARCH_MEMSET +#define __HAVE_ARCH_BCOPY +#define __HAVE_ARCH_MEMCPY +#define __HAVE_ARCH_MEMMOVE +#define __HAVE_ARCH_MEMCMP +#define __HAVE_ARCH_MEMCHR +/*#define bzero(addr,size) memset((addr),(int)(0),(size))*/ +extern inline void * memchr(const void * cs,int c,size_t count) { - char *xs = (char *) s; - - while (count--) - *xs++ = c; - - return s; + unsigned long i = 0; + while ( count != i ) + { + if ( (char)c == *(char *)(cs + i) ) + return (void *)(cs + i); + i--; + } + return NULL; } #endif -#define bzero(addr,size) memset((addr),(int)(0),(size)) - - -#endif diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index 
a1a3f12a0..df527e474 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -1,27 +1,66 @@ #ifndef __PPC_SYSTEM_H #define __PPC_SYSTEM_H -#if 0 -#define mb() \ -__asm__ __volatile__("mb": : :"memory") -#endif -#define mb() __asm__ __volatile__ ("" : : :"memory") +#include <linux/delay.h> + +#define mb() __asm__ __volatile__ ("sync" : : : "memory") + +#define __save_flags(flags) ({\ + __asm__ __volatile__ ("mfmsr %0" : "=r" ((flags)) : : "memory"); }) +/* using Paul's in misc.S now -- Cort */ +extern void __restore_flags(unsigned long flags); +/* + #define __sti() _soft_sti(void) + #define __cli() _soft_cli(void) + */ +extern void __sti(void); +extern void __cli(void); -extern void __save_flags(long *flags); -extern void __restore_flags(long flags); -extern void sti(void); -extern void cli(void); +extern void _hard_sti(void); +extern void _hard_cli(void); +extern void _soft_sti(void); +extern void _soft_cli(void); extern int _disable_interrupts(void); extern void _enable_interrupts(int); -/*extern void memcpy(void *, void *, int);*/ +extern void flush_instruction_cache(void); +extern void hard_reset_now(void); +extern void poweroff_now(void); +extern void find_scsi_boot(void); +extern int sd_find_target(void *, int); +extern int _get_PVR(void); +extern void via_cuda_init(void); +extern void read_rtc_time(void); +extern void pmac_find_display(void); +extern void giveup_fpu(void); +extern void store_cache_range(unsigned long, unsigned long); +extern void cvt_fd(float *from, double *to); +extern void cvt_df(double *from, float *to); + +struct device_node; +extern void note_scsi_host(struct device_node *, void *); struct task_struct; extern void switch_to(struct task_struct *prev, struct task_struct *next); -#define save_flags(flags) __save_flags(&(flags)) -#define restore_flags(flags) __restore_flags(flags) +struct thread_struct; +extern void _switch(struct thread_struct *prev, struct thread_struct *next, + unsigned long context); + +struct pt_regs; +extern int do_signal(unsigned long oldmask, struct pt_regs *regs); +extern void dump_regs(struct pt_regs *); + +#ifndef __SMP__ +#define cli() __cli() +#define sti() __sti() +#define save_flags(flags) __save_flags(flags) +#define restore_flags(flags) __restore_flags(flags) + +#else +#error need global cli/sti etc. defined for SMP +#endif #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) diff --git a/include/asm-ppc/termbits.h b/include/asm-ppc/termbits.h index 6c19c1fef..65cab0612 100644 --- a/include/asm-ppc/termbits.h +++ b/include/asm-ppc/termbits.h @@ -7,7 +7,6 @@ typedef unsigned char cc_t; typedef unsigned int speed_t; typedef unsigned int tcflag_t; -#if 0 /* This is how it's done on Alpha - maybe later. */ /* * termios type and macro definitions. 
Be careful about adding stuff * to this file since it's used in GNU libc and there are strict rules @@ -27,23 +26,24 @@ struct termios { }; /* c_cc characters */ -#define VEOF 0 -#define VEOL 1 -#define VEOL2 2 -#define VERASE 3 -#define VWERASE 4 -#define VKILL 5 -#define VREPRINT 6 -#define VSWTC 7 -#define VINTR 8 -#define VQUIT 9 -#define VSUSP 10 -#define VSTART 12 -#define VSTOP 13 -#define VLNEXT 14 -#define VDISCARD 15 -#define VMIN 16 -#define VTIME 17 +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VMIN 5 +#define VEOL 6 +#define VTIME 7 +#define VEOL2 8 +#define VSWTC 9 + +#define VWERASE 10 +#define VREPRINT 11 +#define VSUSP 12 +#define VSTART 13 +#define VSTOP 14 +#define VLNEXT 15 +#define VDISCARD 16 /* c_iflag bits */ #define IGNBRK 0000001 @@ -57,7 +57,7 @@ struct termios { #define ICRNL 0000400 #define IXON 0001000 #define IXOFF 0002000 -#if !defined(KERNEL) || defined(__USE_BSD) +#if defined(__KERNEL__) || defined(__USE_BSD) /* POSIX.1 doesn't want these... */ # define IXANY 0004000 # define IUCLC 0010000 @@ -102,7 +102,7 @@ struct termios { #define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */ /* c_cflag bit meaning */ -#define CBAUD 0000017 +#define CBAUD 0000377 #define B0 0000000 /* hang up */ #define B50 0000001 #define B75 0000002 @@ -173,6 +173,5 @@ struct termios { #define TCSANOW 0 #define TCSADRAIN 1 #define TCSAFLUSH 2 -#endif #endif /* _PPC_TERMBITS_H */ diff --git a/include/asm-ppc/termios.h b/include/asm-ppc/termios.h index 26d88027f..b5e8c78df 100644 --- a/include/asm-ppc/termios.h +++ b/include/asm-ppc/termios.h @@ -137,18 +137,6 @@ struct termio { unsigned char c_cc[NCC]; /* control characters */ }; -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_cc[NCCS]; /* control characters */ - cc_t c_line; /* line discipline (== c_cc[19]) */ - int c_ispeed; /* input speed */ - int c_ospeed; /* output speed */ -}; - /* c_cc characters */ #define _VINTR 0 #define _VQUIT 1 @@ -161,150 +149,11 @@ struct termios { #define _VEOL2 8 #define _VSWTC 9 -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VMIN 5 -#define VEOL 6 -#define VTIME 7 -#define VEOL2 8 -#define VSWTC 9 - -#define VWERASE 10 -#define VREPRINT 11 -#define VSUSP 12 -#define VSTART 13 -#define VSTOP 14 -#define VLNEXT 15 -#define VDISCARD 16 - - #ifdef __KERNEL__ -/* eof=^D eol=\0 eol2=\0 erase=del - werase=^W kill=^U reprint=^R sxtc=\0 - intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP> - start=^Q stop=^S lnext=^V discard=^U - vmin=\1 vtime=\0 -#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000" -*/ - /* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */ #define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025" #endif -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IXON 0001000 -#define IXOFF 0002000 -#define IXANY 0004000 -#define IUCLC 0010000 -#define IMAXBEL 0020000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define ONLCR 0000002 -#define OLCUC 0000004 - -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 - -#define OFILL 00000100 -#define OFDEL 
00000200 -#define NLDLY 00001400 -#define NL0 00000000 -#define NL1 00000400 -#define NL2 00001000 -#define NL3 00001400 -#define TABDLY 00006000 -#define TAB0 00000000 -#define TAB1 00002000 -#define TAB2 00004000 -#define TAB3 00006000 -#define CRDLY 00030000 -#define CR0 00000000 -#define CR1 00010000 -#define CR2 00020000 -#define CR3 00030000 -#define FFDLY 00040000 -#define FF0 00000000 -#define FF1 00040000 -#define BSDLY 00100000 -#define BS0 00000000 -#define BS1 00100000 -#define VTDLY 00200000 -#define VT0 00000000 -#define VT1 00200000 -#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */ - -/* c_cflag bit meaning */ -#define CBAUD 0000377 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CBAUDEX 0000020 -#define B57600 00020 -#define B115200 00021 -#define B230400 00022 -#define B460800 00023 - -#define CSIZE 00001400 -#define CS5 00000000 -#define CS6 00000400 -#define CS7 00001000 -#define CS8 00001400 - -#define CSTOPB 00002000 -#define CREAD 00004000 -#define PARENB 00010000 -#define PARODD 00020000 -#define HUPCL 00040000 - -#define CLOCAL 00100000 -#define CRTSCTS 020000000000 /* flow control */ - -/* c_lflag bits */ -#define ISIG 0x00000080 -#define ICANON 0x00000100 -#define XCASE 0x00004000 -#define ECHO 0x00000008 -#define ECHOE 0x00000002 -#define ECHOK 0x00000004 -#define ECHONL 0x00000010 -#define NOFLSH 0x80000000 -#define TOSTOP 0x00400000 -#define ECHOCTL 0x00000040 -#define ECHOPRT 0x00000020 -#define ECHOKE 0x00000001 -#define FLUSHO 0x00800000 -#define PENDIN 0x20000000 -#define IEXTEN 0x00000400 - /* modem lines */ #define TIOCM_LE 0x001 #define TIOCM_DTR 0x002 @@ -321,23 +170,6 @@ struct termios { /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 @@ -349,55 +181,33 @@ struct termios { /* * Translate a "termio" structure into a "termios". Ugh. 
*/ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \ +} + #define user_termio_to_kernel_termios(termios, termio) \ -do { \ - unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ - (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ - (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ - (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ - (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - get_user((termios)->c_line, &(termio)->c_line); \ - get_user((termios)->c_cc[VINTR], &(termio)->c_cc[_VINTR]); \ - get_user((termios)->c_cc[VQUIT], &(termio)->c_cc[_VQUIT]); \ - get_user((termios)->c_cc[VERASE], &(termio)->c_cc[_VERASE]); \ - get_user((termios)->c_cc[VKILL], &(termio)->c_cc[_VKILL]); \ - get_user((termios)->c_cc[VEOF], &(termio)->c_cc[_VEOF]); \ - get_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \ - get_user((termios)->c_cc[VEOL], &(termio)->c_cc[_VEOL]); \ - get_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \ - get_user((termios)->c_cc[VEOL2], &(termio)->c_cc[_VEOL2]); \ - get_user((termios)->c_cc[VSWTC], &(termio)->c_cc[_VSWTC]); \ -} while(0) +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) /* * Translate a "termios" structure into a "termio". Ugh. - * - * Note the "fun" _VMIN overloading. */ #define kernel_termios_to_user_termio(termio, termios) \ -do { \ +({ \ put_user((termios)->c_iflag, &(termio)->c_iflag); \ put_user((termios)->c_oflag, &(termio)->c_oflag); \ put_user((termios)->c_cflag, &(termio)->c_cflag); \ put_user((termios)->c_lflag, &(termio)->c_lflag); \ put_user((termios)->c_line, &(termio)->c_line); \ - put_user((termios)->c_cc[VINTR], &(termio)->c_cc[_VINTR]); \ - put_user((termios)->c_cc[VQUIT], &(termio)->c_cc[_VQUIT]); \ - put_user((termios)->c_cc[VERASE], &(termio)->c_cc[_VERASE]); \ - put_user((termios)->c_cc[VKILL], &(termio)->c_cc[_VKILL]); \ - put_user((termios)->c_cc[VEOF], &(termio)->c_cc[_VEOF]); \ - put_user((termios)->c_cc[VEOL], &(termio)->c_cc[_VEOL]); \ - put_user((termios)->c_cc[VEOL2], &(termio)->c_cc[_VEOL2]); \ - put_user((termios)->c_cc[VSWTC], &(termio)->c_cc[_VSWTC]); \ - if (1/*!((termios)->c_lflag & ICANON)*/) { \ - put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \ - put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \ - } \ -} while(0) + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h index 927447dbd..b1e19ef1f 100644 --- a/include/asm-ppc/uaccess.h +++ b/include/asm-ppc/uaccess.h @@ -1,22 +1,30 @@ -#ifndef _ASM_UACCESS_H -#define _ASM_UACCESS_H +#ifndef _PPC_UACCESS_H +#define _PPC_UACCESS_H #ifndef __ASSEMBLY__ #include <linux/sched.h> #include <linux/errno.h> -#define KERNEL_DS (0) -#define USER_DS (1) - #define VERIFY_READ 0 #define VERIFY_WRITE 1 -#define get_fs() (current->tss.fs) -#define get_ds() (KERNEL_DS) -#define set_fs(val) ( 
current->tss.fs = (val)) +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define KERNEL_DS (0) +#define USER_DS (1) + +#define get_fs() (current->tss.fs) +#define get_ds() (KERNEL_DS) +#define set_fs(val) (current->tss.fs = (val)) #define __user_ok(addr,size) (((size) <= 0x80000000)&&((addr) <= 0x80000000-(size))) -#define __kernel_ok (get_fs() == KERNEL_DS) +#define __kernel_ok (get_fs() == KERNEL_DS) #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) @@ -25,126 +33,227 @@ extern inline int verify_area(int type, const void * addr, unsigned long size) return access_ok(type,addr,size) ? 0 : -EFAULT; } + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup otherwise. */ +extern unsigned long search_exception_table(unsigned long); + + /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * - * As the powerpc uses the same address space for kernel and user - * data, we can just do these as direct assignments. (Of course, the - * exception handling means that it's no longer "just"...) + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the uglyness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). * - * Careful to not - * (a) re-use the arguments for side effects (sizeof/typeof is ok) - * (b) require any knowledge of processes at this stage + * As we use the same address space for kernel and user data on the + * PowerPC, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) */ +#define get_user(x,ptr) \ + __get_user_check((x),(ptr),sizeof(*(ptr))) +#define put_user(x,ptr) \ + __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) + +#define __get_user(x,ptr) \ + __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x,ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) + /* - * The "__xxx" versions do not do address space checking, useful when - * doing multiple accesses to the same area (the programmer has to do the - * checks by hand with "access_ok()") + * The "xxx_ret" versions return constant specified in third argument, if + * something bad happens. 
These macros can be optimized for the + * case of just returning from the function xxx_ret is used. */ -#define put_user(x,ptr) ({ \ -unsigned long __pu_addr = (unsigned long)(ptr); \ -__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) -#define get_user(x,ptr) ({ \ -unsigned long __gu_addr = (unsigned long)(ptr); \ -__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) +#define put_user_ret(x,ptr,ret) ({ \ +if (put_user(x,ptr)) return ret; }) + +#define get_user_ret(x,ptr,ret) ({ \ +if (get_user(x,ptr)) return ret; }) + +#define __put_user_ret(x,ptr,ret) ({ \ +if (__put_user(x,ptr)) return ret; }) + +#define __get_user_ret(x,ptr,ret) ({ \ +if (__get_user(x,ptr)) return ret; }) + + +extern long __put_user_bad(void); + +#define __put_user_nocheck(x,ptr,size) \ +({ \ + long __pu_err; \ + __put_user_size((x),(ptr),(size),__pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x,ptr,size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ + __put_user_size((x),__pu_addr,(size),__pu_err); \ + __pu_err; \ +}) + +#define __put_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __put_user_asm(x,ptr,retval,"stb"); break; \ + case 2: __put_user_asm(x,ptr,retval,"sth"); break; \ + case 4: __put_user_asm(x,ptr,retval,"stw"); break; \ + default: __put_user_bad(); \ + } \ +} while (0) -#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr))) struct __large_struct { unsigned long buf[100]; }; -#define __m(x) ((struct __large_struct *)(x)) - -#define __put_user_check(x,addr,size) ({ \ -int __pu_ret; \ -__pu_ret = -EFAULT; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __pu_ret =__put_user_8(x,addr); break; \ -case 2: __pu_ret =__put_user_16(x,addr); break; \ -case 4: __pu_ret =__put_user_32(x,addr); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} } __pu_ret; }) - -#define __put_user_nocheck(x,addr,size) ({ \ -int __pu_ret; \ -__pu_ret = -EFAULT; \ -switch (size) { \ -case 1: __pu_ret =__put_user_8(x,addr); break; \ -case 2: __pu_ret =__put_user_16(x,addr); break; \ -case 4: __pu_ret =__put_user_32(x,addr); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} __pu_ret; }) - -extern int __put_user_bad(void); - -#define __get_user_check(x,addr,size,type) ({ \ -register int __gu_ret asm("r4"); \ -unsigned long __gu_val = 0; \ -__gu_ret = -EFAULT; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __gu_val = __get_user_8(__gu_val,addr); break; \ -case 2: __gu_val = __get_user_16(__gu_val,addr); break; \ -case 4: __gu_val = __get_user_32(__gu_val,addr); break; \ -default: __get_user_bad(); break; \ -} } (x) = (type) __gu_val; __gu_ret; }) - -#define __get_user_nocheck(x,addr,size,type) ({ \ -register int __gu_ret asm("r4"); \ -unsigned long __gu_val = 0; \ -__gu_ret = -EFAULT; \ -switch (size) { \ -case 1: __gu_val =__get_user_8(__gu_val,addr); break; \ -case 2: __gu_val =__get_user_16(__gu_val,addr); break; \ -case 4: __gu_val =__get_user_32(__gu_val,addr); break; \ -default: __gu_val = __get_user_bad(); break; \ -} (x) = (type) __gu_val; __gu_ret; }) - +#define __m(x) (*(struct __large_struct *)(x)) + +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. 
+ */ +#define __put_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1,0(%2)\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: li %0,%3\n" \ + " b 2b\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".text" \ + : "=r"(err) \ + : "r"(x), "b"(addr), "i"(-EFAULT), "0"(err)) + + +#define __get_user_nocheck(x,ptr,size) \ +({ \ + long __gu_err, __gu_val; \ + __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x,ptr,size) \ +({ \ + long __gu_err = -EFAULT, __gu_val = 0; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (access_ok(VERIFY_READ,__gu_addr,size)) \ + __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +extern long __get_user_bad(void); + +#define __get_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm(x,ptr,retval,"lbz"); break; \ + case 2: __get_user_asm(x,ptr,retval,"lhz"); break; \ + case 4: __get_user_asm(x,ptr,retval,"lwz"); break; \ + default: (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1,0(%2)\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: li %0,%3\n" \ + " li %1,0\n" \ + " b 2b\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".text" \ + : "=r"(err), "=r"(x) \ + : "b"(addr), "i"(-EFAULT), "0"(err)) /* more complex routines */ -extern int __copy_tofrom_user(unsigned long to, unsigned long from, int size); - -#define copy_to_user(to,from,n) ({ \ -unsigned long __copy_to = (unsigned long) (to); \ -unsigned long __copy_size = (unsigned long) (n); \ -unsigned long __copy_res = -EFAULT; \ -if(__copy_size && __access_ok(__copy_to, __copy_size)) { \ -__copy_res = __copy_tofrom_user(__copy_to, (unsigned long) (from), __copy_size); \ -} \ -__copy_res; }) - -#define copy_from_user(to,from,n) ({ \ -unsigned long __copy_from = (unsigned long) (from); \ -unsigned long __copy_size = (unsigned long) (n); \ -unsigned long __copy_res = -EFAULT; \ -if(__copy_size && __access_ok(__copy_from, __copy_size)) { \ -__copy_res = __copy_tofrom_user((unsigned long) (to), __copy_from, __copy_size); \ -} \ -__copy_res; }) - -extern int __clear_user(unsigned long addr, int size); - -#define clear_user(addr,n) ({ \ -unsigned long __clear_addr = (unsigned long) (addr); \ -int __clear_size = (int) (n); \ -int __clear_res = -EFAULT; \ -if(__clear_size && __access_ok(__clear_addr, __clear_size)) { \ -__clear_res = __clear_user(__clear_addr, __clear_size); \ -} \ -__clear_res; }) - -extern int __strncpy_from_user(unsigned long dest, unsigned long src, int count); - -#define strncpy_from_user(dest,src,count) ({ \ -unsigned long __sfu_src = (unsigned long) (src); \ -int __sfu_count = (int) (count); \ -long __sfu_res = -EFAULT; \ -if(__access_ok(__sfu_src, __sfu_count)) { \ -__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \ -} __sfu_res; }) +extern int __copy_tofrom_user(void *to, const void *from, unsigned long size); + +extern inline unsigned long +copy_from_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + return __copy_tofrom_user(to, from, n); + return n? -EFAULT: 0; +} + +extern inline unsigned long +copy_to_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_tofrom_user(to, from, n); + return n? 
-EFAULT: 0; +} + +#define __copy_from_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) +#define __copy_to_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) + +extern unsigned long __clear_user(void *addr, unsigned long size); + +extern inline unsigned long +clear_user(void *addr, unsigned long size) +{ + if (access_ok(VERIFY_WRITE, addr, size)) + return __clear_user(addr, size); + return size? -EFAULT: 0; +} + +extern int __strncpy_from_user(char *dst, const char *src, long count); + +extern inline long +strncpy_from_user(char *dst, const char *src, long count) +{ + if (access_ok(VERIFY_READ, src, 1)) + return __strncpy_from_user(dst, src, count); + return -EFAULT; +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 for error + */ + +extern long strlen_user(const char *); #endif /* __ASSEMBLY__ */ -#endif /* _ASM_UACCESS_H */ +#endif /* _PPC_UACCESS_H */ diff --git a/include/asm-ppc/unistd.h b/include/asm-ppc/unistd.h index 47b0a7912..5665d906c 100644 --- a/include/asm-ppc/unistd.h +++ b/include/asm-ppc/unistd.h @@ -1,10 +1,6 @@ -/* * Last edited: Nov 17 16:28 1995 (cort) */ #ifndef _ASM_PPC_UNISTD_H_ #define _ASM_PPC_UNISTD_H_ -#define _NR(n) #n -#define _lisc(n) "li 0," _NR(n) - /* * This file contains the system call numbers. */ @@ -175,180 +171,79 @@ #define __NR_mremap 163 #define __NR_setresuid 164 #define __NR_getresuid 165 -#define __NR_nfsservctl 166 +#define __NR_query_module 166 +#define __NR_poll 167 +#define __NR_nfsservctl 168 + +#define __NR(n) #n +#define __do_syscall(n) \ + asm volatile ("li 0,%0\n\ + sc\n\ + bns 1f\n\ + mr 0,3\n\ + lis 3,errno@ha\n\ + stw 0,errno@l(3)\n\ + li 3,-1\n\ +1:" : : "i" (n) : "r0", "r3") -/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. 
*/ #define _syscall0(type,name) \ type name(void) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} - +{ __do_syscall(__NR_##name); } #define _syscall1(type,name,type1,arg1) \ type name(type1 arg1) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} - +{ __do_syscall(__NR_##name); } #define _syscall2(type,name,type1,arg1,type2,arg2) \ type name(type1 arg1,type2 arg2) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} - +{ __do_syscall(__NR_##name); } #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ type name(type1 arg1,type2 arg2,type3 arg3) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} - +{ __do_syscall(__NR_##name); } #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} - +{ __do_syscall(__NR_##name); } #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ -{ \ - __asm__ (_lisc(__NR_##name)); \ - __asm__ ("sc"); \ - __asm__ ("mr 31,3"); \ - __asm__ ("bns 10f"); \ - __asm__ ("mr 0,3"); \ - __asm__ ("lis 3,errno@ha"); \ - __asm__ ("stw 0,errno@l(3)"); \ - __asm__ ("li 3,-1"); \ - __asm__ ("10:"); \ -} +{ __do_syscall(__NR_##name); } #ifdef __KERNEL_SYSCALLS__ /* - * we need this inline - forking from kernel space will result - * in NO COPY ON WRITE (!!!), until an execve is executed. This - * is no problem, but for the stack. This is handled by not letting - * main() use the stack at all after fork(). Thus, no function - * calls - which means inline code for fork too, as otherwise we - * would use the stack upon exit from 'fork()'. - * - * Actually only pause and fork are needed inline, so that there - * won't be any messing with the stack from main(), but we define - * some others too. + * Forking from kernel space will result in NO COPY ON WRITE (!!!), + * until an execve is executed. This is no problem, but for the stack. + * This is handled by not letting main() use the stack at all after + * fork(). On the PowerPC, this means we can only call leaf functions. */ - -#if 0 /* - This is the mechanism for creating a new kernel thread. - For the time being it only behaves the same as clone(). - It should be changed very soon to work properly and cleanly. This - gets us going for now, though. - - some versions of gcc hate this -- complains about constraints being - incorrect. not sure why so it's in arch/ppc/kernel/misc.S now. 
- -- Cort + * Create a new kernel thread. */ -static __inline__ long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) -{ - long retval; - __asm__ ( - "li 0, 120 \n\t" /* __NR_clone */ - "li 3, %5 \n\t" /* load flags as arg to clone */ - /*"mr 1,7 \n\t"*/ /* save kernel stack */ - "sc \n\t" /* syscall */ - /*"cmp 0,1,7 \n\t"*/ /* if kernel stack changes -- child */ - "cmpi 0,3,0 \n\t" - "bne 1f \n\t" /* return if parent */ - /* this is in child */ - "li 3, %3 \n\t" /* child -- load args and call fn */ - "mtlr %4 \n\t" - "blrl \n\t" - "li 0, %2 \n\t" /* exit after child exits */ - "li 3, 0 \n\t" - "sc \n\t" - /* parent */ - "1: \n\t" - :"=3" (retval) - :"i" (__NR_clone), "i" (__NR_exit), - "r" (arg), "r" (fn), "g" (CLONE_VM|flags) - :"cc", "1", "0", "3", "7", "31", "memory" ); - return retval; -} -#else extern long __kernel_thread(unsigned long, int (*)(void *), void *); static inline long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { return __kernel_thread(flags | CLONE_VM, fn, arg); } -#endif -#define __NR__exit __NR_exit -static inline _syscall0(int,idle) /* made inline "just in case" -- Cort */ -static inline _syscall0(int,fork) /* needs to be inline */ -static inline _syscall0(int,pause) /* needs to be inline */ -static inline _syscall1(int,setup,int,magic) /* called in init before execve */ -static inline _syscall0(int,sync) -static inline _syscall0(pid_t,setsid) -static /*inline*/ _syscall3(int,write,int,fd,const char *,buf,off_t,count) -static /*inline*/ _syscall1(int,dup,int,fd) -static /*inline*/ _syscall3(int,execve,const char *,file,char **,argv,char **,envp) -static /*inline*/ _syscall3(int,open,const char *,file,int,flag,int,mode) -static /*inline*/ _syscall1(int,close,int,fd) -static /*inline*/ _syscall1(int,_exit,int,exitcode) -static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options) -static inline _syscall2(int,clone,unsigned long,flags,char *,esp) +/* + * System call prototypes. + */ +int idle(void); +int setup(int); +int sync(void); +pid_t setsid(void); +int write(int, const char *, off_t); +int dup(int); +int execve(const char *, char **, char **); +int open(const char *, int, int); +int close(int); +pid_t waitpid(pid_t, int *, int); -/* called from init before execve -- need to be inline? -- Cort */ static inline pid_t wait(int * wait_stat) { return waitpid(-1,wait_stat,0); } -#endif +#endif /* __KERNEL_SYSCALLS__ */ #endif /* _ASM_PPC_UNISTD_H_ */ - - |