author     Ralf Baechle <ralf@linux-mips.org>    2000-05-12 23:48:34 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-05-12 23:48:34 +0000
commit     7fd36ebeeec9244a7431bb010e6e3c5e4848a0d5 (patch)
tree       5fb03a9aafdd1cec5f4f6ff7f1873174cb89b66c /include
parent     ba2dacab305c598cd4c34a604f8e276bf5bab5ff (diff)
Merge with Linux 2.3.99-pre8. Linus must hate me, too many patches ;-)
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-sa1100/time.h  60
-rw-r--r--  include/asm-arm/bitops.h  29
-rw-r--r--  include/asm-arm/iomd.h  8
-rw-r--r--  include/asm-arm/proc-fns.h  8
-rw-r--r--  include/asm-arm/system.h  73
-rw-r--r--  include/asm-i386/string-486.h  219
-rw-r--r--  include/asm-s390/a.out.h  38
-rw-r--r--  include/asm-s390/atomic.h  215
-rw-r--r--  include/asm-s390/bitops.h  888
-rw-r--r--  include/asm-s390/bugs.h  22
-rw-r--r--  include/asm-s390/byteorder.h  103
-rw-r--r--  include/asm-s390/cache.h  16
-rw-r--r--  include/asm-s390/chandev.h  87
-rw-r--r--  include/asm-s390/checksum.h  188
-rw-r--r--  include/asm-s390/current.h  31
-rw-r--r--  include/asm-s390/delay.h  25
-rw-r--r--  include/asm-s390/div64.h  10
-rw-r--r--  include/asm-s390/dma.h  17
-rw-r--r--  include/asm-s390/ebcdic.h  51
-rw-r--r--  include/asm-s390/elf.h  80
-rw-r--r--  include/asm-s390/errno.h  140
-rw-r--r--  include/asm-s390/fcntl.h  72
-rw-r--r--  include/asm-s390/gdb-stub.h  18
-rw-r--r--  include/asm-s390/hardirq.h  78
-rw-r--r--  include/asm-s390/hdreg.h  13
-rw-r--r--  include/asm-s390/ide.h  54
-rw-r--r--  include/asm-s390/init.h  29
-rw-r--r--  include/asm-s390/io.h  94
-rw-r--r--  include/asm-s390/ioctl.h  78
-rw-r--r--  include/asm-s390/ioctls.h  88
-rw-r--r--  include/asm-s390/ipc.h  39
-rw-r--r--  include/asm-s390/ipcbuf.h  29
-rw-r--r--  include/asm-s390/irq.h  788
-rw-r--r--  include/asm-s390/irqextras390.h  151
-rw-r--r--  include/asm-s390/lowcore.h  182
-rw-r--r--  include/asm-s390/major.h  150
-rw-r--r--  include/asm-s390/mathemu.h  48
-rw-r--r--  include/asm-s390/misc390.h  14
-rw-r--r--  include/asm-s390/mman.h  46
-rw-r--r--  include/asm-s390/mmu_context.h  45
-rw-r--r--  include/asm-s390/msgbuf.h  31
-rw-r--r--  include/asm-s390/namei.h  22
-rw-r--r--  include/asm-s390/page.h  120
-rw-r--r--  include/asm-s390/param.h  28
-rw-r--r--  include/asm-s390/pgalloc.h  345
-rw-r--r--  include/asm-s390/pgtable.h  418
-rw-r--r--  include/asm-s390/poll.h  33
-rw-r--r--  include/asm-s390/posix_types.h  76
-rw-r--r--  include/asm-s390/processor.h  186
-rw-r--r--  include/asm-s390/ptrace.h  327
-rw-r--r--  include/asm-s390/queue.h  117
-rw-r--r--  include/asm-s390/resource.h  54
-rw-r--r--  include/asm-s390/s390-gdbregs.h  84
-rw-r--r--  include/asm-s390/s390-regs-common.h  104
-rw-r--r--  include/asm-s390/s390dyn.h  60
-rw-r--r--  include/asm-s390/s390io.h  85
-rw-r--r--  include/asm-s390/s390mach.h  65
-rw-r--r--  include/asm-s390/segment.h  4
-rw-r--r--  include/asm-s390/semaphore-helper.h  100
-rw-r--r--  include/asm-s390/semaphore.h  191
-rw-r--r--  include/asm-s390/sembuf.h  25
-rw-r--r--  include/asm-s390/setup.h  52
-rw-r--r--  include/asm-s390/shmbuf.h  42
-rw-r--r--  include/asm-s390/shmparam.h  13
-rw-r--r--  include/asm-s390/sigcontext.h  36
-rw-r--r--  include/asm-s390/siginfo.h  205
-rw-r--r--  include/asm-s390/signal.h  185
-rw-r--r--  include/asm-s390/sigp.h  254
-rw-r--r--  include/asm-s390/smp.h  79
-rw-r--r--  include/asm-s390/smplock.h  60
-rw-r--r--  include/asm-s390/socket.h  66
-rw-r--r--  include/asm-s390/sockios.h  20
-rw-r--r--  include/asm-s390/softirq.h  35
-rw-r--r--  include/asm-s390/spinlock.h  120
-rw-r--r--  include/asm-s390/stat.h  85
-rw-r--r--  include/asm-s390/statfs.h  33
-rw-r--r--  include/asm-s390/string.h  107
-rw-r--r--  include/asm-s390/system.h  230
-rw-r--r--  include/asm-s390/termbits.h  180
-rw-r--r--  include/asm-s390/termios.h  113
-rw-r--r--  include/asm-s390/timex.h  29
-rw-r--r--  include/asm-s390/types.h  66
-rw-r--r--  include/asm-s390/uaccess.h  519
-rw-r--r--  include/asm-s390/ucontext.h  20
-rw-r--r--  include/asm-s390/unaligned.h  24
-rw-r--r--  include/asm-s390/unistd.h  374
-rw-r--r--  include/asm-s390/user.h  77
-rw-r--r--  include/linux/blkdev.h  3
-rw-r--r--  include/linux/cyclades.h  10
-rw-r--r--  include/linux/dasd.h  225
-rw-r--r--  include/linux/fb.h  3
-rw-r--r--  include/linux/genhd.h  30
-rw-r--r--  include/linux/kernel_stat.h  13
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h  48
-rw-r--r--  include/linux/pagemap.h  1
-rw-r--r--  include/linux/raid/md.h  2
-rw-r--r--  include/linux/raid/md_k.h  41
-rw-r--r--  include/linux/raid/raid1.h  79
-rw-r--r--  include/linux/raid/raid5.h  118
-rw-r--r--  include/linux/raid/xor.h  12
-rw-r--r--  include/linux/sysctl.h  9
101 files changed, 10170 insertions, 247 deletions
diff --git a/include/asm-arm/arch-sa1100/time.h b/include/asm-arm/arch-sa1100/time.h
new file mode 100644
index 000000000..205a317a0
--- /dev/null
+++ b/include/asm-arm/arch-sa1100/time.h
@@ -0,0 +1,60 @@
+/*
+ * linux/include/asm-arm/arch-sa1100/time.h
+ *
+ * Copyright (C) 1998 Deborah Wallach.
+ * Twiddles (C) 1999 Hugo Fiennes <hugo@empeg.com>
+ *
+ * 2000/03/29 (C) Nicolas Pitre <nico@cam.org>
+ * Rewritten: big cleanup, much simpler, better HZ accuracy.
+ *
+ */
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/irqs.h>
+
+
+/* IRQs are disabled before entering here from do_gettimeofday() */
+static unsigned long sa1100_gettimeoffset (void)
+{
+ unsigned long ticks_to_match, elapsed, usec;
+
+ /* Get ticks before next timer match */
+ ticks_to_match = OSMR0 - OSCR;
+
+ /* We need elapsed ticks since last match */
+ elapsed = LATCH - ticks_to_match;
+
+ /* Now convert them to usec */
+ usec = (unsigned long)(elapsed*tick)/LATCH;
+
+ return usec;
+}
+
+
+static void sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ int next_match;
+
+ /* Loop until we get ahead of the free running timer.
+ * This ensures an exact clock tick count and time accuracy.
+ * Should be IRQ race free.
+ */
+ do {
+ do_timer(regs);
+ OSSR = OSSR_M0; /* Clear match on timer 0 */
+ next_match = (OSMR0 += LATCH);
+ } while( (signed long)(next_match - OSCR) <= 0 );
+}
+
+
+extern inline void setup_timer (void)
+{
+ gettimeoffset = sa1100_gettimeoffset;
+ timer_irq.handler = sa1100_timer_interrupt;
+ OSMR0 = 0; /* set initial match at 0 */
+ OSSR = 0xf; /* clear status on all timers */
+ setup_arm_irq(IRQ_OST0, &timer_irq);
+ OIER |= OIER_E0; /* enable match on timer 0 to cause interrupts */
+ OSCR = 0; /* initialize free-running timer, force first match */
+}
+
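Aside (not part of the patch): the usec value returned by sa1100_gettimeoffset() above is just a linear scaling of elapsed OS-timer ticks into the microseconds of the current jiffy. A minimal user-space sketch of that arithmetic, assuming the SA-1100 OS timer clock of 3.6864 MHz and HZ = 100, so that LATCH = 3686400/100 = 36864 and tick = 1000000/100 = 10000 — these values come from the usual kernel configuration, not from this diff:

/* Sketch only: mirrors usec = (elapsed * tick) / LATCH from the patch above. */
#include <stdio.h>

#define LATCH 36864UL   /* OS timer ticks per jiffy (assumed 3.6864 MHz, HZ = 100) */
#define TICK  10000UL   /* microseconds per jiffy at HZ = 100 */

int main(void)
{
        unsigned long elapsed;

        /* a quarter, half, three quarters and a full jiffy of elapsed ticks */
        for (elapsed = 0; elapsed <= LATCH; elapsed += LATCH / 4) {
                unsigned long usec = (elapsed * TICK) / LATCH;
                printf("elapsed=%5lu ticks -> %5lu usec\n", elapsed, usec);
        }
        return 0;
}

Half a jiffy of elapsed ticks (18432) reports 5000 usec, a full jiffy reports 10000 usec, which is exactly the resolution do_gettimeofday() gains between timer interrupts.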
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index 5dcfe966f..666efc117 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -1,26 +1,29 @@
-#ifndef __ASM_ARM_BITOPS_H
-#define __ASM_ARM_BITOPS_H
-
/*
* Copyright 1995, Russell King.
* Various bits and pieces copyrights include:
* Linus Torvalds (test_bit).
- */
-
-/*
- * These should be done with inline assembly.
- * All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ *
+ * Please note that the code in this file should never be included
+ * from user space. Many of these are not implemented in assembler
+ * since they would be too costly. Also, they require privileged
+ * instructions (which are not available from user mode) to ensure
+ * that they are atomic.
*/
+#ifndef __ASM_ARM_BITOPS_H
+#define __ASM_ARM_BITOPS_H
+
+#ifdef __KERNEL__
+
/*
- * Function prototypes to keep gcc -Wall happy
+ * Function prototypes to keep gcc -Wall happy.
*/
extern void set_bit(int nr, volatile void * addr);
extern void clear_bit(int nr, volatile void * addr);
extern void change_bit(int nr, volatile void * addr);
+
extern int test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
@@ -53,8 +56,6 @@ extern __inline__ unsigned long ffz(unsigned long word)
return k;
}
-#ifdef __KERNEL__
-
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
@@ -72,10 +73,6 @@ extern __inline__ unsigned long ffz(unsigned long word)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
#define ext2_set_bit test_and_set_bit
#define ext2_clear_bit test_and_clear_bit
#define ext2_test_bit test_bit
diff --git a/include/asm-arm/iomd.h b/include/asm-arm/iomd.h
index 9726cd55d..3a1e09dfe 100644
--- a/include/asm-arm/iomd.h
+++ b/include/asm-arm/iomd.h
@@ -1,3 +1,11 @@
+/*
+ * linux/include/asm-arm/iomd.h
+ *
+ * Copyright (C) 1999 Russell King
+ *
+ * This file contains information about the IOMD ASIC used in the
+ * Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
+ */
#include <linux/config.h>
#ifndef __ASSEMBLY__
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index 703f653b8..5e5f1e623 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -55,6 +55,14 @@
# define CPU_NAME sa1100
# endif
# endif
+# ifdef CONFIG_CPU_ARM720
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME arm720
+# endif
+# endif
#endif
#ifndef MULTI_CPU
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 40ab75a29..0283a7f23 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -34,11 +34,18 @@ extern unsigned int __machine_arch_type;
#define MACH_TYPE_SHARK 15
#define MACH_TYPE_BRUTUS 16
#define MACH_TYPE_PERSONAL_SERVER 17
+#define MACH_TYPE_SA1100 18 /* unused/too general */
+#define MACH_TYPE_L7200 19
+#define MACH_TYPE_SA1110 20 /* unused/too general */
+#define MACH_TYPE_INTEGRATOR 21
#define MACH_TYPE_BITSY 22
+#define MACH_TYPE_IXP1200 23
#define MACH_TYPE_THINCLIENT 24
#define MACH_TYPE_ASSABET 25
#define MACH_TYPE_VICTOR 26
#define MACH_TYPE_LART 27
+#define MACH_TYPE_RANGER 28
+#define MACH_TYPE_GRAPHICSCLIENT 29
/*
* Sort out a definition for machine_arch_type
@@ -53,6 +60,9 @@ extern unsigned int __machine_arch_type;
* - switch (machine_arch_type) { }
* - if (machine_arch_type = xxxx)
* - __machine_arch_type
+ *
+ * Please note that these are kept in numeric order (ie, the same
+ * order as the list above).
*/
#ifdef CONFIG_ARCH_EBSA110
# ifdef machine_arch_type
@@ -198,28 +208,40 @@ extern unsigned int __machine_arch_type;
# define machine_is_personal_server() (0)
#endif
-#ifdef CONFIG_SA1100_ITSY
+#ifdef CONFIG_ARCH_L7200
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_ITSY
+# define machine_arch_type MACH_TYPE_L7200
# endif
-# define machine_is_itsy() (machine_arch_type == MACH_TYPE_ITSY)
+# define machine_is_l7200() (machine_arch_type == MACH_TYPE_L7200)
#else
-# define machine_is_itsy() (0)
+# define machine_is_l7200() (0)
#endif
-#ifdef CONFIG_SA1100_EMPEG
+#ifdef CONFIG_SA1100_BITSY
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_EMPEG
+# define machine_arch_type MACH_TYPE_BITSY
# endif
-# define machine_is_empeg() (machine_arch_type == MACH_TYPE_EMPEG)
+# define machine_is_bitsy() (machine_arch_type == MACH_TYPE_BITSY)
#else
-# define machine_is_empeg() (0)
+# define machine_is_bitsy() (0)
+#endif
+
+#ifdef CONFIG_SA1100_THINCLIENT
+# ifdef machine_arch_type
+# undef machine_arch_type
+# define machine_arch_type __machine_arch_type
+# else
+# define machine_arch_type MACH_TYPE_THINCLIENT
+# endif
+# define machine_is_thinclient() (machine_arch_type == MACH_TYPE_THINCLIENT)
+#else
+# define machine_is_thinclient() (0)
#endif
#ifdef CONFIG_SA1100_ASSABET
@@ -258,52 +280,55 @@ extern unsigned int __machine_arch_type;
# define machine_is_lart() (0)
#endif
-#ifdef CONFIG_SA1100_BITSY
+/*
+ * The following are currently unregistered
+ */
+#ifdef CONFIG_SA1100_ITSY
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_BITSY
+# define machine_arch_type MACH_TYPE_ITSY
# endif
-# define machine_is_bitsy() (machine_arch_type == MACH_TYPE_BITSY)
+# define machine_is_itsy() (machine_arch_type == MACH_TYPE_ITSY)
#else
-# define machine_is_bitsy() (0)
+# define machine_is_itsy() (0)
#endif
-#ifdef CONFIG_SA1100_TIFON
+#ifdef CONFIG_SA1100_EMPEG
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_TIFON
+# define machine_arch_type MACH_TYPE_EMPEG
# endif
-# define machine_is_tifon() (machine_arch_type == MACH_TYPE_TIFON)
+# define machine_is_empeg() (machine_arch_type == MACH_TYPE_EMPEG)
#else
-# define machine_is_tifon() (0)
+# define machine_is_empeg() (0)
#endif
-#ifdef CONFIG_SA1100_PLEB
+#ifdef CONFIG_SA1100_TIFON
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_PLEB
+# define machine_arch_type MACH_TYPE_TIFON
# endif
-# define machine_is_pleb() (machine_arch_type == MACH_TYPE_PLEB)
+# define machine_is_tifon() (machine_arch_type == MACH_TYPE_TIFON)
#else
-# define machine_is_pleb() (0)
+# define machine_is_tifon() (0)
#endif
-#ifdef CONFIG_SA1100_THINCLIENT
+#ifdef CONFIG_SA1100_PLEB
# ifdef machine_arch_type
# undef machine_arch_type
# define machine_arch_type __machine_arch_type
# else
-# define machine_arch_type MACH_TYPE_THINCLIENT
+# define machine_arch_type MACH_TYPE_PLEB
# endif
-# define machine_is_thinclient() (machine_arch_type == MACH_TYPE_THINCLIENT)
+# define machine_is_pleb() (machine_arch_type == MACH_TYPE_PLEB)
#else
-# define machine_is_thinclient() (0)
+# define machine_is_pleb() (0)
#endif
#ifdef CONFIG_SA1100_PENNY
diff --git a/include/asm-i386/string-486.h b/include/asm-i386/string-486.h
index 7c3ee1a1b..06022132c 100644
--- a/include/asm-i386/string-486.h
+++ b/include/asm-i386/string-486.h
@@ -18,10 +18,12 @@
* 1999/10/5 Proper register args for newer GCCs and minor bugs
* fixed - Petko Manolov (petkan@spct.net)
* 1999/10/14 3DNow memscpy() added - Petkan
+ * 2000/05/09 extern changed to static in function definitions
+ * and a few cleanups - Petkan
*/
#define __HAVE_ARCH_STRCPY
-extern inline char * strcpy(char * dest,const char *src)
+static inline char * strcpy(char * dest,const char *src)
{
register char *tmp= (char *)dest;
register char dummy;
@@ -40,7 +42,7 @@ return dest;
}
#define __HAVE_ARCH_STRNCPY
-extern inline char * strncpy(char * dest,const char *src,size_t count)
+static inline char * strncpy(char * dest,const char *src,size_t count)
{
register char *tmp= (char *)dest;
register char dummy;
@@ -68,7 +70,7 @@ return dest;
}
#define __HAVE_ARCH_STRCAT
-extern inline char * strcat(char * dest,const char * src)
+static inline char * strcat(char * dest,const char * src)
{
register char *tmp = (char *)(dest-1);
register char dummy;
@@ -89,7 +91,7 @@ return dest;
}
#define __HAVE_ARCH_STRNCAT
-extern inline char * strncat(char * dest,const char * src,size_t count)
+static inline char * strncat(char * dest,const char * src,size_t count)
{
register char *tmp = (char *)(dest-1);
register char dummy;
@@ -114,7 +116,7 @@ return dest;
}
#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char * cs,const char * ct)
+static inline int strcmp(const char * cs,const char * ct)
{
register int __res;
__asm__ __volatile__(
@@ -138,7 +140,7 @@ return __res;
}
#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char * cs,const char * ct,size_t count)
+static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
__asm__ __volatile__(
@@ -163,7 +165,7 @@ return __res;
}
#define __HAVE_ARCH_STRCHR
-extern inline char * strchr(const char * s, int c)
+static inline char * strchr(const char * s, int c)
{
register char * __res;
__asm__ __volatile__(
@@ -182,7 +184,7 @@ return __res;
}
#define __HAVE_ARCH_STRRCHR
-extern inline char * strrchr(const char * s, int c)
+static inline char * strrchr(const char * s, int c)
{
int d0, d1;
register char * __res;
@@ -199,35 +201,9 @@ __asm__ __volatile__(
return __res;
}
-#define __HAVE_ARCH_STRSPN
-extern inline size_t strspn(const char * cs, const char * ct)
-{
-int d0, d1;
-register char * __res;
-__asm__ __volatile__(
- "movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "movl %%ecx,%%edx\n"
- "1:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 2f\n\t"
- "movl %6,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "je 1b\n"
- "2:\tdecl %0"
- :"=S" (__res), "=&a" (d0), "=&c" (d1)
- :"0" (cs), "1" (0), "2" (0xffffffff), "g" (ct)
- :"dx", "di");
-return __res-cs;
-}
#define __HAVE_ARCH_STRCSPN
-extern inline size_t strcspn(const char * cs, const char * ct)
+static inline size_t strcspn(const char * cs, const char * ct)
{
int d0, d1;
register char * __res;
@@ -253,72 +229,9 @@ __asm__ __volatile__(
return __res-cs;
}
-#if 0
-#define __HAVE_ARCH_STRPBRK
-extern inline char * strpbrk(const char * cs,const char * ct)
-{
-int d0, d1;
-register char * __res;
-__asm__ __volatile__(
- "movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "movl %%ecx,%%edx\n"
- "1:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 2f\n\t"
- "movl %6,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "jne 1b\n\t"
- "decl %0\n\t"
- "jmp 3f\n"
- "2:\txorl %0,%0\n"
- "3:"
- :"=S" (__res), "=&a" (d0), "=&c" (d1)
- :"0" (cs), "1" (0), "2" (0xffffffff), "g" (ct)
- :"dx", "di");
-return __res;
-}
-#endif
-
-#if 0
-#define __HAVE_ARCH_STRSTR
-extern inline char * strstr(const char * cs,const char * ct)
-{
-int d0, d1;
-register char * __res;
-__asm__ __volatile__(
- "movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
- "movl %%ecx,%%edx\n"
- "1:\tmovl %6,%%edi\n\t"
- "movl %%esi,%%eax\n\t"
- "movl %%edx,%%ecx\n\t"
- "repe\n\t"
- "cmpsb\n\t"
- "je 2f\n\t" /* also works for empty string, see above */
- "xchgl %%eax,%%esi\n\t"
- "incl %%esi\n\t"
- "cmpb $0,-1(%%eax)\n\t"
- "jne 1b\n\t"
- "xorl %%eax,%%eax\n\t"
- "2:"
- :"=a" (__res), "=&c" (d0), "=&S" (d1)
- :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
- :"dx", "di");
-return __res;
-}
-#endif
#define __HAVE_ARCH_STRLEN
-extern inline size_t strlen(const char * s)
+static inline size_t strlen(const char * s)
{
/*
* slightly slower on a 486, but with better chances of
@@ -339,7 +252,7 @@ return (tmp-s-1);
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
-extern inline size_t strnlen(const char * s, size_t count)
+static inline size_t strnlen(const char * s, size_t count)
{
int d0;
register int __res;
@@ -359,73 +272,12 @@ return __res;
}
/* end of additional stuff */
-#if 0
-#define __HAVE_ARCH_STRTOK
-extern inline char * strtok(char * s,const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "testl %1,%1\n\t"
- "jne 1f\n\t"
- "testl %0,%0\n\t"
- "je 8f\n\t"
- "movl %0,%1\n"
- "1:\txorl %0,%0\n\t"
- "movl $-1,%%ecx\n\t"
- "xorl %%eax,%%eax\n\t"
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "je 7f\n\t" /* empty delimiter-string */
- "movl %%ecx,%%edx\n"
- "2:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 7f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "je 2b\n\t"
- "decl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "je 7f\n\t"
- "movl %1,%0\n"
- "3:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 5f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "jne 3b\n\t"
- "decl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "je 5f\n\t"
- "movb $0,(%1)\n\t"
- "incl %1\n\t"
- "jmp 6f\n"
- "5:\txorl %1,%1\n"
- "6:\tcmpb $0,(%0)\n\t"
- "jne 7f\n\t"
- "xorl %0,%0\n"
- "7:\ttestl %0,%0\n\t"
- "jne 8f\n\t"
- "movl %0,%1\n"
- "8:"
- :"=b" (__res),"=S" (___strtok)
- :"0" (___strtok),"1" (s),"g" (ct)
- :"ax","cx","dx","di","memory");
-return __res;
-}
-#endif
/*
* These ought to get tweaked to do some cache priming.
*/
-extern inline void * __memcpy_by4(void * to, const void * from, size_t n)
+static inline void * __memcpy_by4(void * to, const void * from, size_t n)
{
register void *tmp = (void *)to;
register int dummy1,dummy2;
@@ -442,7 +294,7 @@ __asm__ __volatile__ (
return (to);
}
-extern inline void * __memcpy_by2(void * to, const void * from, size_t n)
+static inline void * __memcpy_by2(void * to, const void * from, size_t n)
{
register void *tmp = (void *)to;
register int dummy1,dummy2;
@@ -463,7 +315,7 @@ __asm__ __volatile__ (
return (to);
}
-extern inline void * __memcpy_g(void * to, const void * from, size_t n)
+static inline void * __memcpy_g(void * to, const void * from, size_t n)
{
int d0, d1, d2;
register void *tmp = (void *)to;
@@ -511,14 +363,14 @@ return (to);
** This CPU favours 3DNow strongly (eg AMD K6-II, K6-III, Athlon)
*/
-extern inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
+static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
{
if(len<512 || in_interrupt())
return __memcpy_c(to, from, len);
return _mmx_memcpy(to, from, len);
}
-extern __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
+static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
if(len<512 || in_interrupt())
return __memcpy_g(to, from, len);
@@ -536,12 +388,24 @@ extern __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
** Generic routines
*/
+
#define memcpy(d, s, count) __memcpy(d, s, count)
#endif /* CONFIG_X86_USE_3DNOW */
+
+extern void __struct_cpy_bug( void );
+
+#define struct_cpy(x,y) \
+({ \
+ if (sizeof(*(x)) != sizeof(*(y))) \
+ __struct_cpy_bug; \
+ memcpy(x, y, sizeof(*(x))); \
+})
+
+
#define __HAVE_ARCH_MEMMOVE
-extern inline void * memmove(void * dest,const void * src, size_t n)
+static inline void * memmove(void * dest,const void * src, size_t n)
{
int d0, d1, d2;
register void *tmp = (void *)dest;
@@ -564,7 +428,9 @@ __asm__ __volatile__ (
return dest;
}
-extern inline int memcmp(const void * cs,const void * ct,size_t count)
+
+#define __HAVE_ARCH_MEMCMP
+static inline int memcmp(const void * cs,const void * ct,size_t count)
{
int d0, d1, d2;
register int __res;
@@ -580,8 +446,9 @@ __asm__ __volatile__(
return __res;
}
+
#define __HAVE_ARCH_MEMCHR
-extern inline void * memchr(const void * cs,int c,size_t count)
+static inline void * memchr(const void * cs,int c,size_t count)
{
int d0;
register void * __res;
@@ -622,7 +489,7 @@ return __res;
__memset_gc((s),(c),(count)) : \
__memset_gg((s),(c),(count))))
-extern inline void * __memset_cc_by4(void * s, char c, size_t count)
+static inline void * __memset_cc_by4(void * s, char c, size_t count)
{
/*
* register char *tmp = s;
@@ -640,7 +507,7 @@ __asm__ __volatile__ (
return s;
}
-extern inline void * __memset_cc_by2(void * s, char c, size_t count)
+static inline void * __memset_cc_by2(void * s, char c, size_t count)
{
register void *tmp = (void *)s;
register int dummy;
@@ -658,7 +525,7 @@ __asm__ __volatile__ (
return s;
}
-extern inline void * __memset_gc_by4(void * s, char c, size_t count)
+static inline void * __memset_gc_by4(void * s, char c, size_t count)
{
register void *tmp = (void *)s;
register int dummy;
@@ -677,7 +544,7 @@ __asm__ __volatile__ (
return s;
}
-extern inline void * __memset_gc_by2(void * s, char c, size_t count)
+static inline void * __memset_gc_by2(void * s, char c, size_t count)
{
register void *tmp = (void *)s;
register int dummy1,dummy2;
@@ -699,7 +566,7 @@ __asm__ __volatile__ (
return s;
}
-extern inline void * __memset_cg(void * s, char c, size_t count)
+static inline void * __memset_cg(void * s, char c, size_t count)
{
int d0, d1;
register void *tmp = (void *)s;
@@ -716,7 +583,7 @@ __asm__ __volatile__ (
return s;
}
-extern inline void * __memset_gg(void * s,char c,size_t count)
+static inline void * __memset_gg(void * s,char c,size_t count)
{
int d0, d1, d2;
register void *tmp = (void *)s;
@@ -739,7 +606,7 @@ return s;
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
-extern inline void * memscan(void * addr, int c, size_t size)
+static inline void * memscan(void * addr, int c, size_t size)
{
if (!size)
return addr;
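Aside (not part of the patch): the struct_cpy() macro added earlier in this file relies on a link-time check — if the two operands differ in size, a call to a function that is defined nowhere survives into the object code and the final link fails; if the sizes match, the constant-false branch is discarded by the optimizer (the kernel is always built with optimization). A rough sketch of the same idea, with an illustrative helper name and an explicit call, using the same GNU statement-expression form as the patch:

/* Sketch of the link-time size-check trick; __size_mismatch() is never defined. */
#include <stdio.h>
#include <string.h>

extern void __size_mismatch(void);   /* deliberately left undefined */

#define struct_cpy(x, y)                          \
({                                                \
        if (sizeof(*(x)) != sizeof(*(y)))         \
                __size_mismatch();                \
        memcpy((x), (y), sizeof(*(x)));           \
})

struct point { int x, y; };

int main(void)
{
        struct point a = { 1, 2 }, b;

        struct_cpy(&b, &a);          /* sizes match: check folds away, memcpy runs */
        printf("%d %d\n", b.x, b.y);
        /* struct_cpy(&b, "oops");      size mismatch: would leave an unresolved
                                        reference to __size_mismatch() at link time */
        return 0;
}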
diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h
new file mode 100644
index 000000000..72adee6ef
--- /dev/null
+++ b/include/asm-s390/a.out.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-s390/a.out.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/a.out.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ * I don't think we'll ever need a.out ...
+ */
+
+#ifndef __S390_A_OUT_H__
+#define __S390_A_OUT_H__
+
+struct exec
+{
+ unsigned long a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP TASK_SIZE
+
+#endif
+
+#endif /* __S390_A_OUT_H__ */
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
new file mode 100644
index 000000000..f1c1f3c5d
--- /dev/null
+++ b/include/asm-s390/atomic.h
@@ -0,0 +1,215 @@
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
+/*
+ * include/asm-s390/atomic.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow
+ *
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ * S390 uses 'Compare And Swap' for atomicity in an SMP environment
+ */
+
+typedef struct { volatile int counter; } atomic_t __attribute__ ((aligned (4)));
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
+
+static __inline__ int atomic_read(atomic_t *v)
+{
+ int retval;
+ __asm__ __volatile__("bcr 15,0\n\t"
+ "l %0,%1"
+ : "=d" (retval) : "m" (*v) );
+ return retval;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__("st %1,%0\n\t"
+ "bcr 15,0"
+ : : "m" (*v), "d" (i) : "memory");
+}
+
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " ar 1,%1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (i) : "0", "1" );
+}
+
+static __inline__ int atomic_add_return (int i, atomic_t *v)
+{
+ int newval;
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ar %1,%2\n"
+ " cs 0,%1,%0\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (newval)
+ : "d" (i) : "0", "cc" );
+ return newval;
+}
+
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+ int newval;
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ar %1,%2\n"
+ " cs 0,%1,%0\n"
+ " jl 0b\n"
+ : "+m" (*v), "=&d" (newval)
+ : "d" (i) : "0", "cc" );
+ return newval < 0;
+}
+
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " sr 1,%1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (i) : "0", "1" );
+}
+
+static __inline__ void atomic_inc(volatile atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " ahi 1,1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : : "0", "1" );
+}
+
+static __inline__ int atomic_inc_return(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ahi %1,1\n"
+ " cs 0,%1,%0\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0" );
+ return i;
+}
+
+static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
+{
+ int i;
+
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ahi %1,1\n"
+ " cs 0,%1,%0\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0" );
+ return i != 0;
+}
+
+static __inline__ void atomic_dec(volatile atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " ahi 1,-1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : : "0", "1" );
+}
+
+static __inline__ int atomic_dec_return(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ahi %1,-1\n"
+ " cs 0,%1,%0\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0" );
+ return i;
+}
+
+static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr %1,0\n"
+ " ahi %1,-1\n"
+ " cs 0,%1,%0\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0");
+ return i == 0;
+}
+
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " nr 1,%1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (~(mask)) : "0", "1" );
+}
+
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
+{
+ __asm__ __volatile__(" l 0,%0\n"
+ "0: lr 1,0\n"
+ " or 1,%1\n"
+ " cs 0,1,%0\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (mask) : "0", "1" );
+}
+
+/*
+ returns 0 if expected_oldval==value in *v ( swap was successful )
+ returns 1 if unsuccessful.
+*/
+static __inline__ int
+atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+{
+ int retval;
+
+ __asm__ __volatile__(
+ " cs %2,%3,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&r" (retval), "+m" (*v)
+ : "d" (expected_oldval) , "d" (new_val)
+ : "memory", "cc");
+ return retval;
+}
+
+/*
+ Spin till *v = expected_oldval then swap with newval.
+ */
+static __inline__ void
+atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
+{
+ __asm__ __volatile__(
+ "0: lr 1,%1\n"
+ " cs 1,%2,%0\n"
+ " jl 0b\n"
+ : "+m" (*v)
+ : "d" (expected_oldval) , "d" (new_val)
+ : "memory", "cc", "1");
+}
+
+#endif /* __ARCH_S390_ATOMIC __ */
+
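Aside (not part of the patch): every arithmetic primitive in the atomic.h above follows the same pattern — load the old value, compute the new value in a register, then retry with Compare And Swap (CS) until no other CPU has changed the word in between. A rough user-space equivalent of that retry loop, written here with the GCC __sync compare-and-swap builtin purely for illustration:

/* Sketch of the L / AR / CS retry loop behind atomic_add() above. */
#include <stdio.h>

static void atomic_add_cas(int i, volatile int *v)
{
        int old, new;

        do {
                old = *v;        /* "l"  - pick up the current value            */
                new = old + i;   /* "ar" - compute the updated value            */
                /* "cs" - store new only if *v still holds old, else go around */
        } while (__sync_val_compare_and_swap(v, old, new) != old);
}

int main(void)
{
        volatile int counter = 40;

        atomic_add_cas(2, &counter);
        printf("counter = %d\n", counter);   /* prints 42 */
        return 0;
}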
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
new file mode 100644
index 000000000..3043d3b60
--- /dev/null
+++ b/include/asm-s390/bitops.h
@@ -0,0 +1,888 @@
+#ifndef _S390_BITOPS_H
+#define _S390_BITOPS_H
+
+/*
+ * include/asm-s390/bitops.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ */
+#include <linux/config.h>
+
+/*
+ * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
+ * bit 32 is the LSB of *(addr+4). That combined with the
+ * big endian byte order on S390 give the following bit
+ * order in memory:
+ * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
+ * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ * after that follows the next long with bit numbers
+ * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
+ * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
+ * The reason for this bit ordering is the fact that
+ * in the architecture independent code bits operations
+ * of the form "flags |= (1 << bitnr)" are used INTERMIXED
+ * with operation of the form "set_bit(bitnr, flags)".
+ */
+
+/* set ALIGN_CS to 1 if the SMP safe bit operations should
+ * align the address to 4 byte boundary. It seems to work
+ * without the alignment.
+ */
+#define ALIGN_CS 0
+
+/* bitmap tables from arch/S390/kernel/bitmap.S */
+extern const char _oi_bitmap[];
+extern const char _ni_bitmap[];
+extern const char _zb_findmap[];
+
+/*
+ * Function prototypes to keep gcc -Wall happy
+ */
+extern void __set_bit(int nr, volatile void * addr);
+extern void __constant_set_bit(int nr, volatile void * addr);
+extern int __test_bit(int nr, volatile void * addr);
+extern int __constant_test_bit(int nr, volatile void * addr);
+extern void __clear_bit(int nr, volatile void * addr);
+extern void __constant_clear_bit(int nr, volatile void * addr);
+extern void __change_bit(int nr, volatile void * addr);
+extern void __constant_change_bit(int nr, volatile void * addr);
+extern int test_and_set_bit(int nr, volatile void * addr);
+extern int test_and_clear_bit(int nr, volatile void * addr);
+extern int test_and_change_bit(int nr, volatile void * addr);
+extern int test_and_set_bit_simple(int nr, volatile void * addr);
+extern int test_and_clear_bit_simple(int nr, volatile void * addr);
+extern int test_and_change_bit_simple(int nr, volatile void * addr);
+extern int find_first_zero_bit(void * addr, unsigned size);
+extern int find_next_zero_bit (void * addr, int size, int offset);
+extern unsigned long ffz(unsigned long word);
+
+#ifdef CONFIG_SMP
+/*
+ * SMP safe set_bit routine based on compare and swap (CS)
+ */
+extern __inline__ void set_bit_cs(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n" /* make OR mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " or 1,2\n" /* set bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b"
+ : "+a" (nr), "+a" (addr) :
+ : "cc", "memory", "1", "2" );
+}
+
+/*
+ * SMP safe clear_bit routine based on compare and swap (CS)
+ */
+extern __inline__ void clear_bit_cs(int nr, volatile void * addr)
+{
+ static const int mask = -1;
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n"
+ " x 2,%2\n" /* make AND mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " nr 1,2\n" /* clear bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b"
+ : "+a" (nr), "+a" (addr) : "m" (mask)
+ : "cc", "memory", "1", "2" );
+}
+
+/*
+ * SMP safe change_bit routine based on compare and swap (CS)
+ */
+extern __inline__ void change_bit_cs(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n" /* make XR mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " xr 1,2\n" /* change bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b"
+ : "+a" (nr), "+a" (addr) :
+ : "cc", "memory", "1", "2" );
+}
+
+/*
+ * SMP safe test_and_set_bit routine based on compare and swap (CS)
+ */
+extern __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n" /* make OR mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " or 1,2\n" /* set bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b\n"
+ " nr %0,2\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr) :
+ : "cc", "memory", "1", "2" );
+ return nr;
+}
+
+/*
+ * SMP safe test_and_clear_bit routine based on compare and swap (CS)
+ */
+extern __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
+{
+ static const int mask = -1;
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n"
+ " x 2,%2\n" /* make AND mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " nr 1,2\n" /* clear bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b\n"
+ " x 2,%2\n"
+ " nr %0,2\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr) : "m" (mask)
+ : "cc", "memory", "1", "2" );
+ return nr;
+}
+
+/*
+ * SMP safe test_and_change_bit routine based on compare and swap (CS)
+ */
+extern __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+#if ALIGN_CS == 1
+ " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
+ " nr 1,%1\n" /* isolate last 2 bits of address */
+ " xr %1,1\n" /* make addr % 4 == 0 */
+ " sll 1,3\n"
+ " ar %0,1\n" /* add alignment to bitnr */
+#endif
+ " lhi 1,31\n"
+ " nr 1,%0\n" /* make shift value */
+ " xr %0,1\n"
+ " srl %0,3\n"
+ " la %1,0(%0,%1)\n" /* calc. address for CS */
+ " lhi 2,1\n"
+ " sll 2,0(1)\n" /* make OR mask */
+ " l %0,0(%1)\n"
+ "0: lr 1,%0\n" /* CS loop starts here */
+ " xr 1,2\n" /* change bit */
+ " cs %0,1,0(%1)\n"
+ " jl 0b\n"
+ " nr %0,2\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr) :
+ : "cc", "memory", "1", "2" );
+ return nr;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * fast, non-SMP set_bit routine
+ */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ " lhi 2,24\n"
+ " lhi 1,7\n"
+ " xr 2,%0\n"
+ " nr 1,%0\n"
+ " srl 2,3\n"
+ " la 2,0(2,%1)\n"
+ " la 1,0(1,%2)\n"
+ " oc 0(1,2),0(1)"
+ : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
+ : "cc", "memory", "1", "2" );
+}
+
+extern __inline__ void
+__constant_set_bit(const int nr, volatile void * addr)
+{
+ switch (nr&7) {
+ case 0:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x01"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory");
+ break;
+ case 1:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x02"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 2:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x04"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 3:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x08"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 4:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x10"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 5:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x20"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 6:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x40"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 7:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "oi 0(1),0x80"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ }
+}
+
+#define set_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_set_bit((nr),(addr)) : \
+ __set_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP clear_bit routine
+ */
+extern __inline__ void
+__clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ " lhi 2,24\n"
+ " lhi 1,7\n"
+ " xr 2,%0\n"
+ " nr 1,%0\n"
+ " srl 2,3\n"
+ " la 2,0(2,%1)\n"
+ " la 1,0(1,%2)\n"
+ " nc 0(1,2),0(1)"
+ : : "r" (nr), "a" (addr), "a" (&_ni_bitmap)
+ : "cc", "memory", "1", "2" );
+}
+
+extern __inline__ void
+__constant_clear_bit(const int nr, volatile void * addr)
+{
+ switch (nr&7) {
+ case 0:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xFE"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 1:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xFD"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 2:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xFB"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 3:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xF7"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 4:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xEF"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 5:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xDF"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 6:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0xBF"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 7:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "ni 0(1),0x7F"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ }
+}
+
+#define clear_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_clear_bit((nr),(addr)) : \
+ __clear_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP change_bit routine
+ */
+extern __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ " lhi 2,24\n"
+ " lhi 1,7\n"
+ " xr 2,%0\n"
+ " nr 1,%0\n"
+ " srl 2,3\n"
+ " la 2,0(2,%1)\n"
+ " la 1,0(1,%2)\n"
+ " xc 0(1,2),0(1)"
+ : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
+ : "cc", "memory", "1", "2" );
+}
+
+extern __inline__ void
+__constant_change_bit(const int nr, volatile void * addr)
+{
+ switch (nr&7) {
+ case 0:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x01"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 1:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x02"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 2:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x04"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 3:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x08"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 4:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x10"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "cc", "memory" );
+ break;
+ case 5:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x20"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 6:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x40"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ case 7:
+ __asm__ __volatile__ ("la 1,%0\n\t"
+ "xi 0(1),0x80"
+ : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
+ : : "1", "cc", "memory" );
+ break;
+ }
+}
+
+#define change_bit_simple(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_change_bit((nr),(addr)) : \
+ __change_bit((nr),(addr)) )
+
+/*
+ * fast, non-SMP test_and_set_bit routine
+ */
+extern __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
+{
+ static const int mask = 1;
+ int oldbit;
+ __asm__ __volatile__(
+ " lhi 1,24\n"
+ " lhi 2,7\n"
+ " xr 1,%1\n"
+ " nr 2,1\n"
+ " srl 1,3(0)\n"
+ " la 1,0(1,%2)\n"
+ " ic %0,0(0,1)\n"
+ " srl %0,0(2)\n"
+ " n %0,%4\n"
+ " la 2,0(2,%3)\n"
+ " oc 0(1,1),0(2)"
+ : "=d&" (oldbit) : "r" (nr), "a" (addr),
+ "a" (&_oi_bitmap), "m" (mask)
+ : "cc", "memory", "1", "2" );
+ return oldbit;
+}
+
+/*
+ * fast, non-SMP test_and_clear_bit routine
+ */
+extern __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
+{
+ static const int mask = 1;
+ int oldbit;
+
+ __asm__ __volatile__(
+ " lhi 1,24\n"
+ " lhi 2,7\n"
+ " xr 1,%1\n"
+ " nr 2,1\n"
+ " srl 1,3(0)\n"
+ " la 1,0(1,%2)\n"
+ " ic %0,0(0,1)\n"
+ " srl %0,0(2)\n"
+ " n %0,%4\n"
+ " la 2,0(2,%3)\n"
+ " nc 0(1,1),0(2)"
+ : "=d&" (oldbit) : "r" (nr), "a" (addr),
+ "a" (&_ni_bitmap), "m" (mask)
+ : "cc", "memory", "1", "2" );
+ return oldbit;
+}
+
+/*
+ * fast, non-SMP test_and_change_bit routine
+ */
+extern __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
+{
+ static const int mask = 1;
+ int oldbit;
+
+ __asm__ __volatile__(
+ " lhi 1,24\n"
+ " lhi 2,7\n"
+ " xr 1,%1\n"
+ " nr 2,1\n"
+ " srl 1,3(0)\n"
+ " la 1,0(1,%2)\n"
+ " ic %0,0(0,1)\n"
+ " srl %0,0(2)\n"
+ " n %0,%4\n"
+ " la 2,0(2,%3)\n"
+ " xc 0(1,1),0(2)"
+ : "=d&" (oldbit) : "r" (nr), "a" (addr),
+ "a" (&_oi_bitmap), "m" (mask)
+ : "cc", "memory", "1", "2" );
+ return oldbit;
+}
+
+#ifdef CONFIG_SMP
+#define set_bit set_bit_cs
+#define clear_bit clear_bit_cs
+#define change_bit change_bit_cs
+#define test_and_set_bit test_and_set_bit_cs
+#define test_and_clear_bit test_and_clear_bit_cs
+#define test_and_change_bit test_and_change_bit_cs
+#else
+#define set_bit set_bit_simple
+#define clear_bit clear_bit_simple
+#define change_bit change_bit_simple
+#define test_and_set_bit test_and_set_bit_simple
+#define test_and_clear_bit test_and_clear_bit_simple
+#define test_and_change_bit test_and_change_bit_simple
+#endif
+
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+
+extern __inline__ int __test_bit(int nr, volatile void * addr)
+{
+ static const int mask = 1;
+ int oldbit;
+
+ __asm__ __volatile__(
+ " lhi 2,24\n"
+ " lhi 1,7\n"
+ " xr 2,%1\n"
+ " nr 1,%1\n"
+ " srl 2,3\n"
+ " ic %0,0(2,%2)\n"
+ " srl %0,0(1)\n"
+ " n %0,%3"
+ : "=d&" (oldbit) : "r" (nr), "a" (addr),
+ "m" (mask)
+ : "cc", "1", "2" );
+ return oldbit;
+}
+
+extern __inline__ int __constant_test_bit(int nr, volatile void * addr) {
+ return (((volatile char *) addr)[(nr>>3)^3] & (1<<(nr&7))) != 0;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p((nr)) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)) )
+
+/*
+ * Find-bit routines..
+ */
+extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
+{
+ static const int mask = 0xffL;
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__(" lhi 0,-1\n"
+ " lr 1,%1\n"
+ " ahi 1,31\n"
+ " srl 1,5\n"
+ " sr 2,2\n"
+ "0: c 0,0(2,%2)\n"
+ " jne 1f\n"
+ " ahi 2,4\n"
+ " brct 1,0b\n"
+ " lr 2,%1\n"
+ " j 4f\n"
+ "1: l 1,0(2,%2)\n"
+ " sll 2,3(0)\n"
+ " tml 1,0xFFFF\n"
+ " jno 2f\n"
+ " ahi 2,16\n"
+ " srl 1,16\n"
+ "2: tml 1,0x00FF\n"
+ " jno 3f\n"
+ " ahi 2,8\n"
+ " srl 1,8\n"
+ "3: n 1,%3\n"
+ " ic 1,0(1,%4)\n"
+ " n 1,%3\n"
+ " ar 2,1\n"
+ "4: lr %0,2"
+ : "=d" (res) : "a" (size), "a" (addr),
+ "m" (mask), "a" (&_zb_findmap)
+ : "cc", "0", "1", "2" );
+ return (res < size) ? res : size;
+}
+
+extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+{
+ static const int mask = 0xffL;
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long bitvec;
+ int set, bit = offset & 31, res;
+
+ if (bit) {
+ /*
+ * Look for zero in first word
+ */
+ bitvec = (*p) >> bit;
+ __asm__(" lr 1,%1\n"
+ " sr %0,%0\n"
+ " tml 1,0xFFFF\n"
+ " jno 0f\n"
+ " ahi %0,16\n"
+ " srl 1,16\n"
+ "0: tml 1,0x00FF\n"
+ " jno 1f\n"
+ " ahi %0,8\n"
+ " srl 1,8\n"
+ "1: n 1,%2\n"
+ " ic 1,0(1,%3)\n"
+ " n 1,%2\n"
+ " ar %0,1"
+ : "=d&" (set) : "d" (bitvec),
+ "m" (mask), "a" (&_zb_findmap)
+ : "cc", "1" );
+ if (set < (32 - bit))
+ return set + offset;
+ offset += 32 - bit;
+ p++;
+ }
+ /*
+ * No zero yet, search remaining full words for a zero
+ */
+ res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+ return (offset + res);
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+extern __inline__ unsigned long ffz(unsigned long word)
+{
+ static const int mask = 0xffL;
+ int result;
+
+ __asm__(" lr 1,%1\n"
+ " sr %0,%0\n"
+ " tml 1,0xFFFF\n"
+ " jno 0f\n"
+ " ahi %0,16\n"
+ " srl 1,16\n"
+ "0: tml 1,0x00FF\n"
+ " jno 1f\n"
+ " ahi %0,8\n"
+ " srl 1,8\n"
+ "1: n 1,%2\n"
+ " ic 1,0(1,%3)\n"
+ " n 1,%2\n"
+ " ar %0,1"
+ : "=d&" (result) : "d" (word),
+ "m" (mask), "a" (&_zb_findmap)
+ : "cc", "1" );
+
+ return result;
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+extern int __inline__ ffs (int x)
+{
+ int r;
+
+ if (x == 0)
+ return 0;
+ __asm__(" lr %%r1,%1\n"
+ " sr %0,%0\n"
+ " tmh %%r1,0xFFFF\n"
+ " jz 0f\n"
+ " ahi %0,16\n"
+ " srl %%r1,16\n"
+ "0: tml %%r1,0xFF00\n"
+ " jz 1f\n"
+ " ahi %0,8\n"
+ " srl %%r1,8\n"
+ "1: tml %%r1,0x00F0\n"
+ " jz 2f\n"
+ " ahi %0,4\n"
+ " srl %%r1,4\n"
+ "2: tml %%r1,0x000C\n"
+ " jz 3f\n"
+ " ahi %0,2\n"
+ " srl %%r1,2\n"
+ "3: tml %%r1,0x0002\n"
+ " jz 4f\n"
+ " ahi %0,1\n"
+ "4:"
+ : "=&d" (r) : "d" (x) : "cc", "1" );
+ return r+1;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+
+#ifdef __KERNEL__
+
+/*
+ * ATTENTION: intel byte ordering convention for ext2 and minix !!
+ * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
+ * bit 32 is the LSB of (addr+4).
+ * That combined with the little endian byte order of Intel gives the
+ * following bit order in memory:
+ * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
+ * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+ */
+
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr)^24, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^24, addr)
+#define ext2_test_bit(nr, addr) test_bit((nr)^24, addr)
+extern __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
+{
+ static const int mask = 0xffL;
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__(" lhi 0,-1\n"
+ " lr 1,%1\n"
+ " ahi 1,31\n"
+ " srl 1,5\n"
+ " sr 2,2\n"
+ "0: c 0,0(2,%2)\n"
+ " jne 1f\n"
+ " ahi 2,4\n"
+ " brct 1,0b\n"
+ " lr 2,%1\n"
+ " j 4f\n"
+ "1: l 1,0(2,%2)\n"
+ " sll 2,3(0)\n"
+ " ahi 2,24\n"
+ " tmh 1,0xFFFF\n"
+ " jo 2f\n"
+ " ahi 2,-16\n"
+ " srl 1,16\n"
+ "2: tml 1,0xFF00\n"
+ " jo 3f\n"
+ " ahi 2,-8\n"
+ " srl 1,8\n"
+ "3: n 1,%3\n"
+ " ic 1,0(1,%4)\n"
+ " n 1,%3\n"
+ " ar 2,1\n"
+ "4: lr %0,2"
+ : "=d" (res) : "a" (size), "a" (vaddr),
+ "m" (mask), "a" (&_zb_findmap)
+ : "cc", "0", "1", "2" );
+ return (res < size) ? res : size;
+}
+
+extern __inline__ int
+ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
+{
+ static const int mask = 0xffL;
+ static unsigned long orword[32] = {
+ 0x00000000, 0x01000000, 0x03000000, 0x07000000,
+ 0x0f000000, 0x1f000000, 0x3f000000, 0x7f000000,
+ 0xff000000, 0xff010000, 0xff030000, 0xff070000,
+ 0xff0f0000, 0xff1f0000, 0xff3f0000, 0xff7f0000,
+ 0xffff0000, 0xffff0100, 0xffff0300, 0xffff0700,
+ 0xffff0f00, 0xffff1f00, 0xffff3f00, 0xffff7f00,
+ 0xffffff00, 0xffffff01, 0xffffff03, 0xffffff07,
+ 0xffffff0f, 0xffffff1f, 0xffffff3f, 0xffffff7f
+ };
+ unsigned long *addr = vaddr;
+ unsigned long *p = addr + (offset >> 5);
+ unsigned long word;
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ word = *p | orword[bit];
+ /* Look for zero in first longword */
+ __asm__(" lhi %0,24\n"
+ " tmh %1,0xFFFF\n"
+ " jo 0f\n"
+ " ahi %0,-16\n"
+ " srl %1,16\n"
+ "0: tml %1,0xFF00\n"
+ " jo 1f\n"
+ " ahi %0,-8\n"
+ " srl %1,8\n"
+ "1: n %1,%2\n"
+ " ic %1,0(%1,%3)\n"
+ " alr %0,%1"
+ : "=&d" (res), "+&d" (word)
+ : "m" (mask), "a" (&_zb_findmap)
+ : "cc" );
+ if (res < 32)
+ return (p - addr)*32 + res;
+ p++;
+ }
+ /* No zero yet, search remaining full bytes for a zero */
+ res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
+ return (p - addr) * 32 + res;
+}
+
+/* Bitmap functions for the minix filesystem. */
+/* FIXME !!! */
+#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _S390_BITOPS_H */
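Aside (not part of the patch): the ext2_set_bit()/ext2_clear_bit() wrappers above translate ext2's little-endian bit numbering into the native big-endian layout simply by XOR-ing the bit number with 24. XOR with 24 flips bits 3 and 4 of the bit number, i.e. it swaps byte k of a 32-bit word with byte 3-k, which is exactly the byte reversal needed. A small self-contained check of that identity:

/* Verifies that nr ^ 24 maps ext2 (little-endian) bit numbers onto the
 * byte/bit pair the native s390 bit operations touch (byte (nr>>3)^3,
 * bit nr&7, as in __constant_test_bit() above). */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        int nr;

        for (nr = 0; nr < 32; nr++) {
                int ext2_byte   = nr >> 3;             /* linear byte index           */
                int ext2_bit    = nr & 7;              /* bit within that byte        */
                int native      = nr ^ 24;             /* remapped bit number         */
                int native_byte = (native >> 3) ^ 3;   /* byte the native ops address */
                int native_bit  = native & 7;

                assert(native_byte == ext2_byte);
                assert(native_bit == ext2_bit);
        }
        printf("nr ^ 24 remaps all 32 bit positions correctly\n");
        return 0;
}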
diff --git a/include/asm-s390/bugs.h b/include/asm-s390/bugs.h
new file mode 100644
index 000000000..2c3659621
--- /dev/null
+++ b/include/asm-s390/bugs.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-s390/bugs.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/bugs.h"
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+static void __init check_bugs(void)
+{
+ /* s390 has no bugs ... */
+}
diff --git a/include/asm-s390/byteorder.h b/include/asm-s390/byteorder.h
new file mode 100644
index 000000000..b2fb3b955
--- /dev/null
+++ b/include/asm-s390/byteorder.h
@@ -0,0 +1,103 @@
+#ifndef _S390_BYTEORDER_H
+#define _S390_BYTEORDER_H
+
+/*
+ * include/asm-s390/byteorder.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <asm/types.h>
+
+#ifdef __GNUC__
+
+static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
+{
+ __u32 temp;
+
+ __asm__ __volatile__ (
+ " st %0,0(%1)\n"
+ " icm %0,8,3(%1)\n"
+ " icm %0,4,2(%1)\n"
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)"
+ : "+&d" (x) : "a" (&temp) : "memory" );
+ return x;
+}
+
+static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
+{
+ __u32 result;
+
+ __asm__ __volatile__ (
+ " icm %0,8,3(%1)\n"
+ " icm %0,4,2(%1)\n"
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)"
+ : "=&d" (result) : "a" (x) );
+ return result;
+}
+
+static __inline__ void ___arch__swab32s(__u32 *x)
+{
+ __asm__ __volatile__ (
+ " icm 0,8,3(%0)\n"
+ " icm 0,4,2(%0)\n"
+ " icm 0,2,1(%0)\n"
+ " ic 0,0(%0)\n"
+ " st 0,0(%0)"
+ : : "a" (x) : "0", "memory");
+}
+
+static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
+{
+ __u16 temp;
+
+ __asm__ __volatile__ (
+ " sth %0,0(%1)\n"
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)\n"
+ : "+&d" (x) : "a" (&temp) : "memory");
+ return x;
+}
+
+static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
+{
+ __u16 result;
+
+ __asm__ __volatile__ (
+ " sr %0,%0\n"
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)\n"
+ : "=&d" (result) : "a" (x) );
+ return result;
+}
+
+static __inline__ void ___arch__swab16s(__u16 *x)
+{
+ __asm__ __volatile__(
+ " icm 0,2,1(%0)\n"
+ " ic 0,0(%0)\n"
+ " sth 0,0(%0)"
+ : : "a" (x) : "0", "memory");
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32p(x) ___arch__swab32p(x)
+#define __arch__swab16p(x) ___arch__swab16p(x)
+#define __arch__swab32s(x) ___arch__swab32s(x)
+#define __arch__swab16s(x) ___arch__swab16s(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _S390_BYTEORDER_H */
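Aside (not part of the patch): the ICM/ST sequences in the byteorder.h above are hand-written 16-bit and 32-bit byte swaps (S390 is big-endian, so swabbing gives the little-endian view of a value). The 32-bit case is equivalent to the familiar shift-and-mask swap, sketched here for reference:

/* Portable equivalent of ___arch__swab32() above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) |
               ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) |
               (x << 24);
}

int main(void)
{
        printf("%08x\n", swab32(0x12345678u));   /* prints 78563412 */
        return 0;
}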
diff --git a/include/asm-s390/cache.h b/include/asm-s390/cache.h
new file mode 100644
index 000000000..ad82cf8e0
--- /dev/null
+++ b/include/asm-s390/cache.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-s390/cache.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/cache.h"
+ * Copyright (C) 1992, Linus Torvalds
+ */
+
+#ifndef __ARCH_S390_CACHE_H
+#define __ARCH_S390_CACHE_H
+
+#define L1_CACHE_BYTES 16
+
+#endif
diff --git a/include/asm-s390/chandev.h b/include/asm-s390/chandev.h
new file mode 100644
index 000000000..c9e7d2d54
--- /dev/null
+++ b/include/asm-s390/chandev.h
@@ -0,0 +1,87 @@
+/*
+ * include/asm-s390/chandev.h
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ */
+
+#include <asm/types.h>
+
+typedef enum
+{
+ none=0,
+ ctc=1,
+ escon=2,
+ lcs=4,
+ osad=8,
+ claw=16,
+} chandev_type;
+
+typedef struct chandev_model_info chandev_model_info;
+
+struct chandev_model_info
+{
+ struct chandev_model_info *next;
+ chandev_type chan_type;
+ u16 cu_type;
+ u8 cu_model;
+ u8 max_port_no;
+};
+
+typedef struct chandev chandev;
+struct chandev
+{
+ struct chandev *next;
+ chandev_model_info *model_info;
+ u16 devno;
+ int irq;
+};
+
+typedef struct chandev_noauto_range chandev_noauto_range;
+struct chandev_noauto_range
+{
+ struct chandev_noauto_range *next;
+ u16 lo_devno;
+ u16 hi_devno;
+};
+
+typedef struct chandev_force chandev_force;
+struct chandev_force
+{
+ struct chandev_force *next;
+ chandev_type chan_type;
+ s32 devif_num; /* -1 don't care e.g. tr0 implies 0 */
+ u16 read_devno;
+ u16 write_devno;
+ s16 port_no; /* where available e.g. lcs,-1 don't care */
+ u8 do_ip_checksumming;
+ u8 use_hw_stats; /* where available e.g. lcs */
+};
+
+
+
+typedef struct
+{
+ s32 devif_num; /* -1 don't care e.g. tr0 implies 0 */
+ int read_irq;
+ int write_irq;
+ s16 forced_port_no; /* -1 don't care */
+ u8 hint_port_no;
+ u8 max_port_no;
+ u8 do_ip_checksumming;
+ u8 use_hw_stats; /* where available e.g. lcs */
+} chandev_probeinfo;
+
+
+typedef int (*chandev_probefunc)(chandev_probeinfo *probeinfo);
+
+
+typedef struct chandev_probelist chandev_probelist;
+struct chandev_probelist
+{
+ struct chandev_probelist *next;
+ chandev_probefunc probefunc;
+ chandev_type chan_type;
+};
diff --git a/include/asm-s390/checksum.h b/include/asm-s390/checksum.h
new file mode 100644
index 000000000..487ccc99b
--- /dev/null
+++ b/include/asm-s390/checksum.h
@@ -0,0 +1,188 @@
+#ifndef _S390_CHECKSUM_H
+#define _S390_CHECKSUM_H
+
+/*
+ * include/asm-s390/checksum.h
+ * S390 fast network checksum routines
+ * see also arch/s390/lib/checksum.c
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ulrich Hild (first version)
+ * Martin Schwidefsky (heavily optimized CKSM version)
+ * D.J. Barrow (third attempt)
+ */
+
+#include <asm/uaccess.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int
+csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * csum_partial as an inline function
+ */
+extern inline unsigned int
+csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
+{
+ __asm__ __volatile__ (
+ " lr 2,%1\n" /* address in gpr 2 */
+ " lr 3,%2\n" /* length in gpr 3 */
+ "0: cksm %0,2\n" /* do checksum on longs */
+ " jo 0b\n"
+ : "+&d" (sum)
+ : "d" (buff), "d" (len)
+ : "cc", "2", "3" );
+ return sum;
+}
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+
+extern inline unsigned int
+csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
+{
+ memcpy(dst,src,len);
+ return csum_partial_inline(dst, len, sum);
+}
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+
+extern inline unsigned int
+csum_partial_copy_from_user(const char *src, char *dst,
+ int len, unsigned int sum, int *errp)
+{
+ if (copy_from_user(dst, src, len)) {
+ *errp = -EFAULT;
+ memset(dst, 0, len);
+ return sum;
+ }
+ return csum_partial(dst, len, sum);
+}
+
+extern inline unsigned int
+csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
+{
+ memcpy(dst,src,len);
+ return csum_partial_inline(dst, len, sum);
+}
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+#if 1
+unsigned short csum_fold(unsigned int sum);
+#else
+extern inline unsigned short
+csum_fold(unsigned int sum)
+{
+ __asm__ __volatile__ (
+ " sr 3,3\n" /* %0 = H*65536 + L */
+ " lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */
+ " srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */
+ " alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */
+ " alr %0,2\n" /* %0 = H+L+C L+H */
+ " srl %0,16\n" /* %0 = H+L+C */
+ : "+&d" (sum) : : "cc", "2", "3");
+ return ((unsigned short) ~sum);
+}
+#endif
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksums on 4-octet boundaries.
+ *
+ */
+extern inline unsigned short
+ip_fast_csum(unsigned char *iph, unsigned int ihl)
+{
+ unsigned long sum;
+
+ __asm__ __volatile__ (
+ " sr %0,%0\n" /* set sum to zero */
+ " lr 2,%1\n" /* address in gpr 2 */
+ " lr 3,%2\n" /* length in gpr 3 */
+ "0: cksm %0,2\n" /* do checksum on longs */
+ " jo 0b\n"
+ : "=&d" (sum)
+ : "d" (iph), "d" (ihl*4)
+ : "cc", "2", "3" );
+ return csum_fold(sum);
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 32-bit checksum
+ */
+extern inline unsigned int
+csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+ unsigned short len, unsigned short proto,
+ unsigned int sum)
+{
+ __asm__ __volatile__ (
+ " sll %3,16\n"
+ " or %3,%4\n" /* newproto=proto<<16 in hiword, len in lowword */
+ " alr %1,%2\n" /* saddr+=daddr */
+ " brc 12,0f\n"
+ " ahi %1,1\n" /* add carry */
+ "0: alr %1,%3\n" /* add saddr+=newproto */
+ " brc 12,1f\n"
+ " ahi %1,1\n" /* add carry again */
+ "1: alr %0,%1\n" /* sum+=saddr */
+ " brc 12,2f\n"
+ " ahi %0,1\n" /* add carry again */
+ "2:"
+ : "+&d" (sum)
+ : "d" (saddr), "d" (daddr), "d" (proto), "d" (len)
+ : "cc" );
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+extern inline unsigned short int
+csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
+ unsigned short len, unsigned short proto,
+ unsigned int sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+extern inline unsigned short
+ip_compute_csum(unsigned char * buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
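+/*
+ * Minimal usage sketch (hypothetical helper, illustrative only): checksum
+ * a UDP datagram by summing header plus payload with csum_partial() and
+ * folding the result into the pseudo-header checksum.
+ */
+extern inline unsigned short
+example_udp_csum(unsigned long saddr, unsigned long daddr,
+                 unsigned char *udp, unsigned short len)
+{
+        unsigned int sum = csum_partial(udp, len, 0);
+        return csum_tcpudp_magic(saddr, daddr, len, 17 /* IPPROTO_UDP */, sum);
+}
+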
+#endif /* _S390_CHECKSUM_H */
+
+
diff --git a/include/asm-s390/current.h b/include/asm-s390/current.h
new file mode 100644
index 000000000..42567eb94
--- /dev/null
+++ b/include/asm-s390/current.h
@@ -0,0 +1,31 @@
+/*
+ * include/asm-s390/current.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/current.h"
+ */
+
+#ifndef _S390_CURRENT_H
+#define _S390_CURRENT_H
+
+#ifdef __KERNEL__
+
+struct task_struct;
+
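+/*
+ * The task_struct sits at the bottom of the 8kB kernel stack, so rounding
+ * the stack pointer (gpr 15) down to an 8kB boundary yields a pointer to
+ * the current task.
+ */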
+static inline struct task_struct * get_current(void)
+{
+ struct task_struct *current;
+ __asm__("lhi %0,-8192\n\t"
+ "nr %0,15"
+ : "=r" (current) );
+ return current;
+}
+
+#define current get_current()
+
+#endif
+
+#endif /* !(_S390_CURRENT_H) */
diff --git a/include/asm-s390/delay.h b/include/asm-s390/delay.h
new file mode 100644
index 000000000..87ac55391
--- /dev/null
+++ b/include/asm-s390/delay.h
@@ -0,0 +1,25 @@
+/*
+ * include/asm-s390/delay.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/delay.h"
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/i386/lib/delay.c
+ */
+
+#ifndef _S390_DELAY_H
+#define _S390_DELAY_H
+
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
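+/*
+ * 0x10c6 is roughly 2^32 / 1000000, so __const_udelay() receives the
+ * microsecond count pre-scaled to a fraction of 2^32 (the same convention
+ * as on i386, which this port follows).
+ */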
+#define udelay(n) (__builtin_constant_p(n) ? \
+ __const_udelay((n) * 0x10c6ul) : \
+ __udelay(n))
+
+#endif /* defined(_S390_DELAY_H) */
diff --git a/include/asm-s390/div64.h b/include/asm-s390/div64.h
new file mode 100644
index 000000000..17824b1a2
--- /dev/null
+++ b/include/asm-s390/div64.h
@@ -0,0 +1,10 @@
+#ifndef __S390_DIV64
+#define __S390_DIV64
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) (n)) % (unsigned) (base); \
+(n) = ((unsigned long) (n)) / (unsigned) (base); \
+__res; })
+
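+/*
+ * Minimal usage sketch (hypothetical helper, illustrative only): do_div()
+ * divides its first argument in place and evaluates to the remainder.
+ */
+static __inline__ unsigned long example_usecs_to_secs(unsigned long usecs,
+                                                      unsigned long *rem)
+{
+        *rem = do_div(usecs, 1000000);  /* usecs now holds whole seconds */
+        return usecs;
+}
+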
+#endif
diff --git a/include/asm-s390/dma.h b/include/asm-s390/dma.h
new file mode 100644
index 000000000..e7ae126e6
--- /dev/null
+++ b/include/asm-s390/dma.h
@@ -0,0 +1,17 @@
+/*
+ * include/asm-s390/dma.h
+ *
+ * S390 version
+ *
+ * This file exists so that an #include <dma.h> doesn't break anything.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+#define MAX_DMA_CHANNELS 0
+#define MAX_DMA_ADDRESS 0x80000000
+
+#endif /* _ASM_DMA_H */
diff --git a/include/asm-s390/ebcdic.h b/include/asm-s390/ebcdic.h
new file mode 100644
index 000000000..7d6aeb2a7
--- /dev/null
+++ b/include/asm-s390/ebcdic.h
@@ -0,0 +1,51 @@
+/*
+ * include/asm-s390/ebcdic.h
+ * EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _EBCDIC_H
+#define _EBCDIC_H
+
+#ifndef _S390_TYPES_H
+#include <asm/types.h>
+#endif
+
+extern __u8 _ascebc[]; /* ASCII -> EBCDIC conversion table */
+extern __u8 _ebcasc[]; /* EBCDIC -> ASCII conversion table */
+extern __u8 _ebc_tolower[]; /* EBCDIC -> lowercase */
+extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
+
+extern __inline__
+void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
+{
+ static const __u16 tr_op[] = { 0xDC00, 0x1000,0x3000 };
+ __asm__ __volatile__(
+ " lr 1,%0\n"
+ " lr 2,%1\n"
+ " lr 3,%2\n"
+ " ahi 2,-256\n"
+ " jm 1f\n"
+ "0: tr 0(256,1),0(3)\n"
+ " ahi 1,256\n"
+ " ahi 2,-256\n"
+ " jp 0b\n"
+ "1: ahi 2,255\n"
+ " jm 2f\n"
+ " ex 2,%3\n"
+ "2:"
+ : /* no output */
+ : "a" (addr), "d" (nr), "a" (codepage), "m" (tr_op[0])
+ : "cc", "memory", "1", "2", "3" );
+}
+
+#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
+#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr)
+#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr)
+#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr)
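+
+/*
+ * Minimal usage sketch (hypothetical buffer, illustrative only): uppercase
+ * a reply that is still in EBCDIC, then convert it to ASCII in place.
+ */
+extern __inline__ void example_ebcdic_to_ascii_upper(__u8 *buf, int len)
+{
+        EBC_TOUPPER(buf, len);  /* uppercase while still in EBCDIC */
+        EBCASC(buf, len);       /* then convert EBCDIC -> ASCII in place */
+}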
+
+#endif
+
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
new file mode 100644
index 000000000..60a08aaae
--- /dev/null
+++ b/include/asm-s390/elf.h
@@ -0,0 +1,80 @@
+/*
+ * include/asm-s390/elf.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/elf.h"
+ */
+
+#ifndef __ASMS390_ELF_H
+#define __ASMS390_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+
+typedef s390_fp_regs elf_fpregset_t;
+typedef s390_regs elf_gregset_t;
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x) == EM_S390)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB
+#define ELF_ARCH EM_S390
+
+/* For SVR4/S390 the function pointer to be registered with `atexit` is
+ passed in R14. */
+#define ELF_PLAT_INIT(_r) \
+ _r->gprs[14] = 0
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE ((TASK_SIZE & 0x80000000) \
+ ? TASK_SIZE / 3 * 2 \
+ : 2 * TASK_SIZE / 3)
+
+/* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+ memcpy(&pr_reg,regs,sizeof(elf_gregset_t)); \
+
+
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment no platform string is defined for S390,
+ but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) \
+ current->personality = (ibcs2 ? PER_SVR4 : PER_LINUX)
+#endif
+
+#endif
diff --git a/include/asm-s390/errno.h b/include/asm-s390/errno.h
new file mode 100644
index 000000000..37d3f24c0
--- /dev/null
+++ b/include/asm-s390/errno.h
@@ -0,0 +1,140 @@
+/*
+ * include/asm-s390/errno.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/errno.h"
+ */
+
+#ifndef _S390_ERRNO_H
+#define _S390_ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif
diff --git a/include/asm-s390/fcntl.h b/include/asm-s390/fcntl.h
new file mode 100644
index 000000000..844cb5d5b
--- /dev/null
+++ b/include/asm-s390/fcntl.h
@@ -0,0 +1,72 @@
+/*
+ * include/asm-s390/fcntl.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/fcntl.h"
+ */
+#ifndef _S390_FCNTL_H
+#define _S390_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
+#define O_LARGEFILE 0100000
+#define O_DIRECTORY 0200000 /* must be a directory */
+#define O_NOFOLLOW 0400000 /* don't follow links */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get close_on_exec flag */
+#define F_SETFD 2 /* set close_on_exec flag */
+#define F_GETFL 3 /* get file->f_flags */
+#define F_SETFL 4 /* set file->f_flags */
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+/* for F_[GET|SET]FD */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+#endif
diff --git a/include/asm-s390/gdb-stub.h b/include/asm-s390/gdb-stub.h
new file mode 100644
index 000000000..fa68800f4
--- /dev/null
+++ b/include/asm-s390/gdb-stub.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-s390/gdb-stub.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef __S390_GDB_STUB__
+#define __S390_GDB_STUB__
+#include <linux/config.h>
+#if CONFIG_REMOTE_DEBUG
+#include <asm/s390-gdbregs.h>
+#include <asm/ptrace.h>
+extern int gdb_stub_initialised;
+extern void gdb_stub_handle_exception(gdb_pt_regs *regs,int sigval);
+#endif
+#endif
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
new file mode 100644
index 000000000..c21db1cd7
--- /dev/null
+++ b/include/asm-s390/hardirq.h
@@ -0,0 +1,78 @@
+/*
+ * include/asm-s390/hardirq.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * Derived from "include/asm-i386/hardirq.h"
+ */
+
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/lowcore.h>
+#include <linux/sched.h>
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ((atomic_read(&S390_lowcore.local_irq_count) + atomic_read(&S390_lowcore.local_bh_count)) != 0)
+
+#define in_irq() (atomic_read(&S390_lowcore.local_irq_count) != 0)
+
+#ifndef CONFIG_SMP
+
+#define hardirq_trylock(cpu) (atomic_read(&S390_lowcore.local_irq_count) == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define hardirq_enter(cpu) (atomic_inc(&S390_lowcore.local_irq_count))
+#define hardirq_exit(cpu) (atomic_dec(&S390_lowcore.local_irq_count))
+
+#define synchronize_irq() do { } while (0)
+
+#else
+
+#include <asm/atomic.h>
+#include <asm/smp.h>
+
+extern atomic_t global_irq_holder;
+extern atomic_t global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (atomic_read(&global_irq_holder) == cpu) {
+ atomic_set(&global_irq_holder,NO_PROC_ID);
+ clear_bit(0,&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ atomic_inc(&safe_get_cpu_lowcore(cpu).local_irq_count);
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ atomic_dec(&safe_get_cpu_lowcore(cpu).local_irq_count);
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) do { } while (0)
+
+extern void synchronize_irq(void);
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/hdreg.h b/include/asm-s390/hdreg.h
new file mode 100644
index 000000000..20061819d
--- /dev/null
+++ b/include/asm-s390/hdreg.h
@@ -0,0 +1,13 @@
+/*
+ * linux/include/asm-s390/hdreg.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+#ifndef __ASMS390_HDREG_H
+#define __ASMS390_HDREG_H
+
+typedef unsigned long ide_ioreg_t;
+
+#endif /* __ASMS390_HDREG_H */
+
diff --git a/include/asm-s390/ide.h b/include/asm-s390/ide.h
new file mode 100644
index 000000000..eb47027ff
--- /dev/null
+++ b/include/asm-s390/ide.h
@@ -0,0 +1,54 @@
+/*
+ * linux/include/asm-s390/ide.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+/* s390 does not have IDE */
+
+#ifndef __ASMS390_IDE_H
+#define __ASMS390_IDE_H
+
+#ifdef __KERNEL__
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 0
+#endif
+
+#define ide__sti() do {} while (0)
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
+
+#define ide_request_irq(irq,hand,flg,dev,id) do {} while (0)
+#define ide_free_irq(irq,dev_id) do {} while (0)
+#define ide_check_region(from,extent) do {} while (0)
+#define ide_request_region(from,extent,name) do {} while (0)
+#define ide_release_region(from,extent) do {} while (0)
+
+/*
+ * The following are not needed for the non-m68k ports
+ */
+#define ide_ack_intr(hwif) (1)
+#define ide_fix_driveid(id) do {} while (0)
+#define ide_release_lock(lock) do {} while (0)
+#define ide_get_lock(lock, hdlr, data) do {} while (0)
+
+/*
+ * We always use the new IDE port registering,
+ * so these are fixed here.
+ */
+#define ide_default_io_base(i) ((ide_ioreg_t)0)
+#define ide_default_irq(b) (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMS390_IDE_H */
diff --git a/include/asm-s390/init.h b/include/asm-s390/init.h
new file mode 100644
index 000000000..715485b72
--- /dev/null
+++ b/include/asm-s390/init.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-s390/init.h
+ *
+ * S390 version
+ */
+
+#ifndef _S390_INIT_H
+#define _S390_INIT_H
+
+#define __init __attribute__ ((constructor))
+
+/* not known yet whether these are needed on S390 */
+#define __initdata
+#define __initfunc(__arginit) \
+ __arginit __init; \
+ __arginit
+/* For assembly routines:
+ * do these need to be defined?
+ */
+/*
+#define __INIT .section ".text.init",#alloc,#execinstr
+#define __FINIT .previous
+#define __INITDATA .section ".data.init",#alloc,#write
+*/
+
+#define __cacheline_aligned __attribute__ ((__aligned__(16)))
+
+#endif
+
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
new file mode 100644
index 000000000..87c4edb72
--- /dev/null
+++ b/include/asm-s390/io.h
@@ -0,0 +1,94 @@
+/*
+ * include/asm-s390/io.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/io.h"
+ */
+
+#ifndef _S390_IO_H
+#define _S390_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x)))
+#define __io_phys(x) ((unsigned long)(x) & ~PAGE_OFFSET)
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are pretty trivial
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ unsigned long real_address;
+ __asm__ (" lra %0,0(0,%1)\n"
+ " jz 0f\n"
+ " sr %0,%0\n"
+ "0:"
+ : "=a" (real_address) : "a" (address) );
+ return real_address;
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return __io_virt(address);
+}
+
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+extern inline void * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/*
+ * This one maps high-address device memory and turns off caching for that area.
+ * It's useful if some control registers are in such an area and write combining
+ * or read caching is not desirable:
+ */
+extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+extern void iounmap(void *addr);
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently.
+ */
+
+#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
+#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
+#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
+
+#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
+#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
+#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
+
+#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
+#define memcpy_toio(a,b,c) memcpy(__io_virt(a),(b),(c))
+
+#define inb_p(addr) readb(addr)
+#define inb(addr) readb(addr)
+
+#define outb(x,addr) ((void) writeb(x,addr))
+#define outb_p(x,addr) outb(x,addr)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-s390/ioctl.h b/include/asm-s390/ioctl.h
new file mode 100644
index 000000000..35b4821e8
--- /dev/null
+++ b/include/asm-s390/ioctl.h
@@ -0,0 +1,78 @@
+/*
+ * include/asm-s390/ioctl.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/ioctl.h"
+ */
+
+#ifndef _S390_IOCTL_H
+#define _S390_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
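+/*
+ * Minimal usage sketch (hypothetical device, illustrative only): a driver
+ * defines its ioctl numbers with the macros above and can recover the
+ * encoded fields with _IOC_DIR()/_IOC_SIZE() when dispatching.
+ */
+struct example_parm { int value; };
+#define EXAMPLE_GETPARM _IOR('E', 0x01, struct example_parm)
+#define EXAMPLE_SETPARM _IOW('E', 0x02, struct example_parm)
+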
+#endif /* _S390_IOCTL_H */
diff --git a/include/asm-s390/ioctls.h b/include/asm-s390/ioctls.h
new file mode 100644
index 000000000..41748666a
--- /dev/null
+++ b/include/asm-s390/ioctls.h
@@ -0,0 +1,88 @@
+/*
+ * include/asm-s390/ioctls.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/ioctls.h"
+ */
+
+#ifndef __ARCH_S390_IOCTLS_H__
+#define __ARCH_S390_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif
diff --git a/include/asm-s390/ipc.h b/include/asm-s390/ipc.h
new file mode 100644
index 000000000..66d2b53de
--- /dev/null
+++ b/include/asm-s390/ipc.h
@@ -0,0 +1,39 @@
+/*
+ * include/asm-s390/ipc.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/ipc.h"
+ */
+
+#ifndef __s390_IPC_H__
+#define __s390_IPC_H__
+
+/*
+ * These are used to wrap system calls on S390.
+ *
+ * See arch/s390/kernel/sys_s390.c for ugly details..
+ */
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+/* Used by the DIPC package, try and avoid reusing it */
+#define DIPC 25
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif
diff --git a/include/asm-s390/ipcbuf.h b/include/asm-s390/ipcbuf.h
new file mode 100644
index 000000000..e3245babe
--- /dev/null
+++ b/include/asm-s390/ipcbuf.h
@@ -0,0 +1,29 @@
+#ifndef __S390_IPCBUF_H__
+#define __S390_IPCBUF_H__
+
+/*
+ * The user_ipc_perm structure for S/390 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __S390_IPCBUF_H__ */
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
new file mode 100644
index 000000000..895c24649
--- /dev/null
+++ b/include/asm-s390/irq.h
@@ -0,0 +1,788 @@
+/*
+ * include/asm-s390/irq.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ */
+
+#ifndef __irq_h
+#define __irq_h
+
+#include <linux/config.h>
+#include <asm/hardirq.h>
+
+/*
+ * How many IRQ's for S390 ?!?
+ */
+#define __MAX_SUBCHANNELS 65536
+#define NR_IRQS __MAX_SUBCHANNELS
+
+#define INVALID_STORAGE_AREA ((void *)(-1 - 0x3FFF ))
+
+extern int disable_irq(unsigned int);
+extern int enable_irq(unsigned int);
+
+/*
+ * Interrupt controller descriptor. This is all we need
+ * to describe about the low-level hardware.
+ */
+struct hw_interrupt_type {
+ const __u8 *typename;
+ int (*handle)(unsigned int irq,
+ int cpu,
+ struct pt_regs * regs);
+ int (*enable) (unsigned int irq);
+ int (*disable)(unsigned int irq);
+};
+
+/*
+ * Status: reason for being disabled: somebody has
+ * done a "disable_irq()" or we must not re-enter the
+ * already executing irq..
+ */
+#define IRQ_INPROGRESS 1
+#define IRQ_DISABLED 2
+#define IRQ_PENDING 4
+
+/*
+ * path management control word
+ */
+typedef struct {
+ __u32 intparm; /* interruption parameter */
+ __u32 res0 : 2; /* reserved zeros */
+ __u32 isc : 3; /* interruption subclass */
+ __u32 res5 : 3; /* reserved zeros */
+ __u32 ena : 1; /* enabled */
+ __u32 lm : 2; /* limit mode */
+ __u32 mme : 2; /* measurement-mode enable */
+ __u32 mp : 1; /* multipath mode */
+ __u32 tf : 1; /* timing facility */
+ __u32 dnv : 1; /* device number valid */
+ __u32 dev : 16; /* device number */
+ __u8 lpm; /* logical path mask */
+ __u8 pnom; /* path not operational mask */
+ __u8 lpum; /* last path used mask */
+ __u8 pim; /* path installed mask */
+ __u16 mbi; /* measurement-block index */
+ __u8 pom; /* path operational mask */
+ __u8 pam; /* path available mask */
+ __u8 chpid[8]; /* CHPID 0-7 (if available) */
+ __u32 unused1 : 8; /* reserved zeros */
+ __u32 st : 3; /* subchannel type */
+ __u32 unused2 : 20; /* reserved zeros */
+ __u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+ /* ... in an operand exception. */
+ } __attribute__ ((packed)) pmcw_t;
+
+/*
+ * subchannel status word
+ */
+typedef struct {
+ __u32 key : 4; /* subchannel key */
+ __u32 sctl : 1; /* suspend control */
+ __u32 eswf : 1; /* ESW format */
+ __u32 cc : 2; /* deferred condition code */
+ __u32 fmt : 1; /* format */
+ __u32 pfch : 1; /* prefetch */
+ __u32 isic : 1; /* initial-status interruption control */
+ __u32 alcc : 1; /* address-limit checking control */
+ __u32 ssi : 1; /* suppress-suspended interruption */
+ __u32 zcc : 1; /* zero condition code */
+ __u32 ectl : 1; /* extended control */
+ __u32 pno : 1; /* path not operational */
+ __u32 res : 1; /* reserved */
+ __u32 fctl : 3; /* function control */
+ __u32 actl : 7; /* activity control */
+ __u32 stctl : 5; /* status control */
+ __u32 cpa; /* channel program address */
+ __u32 dstat : 8; /* device status */
+ __u32 cstat : 8; /* subchannel status */
+ __u32 count : 16; /* residual count */
+ } __attribute__ ((packed)) scsw_t;
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1
+#define SCSW_FCTL_HALT_FUNC 0x2
+#define SCSW_FCTL_START_FUNC 0x4
+
+#define SCSW_ACTL_SUSPENDED 0x1
+#define SCSW_ACTL_DEVACT 0x2
+#define SCSW_ACTL_SCHACT 0x4
+#define SCSW_ACTL_CLEAR_PEND 0x8
+#define SCSW_ACTL_HALT_PEND 0x10
+#define SCSW_ACTL_START_PEND 0x20
+#define SCSW_ACTL_RESUME_PEND 0x40
+
+#define SCSW_STCTL_STATUS_PEND 0x1
+#define SCSW_STCTL_SEC_STATUS 0x2
+#define SCSW_STCTL_PRIM_STATUS 0x4
+#define SCSW_STCTL_INTER_STATUS 0x8
+#define SCSW_STCTL_ALERT_STATUS 0x10
+
+#define DEV_STAT_ATTENTION 0x80
+#define DEV_STAT_STAT_MOD 0x40
+#define DEV_STAT_CU_END 0x20
+#define DEV_STAT_BUSY 0x10
+#define DEV_STAT_CHN_END 0x08
+#define DEV_STAT_DEV_END 0x04
+#define DEV_STAT_UNIT_CHECK 0x02
+#define DEV_STAT_UNIT_EXCEP 0x01
+
+#define SCHN_STAT_PCI 0x80
+#define SCHN_STAT_INCORR_LEN 0x40
+#define SCHN_STAT_PROG_CHECK 0x20
+#define SCHN_STAT_PROT_CHECK 0x10
+#define SCHN_STAT_CHN_DATA_CHK 0x08
+#define SCHN_STAT_CHN_CTRL_CHK 0x04
+#define SCHN_STAT_INTF_CTRL_CHK 0x02
+#define SCHN_STAT_CHAIN_CHECK 0x01
+
+/*
+ * subchannel information block
+ */
+typedef struct {
+ pmcw_t pmcw; /* path management control word */
+ scsw_t scsw; /* subchannel status word */
+ __u8 mda[12]; /* model dependent area */
+ } __attribute__ ((packed,aligned(4))) schib_t;
+
+typedef struct {
+ __u8 cmd_code;/* command code */
+ __u8 flags; /* flags, like IDA addressing, etc. */
+ __u16 count; /* byte count */
+ __u32 cda; /* data address */
+ } ccw1_t __attribute__ ((packed,aligned(8)));
+
+#define CCW_FLAG_DC 0x80
+#define CCW_FLAG_CC 0x40
+#define CCW_FLAG_SLI 0x20
+#define CCW_FLAG_SKIP 0x10
+#define CCW_FLAG_PCI 0x08
+#define CCW_FLAG_IDA 0x04
+#define CCW_FLAG_SUSPEND 0x02
+
+#define CCW_CMD_READ_IPL 0x02
+#define CCW_CMD_NOOP 0x03
+#define CCW_CMD_BASIC_SENSE 0x04
+#define CCW_CMD_TIC 0x08
+#define CCW_CMD_SENSE_PGID 0x34
+#define CCW_CMD_RDC 0x64
+#define CCW_CMD_SET_PGID 0xAF
+#define CCW_CMD_SENSE_ID 0xE4
+
+#define SENSE_MAX_COUNT 0x20
+
+/*
+ * architectured values for first sense byte
+ */
+#define SNS0_CMD_REJECT 0x80
+#define SNS_CMD_REJECT SNS0_CMD_REJECT
+#define SNS0_INTERVENTION_REQ 0x40
+#define SNS0_BUS_OUT_CHECK 0x20
+#define SNS0_EQUIPMENT_CHECK 0x10
+#define SNS0_DATA_CHECK 0x08
+#define SNS0_OVERRUN 0x04
+
+/*
+ * operation request block
+ */
+typedef struct {
+ __u32 intparm; /* interruption parameter */
+ __u32 key : 4; /* flags, like key, suspend control, etc. */
+ __u32 spnd : 1; /* suspend control */
+ __u32 res1 : 3; /* reserved */
+ __u32 fmt : 1; /* format control */
+ __u32 pfch : 1; /* prefetch control */
+ __u32 isic : 1; /* initial-status-interruption control */
+ __u32 alcc : 1; /* address-limit-checking control */
+ __u32 ssic : 1; /* suppress-suspended-interr. control */
+ __u32 res2 : 3; /* reserved */
+ __u32 lpm : 8; /* logical path mask */
+ __u32 ils : 1; /* incorrect length */
+ __u32 zero : 7; /* reserved zeros */
+ __u32 cpa; /* channel program address */
+ } __attribute__ ((packed,aligned(4))) orb_t;
+
+typedef struct {
+ __u32 res0 : 4; /* reserved */
+ __u32 pvrf : 1; /* path-verification-required flag */
+ __u32 cpt : 1; /* channel-path timeout */
+ __u32 fsavf : 1; /* Failing storage address validity flag */
+ __u32 cons : 1; /* concurrent-sense */
+ __u32 res8 : 2; /* reserved */
+ __u32 scnt : 6; /* sense count if cons == 1 */
+ __u32 res16 : 16; /* reserved */
+ } __attribute__ ((packed)) erw_t;
+
+/*
+ * subchannel logout area
+ */
+typedef struct {
+ __u32 res0 : 1; /* reserved */
+ __u32 esf : 7; /* extended status flags */
+ __u32 lpum : 8; /* last path used mask */
+ __u32 res16 : 1; /* reserved */
+ __u32 fvf : 5; /* field-validity flags */
+ __u32 sacc : 2; /* storage access code */
+ __u32 termc : 2; /* termination code */
+ __u32 devsc : 1; /* device-status check */
+ __u32 serr : 1; /* secondary error */
+ __u32 ioerr : 1; /* i/o-error alert */
+ __u32 seqc : 3; /* sequence code */
+ } __attribute__ ((packed)) sublog_t ;
+
+/*
+ * Format 0 Extended Status Word (ESW)
+ */
+typedef struct {
+ sublog_t sublog; /* subchannel logout */
+ erw_t erw; /* extended report word */
+ __u32 faddr; /* failing address */
+ __u32 zeros[2]; /* 2 fullwords of zeros */
+ } __attribute__ ((packed)) esw0_t;
+
+/*
+ * Format 1 Extended Status Word (ESW)
+ */
+typedef struct {
+ __u8 zero0; /* reserved zeros */
+ __u8 lpum; /* last path used mask */
+ __u8 zero16; /* reserved zeros */
+ erw_t erw; /* extended report word */
+ __u32 zeros[3]; /* 3 fullwords of zeros */
+ } __attribute__ ((packed)) esw1_t;
+
+/*
+ * Format 2 Extended Status Word (ESW)
+ */
+typedef struct {
+ __u8 zero0; /* reserved zeros */
+ __u8 lpum; /* last path used mask */
+ __u16 dcti; /* device-connect-time interval */
+ erw_t erw; /* extended report word */
+ __u32 zeros[3]; /* 3 fullwords of zeros */
+ } __attribute__ ((packed)) esw2_t;
+
+/*
+ * Format 3 Extended Status Word (ESW)
+ */
+typedef struct {
+ __u8 zero0; /* reserved zeros */
+ __u8 lpum; /* last path used mask */
+ __u16 res; /* reserved */
+ erw_t erw; /* extended report word */
+ __u32 zeros[3]; /* 3 fullwords of zeros */
+ } __attribute__ ((packed)) esw3_t;
+
+typedef union {
+ esw0_t esw0;
+ esw1_t esw1;
+ esw2_t esw2;
+ esw3_t esw3;
+ } __attribute__ ((packed)) esw_t;
+
+/*
+ * interruption response block
+ */
+typedef struct {
+ scsw_t scsw; /* subchannel status word */
+ esw_t esw; /* extended status word */
+ __u8 ecw[32]; /* extended control word */
+ } irb_t __attribute__ ((packed,aligned(4)));
+
+/*
+ * TPI info structure
+ */
+typedef struct {
+ __u32 res : 16; /* reserved 0x00000001 */
+ __u32 irq : 16; /* aka. subchannel number */
+ __u32 intparm; /* interruption parameter */
+ } __attribute__ ((packed)) tpi_info_t;
+
+
+/*
+ * This is the "IRQ descriptor", which contains various information
+ * about the irq, including what kind of hardware handling it has,
+ * whether it is disabled etc etc.
+ *
+ * Pad this out to 32 bytes for cache and indexing reasons.
+ */
+typedef struct {
+ __u32 status; /* IRQ status - IRQ_INPROGRESS, IRQ_DISABLED */
+ struct hw_interrupt_type *handler; /* handle/enable/disable functions */
+ struct irqaction *action; /* IRQ action list */
+ } irq_desc_t;
+
+//
+// command information word (CIW) layout
+//
+typedef struct _ciw {
+ __u32 et : 2; // entry type
+ __u32 reserved : 2; // reserved
+ __u32 ct : 4; // command type
+ __u32 cmd : 8; // command
+ __u32 count : 16; // count
+ } __attribute__ ((packed)) ciw_t;
+
+#define CIW_TYPE_RCD 0x0 // read configuration data
+#define CIW_TYPE_SII 0x1 // set interface identifier
+#define CIW_TYPE_RNI 0x2 // read node identifier
+
+//
+// sense-id response buffer layout
+//
+typedef struct {
+ /* common part */
+ __u8 reserved; /* always 0x'FF' */
+ __u16 cu_type; /* control unit type */
+ __u8 cu_model; /* control unit model */
+ __u16 dev_type; /* device type */
+ __u8 dev_model; /* device model */
+ __u8 unused; /* padding byte */
+ /* extended part */
+ ciw_t ciw[62]; /* variable # of CIWs */
+ } __attribute__ ((packed,aligned(4))) senseid_t;
+
+/*
+ * sense data
+ */
+typedef struct {
+ __u8 res[32]; /* reserved */
+ __u8 data[32]; /* sense data */
+ } __attribute__ ((packed)) sense_t;
+
+/*
+ * device status area, to be provided by the device driver
+ * when calling request_irq() as parameter "dev_id", later
+ * tied to the "action" control block.
+ *
+ * Note: no data may be added after union ii, or the effective
+ * devstat size calculation will fail!
+ */
+typedef struct {
+ __u16 devno; /* device number, aka. "cuu" from irb */
+ unsigned int intparm; /* interrupt parameter */
+ __u8 cstat; /* channel status - accumulated */
+ __u8 dstat; /* device status - accumulated */
+ __u8 lpum; /* last path used mask from irb */
+ __u8 unused; /* not used - reserved */
+ unsigned int flag; /* flag : see below */
+ __u32 cpa; /* CCW address from irb at primary status */
+ __u32 rescnt; /* res. count from irb at primary status */
+ __u32 scnt; /* sense count, if DEVSTAT_FLAG_SENSE_AVAIL */
+ union {
+ irb_t irb; /* interruption response block */
+ sense_t sense; /* sense information */
+ } ii; /* interrupt information */
+ } devstat_t;
+
+#define DEVSTAT_FLAG_SENSE_AVAIL 0x00000001
+#define DEVSTAT_NOT_OPER 0x00000002
+#define DEVSTAT_START_FUNCTION 0x00000004
+#define DEVSTAT_HALT_FUNCTION 0x00000008
+#define DEVSTAT_STATUS_PENDING 0x00000010
+#define DEVSTAT_REVALIDATE 0x00000020
+#define DEVSTAT_DEVICE_GONE 0x00000040
+#define DEVSTAT_DEVICE_OWNED 0x00000080
+#define DEVSTAT_CLEAR_FUNCTION 0x00000100
+#define DEVSTAT_FINAL_STATUS 0x80000000
+
+#define INTPARM_STATUS_PENDING 0xFFFFFFFF
+
+typedef struct {
+ __u8 state1 : 2; /* path state value 1 */
+ __u8 state2 : 2; /* path state value 2 */
+ __u8 state3 : 1; /* path state value 3 */
+ __u8 resvd : 3; /* reserved */
+ } __attribute__ ((packed)) path_state_t;
+
+typedef struct {
+ union {
+ __u8 fc; /* SPID function code */
+ path_state_t ps; /* SNID path state */
+ } inf;
+ __u32 cpu_addr : 16; /* CPU address */
+ __u32 cpu_id : 24; /* CPU identification */
+ __u32 cpu_model : 16; /* CPU model */
+ __u32 tod_high; /* high word TOD clock */
+ } __attribute__ ((packed)) pgid_t;
+
+#define SPID_FUNC_MULTI_PATH 0x80
+#define SPID_FUNC_ESTABLISH 0x00
+#define SPID_FUNC_RESIGN 0x40
+#define SPID_FUNC_DISBAND 0x20
+
+#define SNID_STATE1_RESET 0x0
+#define SNID_STATE1_UNGROUPED 0x8
+#define SNID_STATE1_GROUPED 0xC
+
+#define SNID_STATE2_NOT_RESVD 0x0
+#define SNID_STATE2_RESVD_ELSE 0x8
+#define SNID_STATE2_RESVD_SELF 0xC
+
+#define SNID_STATE3_MULTI_PATH 1
+
+/*
+ * Flags used as input parameters for do_IO()
+ */
+#define DOIO_EARLY_NOTIFICATION 0x01 /* allow for I/O completion ... */
+ /* ... notification after ... */
+ /* ... primary interrupt status */
+#define DOIO_RETURN_CHAN_END DOIO_EARLY_NOTIFICATION
+#define DOIO_VALID_LPM 0x02 /* LPM input parameter is valid */
+#define DOIO_WAIT_FOR_INTERRUPT 0x04 /* wait synchronously for interrupt */
+#define DOIO_REPORT_ALL 0x08 /* report all interrupt conditions */
+#define DOIO_ALLOW_SUSPEND 0x10 /* allow for channel prog. suspend */
+#define DOIO_DENY_PREFETCH 0x20 /* don't allow for CCW prefetch */
+#define DOIO_SUPPRESS_INTER 0x40 /* suppress intermediate inter. */
+ /* ... for suspended CCWs */
+#define DOIO_TIMEOUT 0x80 /* 3 secs. timeout for sync. I/O */
+
+/*
+ * do_IO()
+ *
+ * Start an S/390 channel program. When the interrupt arrives,
+ * handle_IRQ_event() is called, which eventually calls the
+ * IRQ handler, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered -
+ * this should never occur, as the IRQ (subchannel ID) should be
+ * disabled if no handler is present). Depending on the action
+ * taken, do_IO() returns : 0 - Success
+ * -EIO - Status pending
+ * see : action->dev_id->cstat
+ * action->dev_id->dstat
+ * -EBUSY - Device busy
+ * -ENODEV - Device not operational
+ */
+int do_IO( int irq, /* IRQ aka. subchannel number */
+ ccw1_t *cpa, /* logical channel program address */
+ unsigned long intparm, /* interruption parameter */
+ __u8 lpm, /* logical path mask */
+ unsigned long flag); /* flags : see above */
+
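+/*
+ * Minimal usage sketch (hypothetical driver code, illustrative only):
+ * start a single NOOP CCW on a subchannel; the intparm value is an
+ * arbitrary tag that is returned with the interrupt.
+ */
+extern __inline__ int example_start_noop(int irq, ccw1_t *ccw)
+{
+        ccw->cmd_code = CCW_CMD_NOOP;
+        ccw->flags    = CCW_FLAG_SLI;   /* suppress incorrect-length indication */
+        ccw->count    = 0;
+        ccw->cda      = 0;
+
+        /* returns 0, -EIO, -EBUSY or -ENODEV as described above */
+        return do_IO(irq, ccw, 0x00c0ffee, 0, DOIO_REPORT_ALL);
+}
+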
+int start_IO( int irq, /* IRQ aka. subchannel number */
+ ccw1_t *cpa, /* logical channel program address */
+ unsigned int intparm, /* interruption parameter */
+ __u8 lpm, /* logical path mask */
+ unsigned int flag); /* flags : see above */
+
+void do_crw_pending( void ); /* CRW handler */
+
+int resume_IO( int irq); /* IRQ aka. subchannel number */
+
+int halt_IO( int irq, /* IRQ aka. subchannel number */
+ unsigned long intparm, /* dummy intparm */
+ unsigned long flag); /* possible DOIO_WAIT_FOR_INTERRUPT */
+
+int clear_IO( int irq, /* IRQ aka. subchannel number */
+ unsigned long intparm, /* dummy intparm */
+ unsigned long flag); /* possible DOIO_WAIT_FOR_INTERRUPT */
+
+int process_IRQ( struct pt_regs regs,
+ unsigned int irq,
+ unsigned int intparm);
+
+
+int enable_cpu_sync_isc ( int irq );
+int disable_cpu_sync_isc( int irq );
+
+typedef struct {
+ int irq; /* irq, aka. subchannel */
+ __u16 devno; /* device number */
+ unsigned int status; /* device status */
+ senseid_t sid_data; /* senseID data */
+ } dev_info_t;
+
+int get_dev_info( int irq, dev_info_t *); /* to be eliminated - don't use */
+
+int get_dev_info_by_irq ( int irq, dev_info_t *pdi);
+int get_dev_info_by_devno( __u16 devno, dev_info_t *pdi);
+
+int get_irq_by_devno( __u16 devno );
+unsigned int get_devno_by_irq( int irq );
+
+int get_irq_first( void );
+int get_irq_next ( int irq );
+
+int read_dev_chars( int irq, void **buffer, int length );
+int read_conf_data( int irq, void **buffer, int *length );
+
+extern int handle_IRQ_event( unsigned int irq, int cpu, struct pt_regs *);
+
+extern int set_cons_dev(int irq);
+extern int reset_cons_dev(int irq);
+extern int wait_cons_dev(int irq);
+
+/*
+ * Some S390 specific IO instructions as inline
+ */
+
+extern __inline__ int stsch(int irq, volatile schib_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "STSCH 0(%2)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L), "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int msch(int irq, volatile schib_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "MSCH 0(%2)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L), "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int msch_err(int irq, volatile schib_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ " lr 1,%1\n"
+ " msch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ ".section .fixup,\"ax\"\n"
+ "2: l %0,%3\n"
+ " bras 1,3f\n"
+ " .long 1b\n"
+ "3: l 1,0(1)\n"
+ " br 1\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,2b\n"
+ ".previous"
+ : "=d" (ccode)
+ : "r" (irq | 0x10000L), "a" (addr), "i" (__LC_PGM_ILC)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int tsch(int irq, volatile irb_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "TSCH 0(%2)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L), "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int tpi( volatile tpi_info_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "TPI 0(%1)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int ssch(int irq, volatile orb_t *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "SSCH 0(%2)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L), "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int rsch(int irq)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "RSCH\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int csch(int irq)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "CSCH\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int hsch(int irq)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ "HSCH\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "r" (irq | 0x10000L)
+ : "cc", "1" );
+ return ccode;
+}
+
+extern __inline__ int iac( void)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "IAC 1\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : : "cc", "1" );
+ return ccode;
+}
+
+typedef struct {
+ __u16 vrdcdvno : 16; /* device number (input) */
+ __u16 vrdclen : 16; /* data block length (input) */
+ __u32 vrdcvcla : 8; /* virtual device class (output) */
+ __u32 vrdcvtyp : 8; /* virtual device type (output) */
+ __u32 vrdcvsta : 8; /* virtual device status (output) */
+ __u32 vrdcvfla : 8; /* virtual device flags (output) */
+ __u32 vrdcrccl : 8; /* real device class (output) */
+ __u32 vrdccrty : 8; /* real device type (output) */
+ __u32 vrdccrmd : 8; /* real device model (output) */
+ __u32 vrdccrft : 8; /* real device feature (output) */
+ } __attribute__ ((packed,aligned(4))) diag210_t;
+
+void VM_virtual_device_info( __u16 devno, /* device number */
+ senseid_t *ps ); /* ptr to senseID data */
+
+extern __inline__ int diag210( diag210_t * addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "LR 1,%1\n\t"
+ ".long 0x83110210\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "a" (addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+/*
+ * Various low-level irq details needed by irq.c, process.c,
+ * time.c, io_apic.c and smp.c
+ *
+ * Interrupt entry/exit code at both C and assembly level
+ */
+
+void mask_irq(unsigned int irq);
+void unmask_irq(unsigned int irq);
+
+#define MAX_IRQ_SOURCES 128
+
+extern spinlock_t irq_controller_lock;
+
+#ifdef CONFIG_SMP
+
+#include <asm/atomic.h>
+
+static inline void irq_enter(int cpu, unsigned int irq)
+{
+ hardirq_enter(cpu);
+ while (test_bit(0,&global_irq_lock)) {
+ eieio();
+ }
+}
+
+static inline void irq_exit(int cpu, unsigned int irq)
+{
+ hardirq_exit(cpu);
+ release_irqlock(cpu);
+}
+
+
+#else
+
+#define irq_enter(cpu, irq) (++local_irq_count[cpu])
+#define irq_exit(cpu, irq) (--local_irq_count[cpu])
+
+#endif
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#ifdef CONFIG_SMP
+
+/*
+ * SMP has a few special interrupts for IPI messages
+ */
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Profiling function (taken from the x86 code), SMP safe. We might
+ * want to do this entirely in assembly?
+ */
+static inline void s390_do_profile (unsigned long addr)
+{
+#if 0
+ if (prof_buffer && current->pid) {
+ addr -= (unsigned long) &_stext;
+ addr >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds EIP values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (addr > prof_len-1)
+ addr = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[addr]);
+ }
+#endif
+}
+
+#include <asm/s390io.h>
+
+#define s390irq_spin_lock(irq) \
+ spin_lock(&(ioinfo[irq]->irq_lock))
+
+#define s390irq_spin_unlock(irq) \
+ spin_unlock(&(ioinfo[irq]->irq_lock))
+
+#define s390irq_spin_lock_irqsave(irq,flags) \
+ spin_lock_irqsave(&(ioinfo[irq]->irq_lock), flags)
+#define s390irq_spin_unlock_irqrestore(irq,flags) \
+ spin_unlock_irqrestore(&(ioinfo[irq]->irq_lock), flags)
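+
+/*
+ * Typical (illustrative) use of the locking helpers above when issuing a
+ * channel command; "orb" and "ccode" are assumed to exist in the caller:
+ *
+ *	unsigned long flags;
+ *
+ *	s390irq_spin_lock_irqsave(irq, flags);
+ *	ccode = ssch(irq, &orb);
+ *	s390irq_spin_unlock_irqrestore(irq, flags);
+ */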
+#endif
+
diff --git a/include/asm-s390/irqextras390.h b/include/asm-s390/irqextras390.h
new file mode 100644
index 000000000..0ca2f718a
--- /dev/null
+++ b/include/asm-s390/irqextras390.h
@@ -0,0 +1,151 @@
+/*
+ * include/asm-s390/irqextras390.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef __irqextras390_h
+#define __irqextras390_h
+
+/*
+ irqextras390.h by D.J. Barrow
+ If you are a bitfield fan & are paranoid that ANSI doesn't
+ give hard definitions about the size of an int or long, you might
+ prefer these definitions as an alternative.
+*/
+
+#include <linux/types.h>
+
+typedef struct
+{
+ unsigned key:4;
+ unsigned s:1;
+ unsigned l:1;
+ unsigned cc:2;
+ unsigned f:1;
+ unsigned p:1;
+ unsigned i:1;
+ unsigned a:1;
+ unsigned u:1;
+ unsigned z:1;
+ unsigned e:1;
+ unsigned n:1;
+ unsigned zero:1;
+
+ unsigned fc_start:1;
+ unsigned fc_halt:1;
+ unsigned fc_clear:1;
+
+ unsigned ac_resume_pending:1;
+ unsigned ac_start_pending:1;
+ unsigned ac_halt_pending:1;
+ unsigned ac_clear_pending:1;
+ unsigned ac_subchannel_active:1;
+ unsigned ac_device_active:1;
+ unsigned ac_suspended:1;
+
+ unsigned sc_alert:1;
+ unsigned sc_intermediate:1;
+ unsigned sc_primary:1;
+ unsigned sc_seconary:1;
+ unsigned sc_status_pending:1;
+
+ __u32 ccw_address;
+
+ unsigned dev_status_attention:1;
+ unsigned dev_status_modifier:1;
+ unsigned dev_status_control_unit_end:1;
+ unsigned dev_status_busy:1;
+ unsigned dev_status_channel_end:1;
+ unsigned dev_status_device_end:1;
+ unsigned dev_status_unit_check:1;
+ unsigned dev_status_unit_exception:1;
+
+ unsigned sch_status_program_cont_int:1;
+ unsigned sch_status_incorrect_length:1;
+ unsigned sch_status_program_check:1;
+ unsigned sch_status_protection_check:1;
+ unsigned sch_status_channel_data_check:1;
+ unsigned sch_status_channel_control_check:1;
+ unsigned sch_status_interface_control_check:1;
+ unsigned sch_status_chaining_check:1;
+
+ __u16 byte_count;
+} scsw_bits_t __attribute__((packed));
+
+typedef struct
+{
+ __u32 flags;
+ __u32 ccw_address;
+ __u8 dev_status;
+ __u8 sch_status;
+ __u16 byte_count;
+} scsw_words_t __attribute__((packed));
+
+typedef struct
+{
+ __u8 cmd_code;
+
+ unsigned cd:1;
+ unsigned cc:1;
+ unsigned sli:1;
+ unsigned skip:1;
+ unsigned pci:1;
+ unsigned ida:1;
+ unsigned s:1;
+ unsigned res1:1;
+
+ __u16 count;
+
+ void *ccw_data_address;
+} ccw1_bits_t __attribute__((packed,aligned(8)));
+
+typedef struct
+{
+ __u32 interruption_parm;
+ unsigned key:4;
+ unsigned s:1;
+ unsigned res1:3;
+ unsigned f:1;
+ unsigned p:1;
+ unsigned i:1;
+ unsigned a:1;
+ unsigned u:1;
+ __u8 lpm;
+ unsigned l:1;
+ unsigned res2:7;
+ ccw1_bits_t *ccw_program_address;
+} orb_bits_t __attribute__((packed));
+
+void fixchannelprogram(orb_bits_t *orbptr);
+void fixccws(ccw1_bits_t *ccwptr);
+enum
+{
+ ccw_write=0x1,
+ ccw_read=0x2,
+ ccw_read_backward=0xc,
+ ccw_control=0x3,
+ ccw_sense=0x4,
+ ccw_sense_id=0xe4,
+ ccw_transfer_in_channel0=0x8,
+ ccw_transfer_in_channel1=0x8,
+ ccw_set_x_mode=0xc3, // according to uli's lan notes
+ ccw_nop=0x3 // according to uli's notes again
+ // n.b. ccw_control clashes with this
+ // so I presume it's a special case of
+ // control
+};
+
+
+
+#endif
+
+
+
+
+
+
+
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
new file mode 100644
index 000000000..22ab31bd8
--- /dev/null
+++ b/include/asm-s390/lowcore.h
@@ -0,0 +1,182 @@
+/*
+ * include/asm-s390/lowcore.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _ASM_S390_LOWCORE_H
+#define _ASM_S390_LOWCORE_H
+
+#define __LC_EXT_OLD_PSW 0x018
+#define __LC_SVC_OLD_PSW 0x020
+#define __LC_PGM_OLD_PSW 0x028
+#define __LC_MCK_OLD_PSW 0x030
+#define __LC_IO_OLD_PSW 0x038
+#define __LC_EXT_NEW_PSW 0x058
+#define __LC_SVC_NEW_PSW 0x060
+#define __LC_PGM_NEW_PSW 0x068
+#define __LC_MCK_NEW_PSW 0x070
+#define __LC_IO_NEW_PSW 0x078
+#define __LC_EXT_PARAMS 0x080
+#define __LC_CPU_ADDRESS 0x084
+#define __LC_EXT_INT_CODE 0x086
+#define __LC_SVC_INT_CODE 0x08B
+#define __LC_PGM_ILC 0x08C
+#define __LC_PGM_INT_CODE 0x08E
+#define __LC_TRANS_EXC_ADDR 0x090
+#define __LC_SUBCHANNEL_ID 0x0B8
+#define __LC_SUBCHANNEL_NR 0x0BA
+#define __LC_IO_INT_PARM 0x0BC
+#define __LC_MCCK_CODE 0x0E8
+#define __LC_AREGS_SAVE_AREA 0x200
+#define __LC_CREGS_SAVE_AREA 0x240
+#define __LC_RETURN_PSW 0x280
+
+#define __LC_SYNC_IO_WORD 0x400
+
+#define __LC_SAVE_AREA 0xC00
+#define __LC_KERNEL_STACK 0xC40
+#define __LC_KERNEL_LEVEL 0xC44
+#define __LC_CPUID 0xC50
+#define __LC_CPUADDR 0xC58
+#define __LC_IPLDEV 0xC6C
+
+
+/* interrupt handlers start with all I/O, external and mcck interrupts disabled */
+
+#define _RESTART_PSW_MASK 0x00080000
+#define _EXT_PSW_MASK 0x04080000
+#define _PGM_PSW_MASK 0x04080000
+#define _SVC_PSW_MASK 0x04080000
+#define _MCCK_PSW_MASK 0x040A0000
+#define _IO_PSW_MASK 0x04080000
+#define _USER_PSW_MASK 0x070DC000/* DAT, IO, EXT, Home-space */
+#define _WAIT_PSW_MASK 0x070E0000/* DAT, IO, EXT, Wait, Home-space */
+#define _DW_PSW_MASK 0x000A0000/* disabled wait PSW mask */
+
+#define _PRIMARY_MASK 0x0000 /* MASK for SACF */
+#define _SECONDARY_MASK 0x0100 /* MASK for SACF */
+#define _ACCESS_MASK 0x0200 /* MASK for SACF */
+#define _HOME_MASK 0x0300 /* MASK for SACF */
+
+#define _PSW_PRIM_SPACE_MODE 0x00000000
+#define _PSW_SEC_SPACE_MODE 0x00008000
+#define _PSW_ACC_REG_MODE 0x00004000
+#define _PSW_HOME_SPACE_MODE 0x0000C000
+
+#define _PSW_WAIT_MASK_BIT 0x00020000 /* Wait bit */
+#define _PSW_IO_MASK_BIT 0x02000000 /* IO bit */
+#define _PSW_IO_WAIT 0x02020000 /* IO & Wait bit */
+
+/* we run in 31 Bit mode */
+#define _ADDR_31 0x80000000
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
+#include <asm/sigp.h>
+
+
+struct _lowcore
+{
+ /* prefix area: defined by architecture */
+ psw_t restart_psw; /* 0x000 */
+ __u32 ccw2[4]; /* 0x008 */
+ psw_t external_old_psw; /* 0x018 */
+ psw_t svc_old_psw; /* 0x020 */
+ psw_t program_old_psw; /* 0x028 */
+ psw_t mcck_old_psw; /* 0x030 */
+ psw_t io_old_psw; /* 0x038 */
+ __u8 pad1[0x58-0x40]; /* 0x040 */
+ psw_t external_new_psw; /* 0x058 */
+ psw_t svc_new_psw; /* 0x060 */
+ psw_t program_new_psw; /* 0x068 */
+ psw_t mcck_new_psw; /* 0x070 */
+ psw_t io_new_psw; /* 0x078 */
+ __u32 ext_params; /* 0x080 */
+ __u16 cpu_addr; /* 0x084 */
+ __u16 ext_int_code; /* 0x086 */
+ __u16 svc_ilc; /* 0x088 */
+ __u16 scv_code; /* 0x08a */
+ __u16 pgm_ilc; /* 0x08c */
+ __u16 pgm_code; /* 0x08e */
+ __u32 trans_exc_code; /* 0x090 */
+ __u16 mon_class_num; /* 0x094 */
+ __u16 per_perc_atmid; /* 0x096 */
+ __u32 per_address; /* 0x098 */
+ __u32 monitor_code; /* 0x09c */
+ __u8 exc_access_id; /* 0x0a0 */
+ __u8 per_access_id; /* 0x0a1 */
+ __u8 pad2[0xB8-0xA2]; /* 0x0a2 */
+ __u16 subchannel_id; /* 0x0b8 */
+ __u16 subchannel_nr; /* 0x0ba */
+ __u32 io_int_parm; /* 0x0bc */
+ __u8 pad3[0xD8-0xC0]; /* 0x0c0 */
+ __u32 cpu_timer_save_area[2]; /* 0x0d8 */
+ __u32 clock_comp_save_area[2]; /* 0x0e0 */
+ __u32 mcck_interuption_code[2]; /* 0x0e8 */
+ __u8 pad4[0xf4-0xf0]; /* 0x0f0 */
+ __u32 external_damage_code; /* 0x0f4 */
+ __u32 failing_storage_address; /* 0x0f8 */
+ __u8 pad5[0x100-0xfc]; /* 0x0fc */
+ __u32 st_status_fixed_logout[4];/* 0x100 */
+ __u8 pad6[0x160-0x110]; /* 0x110 */
+ __u32 floating_pt_save_area[8]; /* 0x160 */
+ __u32 gpregs_save_area[16]; /* 0x180 */
+ __u8 pad7[0x200-0x1c0]; /* 0x1c0 */
+
+ __u32 access_regs_save_area[16];/* 0x200 */
+ __u32 cregs_save_area[16]; /* 0x240 */
+ psw_t return_psw; /* 0x280 */
+ __u8 pad8[0x400-0x288]; /* 0x288 */
+
+ __u32 sync_io_word; /* 0x400 */
+
+ __u8 pad9[0xc00-0x404]; /* 0x404 */
+
+ /* System info area */
+ __u32 save_area[16]; /* 0xc00 */
+ __u32 kernel_stack; /* 0xc40 */
+ __u32 kernel_level; /* 0xc44 */
+ atomic_t local_bh_count; /* 0xc48 */
+ atomic_t local_irq_count; /* 0xc4c */
+ struct cpuinfo_S390 cpu_data; /* 0xc50 */
+ __u32 ipl_device; /* 0xc6c */
+
+ /* SMP info area: defined by DJB */
+ __u64 jiffy_timer_cc; /* 0xc70 */
+ atomic_t ext_call_fast; /* 0xc78 */
+ atomic_t ext_call_queue; /* 0xc7c */
+ atomic_t ext_call_count; /* 0xc80 */
+
+ /* Align SMP info to the top 1k of prefix area */
+ __u8 pad10[0x1000-0xc84]; /* 0xc84 */
+} __attribute__((packed)); /* End structure*/
+
+extern __inline__ void set_prefix(__u32 address)
+{
+ __asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" );
+}
+
+#define S390_lowcore (*((struct _lowcore *) 0))
+extern struct _lowcore *lowcore_ptr[];
+
+#ifndef CONFIG_SMP
+#define get_cpu_lowcore(cpu) S390_lowcore
+#define safe_get_cpu_lowcore(cpu) S390_lowcore
+#else
+#define get_cpu_lowcore(cpu) (*lowcore_ptr[cpu])
+#define safe_get_cpu_lowcore(cpu) \
+ ((cpu)==smp_processor_id() ? S390_lowcore:(*lowcore_ptr[(cpu)]))
+#endif
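+
+/*
+ * Usage sketch (illustrative only): S390_lowcore accesses the lowcore
+ * through address 0, which the hardware redirects to this CPU's prefix
+ * page, so fields can simply be read as structure members:
+ *
+ *	__u32 ipldev = S390_lowcore.ipl_device;
+ *	__u32 ksp    = get_cpu_lowcore(smp_processor_id()).kernel_stack;
+ */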
+#endif /* __ASSEMBLY__ */
+
+#endif
+
diff --git a/include/asm-s390/major.h b/include/asm-s390/major.h
new file mode 100644
index 000000000..f07022803
--- /dev/null
+++ b/include/asm-s390/major.h
@@ -0,0 +1,150 @@
+#ifndef _LINUX_MAJOR_H
+#define _LINUX_MAJOR_H
+
+/*
+ * This file has definitions for major device numbers.
+ * For the device number assignments, see Documentation/devices.txt.
+ */
+
+/* limits */
+
+/*
+ * Important: Don't change this to 256. Major number 255 is and must be
+ * reserved for future expansion into a larger dev_t space.
+ */
+#define MAX_CHRDEV 255
+#define MAX_BLKDEV 255
+
+#define UNNAMED_MAJOR 0
+#define MEM_MAJOR 1
+#define RAMDISK_MAJOR 1
+#define FLOPPY_MAJOR 2
+#define PTY_MASTER_MAJOR 2
+#define IDE0_MAJOR 3
+#define PTY_SLAVE_MAJOR 3
+#define HD_MAJOR IDE0_MAJOR
+#define TTY_MAJOR 4
+#define TTYAUX_MAJOR 5
+#define LP_MAJOR 6
+#define VCS_MAJOR 7
+#define LOOP_MAJOR 7
+#define SCSI_DISK0_MAJOR 8
+#define SCSI_TAPE_MAJOR 9
+#define MD_MAJOR 9
+#define MISC_MAJOR 10
+#define SCSI_CDROM_MAJOR 11
+#define QIC02_TAPE_MAJOR 12
+#define XT_DISK_MAJOR 13
+#define SOUND_MAJOR 14
+#define CDU31A_CDROM_MAJOR 15
+#define JOYSTICK_MAJOR 15
+#define GOLDSTAR_CDROM_MAJOR 16
+#define OPTICS_CDROM_MAJOR 17
+#define SANYO_CDROM_MAJOR 18
+#define CYCLADES_MAJOR 19
+#define CYCLADESAUX_MAJOR 20
+#define MITSUMI_X_CDROM_MAJOR 20
+#define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */
+#define SCSI_GENERIC_MAJOR 21
+#define Z8530_MAJOR 34
+#define DIGI_MAJOR 23
+#define IDE1_MAJOR 22
+#define DIGICU_MAJOR 22
+#define MITSUMI_CDROM_MAJOR 23
+#define CDU535_CDROM_MAJOR 24
+#define STL_SERIALMAJOR 24
+#define MATSUSHITA_CDROM_MAJOR 25
+#define STL_CALLOUTMAJOR 25
+#define MATSUSHITA_CDROM2_MAJOR 26
+#define QIC117_TAPE_MAJOR 27
+#define MATSUSHITA_CDROM3_MAJOR 27
+#define MATSUSHITA_CDROM4_MAJOR 28
+#define STL_SIOMEMMAJOR 28
+#define ACSI_MAJOR 28
+#define AZTECH_CDROM_MAJOR 29
+#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */
+#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */
+#define CM206_CDROM_MAJOR 32
+#define IDE2_MAJOR 33
+#define IDE3_MAJOR 34
+#define NETLINK_MAJOR 36
+#define PS2ESDI_MAJOR 36
+#define IDETAPE_MAJOR 37
+#define Z2RAM_MAJOR 37
+#define APBLOCK_MAJOR 38 /* AP1000 Block device */
+#define DDV_MAJOR 39 /* AP1000 DDV block device */
+#define NBD_MAJOR 43 /* Network block device */
+#define RISCOM8_NORMAL_MAJOR 48
+#define DAC960_MAJOR 48 /* 48..55 */
+#define RISCOM8_CALLOUT_MAJOR 49
+#define MKISS_MAJOR 55
+#define DSP56K_MAJOR 55 /* DSP56001 processor device */
+
+#define IDE4_MAJOR 56
+#define IDE5_MAJOR 57
+
+#define SCSI_DISK1_MAJOR 65
+#define SCSI_DISK2_MAJOR 66
+#define SCSI_DISK3_MAJOR 67
+#define SCSI_DISK4_MAJOR 68
+#define SCSI_DISK5_MAJOR 69
+#define SCSI_DISK6_MAJOR 70
+#define SCSI_DISK7_MAJOR 71
+
+
+#define LVM_BLK_MAJOR 58 /* Logical Volume Manager */
+
+#define COMPAQ_SMART2_MAJOR 72
+#define COMPAQ_SMART2_MAJOR1 73
+#define COMPAQ_SMART2_MAJOR2 74
+#define COMPAQ_SMART2_MAJOR3 75
+#define COMPAQ_SMART2_MAJOR4 76
+#define COMPAQ_SMART2_MAJOR5 77
+#define COMPAQ_SMART2_MAJOR6 78
+#define COMPAQ_SMART2_MAJOR7 79
+
+#define SPECIALIX_NORMAL_MAJOR 75
+#define SPECIALIX_CALLOUT_MAJOR 76
+
+#define DASD_MAJOR 94
+
+#define LVM_CHAR_MAJOR 109 /* Logical Volume Manager */
+
+#define MDISK_MAJOR 64
+
+#define I2O_MAJOR 80 /* 80->87 */
+
+#define IDE6_MAJOR 88
+#define IDE7_MAJOR 89
+#define IDE8_MAJOR 90
+#define IDE9_MAJOR 91
+
+#define AURORA_MAJOR 79
+
+#define RTF_MAJOR 150
+#define RAW_MAJOR 162
+
+#define USB_ACM_MAJOR 166
+#define USB_ACM_AUX_MAJOR 167
+#define USB_CHAR_MAJOR 180
+
+#define UNIX98_PTY_MASTER_MAJOR 128
+#define UNIX98_PTY_MAJOR_COUNT 8
+#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
+
+/*
+ * Tests for SCSI devices.
+ */
+
+#define SCSI_DISK_MAJOR(M) ((M) == SCSI_DISK0_MAJOR || \
+ ((M) >= SCSI_DISK1_MAJOR && (M) <= SCSI_DISK7_MAJOR))
+
+#define SCSI_BLK_MAJOR(M) \
+ (SCSI_DISK_MAJOR(M) \
+ || (M) == SCSI_CDROM_MAJOR)
+
+static __inline__ int scsi_blk_major(int m) {
+ return SCSI_BLK_MAJOR(m);
+}
+
+#endif
diff --git a/include/asm-s390/mathemu.h b/include/asm-s390/mathemu.h
new file mode 100644
index 000000000..c78d97b43
--- /dev/null
+++ b/include/asm-s390/mathemu.h
@@ -0,0 +1,48 @@
+/*
+ * arch/s390/kernel/mathemu.h
+ * IEEE floating point emulation.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#ifndef __MATHEMU__
+#define __MATHEMU__
+
+extern int math_emu_b3(__u8 *, struct pt_regs *);
+extern int math_emu_ed(__u8 *, struct pt_regs *);
+extern void math_emu_ldr(__u8 *);
+extern void math_emu_ler(__u8 *);
+extern void math_emu_std(__u8 *, struct pt_regs *);
+extern void math_emu_ld(__u8 *, struct pt_regs *);
+extern void math_emu_ste(__u8 *, struct pt_regs *);
+extern void math_emu_le(__u8 *, struct pt_regs *);
+extern int math_emu_lfpc(__u8 *, struct pt_regs *);
+extern int math_emu_stfpc(__u8 *, struct pt_regs *);
+extern int math_emu_srnm(__u8 *, struct pt_regs *);
+
+
+extern __u64 __adddf3(__u64,__u64);
+extern __u64 __subdf3(__u64,__u64);
+extern __u64 __muldf3(__u64,__u64);
+extern __u64 __divdf3(__u64,__u64);
+extern long __cmpdf2(__u64,__u64);
+extern __u64 __negdf2(__u64);
+extern __u64 __absdf2(__u64);
+extern __u32 __addsf3(__u32,__u32);
+extern __u32 __subsf3(__u32,__u32);
+extern __u32 __mulsf3(__u32,__u32);
+extern __u32 __divsf3(__u32,__u32);
+extern __u32 __negsf2(__u32);
+extern __u32 __abssf2(__u32);
+extern long __cmpsf2(__u32,__u32);
+extern __u32 __truncdfsf2(__u64);
+extern __u32 __fixsfsi(__u32);
+extern __u32 __fixdfsi(__u64);
+extern __u64 __floatsidf(__u32);
+extern __u32 __floatsisf(__u32);
+extern __u64 __extendsfdf2(__u32);
+
+#endif /* __MATHEMU__ */
+
diff --git a/include/asm-s390/misc390.h b/include/asm-s390/misc390.h
new file mode 100644
index 000000000..43d89ccfb
--- /dev/null
+++ b/include/asm-s390/misc390.h
@@ -0,0 +1,14 @@
+/*
+ * include/asm-s390/misc390.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#define allocaligned2(type,name,number,align) \
+ __u8 name##buff[(sizeof(type)*(number+1))-1]; \
+ type *name=(type *)(((__u32)(&name##buff[align-1]))&(-align))
+
+#define allocaligned(type,name,number) allocaligned2(type,name,number,__alignof__(type))
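+
+/*
+ * Usage sketch (illustrative only; "mybuf_t" is a made-up type):
+ *
+ *	allocaligned(mybuf_t, buf, 4);
+ *	// "buf" now points into the on-stack byte array "bufbuff",
+ *	// rounded up to __alignof__(mybuf_t), with room for 4 entries.
+ */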
+
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
new file mode 100644
index 000000000..8fbe65fd5
--- /dev/null
+++ b/include/asm-s390/mman.h
@@ -0,0 +1,46 @@
+/*
+ * include/asm-s390/mman.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/mman.h"
+ */
+
+#ifndef __S390_MMAN_H__
+#define __S390_MMAN_H__
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_NONE 0x0 /* page can not be accessed */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#define MADV_NORMAL 0x0 /* default page-in behavior */
+#define MADV_RANDOM 0x1 /* page-in minimum required */
+#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
+#define MADV_WILLNEED 0x3 /* pre-fault pages */
+#define MADV_DONTNEED 0x4 /* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FILE 0
+
+#endif /* __S390_MMAN_H__ */
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
new file mode 100644
index 000000000..c2a215135
--- /dev/null
+++ b/include/asm-s390/mmu_context.h
@@ -0,0 +1,45 @@
+/*
+ * include/asm-s390/mmu_context.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/mmu_context.h"
+ */
+
+#ifndef __S390_MMU_CONTEXT_H
+#define __S390_MMU_CONTEXT_H
+
+/*
+ * get a new mmu context. S390 doesn't know about contexts.
+ */
+#define init_new_context(tsk,mm) do { } while (0)
+
+#define destroy_context(mm) flush_tlb_mm(mm)
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk, unsigned cpu)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk, unsigned cpu)
+{
+ unsigned long pgd;
+
+ if (prev != next) {
+ pgd = (__pa(next->pgd) & PAGE_MASK) | _SEGMENT_TABLE;
+ /* Load page tables */
+ asm volatile(" lctl 7,7,%0\n" /* secondary space */
+ " lctl 13,13,%0\n" /* home space */
+ : : "m" (pgd) );
+ }
+ set_bit(cpu, &next->cpu_vm_mask);
+}
+
+extern inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, current, smp_processor_id());
+}
+
+#endif
diff --git a/include/asm-s390/msgbuf.h b/include/asm-s390/msgbuf.h
new file mode 100644
index 000000000..f4aac0220
--- /dev/null
+++ b/include/asm-s390/msgbuf.h
@@ -0,0 +1,31 @@
+#ifndef _S390_MSGBUF_H
+#define _S390_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for S/390 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _S390_MSGBUF_H */
diff --git a/include/asm-s390/namei.h b/include/asm-s390/namei.h
new file mode 100644
index 000000000..524b93937
--- /dev/null
+++ b/include/asm-s390/namei.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-s390/namei.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/namei.h"
+ *
+ * Included from linux/fs/namei.c
+ */
+
+#ifndef __S390_NAMEI_H
+#define __S390_NAMEI_H
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __prefix_lookup_dentry(name, lookup_flags) \
+ do {} while (0)
+
+#endif /* __S390_NAMEI_H */
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
new file mode 100644
index 000000000..49be624a9
--- /dev/null
+++ b/include/asm-s390/page.h
@@ -0,0 +1,120 @@
+/*
+ * include/asm-s390/page.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ */
+
+#ifndef _S390_PAGE_H
+#define _S390_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#define STRICT_MM_TYPECHECKS
+
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ __asm__ __volatile__(".word 0x0000"); \
+} while (0)
+
+#define PAGE_BUG(page) do { \
+ BUG(); \
+} while (0)
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
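+
+/*
+ * Worked example (illustrative only): with 4kB pages, get_order(20*1024)
+ * returns 3, i.e. 2^3 = 8 pages, enough for a 20kB allocation:
+ *
+ *	int order = get_order(20 * 1024);			// order == 3
+ *	unsigned long p = __get_free_pages(GFP_KERNEL, order);	// 32kB
+ */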
+
+/*
+ * gcc uses a builtin (i.e. MVCLE) for both operations
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct {
+ unsigned long pgd0;
+ unsigned long pgd1;
+ unsigned long pgd2;
+ unsigned long pgd3;
+ } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd0)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef struct {
+ unsigned long pgd0;
+ unsigned long pgd1;
+ unsigned long pgd2;
+ unsigned long pgd3;
+ } pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+
+#define __PAGE_OFFSET (0x0)
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
+#define PHYSMAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)
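+
+/*
+ * Since PAGE_OFFSET is 0 on S390, __pa() and __va() are identity
+ * conversions and MAP_NR() simply yields the page frame number
+ * (illustrative example):
+ *
+ *	void *p = (void *) 0x12345000;
+ *	unsigned long pfn = MAP_NR(p);		// 0x12345
+ *	// the corresponding struct page is mem_map + pfn
+ */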
+
+#endif /* __KERNEL__ */
+
+#endif /* _S390_PAGE_H */
diff --git a/include/asm-s390/param.h b/include/asm-s390/param.h
new file mode 100644
index 000000000..147daa647
--- /dev/null
+++ b/include/asm-s390/param.h
@@ -0,0 +1,28 @@
+/*
+ * include/asm-s390/param.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/param.h"
+ */
+
+#ifndef _ASMS390_PARAM_H
+#define _ASMS390_PARAM_H
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
new file mode 100644
index 000000000..c1fed9346
--- /dev/null
+++ b/include/asm-s390/pgalloc.h
@@ -0,0 +1,345 @@
+/*
+ * include/asm-s390/pgalloc.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/pgalloc.h"
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef _S390_PGALLOC_H
+#define _S390_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <linux/threads.h>
+
+#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
+#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
+extern __inline__ pgd_t* get_pgd_slow(void)
+{
+ int i;
+ pgd_t *pgd,*ret = (pgd_t *)__get_free_pages(GFP_KERNEL,2);
+ if (ret)
+ for (i=0,pgd=ret;i<USER_PTRS_PER_PGD;i++,pgd++)
+ pmd_clear(pmd_offset(pgd,i*PGDIR_SIZE));
+ return ret;
+}
+
+extern __inline__ pgd_t* get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ /*
+ * Need to flush the TLB, since private page tables
+ * are unique through the address of the pgd and the virtual address.
+ * If we reuse a pgd we need to be sure no TLB entry
+ * with that pgd is left -> global flush
+ *
+ * Fixme: To avoid this global flush we should
+ * use pgd_quicklist as a fixed-length FIFO list
+ * and not as a stack
+ */
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd,2);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t* get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t * page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(page);
+ pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(page+1024);
+ pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(page+2048);
+ pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(page+3072);
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+
+ if (pmd_none(*pmd))
+ goto getnew;
+ if (pmd_bad(*pmd))
+ goto fix;
+ return (pte_t *) pmd_page(*pmd) + address;
+getnew:
+{
+ unsigned long page = (unsigned long) get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ pmd_val(pmd[0]) = _PAGE_TABLE + __pa(page);
+ pmd_val(pmd[1]) = _PAGE_TABLE + __pa(page+1024);
+ pmd_val(pmd[2]) = _PAGE_TABLE + __pa(page+2048);
+ pmd_val(pmd[3]) = _PAGE_TABLE + __pa(page+3072);
+ return (pte_t *) page + address;
+}
+fix:
+ __handle_bad_pmd(pmd);
+ return NULL;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+extern inline void pmd_free(pmd_t * pmd)
+{
+}
+
+extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+#define pmd_free_kernel pmd_free
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
+
+#define set_pgdir(addr,entry) do { } while(0)
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * called only from vmalloc/vfree
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ */
+
+/*
+ * s390 has two ways of flushing TLBs
+ * 'ptlb' does a flush of the local processor
+ * 'ipte' invalidates a pte in a page table and flushes that out of
+ * the TLBs of all PUs of an SMP system
+ */
+
+#define __flush_tlb() \
+do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
+
+
+static inline void __flush_global_tlb(void)
+{
+ int cs1=0,dum=0;
+ int *adr;
+ long long dummy=0;
+ adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
+ __asm__ __volatile__("lr 2,%0\n\t"
+ "lr 3,%1\n\t"
+ "lr 4,%2\n\t"
+ ".long 0xb2500024" :
+ : "d" (cs1), "d" (dum), "d" (adr)
+ : "2", "3", "4");
+}
+
+#if 0
+#define flush_tlb_one(a,b) __flush_tlb()
+#define __flush_tlb_one(a,b) __flush_tlb()
+#else
+static inline void __flush_tlb_one(struct mm_struct *mm,
+ unsigned long addr)
+{
+ pgd_t * pgdir;
+ pmd_t * pmd;
+ pte_t * pte, *pto;
+
+ pgdir = pgd_offset(mm, addr);
+ if (pgd_none(*pgdir) || pgd_bad(*pgdir))
+ return;
+ pmd = pmd_offset(pgdir, addr);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return;
+ pte = pte_offset(pmd,addr);
+
+ /*
+ * S390 has 1MB segments; we are emulating 4MB segments
+ */
+
+ pto = (pte_t*) (((unsigned long) pte) & 0x7ffffc00);
+
+ __asm__ __volatile(" ic 0,2(%0)\n"
+ " ipte %1,%2\n"
+ " stc 0,2(%0)"
+ : : "a" (pte), "a" (pto), "a" (addr): "0");
+}
+#endif
+
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb()
+#define local_flush_tlb() __flush_tlb()
+
+/*
+ * We always need to flush, since s390 does not flush tlb
+ * on each context switch
+ */
+
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ __flush_tlb_one(vma->vm_mm,addr);
+}
+
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ __flush_tlb();
+}
+
+#else
+
+/*
+ * We aren't very clever about this yet - SMP could certainly
+ * avoid some global flushes..
+ */
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+/*
+ * We only have to do a global TLB flush if the process has run on any
+ * PU other than the current one since the last flush.
+ * If we have threads (mm->count > 1) we always do a global flush,
+ * since the process runs on more than one processor at the same time.
+ */
+
+static inline void flush_tlb_current_task(void)
+{
+ if ((atomic_read(&current->mm->mm_count) != 1) ||
+ (current->mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ current->mm->cpu_vm_mask = (1UL << smp_processor_id());
+ __flush_global_tlb();
+ } else {
+ local_flush_tlb();
+ }
+}
+
+#define flush_tlb() flush_tlb_current_task()
+
+#define flush_tlb_all() __flush_global_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct * mm)
+{
+ if ((atomic_read(&mm->mm_count) != 1) ||
+ (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ mm->cpu_vm_mask = (1UL << smp_processor_id());
+ __flush_global_tlb();
+ } else {
+ local_flush_tlb();
+ }
+}
+
+static inline void flush_tlb_page(struct vm_area_struct * vma,
+ unsigned long va)
+{
+ __flush_tlb_one(vma->vm_mm,va);
+}
+
+static inline void flush_tlb_range(struct mm_struct * mm,
+ unsigned long start, unsigned long end)
+{
+ if ((atomic_read(&mm->mm_count) != 1) ||
+ (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ mm->cpu_vm_mask = (1UL << smp_processor_id());
+ __flush_global_tlb();
+ } else {
+ local_flush_tlb();
+ }
+}
+
+#endif
+
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* S/390 does not keep any page table caches in TLB */
+}
+
+#endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
new file mode 100644
index 000000000..f5f854749
--- /dev/null
+++ b/include/asm-s390/pgtable.h
@@ -0,0 +1,418 @@
+/*
+ * include/asm-s390/pgtable.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner
+ *
+ * Derived from "include/asm-i386/pgtable.h"
+ */
+
+#ifndef _ASM_S390_PGTABLE_H
+#define _ASM_S390_PGTABLE_H
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the S390, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * S390 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the S390 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <linux/tasks.h>
+
+extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
+
+/* Caches aren't brain-dead on S390. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#endif /* !__ASSEMBLY__ */
+
+/* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT 22
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: the S390 is two-level, so
+ * we don't really have any PMD directory physically.
+ * For S390, segment-table entries are combined into one PGD entry,
+ * which leads to 1024 PTEs per pgd entry.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 512
+
+
+/*
+ * pgd entries used up by user/kernel:
+ */
+#define USER_PTRS_PER_PGD 512
+#define USER_PGD_PTRS 512
+#define KERNEL_PGD_PTRS 512
+#define FIRST_USER_PGD_NR 0
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#ifndef __ASSEMBLY__
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END (0x7fffffffL)
+
+
+/*
+ * A page table entry of S390 has the following format:
+ *
+ * | PFRA | | OS |
+ * 0 0IP0
+ * 00000000001111111111222222222233
+ * 01234567890123456789012345678901
+ *
+ * I Page-Invalid Bit: Page is not available for address-translation
+ * P Page-Protection Bit: Store access not possible for page
+ */
+
+/*
+ * A segment table entry of S390 has the following format:
+ *
+ * | P-table origin | |PTL
+ * 0 IC
+ * 00000000001111111111222222222233
+ * 01234567890123456789012345678901
+ *
+ * I Segment-Invalid Bit: Segment is not available for address-translation
+ * C Common-Segment Bit: Segment is not private (PoP 3-30)
+ * PTL Page-Table-Length: Length of Page-table ((PTL+1)*16 entries -> up to 256 entries)
+ */
+
+/*
+ * The segment table origin of S390 has the following format:
+ *
+ * |S-table origin | | STL |
+ * X **GPS
+ * 00000000001111111111222222222233
+ * 01234567890123456789012345678901
+ *
+ * X Space-Switch event:
+ * G Segment-Invalid Bit: *
+ * P Private-Space Bit: Segment is not private (PoP 3-30)
+ * S Storage-Alteration:
+ * STL Segment-Table-Length: Length of Segment-table ((STL+1)*16 entries -> up to 2048 entries)
+ */
+
+#define _PAGE_PRESENT 0x001 /* Software */
+#define _PAGE_ACCESSED 0x002 /* Software accessed */
+#define _PAGE_DIRTY 0x004 /* Software dirty */
+#define _PAGE_RO 0x200 /* HW read-only */
+#define _PAGE_INVALID 0x400 /* HW invalid */
+
+#define _PAGE_TABLE_LEN 0xf /* only full page-tables */
+#define _PAGE_TABLE_COM 0x10 /* common page-table */
+#define _PAGE_TABLE_INV 0x20 /* invalid page-table */
+#define _SEG_PRESENT 0x001 /* Software (overlap with PTL) */
+
+#define _USER_SEG_TABLE_LEN 0x7f /* user-segment-table up to 2 GB */
+#define _KERNEL_SEG_TABLE_LEN 0x7f /* kernel-segment-table up to 2 GB */
+
+/*
+ * User and Kernel pagetables are identical
+ */
+
+#define _PAGE_TABLE (_PAGE_TABLE_LEN )
+#define _KERNPG_TABLE (_PAGE_TABLE_LEN )
+
+/*
+ * The Kernel segment-tables includes the User segment-table
+ */
+
+#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000)
+#define _KERNSEG_TABLE (_KERNEL_SEG_TABLE_LEN)
+/*
+ * No mapping available
+ */
+#define PAGE_NONE __pgprot(_PAGE_INVALID )
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+/*
+ * The S390 can't do page protection for execute, and considers execute the same as read.
+ * Also, write permissions imply read permissions. This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ *
+ * Kernel and user memory accesses are handled identically, so we don't need verify_area
+ */
+#undef TEST_VERIFY_AREA
+
+/* page table for 0-4MB for everybody */
+extern unsigned long pg0[1024];
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR (8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK (~(sizeof(void*)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware! SRB. */
+#define SIZEOF_PTR_LOG2 2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+
+
+/*
+ * CR 7 (SPST) and cr 13 (HPST) are set to the user pgdir.
+ * Kernel is running in its own, disjunct address space,
+ * running in primary address space.
+ * Copy to/from user is done via access register mode with
+ * access registers set to 0 or 1. For that purpose we need to
+ * set up CR 7 with the user pgd.
+ *
+ */
+
+#define SET_PAGE_DIR(tsk,pgdir) \
+do { \
+ unsigned long __pgdir = (__pa(pgdir) & PAGE_MASK ) | _SEGMENT_TABLE; \
+ (tsk)->thread.user_seg = __pgdir; \
+ if ((tsk) == current) { \
+ __asm__ __volatile__("lctl 7,7,%0": :"m" (__pgdir)); \
+ __asm__ __volatile__("lctl 13,13,%0": :"m" (__pgdir)); \
+ } \
+} while (0)
+
+
+extern inline int pte_none(pte_t pte) { return ((pte_val(pte) & (_PAGE_INVALID | _PAGE_RO)) == _PAGE_INVALID); }
+extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
+extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; }
+extern inline int pte_pagenr(pte_t pte) { return ((unsigned long)((pte_val(pte) >> PAGE_SHIFT))); }
+
+extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
+extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
+extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
+extern inline void pmd_clear(pmd_t * pmdp) {
+ pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
+ pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
+ pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
+ pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
+ }
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+extern inline int pgd_none(pgd_t pgd) { return 0; }
+extern inline int pgd_bad(pgd_t pgd) { return 0; }
+extern inline int pgd_present(pgd_t pgd) { return 1; }
+extern inline void pgd_clear(pgd_t * pgdp) { }
+
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RO); }
+extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+
+/* who needs that
+extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); }
+extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; }
+*/
+
+extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RO; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RO ; return pte; }
+
+extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+
+extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) \
+({ pte_t __pte; pte_val(__pte) = __pa(((page)-mem_map)<<PAGE_SHIFT) + pgprot_val(pgprot); __pte; })
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; }
+
+#define page_address(page) \
+({ if (!(page)->virtual) BUG(); (page)->virtual; })
+#define pte_page(x) (mem_map+pte_pagenr(x))
+
+#define pmd_page(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address) pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the second-level page table.. */
+extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+/* Find an entry in the third-level page table.. */
+#define pte_offset(pmd, address) \
+((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
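+
+/*
+ * Illustrative sketch of a software page-table walk using the macros
+ * above (error checking mostly omitted; not part of the original header):
+ *
+ *	pgd_t *pgd = pgd_offset(mm, addr);
+ *	pmd_t *pmd = pmd_offset(pgd, addr);	// folded: same as (pmd_t *) pgd
+ *	pte_t *pte = pte_offset(pmd, addr);
+ *	if (pte_present(*pte))
+ *		page = pte_page(*pte);		// struct page of the mapping
+ */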
+
+
+/* We don't use pmd cache, so these are dummy routines */
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+}
+
+extern void __handle_bad_pmd(pmd_t *pmd);
+extern void __handle_bad_pmd_kernel(pmd_t *pmd);
+
+/*
+ * The S390 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+extern inline void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+}
+
+/*
+ * A page-table entry has only 19 bits for the offset and 7 bits for the type.
+ * If bits 0, 20 or 23 are set, a translation specification exception occurs, and it's
+ * hard to find out the failing address.
+ * Therefore, we zero out these bits.
+ */
+
+#define SWP_TYPE(entry) (((entry).val >> 1) & 0x3f)
+#define SWP_OFFSET(entry) (((entry).val >> 12) & 0x7FFFF )
+#define SWP_ENTRY(type,offset) ((swp_entry_t) { (((type) << 1) | \
+ ((offset) << 12) | \
+ _PAGE_INVALID | _PAGE_RO) \
+ & 0x7ffff6fe })
+
+#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val })
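+
+/*
+ * Worked example (illustrative only): encoding swap type 5 at offset 0x123
+ * and decoding it again:
+ *
+ *	swp_entry_t e = SWP_ENTRY(5, 0x123);
+ *	// SWP_TYPE(e) == 5, SWP_OFFSET(e) == 0x123; the _PAGE_INVALID and
+ *	// _PAGE_RO bits keep the entry from looking like a valid pte.
+ */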
+
+#define module_map vmalloc
+#define module_unmap vfree
+
+#endif /* !__ASSEMBLY__ */
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
+
+#endif /* _ASM_S390_PGTABLE_H */
+
diff --git a/include/asm-s390/poll.h b/include/asm-s390/poll.h
new file mode 100644
index 000000000..011747b53
--- /dev/null
+++ b/include/asm-s390/poll.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-s390/poll.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/poll.h"
+ */
+
+#ifndef __S390_POLL_H
+#define __S390_POLL_H
+
+/* These are specified by iBCS2 */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM 0x0100
+#define POLLWRBAND 0x0200
+#define POLLMSG 0x0400
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif
diff --git a/include/asm-s390/posix_types.h b/include/asm-s390/posix_types.h
new file mode 100644
index 000000000..5db9d438f
--- /dev/null
+++ b/include/asm-s390/posix_types.h
@@ -0,0 +1,76 @@
+/*
+ * include/asm-s390/posix_types.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/posix_types.h"
+ */
+
+#ifndef __ARCH_S390_POSIX_TYPES_H
+#define __ARCH_S390_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+ int val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
+ int __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
+} __kernel_fsid_t;
+
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#ifndef _S390_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#undef __FD_SET
+#define __FD_SET(fd,fdsetp) set_bit(fd,fdsetp)
+
+#undef __FD_CLR
+#define __FD_CLR(fd,fdsetp) clear_bit(fd,fdsetp)
+
+#undef __FD_ISSET
+#define __FD_ISSET(fd,fdsetp) test_bit(fd,fdsetp)
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)*/
+
+#endif
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
new file mode 100644
index 000000000..d55567d94
--- /dev/null
+++ b/include/asm-s390/processor.h
@@ -0,0 +1,186 @@
+/*
+ * include/asm-s390/processor.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/processor.h"
+ * Copyright (C) 1994, Linus Torvalds
+ */
+
+#ifndef __ASM_S390_PROCESSOR_H
+#define __ASM_S390_PROCESSOR_H
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+typedef struct
+{
+ unsigned int version : 8;
+ unsigned int ident : 24;
+ unsigned int machine : 16;
+ unsigned int unused : 16;
+} __attribute__ ((packed)) cpuid_t;
+
+struct cpuinfo_S390
+{
+ cpuid_t cpu_id;
+ __u16 cpu_addr;
+ __u16 cpu_nr;
+ unsigned long loops_per_sec;
+ unsigned long *pgd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+};
+
+extern void print_cpu_info(struct cpuinfo_S390 *);
+
+/* Lazy FPU handling on uni-processor */
+extern struct task_struct *last_task_used_math;
+
+/*
+ * User space process size: 2GB (default).
+ */
+#define TASK_SIZE (0x80000000)
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define THREAD_SIZE (2*PAGE_SIZE)
+
+typedef struct {
+ unsigned long seg;
+ unsigned long acc4;
+} mm_segment_t;
+
+/* if you change the thread_struct structure, you must
+ * update the _TSS_* defines in entry.S
+ */
+
+struct thread_struct
+ {
+
+ struct pt_regs *regs; /* the user registers can be found on*/
+ s390_fp_regs fp_regs;
+ __u32 ar2; /* kernel access register 2 */
+ __u32 ar4; /* kernel access register 4 */
+ __u32 ksp; /* kernel stack pointer */
+ __u32 user_seg; /* HSTD */
+ __u32 error_code; /* error-code of last prog-excep. */
+ __u32 prot_addr; /* address of protection-excep. */
+ __u32 trap_no;
+ /* perform syscall argument validation (get/set_fs) */
+ mm_segment_t fs;
+ per_struct per_info;/* Must be aligned on an 4 byte boundary*/
+};
+
+typedef struct thread_struct thread_struct;
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, \
+VM_READ | VM_WRITE | VM_EXEC, 1, NULL, &init_mm.mmap }
+
+#define INIT_THREAD { (struct pt_regs *) 0, \
+ { 0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
+ {0},{0},{0},{0},{0},{0}}}, \
+ 0, 0, \
+ sizeof(init_stack) + (__u32) &init_stack, \
+ (__pa((__u32) &swapper_pg_dir[0]) + _SEGMENT_TABLE),\
+ 0,0,0, \
+ (mm_segment_t) { 0,1}, \
+ (per_struct) {{{{0,}}},0,0,0,0,{{0,}}} \
+}
+
+/* need to define ... */
+#define start_thread(regs, new_psw, new_stackp) do { \
+ unsigned long *u_stack = new_stackp; \
+ regs->psw.mask = _USER_PSW_MASK; \
+ regs->psw.addr = new_psw | 0x80000000 ; \
+ get_user(regs->gprs[2],u_stack); \
+ get_user(regs->gprs[3],u_stack+1); \
+ get_user(regs->gprs[4],u_stack+2); \
+ regs->gprs[15] = new_stackp ; \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(nr, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+
+
+/*
+ * Return saved PC of a blocked thread. Used in kernel/sched.
+ */
+extern inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return (t->regs) ? ((unsigned long)t->regs->psw.addr) : 0;
+}
+
+unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk) ((tsk)->thread.regs->psw.addr)
+#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
+
+/* Allocation and freeing of basic task resources. */
+/*
+ * NOTE! The task struct and the stack go together
+ */
+#define alloc_task_struct() \
+ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
+#define free_task_struct(p) free_pages((unsigned long)(p),1)
+#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+
+#define init_task (init_task_union.task)
+#define init_stack (init_task_union.stack)
+
+/*
+ * Set of PSW bits that gdb can change on behalf of a process.
+ */
+/* Only let our hackers near the condition codes */
+#define PSW_MASK_DEBUGCHANGE 0x00003000UL
+/* Don't let em near the addressing mode either */
+#define PSW_ADDR_DEBUGCHANGE 0x7FFFFFFFUL
+#define PSW_ADDR_MASK 0x7FFFFFFFUL
+/* Program event recording mask */
+#define PSW_PER_MASK 0x40000000UL
+#define USER_STD_MASK 0x00000080UL
+#define PSW_PROBLEM_STATE 0x00010000UL
+
+/*
+ * Function to drop a processor into disabled wait state
+ */
+
+static inline void disabled_wait(unsigned long code)
+{
+ char psw_buffer[2*sizeof(psw_t)];
+ psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
+ & -sizeof(psw_t));
+
+ dw_psw->mask = 0x000a0000;
+ dw_psw->addr = code;
+ /* load disabled wait psw, the processor is dead afterwards */
+ asm volatile ("lpsw 0(%0)" : : "a" (dw_psw));
+}
+
+#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
new file mode 100644
index 000000000..5986542dd
--- /dev/null
+++ b/include/asm-s390/ptrace.h
@@ -0,0 +1,327 @@
+/*
+ * include/asm-s390/ptrace.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _S390_PTRACE_H
+#define _S390_PTRACE_H
+#include <linux/config.h>
+#include <asm/s390-regs-common.h>
+#include <asm/current.h>
+#include <linux/types.h>
+#include <asm/setup.h>
+#include <linux/stddef.h>
+
+
+#define S390_REGS \
+S390_REGS_COMMON \
+__u32 orig_gpr2;
+
+typedef struct
+{
+ S390_REGS
+} s390_regs;
+
+struct pt_regs
+{
+ S390_REGS
+ __u32 trap;
+};
+
+#if CONFIG_REMOTE_DEBUG
+typedef struct
+{
+ S390_REGS
+ __u32 trap;
+ __u32 crs[16];
+ s390_fp_regs fp_regs;
+} gdb_pt_regs;
+#endif
+
+
+typedef struct
+{
+ __u32 cr[3];
+} per_cr_words __attribute__((packed));
+
+#define PER_EM_MASK 0xE8000000
+typedef struct
+{
+ unsigned em_branching:1;
+ unsigned em_instruction_fetch:1;
+	/* Switching on storage alteration automatically fixes
+	   the storage alteration event bit in the user's std. */
+ unsigned em_storage_alteration:1;
+ unsigned em_gpr_alt_unused:1;
+ unsigned em_store_real_address:1;
+ unsigned :3;
+ unsigned branch_addr_ctl:1;
+ unsigned :1;
+ unsigned storage_alt_space_ctl:1;
+ unsigned :5;
+ unsigned :16;
+ __u32 starting_addr;
+ __u32 ending_addr;
+} per_cr_bits __attribute__((packed));
+
+typedef struct
+{
+ __u16 perc_atmid; /* 0x096 */
+ __u32 address; /* 0x098 */
+ __u8 access_id; /* 0x0a1 */
+} per_lowcore_words __attribute__((packed));
+
+typedef struct
+{
+ unsigned perc_branching:1; /* 0x096 */
+ unsigned perc_instruction_fetch:1;
+ unsigned perc_storage_alteration:1;
+ unsigned perc_gpr_alt_unused:1;
+ unsigned perc_store_real_address:1;
+ unsigned :3;
+ unsigned :1;
+ unsigned atmid:5;
+ unsigned si:2;
+ __u32 address; /* 0x098 */
+ unsigned :4; /* 0x0a1 */
+ unsigned access_id:4;
+} per_lowcore_bits __attribute__((packed));
+
+typedef struct
+{
+ union
+ {
+ per_cr_words words;
+ per_cr_bits bits;
+ } control_regs __attribute__((packed));
+	/* Use these flags instead of setting em_instruction_fetch directly; */
+	/* they are used so that single stepping can be switched on & off    */
+	/* while not affecting other tracing.                                */
+ unsigned single_step:1;
+ unsigned instruction_fetch:1;
+ unsigned :30;
+ /* These addresses are copied into cr10 & cr11 if single stepping
+ is switched off */
+ __u32 starting_addr;
+ __u32 ending_addr;
+ union
+ {
+ per_lowcore_words words;
+ per_lowcore_bits bits;
+ } lowcore;
+} per_struct __attribute__((packed));
+
+
+
+/* This struct defines the way the registers are stored on the
+   stack during a system call. If you change the pt_regs structure,
+   you'll need to change user.h too.
+
+   N.B. if you modify the pt_regs struct, the strace command also has to be
+   modified & recompiled (just wait till we have gdb going).
+
+*/
+
+struct user_regs_struct
+{
+ S390_REGS
+ s390_fp_regs fp_regs;
+/* These PER registers are in here so that gdb can modify them itself
+ * as there is no "official" ptrace interface for hardware watchpoints.
+ * This is the way Intel does it.
+ */
+ per_struct per_info;
+};
+
+typedef struct user_regs_struct user_regs_struct;
+
+typedef struct pt_regs pt_regs;
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((regs)->psw.mask & PSW_PROBLEM_STATE)
+#define instruction_pointer(regs) ((regs)->psw.addr)
+
+struct thread_struct;
+extern int sprintf_regs(int line,char *buff,struct task_struct * task,
+ struct thread_struct *tss,struct pt_regs * regs);
+extern void show_regs(struct task_struct * task,struct thread_struct *tss,
+ struct pt_regs * regs);
+#endif
+
+
+
+
+
+#define FIX_PSW(addr) ((unsigned long)(addr)|0x80000000UL)
+
+#define MULT_PROCPTR_TYPES ((CONFIG_BINFMT_ELF)&&(CONFIG_BINFMT_TOC))
+
+typedef struct
+{
+ long addr;
+ long toc;
+} routine_descriptor;
+extern void fix_routine_descriptor_regs(routine_descriptor *rdes,pt_regs *regs);
+extern __inline__ void
+fix_routine_descriptor_regs(routine_descriptor *rdes,pt_regs *regs)
+{
+ regs->psw.addr=FIX_PSW(rdes->addr);
+ regs->gprs[12]=rdes->toc;
+}
+
+/*
+ * Compiler optimisation should save this stuff from being non-optimal
+ * & remove unnecessary code (isn't gcc great, DJB.)
+ */
+
+/* I'm just using this as an indicator of which binary format we are using
+ * (DJB) N.B. this needs to stay a macro, unfortunately, as otherwise I would be
+ * dereferencing incomplete pointer types with load_toc_binary
+ */
+#if MULT_PROCPTR_TYPES
+#define uses_routine_descriptors() \
+(current->binfmt->load_binary==load_toc_binary)
+#else
+#if CONFIG_BINFMT_TOC
+#define uses_routine_descriptors() 1
+#else
+#define uses_routine_descriptors() 0
+#endif
+#endif
+
+#define pt_off(ptreg) offsetof(user_regs_struct,ptreg)
+enum
+{
+ PT_PSWMASK=pt_off(psw.mask),
+ PT_PSWADDR=pt_off(psw.addr),
+ PT_GPR0=pt_off(gprs[0]),
+ PT_GPR1=pt_off(gprs[1]),
+ PT_GPR2=pt_off(gprs[2]),
+ PT_GPR3=pt_off(gprs[3]),
+ PT_GPR4=pt_off(gprs[4]),
+ PT_GPR5=pt_off(gprs[5]),
+ PT_GPR6=pt_off(gprs[6]),
+ PT_GPR7=pt_off(gprs[7]),
+ PT_GPR8=pt_off(gprs[8]),
+ PT_GPR9=pt_off(gprs[9]),
+ PT_GPR10=pt_off(gprs[10]),
+ PT_GPR11=pt_off(gprs[11]),
+ PT_GPR12=pt_off(gprs[12]),
+ PT_GPR13=pt_off(gprs[13]),
+ PT_GPR14=pt_off(gprs[14]),
+ PT_GPR15=pt_off(gprs[15]),
+ PT_ACR0=pt_off(acrs[0]),
+ PT_ACR1=pt_off(acrs[1]),
+ PT_ACR2=pt_off(acrs[2]),
+ PT_ACR3=pt_off(acrs[3]),
+ PT_ACR4=pt_off(acrs[4]),
+ PT_ACR5=pt_off(acrs[5]),
+ PT_ACR6=pt_off(acrs[6]),
+ PT_ACR7=pt_off(acrs[7]),
+ PT_ACR8=pt_off(acrs[8]),
+ PT_ACR9=pt_off(acrs[9]),
+ PT_ACR10=pt_off(acrs[10]),
+ PT_ACR11=pt_off(acrs[11]),
+ PT_ACR12=pt_off(acrs[12]),
+ PT_ACR13=pt_off(acrs[13]),
+ PT_ACR14=pt_off(acrs[14]),
+ PT_ACR15=pt_off(acrs[15]),
+ PT_ORIGGPR2=pt_off(orig_gpr2),
+ PT_FPC=pt_off(fp_regs.fpc),
+/*
+ * A nasty fact of life is that the ptrace API
+ * only supports passing longs.
+ */
+ PT_FPR0_HI=pt_off(fp_regs.fprs[0].fp.hi),
+ PT_FPR0_LO=pt_off(fp_regs.fprs[0].fp.lo),
+ PT_FPR1_HI=pt_off(fp_regs.fprs[1].fp.hi),
+ PT_FPR1_LO=pt_off(fp_regs.fprs[1].fp.lo),
+ PT_FPR2_HI=pt_off(fp_regs.fprs[2].fp.hi),
+ PT_FPR2_LO=pt_off(fp_regs.fprs[2].fp.lo),
+ PT_FPR3_HI=pt_off(fp_regs.fprs[3].fp.hi),
+ PT_FPR3_LO=pt_off(fp_regs.fprs[3].fp.lo),
+ PT_FPR4_HI=pt_off(fp_regs.fprs[4].fp.hi),
+ PT_FPR4_LO=pt_off(fp_regs.fprs[4].fp.lo),
+ PT_FPR5_HI=pt_off(fp_regs.fprs[5].fp.hi),
+ PT_FPR5_LO=pt_off(fp_regs.fprs[5].fp.lo),
+ PT_FPR6_HI=pt_off(fp_regs.fprs[6].fp.hi),
+ PT_FPR6_LO=pt_off(fp_regs.fprs[6].fp.lo),
+ PT_FPR7_HI=pt_off(fp_regs.fprs[7].fp.hi),
+ PT_FPR7_LO=pt_off(fp_regs.fprs[7].fp.lo),
+ PT_FPR8_HI=pt_off(fp_regs.fprs[8].fp.hi),
+ PT_FPR8_LO=pt_off(fp_regs.fprs[8].fp.lo),
+ PT_FPR9_HI=pt_off(fp_regs.fprs[9].fp.hi),
+ PT_FPR9_LO=pt_off(fp_regs.fprs[9].fp.lo),
+ PT_FPR10_HI=pt_off(fp_regs.fprs[10].fp.hi),
+ PT_FPR10_LO=pt_off(fp_regs.fprs[10].fp.lo),
+ PT_FPR11_HI=pt_off(fp_regs.fprs[11].fp.hi),
+ PT_FPR11_LO=pt_off(fp_regs.fprs[11].fp.lo),
+ PT_FPR12_HI=pt_off(fp_regs.fprs[12].fp.hi),
+ PT_FPR12_LO=pt_off(fp_regs.fprs[12].fp.lo),
+ PT_FPR13_HI=pt_off(fp_regs.fprs[13].fp.hi),
+ PT_FPR13_LO=pt_off(fp_regs.fprs[13].fp.lo),
+ PT_FPR14_HI=pt_off(fp_regs.fprs[14].fp.hi),
+ PT_FPR14_LO=pt_off(fp_regs.fprs[14].fp.lo),
+ PT_FPR15_HI=pt_off(fp_regs.fprs[15].fp.hi),
+ PT_FPR15_LO=pt_off(fp_regs.fprs[15].fp.lo),
+ PT_CR_9=pt_off(per_info.control_regs.words.cr[0]),
+ PT_CR_10=pt_off(per_info.control_regs.words.cr[1]),
+ PT_CR_11=pt_off(per_info.control_regs.words.cr[2]),
+ PT_LASTOFF=PT_CR_11,
+ PT_ENDREGS=offsetof(user_regs_struct,per_info.lowcore.words.perc_atmid)
+};
+
+#define PTRACE_AREA \
+__u32 len; \
+addr_t kernel_addr; \
+addr_t process_addr;
+
+typedef struct
+{
+ PTRACE_AREA
+} ptrace_area;
+
+/*
+ S/390-specific non-POSIX ptrace requests.
+ I chose unusual values so they are unlikely to clash with future ptrace definitions.
+ */
+#define PTRACE_PEEKUSR_AREA 0x5000
+#define PTRACE_POKEUSR_AREA 0x5001
+#define PTRACE_PEEKTEXT_AREA 0x5002
+#define PTRACE_PEEKDATA_AREA 0x5003
+#define PTRACE_POKETEXT_AREA 0x5004
+#define PTRACE_POKEDATA_AREA 0x5005
+/* PT_PROT definition is loosely based on hppa bsd definition in gdb/hppab-nat.c */
+#define PTRACE_PROT 21
+
+typedef enum
+{
+ ptprot_set_access_watchpoint,
+ ptprot_set_write_watchpoint,
+ ptprot_disable_watchpoint
+} ptprot_flags;
+
+typedef struct
+{
+ addr_t lowaddr;
+ addr_t hiaddr;
+ ptprot_flags prot;
+} ptprot_area;
+#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
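
As an aside, not part of the patch: a user-space sketch of how a debugger might read a stopped tracee's general registers through the PTRACE_PEEKUSR_AREA request defined above. The ptrace_area layout, the 0x5000 request value and the PT_GPR0 offset mirror the header; the assumption that the result lands in the tracer's buffer at process_addr, the local re-declarations, and the names peek_gprs/child are illustrative only.

    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Re-declared here so the sketch stands alone; a real build would pull
       these from <asm/ptrace.h> instead. */
    typedef unsigned long addr_t;
    typedef struct {
            unsigned int len;          /* __u32 in the header                */
            addr_t kernel_addr;        /* offset into the ptrace user area   */
            addr_t process_addr;       /* buffer in the tracer (assumption)  */
    } ptrace_area;
    #define PTRACE_PEEKUSR_AREA 0x5000
    #define PT_GPR0 8                  /* gprs[0] follows the 8-byte psw     */

    /* Read all 16 GPRs of the stopped child in a single request. */
    static long peek_gprs(pid_t child, unsigned long gprs[16])
    {
            ptrace_area parea;

            parea.len          = 16 * sizeof(unsigned long);
            parea.kernel_addr  = PT_GPR0;
            parea.process_addr = (addr_t) gprs;
            return ptrace(PTRACE_PEEKUSR_AREA, child, &parea, 0);
    }
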
diff --git a/include/asm-s390/queue.h b/include/asm-s390/queue.h
new file mode 100644
index 000000000..9771d3048
--- /dev/null
+++ b/include/asm-s390/queue.h
@@ -0,0 +1,117 @@
+/*
+ * include/asm-s390/queue.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * A little set of queue utilities.
+ */
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <asm/types.h>
+
+typedef struct queue
+{
+ struct queue *next;
+} queue;
+
+typedef queue list;
+
+typedef struct
+{
+ queue *head;
+ queue *tail;
+} qheader;
+
+static __inline__ void init_queue(qheader *qhead)
+{
+ memset(qhead,0,sizeof(*qhead));
+}
+
+static __inline__ void enqueue_tail(qheader *qhead,queue *member)
+{
+	queue *tail=qhead->tail;
+
+	if(member)
+	{
+		member->next=NULL;
+		if(tail)
+			tail->next=member;
+		else
+			qhead->head=member;
+		qhead->tail=member;
+	}
+}
+
+static __inline__ queue *dequeue_head(qheader *qhead)
+{
+ queue *head=qhead->head,*next_head;
+
+ if(head)
+ {
+ next_head=head->next;
+ qhead->head=next_head;
+ if(!next_head)
+ qhead->tail=NULL;
+ }
+ return(head);
+}
+
+static __inline__ void init_list(list **lhead)
+{
+ *lhead=NULL;
+}
+
+static __inline__ void add_to_list(list **lhead,list *member)
+{
+ member->next=*lhead;
+ *lhead=member;
+}
+
+static __inline__ int is_in_list(list *lhead,list *member)
+{
+ list *curr;
+
+ for(curr=lhead;curr!=NULL;curr=curr->next)
+ if(curr==member)
+ return(TRUE);
+ return(FALSE);
+}
+
+static __inline__ int get_prev(list *lhead,list *member,list **prev)
+{
+ list *curr;
+
+ *prev=NULL;
+ for(curr=lhead;curr!=NULL;curr=curr->next)
+ {
+ if(curr==member)
+ return(TRUE);
+ *prev=curr;
+ }
+ *prev=NULL;
+ return(FALSE);
+}
+
+
+static __inline__ int remove_from_list(list **lhead,list *member)
+{
+ list *prev;
+
+ if(get_prev(*lhead,member,&prev))
+ {
+
+ if(prev)
+ prev->next=member->next;
+ else
+ *lhead=member->next;
+ return(TRUE);
+ }
+ return(FALSE);
+}
+
+
+
+
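
For illustration only (not part of the patch), the intended usage pattern for these helpers: embed the queue link as the first member of your own structure and push requests through a qheader in FIFO order. The names io_request, pending, submit and next_request are made up for the sketch, which assumes the file above is installed as <asm/queue.h>.

    #include <asm/queue.h>

    struct io_request {
            queue link;       /* first member, so the cast below is valid */
            int   opcode;
    };

    static qheader pending;   /* zeroed, the same state init_queue() gives */

    static void submit(struct io_request *req)
    {
            enqueue_tail(&pending, &req->link);
    }

    /* Returns NULL when the queue is empty. */
    static struct io_request *next_request(void)
    {
            return (struct io_request *) dequeue_head(&pending);
    }
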
diff --git a/include/asm-s390/resource.h b/include/asm-s390/resource.h
new file mode 100644
index 000000000..9cf7a4993
--- /dev/null
+++ b/include/asm-s390/resource.h
@@ -0,0 +1,54 @@
+/*
+ * include/asm-s390/resource.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/resources.h"
+ */
+
+#ifndef _S390_RESOURCE_H
+#define _S390_RESOURCE_H
+
+/*
+ * Resource limits
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit */
+
+#define RLIM_NLIMITS 10
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
+#ifdef __KERNEL__
+
+#define INIT_RLIMITS \
+{ \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { _STK_LIM, LONG_MAX }, \
+ { 0, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, \
+ { INR_OPEN, INR_OPEN }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
+
diff --git a/include/asm-s390/s390-gdbregs.h b/include/asm-s390/s390-gdbregs.h
new file mode 100644
index 000000000..af708d51d
--- /dev/null
+++ b/include/asm-s390/s390-gdbregs.h
@@ -0,0 +1,84 @@
+/*
+ * include/asm-s390/s390-gdbregs.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * used both by the linux kernel for remote debugging & gdb
+ */
+
+/* Say how long (ordinary) registers are. This is a piece of bogosity
+ used in push_word and a few other places; REGISTER_RAW_SIZE is the
+ real way to know how big a register is. */
+#ifndef _S390_GDBREGS_H
+#define _S390_GDBREGS_H
+
+#include <asm/s390-regs-common.h>
+#define S390_MAX_INSTR_SIZE 6
+#define NUM_REGS (2+NUM_GPRS+NUM_ACRS+NUM_CRS+1+NUM_FPRS)
+#define FIRST_ACR (2+NUM_GPRS)
+#define LAST_ACR (FIRST_ACR+NUM_ACRS-1)
+#define FIRST_CR (FIRST_ACR+NUM_ACRS)
+#define LAST_CR (FIRST_CR+NUM_CRS-1)
+
+#define PSWM_REGNUM 0
+#define PC_REGNUM 1
+#define GP0_REGNUM 2 /* GPR register 0 */
+#define GP_LAST_REGNUM (GP0_REGNUM+NUM_GPRS-1)
+#define RETADDR_REGNUM (GP0_REGNUM+14) /* Usually return address */
+#define SP_REGNUM (GP0_REGNUM+15) /* Contains address of top of stack */
+#define FP_REGNUM SP_REGNUM /* needed in findvar.c still */
+#define FRAME_REGNUM (GP0_REGNUM+11)
+#define FPC_REGNUM (GP0_REGNUM+NUM_GPRS+NUM_ACRS+NUM_CRS)
+#define FP0_REGNUM (FPC_REGNUM+1) /* FPR (Floating point) register 0 */
+#define FPLAST_REGNUM (FP0_REGNUM+NUM_FPRS-1) /* Last floating point register */
+
+/* The top of this structure is as similar as possible to a pt_regs structure to */
+/* simplify code */
+typedef struct
+{
+ S390_REGS_COMMON
+ __u32 crs[NUM_CRS];
+ s390_fp_regs fp_regs;
+} s390_gdb_regs __attribute__((packed));
+
+#define REGISTER_NAMES \
+{ \
+"pswm","pswa", \
+"gpr0","gpr1","gpr2","gpr3","gpr4","gpr5","gpr6","gpr7", \
+"gpr8","gpr9","gpr10","gpr11","gpr12","gpr13","gpr14","gpr15", \
+"acr0","acr1","acr2","acr3","acr4","acr5","acr6","acr7", \
+"acr8","acr9","acr10","acr11","acr12","acr13","acr14","acr15", \
+"cr0","cr1","cr2","cr3","cr4","cr5","cr6","cr7", \
+"cr8","cr9","cr10","cr11","cr12","cr13","cr14","cr15", \
+"fpc", \
+"fpr0","fpr1","fpr2","fpr3","fpr4","fpr5","fpr6","fpr7", \
+"fpr8","fpr9","fpr10","fpr11","fpr12","fpr13","fpr14","fpr15" \
+}
+
+/* Index within `registers' of the first byte of the space for
+ register N. */
+
+#define FP0_OFFSET ((PSW_MASK_SIZE+PSW_ADDR_SIZE)+ \
+(GPR_SIZE*NUM_GPRS)+(ACR_SIZE*NUM_ACRS)+ \
+(CR_SIZE*NUM_CRS)+(FPC_SIZE+FPC_PAD_SIZE))
+
+#define REGISTER_BYTES \
+((FP0_OFFSET)+(FPR_SIZE*NUM_FPRS))
+
+#define REGISTER_BYTE(N) ((N) < FP0_REGNUM ? (N)*4:(FP0_OFFSET+((N)-FP0_REGNUM)*FPR_SIZE))
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
+
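
Not part of the patch: a standalone arithmetic check of the register-buffer layout the macros above describe, with the size constants copied from s390-regs-common.h (the next file in this patch). It shows that the fixed-size registers occupy the first 208 bytes and the 16 FPRs follow, which is also why the ACR term in FP0_OFFSET has to be a product rather than a sum.

    #include <stdio.h>

    /* Sizes as defined in s390-regs-common.h */
    #define NUM_GPRS 16
    #define GPR_SIZE 4
    #define NUM_ACRS 16
    #define ACR_SIZE 4
    #define NUM_CRS  16
    #define CR_SIZE  4
    #define NUM_FPRS 16
    #define FPR_SIZE 8
    #define PSW_MASK_SIZE 4
    #define PSW_ADDR_SIZE 4
    #define FPC_SIZE 4
    #define FPC_PAD_SIZE 4

    #define FPC_REGNUM (2 + NUM_GPRS + NUM_ACRS + NUM_CRS)    /* register number 50 */
    #define FP0_REGNUM (FPC_REGNUM + 1)                       /* register number 51 */
    #define FP0_OFFSET ((PSW_MASK_SIZE + PSW_ADDR_SIZE) + (GPR_SIZE * NUM_GPRS) + \
                        (ACR_SIZE * NUM_ACRS) + (CR_SIZE * NUM_CRS) + \
                        (FPC_SIZE + FPC_PAD_SIZE))
    #define REGISTER_BYTE(N) \
            ((N) < FP0_REGNUM ? (N) * 4 : (FP0_OFFSET + ((N) - FP0_REGNUM) * FPR_SIZE))

    int main(void)
    {
            printf("FP0_OFFSET     = %d\n", FP0_OFFSET);                        /* 208 */
            printf("REGISTER_BYTES = %d\n", FP0_OFFSET + FPR_SIZE * NUM_FPRS);  /* 336 */
            printf("gpr0 at byte %d, fpc at byte %d, fpr0 at byte %d\n",
                   REGISTER_BYTE(2), REGISTER_BYTE(FPC_REGNUM), REGISTER_BYTE(FP0_REGNUM));
            return 0;
    }
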
diff --git a/include/asm-s390/s390-regs-common.h b/include/asm-s390/s390-regs-common.h
new file mode 100644
index 000000000..aa349a69d
--- /dev/null
+++ b/include/asm-s390/s390-regs-common.h
@@ -0,0 +1,104 @@
+/*
+ * include/asm-s390/s390-regs-common.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * This file is designed to keep as much compatibility between
+ * gdb's representation of registers & the kernel's representation of registers
+ * as possible, so as to minimise translation between gdb registers &
+ * kernel registers. Please keep this matched with gdb & strace.
+ */
+
+#ifndef _S390_REGS_COMMON_H
+#define _S390_REGS_COMMON_H
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#endif
+
+#define REGISTER_SIZE 4
+#define NUM_GPRS 16
+#define GPR_SIZE 4
+#define PSW_MASK_SIZE 4
+#define PSW_ADDR_SIZE 4
+#define NUM_FPRS 16
+#define FPR_SIZE 8
+#define FPC_SIZE 4
+#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
+#define NUM_CRS 16
+#define CR_SIZE 4
+#define NUM_ACRS 16
+#define ACR_SIZE 4
+
+#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
+
+#ifndef __ASSEMBLY__
+/* This typedef defines what a Program Status Word looks like */
+typedef struct
+{
+ __u32 mask;
+ __u32 addr;
+} psw_t __attribute__ ((aligned(8)));
+
+typedef __u32 gpr_t;
+
+/* Two __u32's are used for floats instead, so that this compiles with __STRICT_ANSI__ defined */
+typedef union
+{
+#ifdef __KERNEL__
+ __u64 d; /* mathemu.h gets upset otherwise */
+#else
+ double d; /* ANSI C doesn't like long longs; make sure that */
+ /* alignments are identical for both compiles */
+#endif
+ struct
+ {
+ __u32 hi;
+ __u32 lo;
+ } fp;
+ __u32 f;
+} freg_t;
+
+typedef struct
+{
+/*
+   The compiler appears to like aligning freg_t on an 8 byte boundary,
+   so I always access fpregs; this was causing fun when I was doing
+   coercions.
+ */
+ __u32 fpc;
+ freg_t fprs[NUM_FPRS];
+} s390_fp_regs;
+
+/*
+   gdb structures & the kernel always have this much in common
+ */
+#define S390_REGS_COMMON \
+psw_t psw; \
+__u32 gprs[NUM_GPRS]; \
+__u32 acrs[NUM_ACRS]; \
+
+typedef struct
+{
+ S390_REGS_COMMON
+} s390_regs_common;
+
+
+/* Sequence of bytes for breakpoint illegal instruction. */
+#define S390_BREAKPOINT {0x0,0x1}
+#define S390_BREAKPOINT_U16 ((__u16)0x0001)
+#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
+#define S390_SYSCALL_SIZE 2
+#define ADDR_BITS_REMOVE(addr) ((addr)&0x7fffffff)
+#endif
+#endif
+
+
+
+
+
+
+
+
+
diff --git a/include/asm-s390/s390dyn.h b/include/asm-s390/s390dyn.h
new file mode 100644
index 000000000..960a81b43
--- /dev/null
+++ b/include/asm-s390/s390dyn.h
@@ -0,0 +1,60 @@
+/*
+ * arch/s390/kernel/s390dyn.h
+ * S/390 data definitions for dynamic device attachment
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ */
+
+#ifndef __s390dyn_h
+#define __s390dyn_h
+
+struct _devreg;
+
+typedef int (* oper_handler_func_t)( int irq,
+ struct _devreg *dreg);
+typedef void (* io_handler_func_t) ( int irq,
+ __u32 intparm );
+typedef void ( * not_oper_handler_func_t)( int irq,
+ int status );
+
+typedef struct _devreg {
+ union {
+ struct _hc {
+ __u16 ctype;
+ __u8 cmode;
+ __u16 dtype;
+ __u8 dmode;
+ } hc; /* has controller info */
+
+ struct _hnc {
+ __u16 dtype;
+ __u8 dmode;
+ __u16 res1;
+ __u8 res2;
+ } hnc; /* has no controller info */
+ } ci;
+
+ int flag;
+ oper_handler_func_t oper_func;
+ struct _devreg *prev;
+ struct _devreg *next;
+} devreg_t;
+
+#define DEVREG_EXACT_MATCH 0x00000001
+#define DEVREG_MATCH_DEV_TYPE 0x00000002
+#define DEVREG_MATCH_CU_TYPE 0x00000004
+#define DEVREG_NO_CU_INFO 0x00000008
+
+
+int s390_device_register ( devreg_t *drinfo );
+int s390_device_deregister ( devreg_t *dreg );
+int s390_request_irq_special( int irq,
+ io_handler_func_t io_handler,
+ not_oper_handler_func_t not_oper_handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+
+#endif /* __s390dyn_h */
diff --git a/include/asm-s390/s390io.h b/include/asm-s390/s390io.h
new file mode 100644
index 000000000..8ba9c11db
--- /dev/null
+++ b/include/asm-s390/s390io.h
@@ -0,0 +1,85 @@
+/*
+ * arch/s390/kernel/s390io.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ */
+
+#ifndef __s390io_h
+#define __s390io_h
+
+/*
+ * IRQ data structure used by I/O subroutines
+ *
+ * Note : If bit flags are added, the "unused" value must be
+ * decremented accordingly !
+ */
+typedef struct _ioinfo {
+ unsigned int irq; /* aka. subchannel number */
+ spinlock_t irq_lock; /* irq lock */
+
+ struct _ioinfo *prev;
+ struct _ioinfo *next;
+
+ union {
+ unsigned int info;
+ struct {
+ unsigned int busy : 1; /* device currently in use */
+ unsigned int oper : 1; /* device is operational */
+ unsigned int fast : 1; /* post with "channel end", ... */
+ /* ... don't wait for "device end" */
+ /* ... from do_IO() parameters */
+ unsigned int ready : 1; /* interrupt handler registered */
+ unsigned int haltio : 1; /* halt_IO in process */
+ unsigned int doio : 1; /* do_IO in process */
+ unsigned int doio_q : 1; /* do_IO queued - only possible ... */
+ /* ... if 'fast' is set too */
+ unsigned int w4final : 1; /* wait for final status, internally */
+ /* ... used with 'fast' setting only */
+ unsigned int repall : 1; /* report every interrupt status */
+ unsigned int unready : 1; /* deregister irq handler in process */
+ unsigned int d_disable : 1; /* delayed disabling required */
+ unsigned int w4sense : 1; /* SENSE status pending */
+ unsigned int syncio : 1; /* synchronous I/O requested */
+ unsigned int consns : 1; /* concurrent sense is available */
+ unsigned int delsense : 1; /* delayed SENSE required */
+ unsigned int s_pend : 1; /* status pending condition */
+ unsigned int pgid : 1; /* "path group ID" is valid */
+ unsigned int pgid_supp : 1; /* "path group ID" command is supported */
+ unsigned int unused : (sizeof(unsigned int)*8 - 18); /* unused */
+ } __attribute__ ((packed)) flags;
+ } ui;
+
+ unsigned long u_intparm; /* user interruption parameter */
+ senseid_t senseid; /* SenseID info */
+ irq_desc_t irq_desc; /* irq descriptor */
+ __u8 ulpm; /* logical path mask used for I/O */
+ __u8 opm; /* path mask of operational paths */
+ pgid_t pgid; /* path group ID */
+ schib_t schib; /* subchannel information block */
+ orb_t orb; /* operation request block */
+ devstat_t devstat; /* device status */
+ ccw1_t *qcpa; /* queued channel program */
+ ccw1_t senseccw; /* ccw for sense command */
+ unsigned int stctl; /* accumulated status control from irb */
+ unsigned long qintparm; /* queued interruption parameter */
+ unsigned long qflag; /* queued flags */
+ unsigned char qlpm; /* queued logical path mask */
+
+ } __attribute__ ((aligned(8))) ioinfo_t;
+
+#define IOINFO_FLAGS_BUSY 0x80000000
+#define IOINFO_FLAGS_OPER 0x40000000
+#define IOINFO_FLAGS_FAST 0x20000000
+#define IOINFO_FLAGS_READY 0x10000000
+#define IOINFO_FLAGS_HALTIO 0x08000000
+#define IOINFO_FLAGS_DOIO 0x04000000
+#define IOINFO_FLAGS_DOIO_Q 0x02000000
+#define IOINFO_FLAGS_W4FINAL 0x01000000
+#define IOINFO_FLAGS_REPALL 0x00800000
+
+extern ioinfo_t *ioinfo[];
+
+#endif /* __s390io_h */
+
diff --git a/include/asm-s390/s390mach.h b/include/asm-s390/s390mach.h
new file mode 100644
index 000000000..56349777a
--- /dev/null
+++ b/include/asm-s390/s390mach.h
@@ -0,0 +1,65 @@
+/*
+ * arch/s390/kernel/s390mach.h
+ * S/390 data definitions for machine check processing
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ */
+
+#ifndef __s390mach_h
+#define __s390mach_h
+
+#include <asm/types.h>
+
+//
+// machine-check-interruption code
+//
+typedef struct _mcic {
+ __u32 to_be_defined_1 : 9;
+ __u32 cp : 1; /* channel-report pending */
+ __u32 to_be_defined_2 : 22;
+ __u32 to_be_defined_3;
+} __attribute__ ((packed)) mcic_t;
+
+//
+// Channel Report Word
+//
+typedef struct _crw {
+ __u32 res1 : 1; /* reserved zero */
+ __u32 slct : 1; /* solicited */
+ __u32 oflw : 1; /* overflow */
+ __u32 chn : 1; /* chained */
+ __u32 rsc : 4; /* reporting source code */
+ __u32 anc : 1; /* ancillary report */
+ __u32 res2 : 1; /* reserved zero */
+ __u32 erc : 6; /* error-recovery code */
+ __u32 rsid : 16; /* reporting-source ID */
+} __attribute__ ((packed)) crw_t;
+
+//
+// CRW Entry
+//
+typedef struct _crwe {
+ crw_t crw;
+ crw_t *crw_next;
+} __attribute__ ((packed)) crwe_t;
+
+typedef struct _mchchk_queue_element {
+ spinlock_t lock;
+ unsigned int status;
+ mcic_t mcic;
+ crwe_t *crwe; /* CRW if applicable */
+ struct _mchchk_queue_element *next;
+ struct _mchchk_queue_element *prev;
+} mchchk_queue_element_t;
+
+#define MCHCHK_STATUS_TO_PROCESS 0x00000001
+#define MCHCHK_STATUS_IN_PROGRESS 0x00000002
+#define MCHCHK_STATUS_WAITING 0x00000004
+
+void s390_init_machine_check ( void );
+void __init s390_do_machine_check ( void );
+void __init s390_machine_check_handler( struct semaphore * );
+
+#endif /* __s390mach_h */
diff --git a/include/asm-s390/segment.h b/include/asm-s390/segment.h
new file mode 100644
index 000000000..8bfce3475
--- /dev/null
+++ b/include/asm-s390/segment.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#endif
diff --git a/include/asm-s390/semaphore-helper.h b/include/asm-s390/semaphore-helper.h
new file mode 100644
index 000000000..d822e9a80
--- /dev/null
+++ b/include/asm-s390/semaphore-helper.h
@@ -0,0 +1,100 @@
+/*
+ * include/asm-s390/semaphore-helper.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/semaphore-helper.h"
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999 Andrea Arcangeli
+ */
+
+#ifndef _S390_SEMAPHORE_HELPER_H
+#define _S390_SEMAPHORE_HELPER_H
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * but on the x86 we need an external synchronizer.
+ */
+static inline void wake_one_more(struct semaphore * sem)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ sem->waking++;
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking > 0) {
+ sem->waking--;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ * 1 got the lock
+ * 0 go to sleep
+ * -EINTR interrupted
+ *
+ * If we give up we must undo the count-decrease we previously did in down().
+ * Subtle: up() can continue to happen and increase the semaphore count
+ * even during our critical section protected by the spinlock. So
+ * we must remember to undo the sem->waking that will be run from
+ * wake_one_more() some time soon, if the semaphore count becomes > 0.
+ */
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking > 0) {
+ sem->waking--;
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ if (atomic_inc_and_test_greater_zero(&sem->count))
+ sem->waking--;
+ ret = -EINTR;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ * 1 failed to lock
+ * 0 got the lock
+ *
+ * Implementation details are the same of the interruptible case.
+ */
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking <= 0)
+ {
+ if (atomic_inc_and_test_greater_zero(&sem->count))
+ sem->waking--;
+ } else {
+ sem->waking--;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+#endif
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
new file mode 100644
index 000000000..fc903d772
--- /dev/null
+++ b/include/asm-s390/semaphore.h
@@ -0,0 +1,191 @@
+/*
+ * include/asm-s390/semaphore.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/semaphore.h"
+ * (C) Copyright 1996 Linus Torvalds
+ */
+
+#ifndef _S390_SEMAPHORE_H
+#define _S390_SEMAPHORE_H
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+
+struct semaphore {
+ atomic_t count;
+ int sleepers;
+ wait_queue_head_t wait;
+};
+
+#define __SEM_DEBUG_INIT(name)
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+extern inline void sema_init (struct semaphore *sem, int val)
+{
+ *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_interruptible(void /* params in registers */);
+asmlinkage int __down_failed_trylock(void /* params in registers */);
+asmlinkage void __up_wakeup(void /* special register calling convention */);
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern inline void down(struct semaphore * sem)
+{
+ if (atomic_dec_return(&sem->count) < 0)
+ __down(sem);
+}
+
+extern inline int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+extern inline int down_trylock(struct semaphore * sem)
+{
+ int ret = 0;
+
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_trylock(sem);
+ return ret;
+}
+
+extern inline void up(struct semaphore * sem)
+{
+ if (atomic_inc_return(&sem->count) <= 0)
+ __up(sem);
+}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * The lock is initialized to BIAS. This way, a writer
+ * subtracts BIAS and gets 0 for the case of an uncontended
+ * lock. Readers decrement by 1 and see a positive value
+ * when uncontended, negative if there are writers waiting
+ * (in which case it goes to sleep).
+ *
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that subtracting
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ *
+ * In terms of fairness, this should result in the lock
+ * flopping back and forth between readers and writers
+ * under heavy use.
+ *
+ * -ben
+ */
+struct rw_semaphore {
+ atomic_t count;
+ volatile unsigned int write_bias_granted;
+ volatile unsigned int read_bias_granted;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+};
+
+#define RW_LOCK_BIAS 0x01000000
+
+#define __RWSEM_DEBUG_INIT /* */
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ atomic_set(&sem->count, RW_LOCK_BIAS);
+ sem->read_bias_granted = 0;
+ sem->write_bias_granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+}
+
+extern void __down_read_failed(int, struct rw_semaphore *);
+extern void __down_write_failed(int, struct rw_semaphore *);
+extern void __rwsem_wake(int, struct rw_semaphore *);
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ int count;
+ count = atomic_dec_return(&sem->count);
+ if (count < 0)
+ __down_read_failed(count, sem);
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ int count;
+ count = atomic_add_return (-RW_LOCK_BIAS, &sem->count);
+ if (count < 0)
+ __down_write_failed(count, sem);
+}
+
+/* When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void up_read(struct rw_semaphore *sem)
+{
+ int count;
+ count = atomic_inc_return(&sem->count);
+ if (count == 0)
+ __rwsem_wake(count, sem);
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void up_write(struct rw_semaphore *sem)
+{
+ int count;
+ count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+ if (count >= 0 && count < RW_LOCK_BIAS)
+ __rwsem_wake(count, sem);
+}
+
+#endif
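
Not part of the patch: a tiny standalone program that replays the counter bookkeeping described in the rw_semaphore comment above, so the effect of RW_LOCK_BIAS is easy to see. It only does the arithmetic; none of the real locking or wait-queue handling appears here.

    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000   /* as in the header above */

    int main(void)
    {
            int count = RW_LOCK_BIAS;

            count -= 1;                        /* down_read(), uncontended */
            printf("reader sees %d (> 0, proceeds)\n", count);
            count += 1;                        /* up_read() */

            count -= RW_LOCK_BIAS;             /* down_write(), uncontended */
            printf("writer sees %d (== 0, proceeds)\n", count);

            count -= 1;                        /* a reader arriving now */
            printf("late reader sees %d (< 0, goes to sleep)\n", count);
            return 0;
    }
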
diff --git a/include/asm-s390/sembuf.h b/include/asm-s390/sembuf.h
new file mode 100644
index 000000000..15c9e25ff
--- /dev/null
+++ b/include/asm-s390/sembuf.h
@@ -0,0 +1,25 @@
+#ifndef _S390_SEMBUF_H
+#define _S390_SEMBUF_H
+
+/*
+ * The semid64_ds structure for S/390 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _S390_SEMBUF_H */
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
new file mode 100644
index 000000000..6a9449a52
--- /dev/null
+++ b/include/asm-s390/setup.h
@@ -0,0 +1,52 @@
+/*
+ * include/asm-s390/setup.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#ifndef _ASM_S390_SETUP_H
+#define _ASM_S390_SETUP_H
+
+#define PARMAREA 0x10400
+
+#ifndef __ASSEMBLER__
+
+#define ORIG_ROOT_DEV (*(unsigned long *) (0x10400))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (0x10404))
+#define MEMORY_SIZE (*(unsigned long *) (0x10406))
+#define MACHINE_FLAGS (*(unsigned long *) (0x1040a))
+#define INITRD_START (*(unsigned long *) (0x1040e))
+#define INITRD_SIZE (*(unsigned long *) (0x10412))
+#define RAMDISK_FLAGS (*(unsigned short *) (0x10416))
+#define COMMAND_LINE ((char *) (0x10480))
+
+#else
+
+#define ORIG_ROOT_DEV 0x10400
+#define MOUNT_ROOT_RDONLY 0x10404
+#define MEMORY_SIZE 0x10406
+#define MACHINE_FLAGS 0x1040a
+#define INITRD_START 0x1040e
+#define INITRD_SIZE 0x10412
+#define RAMDISK_FLAGS 0x10416
+#define COMMAND_LINE 0x10480
+
+#endif
+
+#define COMMAND_LINE_SIZE 896
+/*
+ * Machine features detected in head.S
+ */
+#define MACHINE_IS_VM (MACHINE_FLAGS & 1)
+#define MACHINE_HAS_IEEE (MACHINE_FLAGS & 2)
+#define MACHINE_IS_P390 (MACHINE_FLAGS & 4)
+
+#define RAMDISK_ORIGIN 0x800000
+#define RAMDISK_BLKSIZE 0x1000
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+
+#endif
diff --git a/include/asm-s390/shmbuf.h b/include/asm-s390/shmbuf.h
new file mode 100644
index 000000000..aae94fd5b
--- /dev/null
+++ b/include/asm-s390/shmbuf.h
@@ -0,0 +1,42 @@
+#ifndef _S390_SHMBUF_H
+#define _S390_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for S/390 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _S390_SHMBUF_H */
diff --git a/include/asm-s390/shmparam.h b/include/asm-s390/shmparam.h
new file mode 100644
index 000000000..c2e0c0508
--- /dev/null
+++ b/include/asm-s390/shmparam.h
@@ -0,0 +1,13 @@
+/*
+ * include/asm-s390/shmparam.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/shmparam.h"
+ */
+#ifndef _ASM_S390_SHMPARAM_H
+#define _ASM_S390_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_S390_SHMPARAM_H */
diff --git a/include/asm-s390/sigcontext.h b/include/asm-s390/sigcontext.h
new file mode 100644
index 000000000..610f20e26
--- /dev/null
+++ b/include/asm-s390/sigcontext.h
@@ -0,0 +1,36 @@
+/*
+ * include/asm-s390/sigcontext.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#ifndef _ASM_S390_SIGCONTEXT_H
+#define _ASM_S390_SIGCONTEXT_H
+#include <asm/s390-regs-common.h>
+
+/*
+ Has to be at least _NSIG_WORDS from asm/signal.h
+ */
+#define _SIGCONTEXT_NSIG 64
+#define _SIGCONTEXT_NSIG_BPW 32
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE STACK_FRAME_OVERHEAD
+#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
+#define SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
+
+typedef struct
+{
+ s390_regs_common regs;
+ s390_fp_regs fpregs;
+} sigregs;
+
+struct sigcontext
+{
+ unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
+ sigregs *sregs;
+};
+
+
+#endif
+
diff --git a/include/asm-s390/siginfo.h b/include/asm-s390/siginfo.h
new file mode 100644
index 000000000..1efd4e1c3
--- /dev/null
+++ b/include/asm-s390/siginfo.h
@@ -0,0 +1,205 @@
+/*
+ * include/asm-s390/siginfo.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/siginfo.h"
+ */
+
+#ifndef _S390_SIGINFO_H
+#define _S390_SIGINFO_H
+
+#include <linux/types.h>
+
+/* XXX: This structure was copied from the Alpha; is there an iBCS version? */
+
+typedef union sigval {
+ int sival_int;
+ void *sival_ptr;
+} sigval_t;
+
+#define SI_MAX_SIZE 128
+#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ unsigned int _timer1;
+ unsigned int _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void *_addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+
+/*
+ * si_code values
+ * Digital reserves positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER -2 /* sent by timer expiration */
+#define SI_MESGQ -3 /* sent by real time mesq state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+
+#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
+#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC 1 /* illegal opcode */
+#define ILL_ILLOPN 2 /* illegal operand */
+#define ILL_ILLADR 3 /* illegal addressing mode */
+#define ILL_ILLTRP 4 /* illegal trap */
+#define ILL_PRVOPC 5 /* privileged opcode */
+#define ILL_PRVREG 6 /* privileged register */
+#define ILL_COPROC 7 /* coprocessor error */
+#define ILL_BADSTK 8 /* internal stack error */
+#define NSIGILL 8
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV 1 /* integer divide by zero */
+#define FPE_INTOVF 2 /* integer overflow */
+#define FPE_FLTDIV 3 /* floating point divide by zero */
+#define FPE_FLTOVF 4 /* floating point overflow */
+#define FPE_FLTUND 5 /* floating point underflow */
+#define FPE_FLTRES 6 /* floating point inexact result */
+#define FPE_FLTINV 7 /* floating point invalid operation */
+#define FPE_FLTSUB 8 /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR 1 /* address not mapped to object */
+#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN 1 /* invalid address alignment */
+#define BUS_ADRERR 2 /* non-existent physical address */
+#define BUS_OBJERR 3 /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT 1 /* process breakpoint */
+#define TRAP_TRACE 2 /* process trace trap */
+#define NSIGTRAP 2
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED 1 /* child has exited */
+#define CLD_KILLED 2 /* child was killed */
+#define CLD_DUMPED 3 /* child terminated abnormally */
+#define CLD_TRAPPED 4 /* traced child has trapped */
+#define CLD_STOPPED 5 /* child has stopped */
+#define CLD_CONTINUED 6 /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN 1 /* data input available */
+#define POLL_OUT 2 /* output buffers available */
+#define POLL_MSG 3 /* input message available */
+#define POLL_ERR 4 /* i/o error */
+#define POLL_PRI 5 /* high priority input available */
+#define POLL_HUP 6 /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+
+#define SIGEV_MAX_SIZE 64
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+
+#endif
diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h
new file mode 100644
index 000000000..e849415cc
--- /dev/null
+++ b/include/asm-s390/signal.h
@@ -0,0 +1,185 @@
+/*
+ * include/asm-s390/signal.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/signal.h"
+ */
+
+#ifndef _ASMS390_SIGNAL_H
+#define _ASMS390_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifdef __KERNEL__
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+#include <asm/sigcontext.h>
+#define _NSIG _SIGCONTEXT_NSIG
+#define _NSIG_BPW _SIGCONTEXT_NSIG_BPW
+#define _NSIG_WORDS _SIGCONTEXT_NSIG_WORDS
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_SHIRQ 0x04000000
+#endif
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+#endif
diff --git a/include/asm-s390/sigp.h b/include/asm-s390/sigp.h
new file mode 100644
index 000000000..5154a34a5
--- /dev/null
+++ b/include/asm-s390/sigp.h
@@ -0,0 +1,254 @@
+/*
+ * include/asm-s390/sigp.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * sigp.h by D.J. Barrow (c) IBM 1999
+ * contains routines / structures for signalling other S/390 processors in an
+ * SMP configuration.
+ */
+
+#ifndef __SIGP__
+#define __SIGP__
+
+#include <asm/ptrace.h>
+#include <asm/misc390.h>
+#include <asm/atomic.h>
+
+/* get real cpu address from logical cpu number */
+extern volatile int __cpu_logical_map[];
+
+typedef enum
+{
+ sigp_unassigned=0x0,
+ sigp_sense,
+ sigp_external_call,
+ sigp_emergency_signal,
+ sigp_start,
+ sigp_stop,
+ sigp_restart,
+ sigp_unassigned1,
+ sigp_unassigned2,
+ sigp_stop_and_store_status,
+ sigp_unassigned3,
+ sigp_initial_cpu_reset,
+ sigp_cpu_reset,
+ sigp_set_prefix,
+ sigp_store_status_at_address,
+ sigp_store_extended_status_at_address
+} sigp_order_code;
+
+#if 0
+/*
+ * these definitions are not used at the moment, but we might need
+ * them in future.
+ */
+typedef struct
+{
+ __u64 cpu_timer;
+ psw_t current_psw;
+ __u32 prefix;
+ __u32 access_regs[16];
+ __u64 float_regs[4];
+ __u32 gpr_regs[16];
+ __u32 control_regs[16];
+} sigp_status __attribute__((packed));
+
+typedef struct
+{
+ __u8 unused1[216];
+ __u64 cpu_timer;
+ psw_t current_psw;
+ __u32 prefix;
+ __u32 access_regs[16];
+ __u64 float_regs[4];
+ __u32 gpr_regs[16];
+ __u32 control_regs[16];
+} sigp_status_512 __attribute__((packed));
+
+typedef struct
+{
+ __u32 extended_save_area_address;
+ __u64 cpu_timer;
+ psw_t current_psw;
+ __u32 prefix;
+ __u32 access_regs[16];
+ __u64 float_regs[4];
+ __u32 gpr_regs[16];
+ __u32 control_regs[16];
+} sigp_extended_status __attribute__((packed));
+
+typedef struct
+{
+ __u8 unused1[212];
+ __u32 extended_save_area_address;
+ __u64 cpu_timer;
+ psw_t current_psw;
+ __u32 prefix;
+ __u32 access_regs[16];
+ __u64 float_regs[4];
+ __u32 gpr_regs[16];
+ __u32 control_regs[16];
+} sigp_extended_status_512 __attribute__((packed));
+
+typedef struct
+{
+ __u64 bfp_float_regs[16];
+ __u32 bfp_float_control_reg;
+ __u8 reserved[12];
+} sigp_extended_save_area __attribute__ ((packed));
+
+typedef struct
+{
+ unsigned equipment_check:1;
+ unsigned unassigned1:20;
+ unsigned incorrect_state:1;
+ unsigned invalid_parameter:1;
+ unsigned external_call_pending:1;
+ unsigned stopped:1;
+ unsigned operator_intervening:1;
+ unsigned check_stop:1;
+ unsigned unassigned2:1;
+ unsigned inoperative:1;
+ unsigned invalid_order:1;
+ unsigned receiver_check:1;
+} sigp_status_bits __attribute__((packed));
+#endif
+
+typedef __u32 sigp_status_word;
+
+typedef enum
+{
+ sigp_order_code_accepted=0,
+ sigp_status_stored,
+ sigp_busy,
+ sigp_not_operational
+} sigp_ccode;
+
+
+/*
+ * Definitions for the external call
+ */
+
+/* 'Bit' signals, asynchronous */
+typedef enum
+{
+ ec_schedule=0,
+ ec_restart,
+ ec_halt,
+ ec_power_off,
+ ec_bit_last
+} ec_bit_sig;
+
+/* Signals which come with a parameter area, synchronous */
+typedef enum
+{
+ ec_set_ctl,
+ ec_get_ctl,
+ ec_set_ctl_masked,
+ ec_cmd_last
+} ec_cmd_sig;
+
+/* state information for synchronous signals */
+typedef enum
+{
+ ec_pending,
+ ec_executing,
+ ec_done
+} ec_state;
+
+/* header for the queuing of signals with a parameter area */
+typedef struct ec_ext_call
+{
+ ec_cmd_sig cmd;
+ atomic_t status;
+ struct ec_ext_call *next;
+ void *parms;
+} ec_ext_call;
+
+/* parameter area for the ec_set_ctl and ec_get_ctl signal */
+typedef struct
+{
+ __u16 start_ctl;
+ __u16 end_ctl;
+ __u32 cregs[16];
+} ec_creg_parms;
+
+/* parameter area for the ec_set_ctl_masked signal */
+typedef struct
+{
+ __u16 start_ctl;
+ __u16 end_ctl;
+ __u32 orvals[16];
+ __u32 andvals[16];
+} ec_creg_mask_parms;
+
+/*
+ * Signal processor
+ */
+extern __inline__ sigp_ccode
+signal_processor(__u16 cpu_addr, sigp_order_code order_code)
+{
+ sigp_ccode ccode;
+
+ __asm__ __volatile__(
+ " sr 1,1\n" /* parameter=0 in gpr 1 */
+ " sigp 1,%1,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode)
+ : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
+ : "cc" , "memory", "1" );
+ return ccode;
+}
+
+/*
+ * Signal processor with parameter
+ */
+extern __inline__ sigp_ccode
+signal_processor_p(__u32 parameter,__u16 cpu_addr,sigp_order_code order_code)
+{
+ sigp_ccode ccode;
+
+ __asm__ __volatile__(
+ " lr 1,%1\n" /* parameter in gpr 1 */
+ " sigp 1,%2,0(%3)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode)
+ : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
+ "a" (order_code)
+ : "cc" , "memory", "1" );
+ return ccode;
+}
+
+/*
+ * Signal processor with parameter and return status
+ */
+extern __inline__ sigp_ccode
+signal_processor_ps(__u32 *statusptr, __u32 parameter,
+ __u16 cpu_addr, sigp_order_code order_code)
+{
+ sigp_ccode ccode;
+
+ __asm__ __volatile__(
+ " sr 2,2\n" /* clear status so it doesn't contain rubbish if not saved. */
+ " lr 3,%2\n" /* parameter in gpr 3 */
+ " sigp 2,%3,0(%4)\n"
+ " st 2,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "=m" (*statusptr)
+ : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
+ "a" (order_code)
+ : "cc" , "memory", "2" , "3"
+ );
+ return ccode;
+}
+
+#endif /* __SIGP__ */
+
+
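
Not part of the patch: a kernel-context sketch of how the sigp_sense order might be used to probe whether a CPU address responds at all. The helper name cpu_is_operational and the interpretation of each condition code are illustrative assumptions; only signal_processor() and the sigp_ccode values come from the header above.

    #include <asm/sigp.h>

    /* Probe a logical CPU address; non-zero means it answered the sense order. */
    static int cpu_is_operational(__u16 cpu_addr)
    {
            switch (signal_processor(cpu_addr, sigp_sense)) {
            case sigp_order_code_accepted:
            case sigp_status_stored:
                    return 1;        /* CPU is there (status may describe its state) */
            case sigp_busy:
                    return 1;        /* there, but busy -- caller may retry */
            case sigp_not_operational:
            default:
                    return 0;
            }
    }
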
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
new file mode 100644
index 000000000..d7f246bb7
--- /dev/null
+++ b/include/asm-s390/smp.h
@@ -0,0 +1,79 @@
+/*
+ * include/asm-s390/smp.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+#include <linux/config.h>
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+#include <asm/lowcore.h>
+#include <linux/tasks.h> // FOR NR_CPUS definition only.
+#include <linux/kernel.h> // FOR FASTCALL definition
+
+#define smp_processor_id() (current->processor)
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+/*
+ * This magic constant controls our willingness to transfer
+ * a process across CPUs. Such a transfer incurs misses on the L1
+ * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
+ * gut feeling is this will vary by board in value. For a board
+ * with separate L2 cache it probably depends also on the RSS, and
+ * for a board with shared L2 cache it ought to decay fast as other
+ * processes are run.
+ */
+
+#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
+
+extern unsigned long ipi_count;
+extern void count_cpus(void);
+
+extern __inline__ int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+extern __inline__ int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+extern __inline__ __u16 hard_smp_processor_id(void)
+{
+ __u16 cpu_address;
+
+ __asm__ ("stap %0\n" : "=m" (cpu_address));
+ return cpu_address;
+}
+
+#define cpu_logical_map(cpu) (cpu)
+
+void smp_local_timer_interrupt(struct pt_regs * regs);
+
+/*
+ s390 specific smp.c headers
+ */
+typedef struct
+{
+ int intresting;
+ sigp_ccode ccode;
+ __u32 status;
+ __u16 cpu;
+} sigp_info;
+
+sigp_ccode smp_ext_call_sync(int cpu, ec_cmd_sig cmd,void *parms);
+sigp_ccode smp_ext_call_async(int cpu, ec_bit_sig sig);
+void smp_ext_call_sync_others(ec_cmd_sig cmd, void *parms);
+void smp_ext_call_async_others(ec_bit_sig sig);
+
+int smp_signal_others(sigp_order_code order_code,__u32 parameter,
+ int spin,sigp_info *info);
+#endif
+#endif
+#endif
diff --git a/include/asm-s390/smplock.h b/include/asm-s390/smplock.h
new file mode 100644
index 000000000..4ebd38025
--- /dev/null
+++ b/include/asm-s390/smplock.h
@@ -0,0 +1,60 @@
+/*
+ * include/asm-s390/smplock.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/smplock.h"
+ */
+
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
+
diff --git a/include/asm-s390/socket.h b/include/asm-s390/socket.h
new file mode 100644
index 000000000..21368c91b
--- /dev/null
+++ b/include/asm-s390/socket.h
@@ -0,0 +1,66 @@
+/*
+ * include/asm-s390/socket.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/socket.h"
+ */
+
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add: #define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#ifdef __KERNEL__
+/* Socket types. */
+#define SOCK_STREAM 1 /* stream (connection) socket */
+#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
+#define SOCK_RAW 3 /* raw socket */
+#define SOCK_RDM 4 /* reliably-delivered message */
+#define SOCK_SEQPACKET 5 /* sequential packet socket */
+#define SOCK_PACKET 10 /* linux specific way of */
+ /* getting packets at the dev */
+ /* level. For writing rarp and */
+ /* other similar things on the */
+ /* user level. */
+#endif
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+
+#endif /* _ASM_SOCKET_H */
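The SO_* values above are what setsockopt(2)/getsockopt(2) expect at the SOL_SOCKET level. A small user-space illustration using the standard sockets API (nothing s390-specific is assumed):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int one = 1;
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            /* allow quick rebinding of the listening address */
            if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                                     &one, sizeof(one)) < 0) {
                    perror("SO_REUSEADDR");
                    return 1;
            }
            printf("SO_REUSEADDR enabled on fd %d\n", fd);
            return 0;
    }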
diff --git a/include/asm-s390/sockios.h b/include/asm-s390/sockios.h
new file mode 100644
index 000000000..412aeb4dd
--- /dev/null
+++ b/include/asm-s390/sockios.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-s390/sockios.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/sockios.h"
+ */
+
+#ifndef __ARCH_S390_SOCKIOS__
+#define __ARCH_S390_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif
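These are the classic BSD socket ioctls; SIOCGPGRP, for example, reports which process (group) receives SIGIO/SIGURG for a socket. A minimal user-space check, again with nothing specific to this port:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
            int pgrp = 0;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0 || ioctl(fd, SIOCGPGRP, &pgrp) < 0) {
                    perror("SIOCGPGRP");
                    return 1;
            }
            printf("SIGIO owner of fd %d: %d\n", fd, pgrp); /* 0 if unset */
            return 0;
    }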
diff --git a/include/asm-s390/softirq.h b/include/asm-s390/softirq.h
new file mode 100644
index 000000000..b0ad1dc56
--- /dev/null
+++ b/include/asm-s390/softirq.h
@@ -0,0 +1,35 @@
+/*
+ * include/asm-s390/softirq.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/softirq.h"
+ */
+
+#ifndef __ASM_SOFTIRQ_H
+#define __ASM_SOFTIRQ_H
+
+#ifndef __LINUX_SMP_H
+#include <linux/smp.h>
+#endif
+
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/lowcore.h>
+
+#define cpu_bh_disable(cpu) do { atomic_inc(&S390_lowcore.local_bh_count); barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); atomic_dec(&S390_lowcore.local_bh_count); } while (0)
+
+#define local_bh_disable() cpu_bh_disable(smp_processor_id())
+#define local_bh_enable() cpu_bh_enable(smp_processor_id())
+
+#define in_softirq() (atomic_read(&S390_lowcore.local_bh_count) != 0)
+
+#endif /* __ASM_SOFTIRQ_H */
+
+
+
+
+
+
+
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
new file mode 100644
index 000000000..9c7725b55
--- /dev/null
+++ b/include/asm-s390/spinlock.h
@@ -0,0 +1,120 @@
+/*
+ * include/asm-s390/spinlock.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/spinlock.h"
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+typedef struct {
+ volatile unsigned long lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0)
+#define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
+#define spin_is_locked(x) ((x)->lock != 0)
+
+extern inline void spin_lock(spinlock_t *lp)
+{
+ __asm__ __volatile(" lhi 1,-1\n"
+ "0: slr 0,0\n"
+ " cs 0,1,%1\n"
+ " jl 0b"
+ : "=m" (lp->lock)
+ : "0" (lp->lock) : "0", "1");
+}
+
+extern inline int spin_trylock(spinlock_t *lp)
+{
+ unsigned long result;
+ __asm__ __volatile(" slr %1,%1\n"
+ " lhi 0,-1\n"
+ "0: cs %1,0,%0"
+ : "=m" (lp->lock), "=&d" (result)
+ : "0" (lp->lock) : "0");
+ return !result;
+}
+
+
+
+extern inline void spin_unlock(spinlock_t *lp)
+{
+ __asm__ __volatile(" xc 0(4,%0),0(%0)\n"
+ " bcr 15,0"
+ : /* no output */ : "a" (lp) );
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned long lock;
+ volatile unsigned long owner_pc;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+#define read_lock(rw) \
+ asm volatile(" l 2,%0\n" \
+ "0: sll 2,1\n" \
+ " srl 2,1\n" /* clear high (=write) bit */ \
+ " lr 3,2\n" \
+ " ahi 3,1\n" /* one more reader */ \
+ " cs 2,3,%0\n" /* try to write new value */ \
+ " jl 0b" \
+ : "+m" ((rw)->lock) : : "2", "3" );
+
+#define read_unlock(rw) \
+ asm volatile(" l 2,%0\n" \
+ "0: lr 3,2\n" \
+ " ahi 3,-1\n" /* one less reader */ \
+ " cs 2,3,%0\n" \
+ " jl 0b" \
+ : "+m" ((rw)->lock) : : "2", "3" );
+
+#define write_lock(rw) \
+ asm volatile(" lhi 3,1\n" \
+ " sll 3,31\n" /* new lock value = 0x80000000 */ \
+ "0: slr 2,2\n" /* old lock value must be 0 */ \
+ " cs 2,3,%0\n" \
+ " jl 0b" \
+ : "+m" ((rw)->lock) : : "2", "3" );
+
+#define write_unlock(rw) \
+ asm volatile(" slr 3,3\n" /* new lock value = 0 */ \
+ "0: lhi 2,1\n" \
+ " sll 2,31\n" /* old lock value must be 0x80000000 */ \
+ " cs 2,3,%0\n" \
+ " jl 0b" \
+ : "+m" ((rw)->lock) : : "2", "3" );
+
+#endif /* __ASM_SPINLOCK_H */
+
+
+
+
+
+
+
+
+
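As a usage sketch only (kernel context assumed; the lock and the counter it protects are hypothetical), the primitives above are used exactly like their i386 counterparts:

    #include <asm/spinlock.h>

    static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
    static unsigned long hits;

    static void count_hit(void)
    {
            spin_lock(&stats_lock);         /* busy-waits via compare-and-swap */
            hits++;
            spin_unlock(&stats_lock);       /* xc clears the lock word */
    }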
diff --git a/include/asm-s390/stat.h b/include/asm-s390/stat.h
new file mode 100644
index 000000000..e3e5907e9
--- /dev/null
+++ b/include/asm-s390/stat.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-s390/stat.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/stat.h"
+ */
+
+#ifndef _S390_STAT_H
+#define _S390_STAT_H
+
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+struct stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+ unsigned short st_dev;
+ unsigned char __pad0[10];
+
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned short st_rdev;
+ unsigned char __pad3[10];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+ unsigned long st_blocks; /* Number of 512-byte blocks allocated. */
+ unsigned long __pad4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long __pad5;
+
+ unsigned long st_mtime;
+ unsigned long __pad6;
+
+ unsigned long st_ctime;
+ unsigned long __pad7; /* will be high 32 bits of ctime someday */
+
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif
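struct stat above is what the 32-bit stat(2) family fills in for user space, while stat64 matches glibc 2.1's layout for the *64 calls. A plain user-space example through the libc wrapper:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
            struct stat st;

            if (argc < 2 || stat(argv[1], &st) < 0) {
                    perror("stat");
                    return 1;
            }
            printf("%s: %ld bytes, mode %o\n",
                   argv[1], (long) st.st_size, (unsigned) st.st_mode);
            return 0;
    }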
diff --git a/include/asm-s390/statfs.h b/include/asm-s390/statfs.h
new file mode 100644
index 000000000..c39586759
--- /dev/null
+++ b/include/asm-s390/statfs.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-s390/statfs.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/statfs.h"
+ */
+
+#ifndef _S390_STATFS_H
+#define _S390_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+
+#include <linux/types.h>
+
+typedef __kernel_fsid_t fsid_t;
+
+#endif
+
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_spare[6];
+};
+
+#endif
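struct statfs is what statfs(2)/fstatfs(2) return; a short user-space query of free blocks (glibc exposes the call through <sys/vfs.h>):

    #include <stdio.h>
    #include <sys/vfs.h>

    int main(void)
    {
            struct statfs sfs;

            if (statfs("/", &sfs) < 0) {
                    perror("statfs");
                    return 1;
            }
            printf("/: %ld of %ld blocks free (block size %ld)\n",
                   (long) sfs.f_bfree, (long) sfs.f_blocks, (long) sfs.f_bsize);
            return 0;
    }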
diff --git a/include/asm-s390/string.h b/include/asm-s390/string.h
new file mode 100644
index 000000000..2e8081912
--- /dev/null
+++ b/include/asm-s390/string.h
@@ -0,0 +1,107 @@
+/*
+ * include/asm-s390/string.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef _S390_STRING_H_
+#define _S390_STRING_H_
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_TYPES_H
+#include <linux/types.h>
+#endif
+
+#define __HAVE_ARCH_MEMCHR
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_STRCAT
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRLEN
+#define __HAVE_ARCH_STRNCPY
+
+#undef __HAVE_ARCH_MEMMOVE
+#undef __HAVE_ARCH_STRNICMP
+#undef __HAVE_ARCH_STRNCAT
+#undef __HAVE_ARCH_STRNCMP
+#undef __HAVE_ARCH_STRCHR
+#undef __HAVE_ARCH_STRRCHR
+#undef __HAVE_ARCH_STRNLEN
+#undef __HAVE_ARCH_STRSPN
+#undef __HAVE_ARCH_STRPBRK
+#undef __HAVE_ARCH_STRTOK
+#undef __HAVE_ARCH_BCOPY
+#undef __HAVE_ARCH_MEMCMP
+#undef __HAVE_ARCH_MEMSCAN
+#undef __HAVE_ARCH_STRSTR
+
+extern void *memset(void *, int, size_t);
+
+extern inline void * memchr(const void * cs,int c,size_t count)
+{
+ void *ptr;
+
+ __asm__ __volatile__ (" lr 0,%2\n"
+ " la %0,0(%3,%1)\n"
+ "0: srst %0,%1\n"
+ " jo 0b\n"
+ " brc 13,1f\n"
+ " slr %0,%0\n"
+ "1:"
+ : "=a" (ptr) : "a" (cs), "d" (c), "d" (count)
+ : "cc", "0" );
+ return ptr;
+}
+
+extern __inline__ char *strcpy(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ __asm__ __volatile__ (" sr 0,0\n"
+ "0: mvst %0,%1\n"
+ " jo 0b"
+ : "+&a" (dest), "+&a" (src) :
+ : "cc", "memory", "0" );
+ return tmp;
+}
+
+extern __inline__ size_t strlen(const char *s)
+{
+ size_t len;
+
+ __asm__ __volatile__ (" sr 0,0\n"
+ " lr %0,%1\n"
+ "0: srst 0,%0\n"
+ " jo 0b\n"
+ " lr %0,0\n"
+ " sr %0,%1"
+ : "=&a" (len) : "a" (s)
+ : "cc", "0" );
+ return len;
+}
+
+extern __inline__ char *strcat(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ __asm__ __volatile__ (" sr 0,0\n"
+ "0: srst 0,%0\n"
+ " jo 0b\n"
+ " lr %0,0\n"
+ " sr 0,0\n"
+ "1: mvst %0,%1\n"
+ " jo 1b"
+ : "+&a" (dest), "+&a" (src) :
+ : "cc", "memory", "0" );
+ return tmp;
+}
+
+
+#endif /* __KERNEL__ */
+
+#endif /* __S390_STRING_H_ */
+
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
new file mode 100644
index 000000000..26910ac1f
--- /dev/null
+++ b/include/asm-s390/system.h
@@ -0,0 +1,230 @@
+/*
+ * include/asm-s390/system.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * Derived from "include/asm-i386/system.h"
+ */
+
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#ifdef __KERNEL__
+#include <asm/lowcore.h>
+#endif
+#include <linux/kernel.h>
+
+#define prepare_to_switch() do { } while(0)
+#define switch_to(prev,next,last) do { \
+ if (prev == next) \
+ break; \
+ save_fp_regs1(&prev->thread.fp_regs); \
+ restore_fp_regs1(&next->thread.fp_regs); \
+ last = resume(&prev->thread,&next->thread); \
+} while (0)
+
+struct task_struct;
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ asm volatile (
+ " lhi 1,3\n"
+ " nr 1,%0\n" /* isolate last 2 bits */
+ " xr 1,%0\n" /* align ptr */
+ " bras 2,0f\n"
+ " icm 1,8,%1\n" /* for ptr&3 == 0 */
+ " stcm 0,8,%1\n"
+ " icm 1,4,%1\n" /* for ptr&3 == 1 */
+ " stcm 0,4,%1\n"
+ " icm 1,2,%1\n" /* for ptr&3 == 2 */
+ " stcm 0,2,%1\n"
+ " icm 1,1,%1\n" /* for ptr&3 == 3 */
+ " stcm 0,1,%1\n"
+ "0: sll 1,3\n"
+ " la 2,0(1,2)\n" /* r2 points to an icm */
+ " l 0,%1\n" /* get fullword */
+ "1: lr 1,0\n" /* cs loop */
+ " ex 0,0(2)\n" /* insert x */
+ " cs 0,1,%1\n"
+ " jl 1b\n"
+ " ex 0,4(2)" /* store *ptr to x */
+ : "+a&" (ptr) : "m" (x)
+ : "memory", "0", "1", "2");
+ case 2:
+ if(((__u32)ptr)&1)
+ panic("misaligned (__u16 *) in __xchg\n");
+ asm volatile (
+ " lhi 1,2\n"
+ " nr 1,%0\n" /* isolate bit 2^1 */
+ " xr 1,%0\n" /* align ptr */
+ " bras 2,0f\n"
+ " icm 1,12,%1\n" /* for ptr&2 == 0 */
+ " stcm 0,12,%1\n"
+ " icm 1,3,%1\n" /* for ptr&2 == 1 */
+ " stcm 0,3,%1\n"
+ "0: sll 1,2\n"
+ " la 2,0(1,2)\n" /* r2 points to an icm */
+ " l 0,%1\n" /* get fullword */
+ "1: lr 1,0\n" /* cs loop */
+ " ex 0,0(2)\n" /* insert x */
+ " cs 0,1,%1\n"
+ " jl 1b\n"
+ " ex 0,4(2)" /* store *ptr to x */
+ : "+a&" (ptr) : "m" (x)
+ : "memory", "0", "1", "2");
+ break;
+ case 4:
+ if(((__u32)ptr)&3)
+ panic("misaligned (__u32 *) in __xchg\n");
+ asm volatile (
+ " l 0,0(%1)\n"
+ "0: cs 0,%0,0(%1)\n"
+ " jl 0b\n"
+ " lr %0,0\n"
+ : "+d&" (x) : "a" (ptr)
+ : "memory", "0" );
+ break;
+ default:
+ abort();
+ }
+ return x;
+}
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * This is very similar to the ppc eieio/sync instruction in that it
+ * does a checkpoint synchronisation and makes sure that
+ * all memory ops have completed wrt other CPUs ( see 7-15 POP DJB ).
+ */
+
+#define eieio() __asm__ __volatile__ ("BCR 15,0")
+#define SYNC_OTHER_CORES(x) eieio()
+#define mb() eieio()
+#define rmb() eieio()
+#define wmb() eieio()
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_rmb(var, value) do { var = value; rmb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+#define __sti() ({ \
+ __u8 dummy; \
+ __asm__ __volatile__ ( \
+ "stosm %0,0x03" : "=m" (dummy) : : "memory"); \
+ })
+
+#define __cli() ({ \
+ __u32 flags; \
+ __asm__ __volatile__ ( \
+ "stnsm %0,0xFC" : "=m" (flags) : : "memory"); \
+ flags; \
+ })
+
+#define __save_flags(x) \
+ __asm__ __volatile__("stosm %0,0" : "=m" (x) : : "memory")
+
+#define __restore_flags(x) \
+ __asm__ __volatile__("ssm %0" : : "m" (x) : "memory")
+
+#define __ctl_set_bit(cr, bit) ({ \
+ __u8 dummy[16]; \
+ __asm__ __volatile__ ( \
+ " la 1,%0\n" /* align to 8 byte */ \
+ " ahi 1,7\n" \
+ " srl 1,3\n" \
+ " sll 1,3\n" \
+ " bras 2,0f\n" /* skip indirect insns */ \
+ " stctl 0,0,0(1)\n" \
+ " lctl 0,0,0(1)\n" \
+ "0: ex %1,0(2)\n" /* execute stctl */ \
+ " l 0,0(1)\n" \
+ " or 0,%2\n" /* set the bit */ \
+ " st 0,0(1)\n" \
+ "1: ex %1,4(2)" /* execute lctl */ \
+ : "=m" (dummy) : "a" (cr*17), "a" (1<<(bit)) \
+ : "0", "1", "2"); \
+ })
+
+#define __ctl_clear_bit(cr, bit) ({ \
+ __u8 dummy[16]; \
+ __asm__ __volatile__ ( \
+ " la 1,%0\n" /* align to 8 byte */ \
+ " ahi 1,7\n" \
+ " srl 1,3\n" \
+ " sll 1,3\n" \
+ " bras 2,0f\n" /* skip indirect insns */ \
+ " stctl 0,0,0(1)\n" \
+ " lctl 0,0,0(1)\n" \
+ "0: ex %1,0(2)\n" /* execute stctl */ \
+ " l 0,0(1)\n" \
+ " nr 0,%2\n" /* set the bit */ \
+ " st 0,0(1)\n" \
+ "1: ex %1,4(2)" /* execute lctl */ \
+ : "=m" (dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
+ : "0", "1", "2"); \
+ })
+
+/* For spinlocks etc */
+#define local_irq_save(x) ((x) = __cli())
+#define local_irq_restore(x) __restore_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#ifdef CONFIG_SMP
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+extern void smp_ctl_set_bit(int cr, int bit);
+extern void smp_ctl_clear_bit(int cr, int bit);
+#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+
+#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+
+
+#endif
+
+#ifdef __KERNEL__
+extern struct task_struct *resume(void *,void *);
+
+extern int save_fp_regs1(s390_fp_regs *fpregs);
+extern void save_fp_regs(s390_fp_regs *fpregs);
+extern int restore_fp_regs1(s390_fp_regs *fpregs);
+extern void restore_fp_regs(s390_fp_regs *fpregs);
+extern void show_crashed_task_info(void);
+#endif
+
+#endif
+
+
+
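A usage sketch for the interrupt-control macros above (kernel context assumed; the shared counter is hypothetical). On UP these map to stnsm/stosm/ssm, on SMP cli()/sti() go through the global variants:

    #include <asm/system.h>

    static volatile int shared_count;

    static void bump_shared_count(void)
    {
            unsigned long flags;

            save_flags(flags);      /* remember the current interrupt mask */
            cli();                  /* mask interrupts (global on SMP) */
            shared_count++;
            restore_flags(flags);   /* put the old mask back */
    }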
diff --git a/include/asm-s390/termbits.h b/include/asm-s390/termbits.h
new file mode 100644
index 000000000..cfbe7a36a
--- /dev/null
+++ b/include/asm-s390/termbits.h
@@ -0,0 +1,180 @@
+/*
+ * include/asm-s390/termbits.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/termbits.h"
+ */
+
+#ifndef __ARCH_S390_TERMBITS_H__
+#define __ARCH_S390_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif
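The c_lflag/c_cc values above are the ones the usual termios(3) calls manipulate; for instance, putting a terminal into non-canonical, no-echo mode from user space:

    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            struct termios t;

            if (tcgetattr(STDIN_FILENO, &t) < 0) {
                    perror("tcgetattr");
                    return 1;
            }
            t.c_lflag &= ~(ICANON | ECHO);  /* raw-ish input, no echo */
            t.c_cc[VMIN] = 1;               /* read returns after 1 byte */
            t.c_cc[VTIME] = 0;              /* no inter-byte timeout */
            if (tcsetattr(STDIN_FILENO, TCSANOW, &t) < 0) {
                    perror("tcsetattr");
                    return 1;
            }
            return 0;
    }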
diff --git a/include/asm-s390/termios.h b/include/asm-s390/termios.h
new file mode 100644
index 000000000..86415c0a2
--- /dev/null
+++ b/include/asm-s390/termios.h
@@ -0,0 +1,113 @@
+/*
+ * include/asm-s390/termios.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/termios.h"
+ */
+
+#ifndef _S390_TERMIOS_H
+#define _S390_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
+
+#ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __KERNEL__ */
+
+#endif /* _S390_TERMIOS_H */
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
new file mode 100644
index 000000000..ad97e0eda
--- /dev/null
+++ b/include/asm-s390/timex.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-s390/timex.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/timex.h"
+ * Copyright (C) 1992, Linus Torvalds
+ */
+
+#ifndef _ASM_S390_TIMEX_H
+#define _ASM_S390_TIMEX_H
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+ (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+ << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+typedef unsigned long cycles_t;
+
+extern cycles_t cacheflush_time;
+
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
new file mode 100644
index 000000000..23d5c7176
--- /dev/null
+++ b/include/asm-s390/types.h
@@ -0,0 +1,66 @@
+/*
+ * include/asm-s390/types.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/types.h"
+ */
+
+#ifndef _S390_TYPES_H
+#define _S390_TYPES_H
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+/* An address type so that arithmetic can be done on it & it can be upgraded to
+ 64 bit when necessary
+*/
+typedef __u32 addr_t;
+typedef __s32 saddr_t;
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+typedef u32 dma_addr_t;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
new file mode 100644
index 000000000..9f448fc50
--- /dev/null
+++ b/include/asm-s390/uaccess.h
@@ -0,0 +1,519 @@
+/*
+ * include/asm-s390/uaccess.h
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/uaccess.h"
+ */
+#ifndef __S390_UACCESS_H
+#define __S390_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#if 0
+#include <asm/segment.h>
+#endif
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s,a) ((mm_segment_t) { (s),(a) })
+
+
+#define KERNEL_DS MAKE_MM_SEG(0x7FFFFFFF,0)
+#define USER_DS MAKE_MM_SEG(PAGE_OFFSET,1)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.fs)
+#define set_fs(x) ({asm volatile("sar 4,%0"::"a" (x.acc4)); \
+ current->thread.fs = (x);})
+
+#define segment_eq(a,b) ((a).acc4 == (b).acc4)
+
+
+#define __access_ok(addr,size) ((((long) addr + size)&0x7FFFFFFFL) < current->addr_limit.seg)
+
+#define access_ok(type,addr,size) __access_ok(addr,size)
+
+extern inline int verify_area(int type, const void * addr, unsigned long size)
+{
+ return access_ok(type,addr,size)?0:-EFAULT;
+}
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+extern inline int __put_user_asm_4(__u32 x, void *ptr)
+{
+ int err;
+
+ __asm__ __volatile__ ( " iac 1\n"
+ " sr %1,%1\n"
+ " la 4,%0\n"
+ " sacf 512\n"
+ "0: st %2,0(4)\n"
+ " sacf 0(1)\n"
+ "1:\n"
+ ".section .fixup,\"ax\"\n"
+ "2: sacf 0(1)\n"
+ " lhi %1,%h3\n"
+ " bras 4,3f\n"
+ " .long 1b\n"
+ "3: l 4,0(4)\n"
+ " br 4\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,2b\n"
+ ".previous"
+ : "=m" (*((__u32*) ptr)) , "=&d" (err)
+ : "d" (x), "K" (-EFAULT)
+ : "1", "4" );
+ return err;
+}
+
+extern inline int __put_user_asm_2(__u16 x, void *ptr)
+{
+ int err;
+
+ __asm__ __volatile__ ( " iac 1\n"
+ " sr %1,%1\n"
+ " la 4,%0\n"
+ " sacf 512\n"
+ "0: sth %2,0(4)\n"
+ " sacf 0(1)\n"
+ "1:\n"
+ ".section .fixup,\"ax\"\n"
+ "2: sacf 0(1)\n"
+ " lhi %1,%h3\n"
+ " bras 4,3f\n"
+ " .long 1b\n"
+ "3: l 4,0(4)\n"
+ " br 4\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,2b\n"
+ ".previous"
+ : "=m" (*((__u16*) ptr)) , "=&d" (err)
+ : "d" (x), "K" (-EFAULT)
+ : "1", "4" );
+ return err;
+}
+
+extern inline int __put_user_asm_1(__u8 x, void *ptr)
+{
+ int err;
+
+ __asm__ __volatile__ ( " iac 1\n"
+ " sr %1,%1\n"
+ " la 4,%0\n"
+ " sacf 512\n"
+ "0: stc %2,0(4)\n"
+ " sacf 0(1)\n"
+ "1:\n"
+ ".section .fixup,\"ax\"\n"
+ "2: sacf 0(1)\n"
+ " lhi %1,%h3\n"
+ " bras 4,3f\n"
+ " .long 1b\n"
+ "3: l 4,0(4)\n"
+ " br 4\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,2b\n"
+ ".previous"
+ : "=m" (*((__u8*) ptr)) , "=&d" (err)
+ : "d" (x), "K" (-EFAULT)
+ : "1", "4" );
+ return err;
+}
+
+/*
+ * (u8)(u32) ... ouch, but that is the only way we can suppress the
+ * warnings when compiling binfmt_elf.c
+ */
+#define __put_user(x, ptr) \
+({ \
+ int __pu_err; \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ __pu_err = __put_user_asm_1((__u8)(__u32)x,ptr);\
+ break; \
+ case 2: \
+ __pu_err = __put_user_asm_2((__u16)(__u32)x,ptr);\
+ break; \
+ case 4: \
+ __pu_err = __put_user_asm_4((__u32) x,ptr);\
+ break; \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(x) __x = (x); \
+ if (__access_ok((long)__pu_addr,sizeof(*(ptr)))) { \
+ __pu_err = 0; \
+ __put_user((__x), (__pu_addr)); \
+ } \
+ __pu_err; \
+})
+
+extern int __put_user_bad(void);
+
+
+#define __get_user_asm_4(x, ptr, err) \
+({ \
+ __asm__ __volatile__ ( " iac 1\n" \
+ " sr %1,%1\n" \
+ " la 4,%2\n" \
+ " sacf 512\n" \
+ "0: l %0,0(4)\n" \
+ " sacf 0(1)\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: sacf 0(1)\n" \
+ " lhi %1,%h3\n" \
+ " bras 4,3f\n" \
+ " .long 1b\n" \
+ "3: l 4,0(4)\n" \
+ " br 4\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=d" (x) , "=&d" (err) \
+ : "m" (*(__u32*) ptr), "K" (-EFAULT) \
+ : "1", "4" ); \
+})
+
+#define __get_user_asm_2(x, ptr, err) \
+({ \
+ __asm__ __volatile__ ( " iac 1\n" \
+ " sr %1,%1\n" \
+ " la 4,%2\n" \
+ " sacf 512\n" \
+ "0: lh %0,0(4)\n" \
+ " sacf 0(1)\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: sacf 0(1)\n" \
+ " lhi %1,%h3\n" \
+ " bras 4,3f\n" \
+ " .long 1b\n" \
+ "3: l 4,0(4)\n" \
+ " br 4\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=d" (x) , "=&d" (err) \
+ : "m" (*(__u16*) ptr), "K" (-EFAULT) \
+ : "1", "4" ); \
+})
+
+#define __get_user_asm_1(x, ptr, err) \
+({ \
+ __asm__ __volatile__ ( " iac 1\n" \
+ " sr %1,%1\n" \
+ " la 4,%2\n" \
+ " sr %0,%0\n" \
+ " sacf 512\n" \
+ "0: ic %0,0(4)\n" \
+ " sacf 0(1)\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: sacf 0(1)\n" \
+ " lhi %1,%h3\n" \
+ " bras 4,3f\n" \
+ " .long 1b\n" \
+ "3: l 4,0(4)\n" \
+ " br 4\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=d" (x) , "=&d" (err) \
+ : "m" (*(__u8*) ptr), "K" (-EFAULT) \
+ : "1", "4" ); \
+})
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_asm_1(x,ptr,__gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm_2(x,ptr,__gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm_4(x,ptr,__gu_err); \
+ break; \
+ default: \
+ (x) = 0; \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ long __gu_err = -EFAULT; \
+ __typeof__(ptr) __gu_addr = (ptr); \
+ __typeof__(x) __x; \
+ if (__access_ok((long)__gu_addr,sizeof(*(ptr)))) { \
+ __gu_err = 0; \
+ __get_user((__x), (__gu_addr)); \
+ (x) = __x; \
+ } \
+ else \
+ (x) = 0; \
+ __gu_err; \
+})
+
+extern int __get_user_bad(void);
+
+/*
+ * access registers are set up so that 4 points to the secondary (user) space and 2 to the primary (kernel) space
+ */
+
+extern inline unsigned long
+__copy_to_user_asm(void* to, const void* from, long n)
+{
+
+ __asm__ __volatile__ ( " iac 1\n"
+ " lr 2,%2\n"
+ " lr 4,%1\n"
+ " lr 3,%0\n"
+ " lr 5,3\n"
+ " sacf 512\n"
+ "0: mvcle 4,2,0\n"
+ " jo 0b\n"
+ "1: sacf 0(1)\n"
+ " lr %0,3\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous"
+ : "+&d" (n) : "d" (to), "d" (from)
+ : "1", "2", "3", "4", "5" );
+ return n;
+}
+
+#define __copy_to_user(to, from, n) \
+({ \
+ __copy_to_user_asm(to,from,n); \
+})
+
+#define copy_to_user(to, from, n) \
+({ \
+ long err = 0; \
+ __typeof__(n) __n = (n); \
+ if (__access_ok(to,__n)) { \
+ err = __copy_to_user_asm(to,from,__n); \
+ } \
+ else \
+ err = __n; \
+ err; \
+})
+
+extern inline unsigned long
+__copy_from_user_asm(void* to, const void* from, long n)
+{
+ __asm__ __volatile__ ( " iac 1\n"
+ " lr 2,%1\n"
+ " lr 4,%2\n"
+ " lr 3,%0\n"
+ " lr 5,3\n"
+ " sacf 512\n"
+ "0: mvcle 2,4,0\n"
+ " jo 0b\n"
+ "1: sacf 0(1)\n"
+ " lr %0,3\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous"
+ : "+&d" (n) : "d" (to), "d" (from)
+ : "1", "2", "3", "4", "5" );
+ return n;
+}
+
+
+#define __copy_from_user(to, from, n) \
+({ \
+ __copy_from_user_asm(to,from,n); \
+})
+
+#define copy_from_user(to, from, n) \
+({ \
+ long err = 0; \
+ __typeof__(n) __n = (n); \
+ if (__access_ok(from,__n)) { \
+ err = __copy_from_user_asm(to,from,__n); \
+ } \
+ else \
+ err = __n; \
+ err; \
+})
+
+#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
+
+#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+
+static inline long
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ int len;
+ __asm__ __volatile__ ( " iac 1\n"
+ " slr %0,%0\n"
+ " lr 2,%1\n"
+ " lr 4,%2\n"
+ " slr 3,3\n"
+ " sacf 512\n"
+ "0: ic 3,0(%0,4)\n"
+ "1: stc 3,0(%0,2)\n"
+ " ltr 3,3\n"
+ " jz 2f\n"
+ " ahi %0,1\n"
+ " clr %0,%3\n"
+ " jl 0b\n"
+ "2: sacf 0(1)\n"
+ ".section .fixup,\"ax\"\n"
+ "3: lhi %0,%h4\n"
+ " basr 3,0\n"
+ " l 3,4f-.(3)\n"
+ " br 3\n"
+ "4: .long 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,3b\n"
+ " .long 1b,3b\n"
+ ".previous"
+ : "=&a" (len)
+ : "a" (dst), "d" (src), "d" (count),
+ "K" (-EFAULT)
+ : "1", "2", "3", "4", "memory" );
+ return len;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+static inline unsigned long
+strnlen_user(const char * src, unsigned long n)
+{
+ __asm__ __volatile__ (" iac 1\n"
+ " alr %0,%1\n"
+ " slr 0,0\n"
+ " lr 4,%1\n"
+ " sacf 512\n"
+ "0: srst %0,4\n"
+ " jo 0b\n"
+ " slr %0,%1\n"
+ " ahi %0,1\n"
+ " sacf 0(1)\n"
+ "1:\n"
+ ".section .fixup,\"ax\"\n"
+ "2: sacf 0(1)\n"
+ " slr %0,%0\n"
+ " bras 4,3f\n"
+ " .long 1b\n"
+ "3: l 4,0(4)\n"
+ " br 4\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,2b\n"
+ ".previous"
+ : "+&a" (n) : "d" (src)
+ : "cc", "0", "1", "4" );
+ return n;
+}
+#define strlen_user(str) strnlen_user(str, ~0UL)
+
+/*
+ * Zero Userspace
+ */
+
+static inline unsigned long
+clear_user(void *to, unsigned long n)
+{
+ __asm__ __volatile__ ( " iac 1\n"
+ " sacf 512\n"
+ " lr 4,%1\n"
+ " lr 5,%0\n"
+ " sr 2,2\n"
+ " sr 3,3\n"
+ "0: mvcle 4,2,0\n"
+ " jo 0b\n"
+ "1: sacf 0(1)\n"
+ " lr %0,3\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous"
+ : "+&a" (n)
+ : "a" (to)
+ : "cc", "1", "2", "3", "4", "5" );
+ return n;
+}
+
+#endif /* _S390_UACCESS_H */
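A sketch of the intended use (kernel context assumed; the ioctl backend, its name and the `param` variable are hypothetical). The access-register tricks stay hidden behind the usual get_user/copy_from_user interface:

    #include <linux/errno.h>
    #include <asm/uaccess.h>

    static int param;

    /* hypothetical ioctl backend: fetch an int from user space safely */
    static int mydev_set_param(unsigned long arg)
    {
            int value;

            if (get_user(value, (int *) arg))
                    return -EFAULT;         /* fixup path reported the fault */
            param = value;
            return 0;
    }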
diff --git a/include/asm-s390/ucontext.h b/include/asm-s390/ucontext.h
new file mode 100644
index 000000000..cf7c431f1
--- /dev/null
+++ b/include/asm-s390/ucontext.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-s390/ucontext.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/ucontext.h"
+ */
+
+#ifndef _ASM_S390_UCONTEXT_H
+#define _ASM_S390_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* !_ASM_S390_UCONTEXT_H */
diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h
new file mode 100644
index 000000000..8ee86dbed
--- /dev/null
+++ b/include/asm-s390/unaligned.h
@@ -0,0 +1,24 @@
+/*
+ * include/asm-s390/unaligned.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/unaligned.h"
+ */
+
+#ifndef __S390_UNALIGNED_H
+#define __S390_UNALIGNED_H
+
+/*
+ * The S390 can do unaligned accesses itself.
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ */
+
+#define get_unaligned(ptr) (*(ptr))
+
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+#endif
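A short sketch of get_unaligned() on a byte buffer (the buffer and its layout are hypothetical); on S390 it degenerates to a plain dereference as noted above, but the macro keeps the code portable:

    #include <asm/types.h>
    #include <asm/unaligned.h>

    /* read a 32-bit field that sits at an odd offset inside a packed header */
    static inline __u32 read_len_field(unsigned char *hdr)
    {
            return get_unaligned((__u32 *)(hdr + 3));
    }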
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
new file mode 100644
index 000000000..4dd04edbc
--- /dev/null
+++ b/include/asm-s390/unistd.h
@@ -0,0 +1,374 @@
+/*
+ * include/asm-s390/unistd.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/unistd.h"
+ */
+
+#ifndef _ASM_S390_UNISTD_H_
+#define _ASM_S390_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_setpgid 57
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_symlink 83
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread 180
+#define __NR_pwrite 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+
+
+/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
+
+#define __syscall_return(type, res) \
+do { \
+ if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ errno = -(res); \
+ res = -1; \
+ } \
+ return (type) (res); \
+} while (0)
+
+#define _svc_clobber "cc", "memory"
+
+#define _syscall0(type,name) \
+type name(void) { \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) { \
+ register type1 __arg1 asm("2") = arg1; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name), \
+ "d" (__arg1) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) { \
+ register type1 __arg1 asm("2") = arg1; \
+ register type2 __arg2 asm("3") = arg2; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name), \
+ "d" (__arg1), \
+ "d" (__arg2) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\
+type name(type1 arg1, type2 arg2, type3 arg3) { \
+ register type1 __arg1 asm("2") = arg1; \
+ register type2 __arg2 asm("3") = arg2; \
+ register type3 __arg3 asm("4") = arg3; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name), \
+ "d" (__arg1), \
+ "d" (__arg2), \
+ "d" (__arg3) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\
+ type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
+ register type1 __arg1 asm("2") = arg1; \
+ register type2 __arg2 asm("3") = arg2; \
+ register type3 __arg3 asm("4") = arg3; \
+ register type4 __arg4 asm("5") = arg4; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name), \
+ "d" (__arg1), \
+ "d" (__arg2), \
+ "d" (__arg3), \
+ "d" (__arg4) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\
+ type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ type5 arg5) { \
+ register type1 __arg1 asm("2") = arg1; \
+ register type2 __arg2 asm("3") = arg2; \
+ register type3 __arg3 asm("4") = arg3; \
+ register type4 __arg4 asm("5") = arg4; \
+ register type5 __arg5 asm("6") = arg5; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ " svc %b1\n" \
+ " lr %0,2" \
+ : "=d" (__res) \
+ : "i" (__NR_##name), \
+ "d" (__arg1), \
+ "d" (__arg2), \
+ "d" (__arg3), \
+ "d" (__arg4), \
+ "d" (__arg5) \
+ : _svc_clobber ); \
+ __syscall_return(type,__res); \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,idle)
+static inline _syscall0(int,pause)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall1(int,delete_module,const char *,name)
+static inline _syscall2(long,stat,char *,filename,struct stat *,statbuf)
+
+extern int sys_wait4(int, int *, int, struct rusage *);
+static inline pid_t waitpid(int pid, int * wait_stat, int flags)
+{
+ return sys_wait4(pid, wait_stat, flags, NULL);
+}
+
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+
+#endif
+
+#endif /* _ASM_S390_UNISTD_H_ */
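Outside the kernel, the same _syscallN macros were traditionally used to generate raw syscall stubs when libc did not yet wrap a call. A hedged sketch for this s390 flavour (it needs an errno definition in scope, an s390 target compiler of the era, and uses a hypothetical alias so the stub does not clash with the libc getpid):

    #include <sys/types.h>
    #include <asm/unistd.h>

    static int errno;                       /* __syscall_return() stores into this */

    /* alias the syscall number so the generated stub gets its own name */
    #define __NR_my_getpid __NR_getpid
    static inline _syscall0(pid_t, my_getpid)

    int main(void)
    {
            return my_getpid() > 0 ? 0 : 1; /* issues svc 20 directly */
    }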
diff --git a/include/asm-s390/user.h b/include/asm-s390/user.h
new file mode 100644
index 000000000..c64f8c181
--- /dev/null
+++ b/include/asm-s390/user.h
@@ -0,0 +1,77 @@
+/*
+ * include/asm-s390/user.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/user.h"
+ */
+
+#ifndef _S390_USER_H
+#define _S390_USER_H
+
+#include <asm/page.h>
+#include <linux/ptrace.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user (under
+ linux we use the 'trad-core' bfd). There are quite a number of
+ obstacles to being able to view the contents of the floating point
+ registers, and until these are solved you will not be able to view the
+ contents of them. Actually, you can read in the core file and look at
+ the contents of the user struct to find out what the floating point
+ registers contain.
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (esp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integer number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+
+/*
+ * This is the old layout of "struct pt_regs", and
+ * is still the layout used by user mode (the new
+ * pt_regs doesn't have all registers as the kernel
+ * doesn't use the extra segment registers)
+ */
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+struct user {
+/* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ long int signal; /* Signal that caused the core dump. */
+ struct user_regs_struct *u_ar0;
+ /* Used by gdb to help find the values for */
+ /* the registers. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _S390_USER_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb673b0af..371069369 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -163,4 +163,7 @@ extern int * max_segments[MAX_BLKDEV];
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
+extern void drive_stat_acct (kdev_t dev, int rw,
+ unsigned long nr_sectors, int new_io);
+
#endif
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 9aff2f0af..7452d838d 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -7,6 +7,10 @@
*
* This file contains the general definitions for the cyclades.c driver
*$Log: cyclades.h,v $
+ *Revision 3.1 2000/04/19 18:52:52 ivan
+ *converted address fields to unsigned long and added fields for physical
+ *addresses on cyclades_card structure;
+ *
*Revision 3.0 1998/11/02 14:20:59 ivan
*added nports field on cyclades_card structure;
*
@@ -500,8 +504,10 @@ struct ZFW_CTRL {
/* Per card data structure */
struct cyclades_card {
- long base_addr;
- long ctl_addr;
+ unsigned long base_phys;
+ unsigned long ctl_phys;
+ unsigned long base_addr;
+ unsigned long ctl_addr;
int irq;
int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
int first_line; /* minor number of first channel on card */
diff --git a/include/linux/dasd.h b/include/linux/dasd.h
new file mode 100644
index 000000000..98c3c0424
--- /dev/null
+++ b/include/linux/dasd.h
@@ -0,0 +1,225 @@
+
+#ifndef DASD_H
+#define DASD_H
+
+/* First of all the external stuff */
+#include <linux/ioctl.h>
+#include <linux/major.h>
+#include <linux/wait.h>
+
+#define IOCTL_LETTER 'D'
+#define BIODASDFORMAT _IO(IOCTL_LETTER,0) /* Format the volume or an extent */
+#define BIODASDDISABLE _IO(IOCTL_LETTER,1) /* Disable the volume (for Linux) */
+#define BIODASDENABLE _IO(IOCTL_LETTER,2) /* Enable the volume (for Linux) */
+/* Stuff for reading and writing the Label-Area to/from user space */
+#define BIODASDGTVLBL _IOR(IOCTL_LETTER,3,dasd_volume_label_t)
+#define BIODASDSTVLBL _IOW(IOCTL_LETTER,4,dasd_volume_label_t)
+#define BIODASDRWTB _IOWR(IOCTL_LETTER,5,int)
+#define BIODASDRSID _IOR(IOCTL_LETTER,6,senseid_t)
+
+typedef
+union {
+ char bytes[512];
+ struct {
+ /* 80 Bytes of Label data */
+ char identifier[4]; /* e.g. "LNX1", "VOL1" or "CMS1" */
+ char label[6]; /* Given by user */
+ char security;
+ char vtoc[5]; /* Null in "LNX1"-labelled partitions */
+ char reserved0[5];
+ long ci_size;
+ long blk_per_ci;
+ long lab_per_ci;
+ char reserved1[4];
+ char owner[0xe];
+ char no_part;
+ char reserved2[0x1c];
+ /* 16 Byte of some information on the dasd */
+ short blocksize;
+ char nopart;
+ char unused;
+ long unused2[3];
+ /* 7*10 = 70 Bytes of partition data */
+ struct {
+ char type;
+ long start;
+ long size;
+ char unused;
+ } part[7];
+ } __attribute__ ((packed)) label;
+} dasd_volume_label_t;
+
+typedef union {
+ struct {
+ unsigned long no;
+ unsigned int ct;
+ } __attribute__ ((packed)) input;
+ struct {
+ unsigned long noct;
+ } __attribute__ ((packed)) output;
+} __attribute__ ((packed)) dasd_xlate_t;
+
+int dasd_init (void);
+#ifdef MODULE
+int init_module (void);
+void cleanup_module (void);
+#endif /* MODULE */
+
+/* Definitions for blk.h */
+/* #define DASD_MAGIC 0x44415344 is ascii-"DASD" */
+/* #define dasd_MAGIC 0x64617364; is ascii-"dasd" */
+#define DASD_MAGIC 0xC4C1E2C4 /* is ebcdic-"DASD" */
+#define dasd_MAGIC 0x8481A284 /* is ebcdic-"dasd" */
+#define DASD_NAME "dasd"
+#define DASD_PARTN_BITS 2
+#define DASD_MAX_DEVICES (256>>DASD_PARTN_BITS)
+
+#define MAJOR_NR DASD_MAJOR
+#define PARTN_BITS DASD_PARTN_BITS
+
+#ifdef __KERNEL__
+/* Now let's turn to the internal stuff */
+/*
+ define the debug levels:
+ - 0 No debugging output to console or syslog
+ - 1 Log internal errors to syslog, ignore check conditions
+ - 2 Log internal errors and check conditions to syslog
+ - 3 Log internal errors to console, log check conditions to syslog
+ - 4 Log internal errors and check conditions to console
+ - 5 Panic on internal errors, log check conditions to console
+ - 6 Panic on both internal errors and check conditions
+ */
+#define DASD_DEBUG 4
+
+#define DASD_PROFILE
+/*
+ define the level of paranoia
+ - 0 Quite sure that things are going right
+ - 1 Sanity checking only to avoid panics
+ - 2 Normal sanity checking
+ - 3 Extensive sanity checks
+ - 4 Exhaustive debug messages
+ */
+#define DASD_PARANOIA 2
+
+/*
+ define the depth of flow-control logging; flow-control messages are logged like check conditions
+ - 0 No flow control messages
+ - 1 Entry of functions logged like a check condition
+ - 2 Entry and exit of functions logged like check conditions
+ - 3 Internal structure broken down
+ - 4 Unrolling of loops, ...
+ */
+#define DASD_FLOW_CONTROL 0
+
+#if DASD_DEBUG > 0
+#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
+#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
+#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
+#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#else
+#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#endif /* DASD_DEBUG */
+
+#define INTERNAL_ERRMSG(x,y...) \
+"Internal error: in file " __FILE__ " line: %d: " x, __LINE__, y
+#define INTERNAL_CHKMSG(x,y...) \
+"Inconsistency: in file " __FILE__ " line: %d: " x, __LINE__, y
+#define INTERNAL_FLWMSG(x,y...) \
+"Flow control: file " __FILE__ " line: %d: " x, __LINE__, y
+
+#if DASD_DEBUG > 4
+#define INTERNAL_ERROR(x...) PRINT_FATAL ( INTERNAL_ERRMSG ( x ) )
+#elif DASD_DEBUG > 2
+#define INTERNAL_ERROR(x...) PRINT_ERR ( INTERNAL_ERRMSG ( x ) )
+#elif DASD_DEBUG > 0
+#define INTERNAL_ERROR(x...) PRINT_WARN ( INTERNAL_ERRMSG ( x ) )
+#else
+#define INTERNAL_ERROR(x...)
+#endif /* DASD_DEBUG */
+
+#if DASD_DEBUG > 5
+#define INTERNAL_CHECK(x...) PRINT_FATAL ( INTERNAL_CHKMSG ( x ) )
+#elif DASD_DEBUG > 3
+#define INTERNAL_CHECK(x...) PRINT_ERR ( INTERNAL_CHKMSG ( x ) )
+#elif DASD_DEBUG > 1
+#define INTERNAL_CHECK(x...) PRINT_WARN ( INTERNAL_CHKMSG ( x ) )
+#else
+#define INTERNAL_CHECK(x...)
+#endif /* DASD_DEBUG */
+
+#if DASD_DEBUG > 3
+#define INTERNAL_FLOW(x...) PRINT_ERR ( INTERNAL_FLWMSG ( x ) )
+#elif DASD_DEBUG > 2
+#define INTERNAL_FLOW(x...) PRINT_WARN ( INTERNAL_FLWMSG ( x ) )
+#else
+#define INTERNAL_FLOW(x...)
+#endif /* DASD_DEBUG */
+
+#if DASD_FLOW_CONTROL > 0
+#define FUNCTION_ENTRY(x) INTERNAL_FLOW( x "entered %s\n","" );
+#else
+#define FUNCTION_ENTRY(x)
+#endif /* DASD_FLOW_CONTROL */
+
+#if DASD_FLOW_CONTROL > 1
+#define FUNCTION_EXIT(x) INTERNAL_FLOW( x "exited %s\n","" );
+#else
+#define FUNCTION_EXIT(x)
+#endif /* DASD_FLOW_CONTROL */
+
+#if DASD_FLOW_CONTROL > 2
+#define FUNCTION_CONTROL(x...) INTERNAL_FLOW( x );
+#else
+#define FUNCTION_CONTROL(x...)
+#endif /* DASD_FLOW_CONTROL */
+
+#if DASD_FLOW_CONTROL > 3
+#define LOOP_CONTROL(x...) INTERNAL_FLOW( x );
+#else
+#define LOOP_CONTROL(x...)
+#endif /* DASD_FLOW_CONTROL */
+
+#define DASD_DO_IO_SLEEP 0x01
+#define DASD_DO_IO_NOLOCK 0x02
+#define DASD_DO_IO_NODEC 0x04
+
+#define DASD_NOT_FORMATTED 0x01
+
+extern wait_queue_head_t dasd_waitq;
+
+#undef DEBUG_DASD_MALLOC
+#ifdef DEBUG_DASD_MALLOC
+void *b;
+#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
+#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
+#define get_free_page(x...) (PRINT_INFO(" gfp %p\n",b=get_free_page(x)),b)
+#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
+#endif /* DEBUG_DASD_MALLOC */
+
+#endif /* __KERNEL__ */
+#endif /* DASD_H */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
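
The BIODASD* ioctls above form the user-space interface of the new driver. A minimal user-space sketch of reading the volume label with BIODASDGTVLBL (device node name and error handling are illustrative assumptions):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dasd.h>

int main(void)
{
        dasd_volume_label_t vlabel;
        int fd = open("/dev/dasda", O_RDONLY);   /* device node name assumed */

        if (fd < 0 || ioctl(fd, BIODASDGTVLBL, &vlabel) < 0) {
                perror("BIODASDGTVLBL");
                return 1;
        }
        printf("identifier: %.4s, label: %.6s\n",
               vlabel.label.identifier, vlabel.label.label);
        close(fd);
        return 0;
}
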
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c7df47297..f2bc40f4d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -82,6 +82,9 @@
#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */
#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */
#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */
+#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */
+#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */
+#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e7bf84240..c07a473d1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -11,6 +11,7 @@
#include <linux/config.h>
#include <linux/types.h>
+#include <linux/major.h>
/* These three have identical behaviour; use the second one if DOS fdisk gets
confused about extended/logical partitions starting past cylinder 1023. */
@@ -233,6 +234,35 @@ extern void devfs_register_partitions (struct gendisk *dev, int minor,
int get_hardsect_size(kdev_t dev);
+/*
+ * FIXME: this should use genhd->minor_shift, but that is slow to look up.
+ */
+static inline unsigned int disk_index (kdev_t dev)
+{
+ int major = MAJOR(dev);
+ int minor = MINOR(dev);
+ unsigned int index;
+
+ switch (major) {
+ case DAC960_MAJOR+0:
+ index = (minor & 0x00f8) >> 3;
+ break;
+ case SCSI_DISK0_MAJOR:
+ index = (minor & 0x00f0) >> 4;
+ break;
+ case IDE0_MAJOR: /* same as HD_MAJOR */
+ case XT_DISK_MAJOR:
+ index = (minor & 0x0040) >> 6;
+ break;
+ case IDE1_MAJOR:
+ index = ((minor & 0x0040) >> 6) + 2;
+ break;
+ default:
+ return 0;
+ }
+ return index;
+}
+
#endif
#endif
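
A couple of hedged spot checks of the mapping implemented by disk_index(), assuming the conventional minor layout (16 minors per SCSI disk, 64 per IDE drive):

#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/major.h>

/* Illustrative only: /dev/sdb is (SCSI_DISK0_MAJOR, minor 16) and maps to
 * index 1; /dev/hdd is (IDE1_MAJOR, minor 64) and maps to index 3. */
static void disk_index_examples(void)
{
        unsigned int sdb = disk_index(MKDEV(SCSI_DISK0_MAJOR, 16)); /* 1 */
        unsigned int hdd = disk_index(MKDEV(IDE1_MAJOR, 64));       /* 3 */

        (void) sdb;
        (void) hdd;
}
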
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 2491372a9..c9f194be4 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -12,18 +12,19 @@
* used by rstatd/perfmeter
*/
-#define DK_NDRIVE 4
+#define DK_MAX_MAJOR 16
+#define DK_MAX_DISK 16
struct kernel_stat {
unsigned int cpu_user, cpu_nice, cpu_system;
unsigned int per_cpu_user[NR_CPUS],
per_cpu_nice[NR_CPUS],
per_cpu_system[NR_CPUS];
- unsigned int dk_drive[DK_NDRIVE];
- unsigned int dk_drive_rio[DK_NDRIVE];
- unsigned int dk_drive_wio[DK_NDRIVE];
- unsigned int dk_drive_rblk[DK_NDRIVE];
- unsigned int dk_drive_wblk[DK_NDRIVE];
+ unsigned int dk_drive[DK_MAX_MAJOR][DK_MAX_DISK];
+ unsigned int dk_drive_rio[DK_MAX_MAJOR][DK_MAX_DISK];
+ unsigned int dk_drive_wio[DK_MAX_MAJOR][DK_MAX_DISK];
+ unsigned int dk_drive_rblk[DK_MAX_MAJOR][DK_MAX_DISK];
+ unsigned int dk_drive_wblk[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int pgpgin, pgpgout;
unsigned int pswpin, pswpout;
#if !defined(CONFIG_ARCH_S390)
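
With the counters now indexed by (major, disk), per-request accounting can combine disk_index() with the new two-dimensional arrays. A hedged sketch (function name and placement are assumptions, not taken from this patch):

#include <linux/fs.h>            /* READ / WRITE */
#include <linux/genhd.h>         /* disk_index() */
#include <linux/kernel_stat.h>   /* kstat, DK_MAX_* */

/* Sketch only: account one request against the per-(major, disk) counters,
 * using disk_index() to pick the column and bounds-checking both indices. */
static void account_disk_io(kdev_t dev, int rw,
                            unsigned long nr_sectors, int new_io)
{
        unsigned int major = MAJOR(dev);
        unsigned int index = disk_index(dev);

        if (major >= DK_MAX_MAJOR || index >= DK_MAX_DISK)
                return;

        kstat.dk_drive[major][index] += new_io;
        if (rw == READ) {
                kstat.dk_drive_rio[major][index] += new_io;
                kstat.dk_drive_rblk[major][index] += nr_sectors;
        } else {
                kstat.dk_drive_wio[major][index] += new_io;
                kstat.dk_drive_wblk[major][index] += nr_sectors;
        }
}
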
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index a2431cd72..c1d6ff312 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,7 +24,7 @@
#endif
#include <linux/netfilter_ipv4.h>
-#define IPT_FUNCTION_MAXNAMELEN 32
+#define IPT_FUNCTION_MAXNAMELEN 30
#define IPT_TABLE_MAXNAMELEN 32
/* Yes, Virginia, you have to zero the padding. */
@@ -47,13 +47,22 @@ struct ipt_ip {
struct ipt_entry_match
{
- /* Total length */
- u_int16_t match_size;
union {
- /* Used by userspace */
- char name[IPT_FUNCTION_MAXNAMELEN];
- /* Used inside the kernel */
- struct ipt_match *match;
+ struct {
+ u_int16_t match_size;
+
+ /* Used by userspace */
+ char name[IPT_FUNCTION_MAXNAMELEN];
+ } user;
+ struct {
+ u_int16_t match_size;
+
+ /* Used inside the kernel */
+ struct ipt_match *match;
+ } kernel;
+
+ /* Total length */
+ u_int16_t match_size;
} u;
unsigned char data[0];
@@ -61,13 +70,22 @@ struct ipt_entry_match
struct ipt_entry_target
{
- /* Total length */
- u_int16_t target_size;
union {
- /* Used by userspace */
- char name[IPT_FUNCTION_MAXNAMELEN];
- /* Used inside the kernel */
- struct ipt_target *target;
+ struct {
+ u_int16_t target_size;
+
+ /* Used by userspace */
+ char name[IPT_FUNCTION_MAXNAMELEN];
+ } user;
+ struct {
+ u_int16_t target_size;
+
+ /* Used inside the kernel */
+ struct ipt_target *target;
+ } kernel;
+
+ /* Total length */
+ u_int16_t target_size;
} u;
unsigned char data[0];
@@ -286,7 +304,7 @@ ipt_get_target(struct ipt_entry *e)
\
for (__i = sizeof(struct ipt_entry); \
__i < (e)->target_offset; \
- __i += __m->match_size) { \
+ __i += __m->u.match_size) { \
__m = (void *)(e) + __i; \
\
__ret = fn(__m , ## args); \
@@ -421,6 +439,6 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
struct ipt_table *table,
void *userdata);
-#define IPT_ALIGN(s) (((s) + (__alignof__(struct ipt_match)-1)) & ~(__alignof__(struct ipt_match)-1))
+#define IPT_ALIGN(s) (((s) + (__alignof__(struct ipt_entry)-1)) & ~(__alignof__(struct ipt_entry)-1))
#endif /*__KERNEL__*/
#endif /* _IPTABLES_H */
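
The restructured match/target headers give user space and the kernel distinct views of the same leading bytes, with the bare match_size member aliasing both. A hedged sketch of filling the 'user' view (the helper name is hypothetical, and IPT_ALIGN is assumed to be visible to the caller):

#include <string.h>
#include <linux/netfilter_ipv4/ip_tables.h>

/* Sketch: user space fills the 'user' view; once the rules are loaded the
 * kernel reuses the same bytes through the 'kernel' view, replacing the
 * name with the resolved pointer in m->u.kernel.match. */
static void init_match_header(struct ipt_entry_match *m,
                              const char *name, unsigned int data_len)
{
        m->u.match_size = IPT_ALIGN(sizeof(*m) + data_len);
        strncpy(m->u.user.name, name, IPT_FUNCTION_MAXNAMELEN - 1);
        m->u.user.name[IPT_FUNCTION_MAXNAMELEN - 1] = '\0';
}
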
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a88cbc9b8..a1176b978 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -28,6 +28,7 @@
#define PAGE_CACHE_MASK PAGE_MASK
#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
+#define page_cache_get(x) get_page(x)
#define page_cache_alloc() alloc_pages(GFP_HIGHUSER, 0)
#define page_cache_free(x) __free_page(x)
#define page_cache_release(x) __free_page(x)
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index cabc0a8be..c2cabe19b 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -36,6 +36,7 @@
#include <net/checksum.h>
#include <linux/random.h>
#include <linux/locks.h>
+#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <linux/raid/md_compatible.h>
@@ -74,6 +75,7 @@ extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_interrupt_thread (mdk_thread_t *thread);
extern int md_update_sb (mddev_t *mddev);
extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare);
+extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_recover_arrays (void);
extern int md_check_ordering (mddev_t *mddev);
extern void autodetect_raid(void);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index e0fdda3fa..b62d63750 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -162,7 +162,7 @@ struct mdk_rdev_s
kdev_t dev; /* Device number */
kdev_t old_dev; /* "" when it was last imported */
- int size; /* Device size (in blocks) */
+ unsigned long size; /* Device size (in blocks) */
mddev_t *mddev; /* RAID array if running */
unsigned long last_events; /* IO event timestamp */
@@ -170,7 +170,7 @@ struct mdk_rdev_s
struct file filp; /* Lock file */
mdp_super_t *sb;
- int sb_offset;
+ unsigned long sb_offset;
int faulty; /* if faulty do not issue IO requests */
int desc_nr; /* descriptor index in the superblock */
@@ -199,13 +199,17 @@ struct mddev_s
int sb_dirty;
mdu_param_t param;
int ro;
- unsigned int curr_resync;
+ unsigned long curr_resync;
unsigned long resync_start;
char *name;
int recovery_running;
struct semaphore reconfig_sem;
struct semaphore recovery_sem;
struct semaphore resync_sem;
+
+ atomic_t recovery_active;
+ md_wait_queue_head_t recovery_wait;
+
struct md_list_head all_mddevs;
request_queue_t queue;
};
@@ -213,15 +217,11 @@ struct mddev_s
struct mdk_personality_s
{
char *name;
- int (*map)(mddev_t *mddev, kdev_t dev, kdev_t *rdev,
- unsigned long *rsector, unsigned long size);
int (*make_request)(request_queue_t *q, mddev_t *mddev, int rw, struct buffer_head * bh);
void (*end_request)(struct buffer_head * bh, int uptodate);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
int (*status)(char *page, mddev_t *mddev);
- int (*ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
int max_invalid_dev;
int (*error_handler)(mddev_t *mddev, kdev_t dev);
@@ -239,6 +239,7 @@ struct mdk_personality_s
int (*stop_resync)(mddev_t *mddev);
int (*restart_resync)(mddev_t *mddev);
+ int (*sync_request)(mddev_t *mddev, unsigned long block_nr);
};
@@ -339,5 +340,31 @@ typedef struct dev_name_s {
char name [MAX_DISKNAME_LEN];
} dev_name_t;
+
+#define __wait_event_lock_irq(wq, condition, lock) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ spin_unlock_irq(&lock); \
+ schedule(); \
+ spin_lock_irq(&lock); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_lock_irq(wq, condition, lock) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_lock_irq(wq, condition, lock); \
+} while (0)
+
#endif _MD_K_H
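
wait_event_lock_irq() re-tests the condition with the spinlock held, dropping it only around schedule(). A minimal sketch of the intended pattern using the new recovery_active/recovery_wait fields (the lock and helper are illustrative, and the behaviour of md_done_sync() is assumed):

#include <linux/spinlock.h>
#include <linux/raid/md.h>

static spinlock_t resync_lock = SPIN_LOCK_UNLOCKED;   /* illustrative lock */

/* Sketch only: block until every outstanding resync request has completed,
 * assuming md_done_sync() decrements recovery_active and wakes
 * recovery_wait; the macro drops resync_lock around schedule(). */
static void wait_for_resync_drain(mddev_t *mddev)
{
        spin_lock_irq(&resync_lock);
        wait_event_lock_irq(mddev->recovery_wait,
                            atomic_read(&mddev->recovery_active) == 0,
                            resync_lock);
        spin_unlock_irq(&resync_lock);
}
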
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
new file mode 100644
index 000000000..1016bdaa4
--- /dev/null
+++ b/include/linux/raid/raid1.h
@@ -0,0 +1,79 @@
+#ifndef _RAID1_H
+#define _RAID1_H
+
+#include <linux/raid/md.h>
+
+struct mirror_info {
+ int number;
+ int raid_disk;
+ kdev_t dev;
+ int next;
+ int sect_limit;
+
+ /*
+ * State bits:
+ */
+ int operational;
+ int write_only;
+ int spare;
+
+ int used_slot;
+};
+
+struct raid1_private_data {
+ mddev_t *mddev;
+ struct mirror_info mirrors[MD_SB_DISKS];
+ int nr_disks;
+ int raid_disks;
+ int working_disks;
+ int last_used;
+ unsigned long next_sect;
+ int sect_count;
+ mdk_thread_t *thread, *resync_thread;
+ int resync_mirrors;
+ struct mirror_info *spare;
+ md_spinlock_t device_lock;
+
+ /* for use when syncing mirrors: */
+ int start_active, start_ready,
+ start_pending, start_future;
+ int cnt_done, cnt_active, cnt_ready,
+ cnt_pending, cnt_future;
+ int phase;
+ int window;
+ md_wait_queue_head_t wait_done;
+ md_wait_queue_head_t wait_ready;
+ md_spinlock_t segment_lock;
+};
+
+typedef struct raid1_private_data raid1_conf_t;
+
+/*
+ * this is the only point in the RAID code where we violate
+ * C type safety. mddev->private is an 'opaque' pointer.
+ */
+#define mddev_to_conf(mddev) ((raid1_conf_t *) mddev->private)
+
+/*
+ * this is our 'private' 'collective' RAID1 buffer head.
+ * it contains information about what kind of IO operations were started
+ * for this RAID1 operation, and about their status:
+ */
+
+struct raid1_bh {
+ atomic_t remaining; /* 'have we finished' count,
+ * used from IRQ handlers
+ */
+ int cmd;
+ unsigned long state;
+ mddev_t *mddev;
+ struct buffer_head *master_bh;
+ struct buffer_head *mirror_bh [MD_SB_DISKS];
+ struct buffer_head bh_req;
+ struct buffer_head *next_retry;
+};
+/* bits for raid1_bh.state */
+#define R1BH_Uptodate 1
+#define R1BH_SyncPhase 2
+
+#endif
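
As the comment above notes, mddev->private is opaque to the generic MD layer; a personality recovers its private data with mddev_to_conf(). A short hedged example (the helper is hypothetical):

#include <linux/raid/raid1.h>

/* Sketch: recover the RAID1 private data from the generic mddev
 * and read one of its fields. */
static int raid1_working_disks(mddev_t *mddev)
{
        raid1_conf_t *conf = mddev_to_conf(mddev);

        return conf ? conf->working_disks : 0;
}
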
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
new file mode 100644
index 000000000..ab839ea02
--- /dev/null
+++ b/include/linux/raid/raid5.h
@@ -0,0 +1,118 @@
+#ifndef _RAID5_H
+#define _RAID5_H
+
+#include <linux/raid/md.h>
+#include <linux/raid/xor.h>
+
+struct disk_info {
+ kdev_t dev;
+ int operational;
+ int number;
+ int raid_disk;
+ int write_only;
+ int spare;
+ int used_slot;
+};
+
+struct stripe_head {
+ md_spinlock_t stripe_lock;
+ struct stripe_head *hash_next, **hash_pprev; /* hash pointers */
+ struct stripe_head *free_next; /* pool of free sh's */
+ struct buffer_head *buffer_pool; /* pool of free buffers */
+ struct buffer_head *bh_pool; /* pool of free bh's */
+ struct raid5_private_data *raid_conf;
+ struct buffer_head *bh_old[MD_SB_DISKS]; /* disk image */
+ struct buffer_head *bh_new[MD_SB_DISKS]; /* buffers of the MD device (present in buffer cache) */
+ struct buffer_head *bh_copy[MD_SB_DISKS]; /* copy on write of bh_new (bh_new can change from under us) */
+ struct buffer_head *bh_req[MD_SB_DISKS]; /* copy of bh_new (only the buffer heads), queued to the lower levels */
+ int cmd_new[MD_SB_DISKS]; /* READ/WRITE for new */
+ int new[MD_SB_DISKS]; /* buffer added since the last handle_stripe() */
+ unsigned long sector; /* sector of this row */
+ int size; /* buffers size */
+ int pd_idx; /* parity disk index */
+ atomic_t nr_pending; /* nr of pending cmds */
+ unsigned long state; /* state flags */
+ int cmd; /* stripe cmd */
+ atomic_t count; /* nr of waiters */
+ int write_method; /* reconstruct-write / read-modify-write */
+ int phase; /* PHASE_BEGIN, ..., PHASE_COMPLETE */
+ md_wait_queue_head_t wait; /* processes waiting for this stripe */
+
+ int sync_redone;
+};
+
+/*
+ * Phase
+ */
+#define PHASE_BEGIN 0
+#define PHASE_READ_OLD 1
+#define PHASE_WRITE 2
+#define PHASE_READ 3
+#define PHASE_COMPLETE 4
+
+/*
+ * Write method
+ */
+#define METHOD_NONE 0
+#define RECONSTRUCT_WRITE 1
+#define READ_MODIFY_WRITE 2
+
+/*
+ * Stripe state
+ */
+#define STRIPE_LOCKED 0
+#define STRIPE_ERROR 1
+
+/*
+ * Stripe commands
+ */
+#define STRIPE_NONE 0
+#define STRIPE_WRITE 1
+#define STRIPE_READ 2
+#define STRIPE_SYNC 3
+
+struct raid5_private_data {
+ struct stripe_head **stripe_hashtbl;
+ mddev_t *mddev;
+ mdk_thread_t *thread, *resync_thread;
+ struct disk_info disks[MD_SB_DISKS];
+ struct disk_info *spare;
+ int buffer_size;
+ int chunk_size, level, algorithm;
+ int raid_disks, working_disks, failed_disks;
+ int sector_count;
+ unsigned long next_sector;
+ atomic_t nr_handle;
+ struct stripe_head *next_free_stripe;
+ atomic_t nr_stripes;
+ int resync_parity;
+ int max_nr_stripes;
+ int clock;
+ atomic_t nr_hashed_stripes;
+ atomic_t nr_locked_stripes;
+ atomic_t nr_pending_stripes;
+ atomic_t nr_cached_stripes;
+
+ /*
+ * Free stripes pool
+ */
+ atomic_t nr_free_sh;
+ struct stripe_head *free_sh_list;
+ md_wait_queue_head_t wait_for_stripe;
+
+ md_spinlock_t device_lock;
+};
+
+typedef struct raid5_private_data raid5_conf_t;
+
+#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
+
+/*
+ * Our supported algorithms
+ */
+#define ALGORITHM_LEFT_ASYMMETRIC 0
+#define ALGORITHM_RIGHT_ASYMMETRIC 1
+#define ALGORITHM_LEFT_SYMMETRIC 2
+#define ALGORITHM_RIGHT_SYMMETRIC 3
+
+#endif
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
new file mode 100644
index 000000000..c8034b759
--- /dev/null
+++ b/include/linux/raid/xor.h
@@ -0,0 +1,12 @@
+#ifndef _XOR_H
+#define _XOR_H
+
+#include <linux/raid/md.h>
+
+#define MAX_XOR_BLOCKS 4
+
+extern void calibrate_xor_block(void);
+extern void (*xor_block)(unsigned int count,
+ struct buffer_head **bh_ptr);
+
+#endif
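
calibrate_xor_block() is expected to install the fastest available routine into the xor_block pointer. A hedged sketch of a caller; the convention that bh_ptr[0] receives the XOR of the other buffers, and the MAX_XOR_BLOCKS limit on count, are assumptions not stated in the header:

#include <linux/raid/xor.h>

/* Sketch only: XOR the data of bh_ptr[1..count-1] into bh_ptr[0],
 * assuming 2 <= count <= MAX_XOR_BLOCKS. */
static void xor_into_first(struct buffer_head **bh_ptr, unsigned int count)
{
        if (count >= 2 && count <= MAX_XOR_BLOCKS)
                xor_block(count, bh_ptr);
}
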
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 92e44456b..923a7a111 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -511,7 +511,8 @@ enum
enum {
DEV_CDROM=1,
DEV_HWMON=2,
- DEV_PARPORT=3
+ DEV_PARPORT=3,
+ DEV_RAID=4
};
/* /proc/sys/dev/cdrom */
@@ -529,6 +530,12 @@ enum {
DEV_PARPORT_DEFAULT=-3
};
+/* /proc/sys/dev/raid */
+enum {
+ DEV_RAID_SPEED_LIMIT_MIN=1,
+ DEV_RAID_SPEED_LIMIT_MAX=2
+};
+
/* /proc/sys/dev/parport/default */
enum {
DEV_PARPORT_DEFAULT_TIMESLICE=1,