summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/asm-alpha/bitops.h16
-rw-r--r--include/asm-alpha/core_apecs.h82
-rw-r--r--include/asm-alpha/core_cia.h31
-rw-r--r--include/asm-alpha/core_irongate.h23
-rw-r--r--include/asm-alpha/core_lca.h29
-rw-r--r--include/asm-alpha/core_mcpcia.h25
-rw-r--r--include/asm-alpha/core_polaris.h21
-rw-r--r--include/asm-alpha/core_pyxis.h33
-rw-r--r--include/asm-alpha/core_t2.h20
-rw-r--r--include/asm-alpha/core_tsunami.h37
-rw-r--r--include/asm-alpha/hardirq.h2
-rw-r--r--include/asm-alpha/io.h35
-rw-r--r--include/asm-alpha/jensen.h20
-rw-r--r--include/asm-alpha/machvec.h5
-rw-r--r--include/asm-alpha/mmu_context.h4
-rw-r--r--include/asm-alpha/pci.h106
-rw-r--r--include/asm-alpha/pgalloc.h12
-rw-r--r--include/asm-alpha/scatterlist.h15
-rw-r--r--include/asm-alpha/semaphore.h299
-rw-r--r--include/asm-alpha/smp.h3
-rw-r--r--include/asm-alpha/softirq.h125
-rw-r--r--include/asm-alpha/types.h31
-rw-r--r--include/asm-arm/arch-arc/time.h182
-rw-r--r--include/asm-arm/arch-cl7500/time.h97
-rw-r--r--include/asm-arm/arch-ebsa110/time.h55
-rw-r--r--include/asm-arm/arch-ebsa285/time.h118
-rw-r--r--include/asm-arm/arch-nexuspci/time.h25
-rw-r--r--include/asm-arm/arch-rpc/time.h190
-rw-r--r--include/asm-arm/arch-sa1100/system.h1
-rw-r--r--include/asm-arm/checksum.h4
-rw-r--r--include/asm-arm/mmu_context.h3
-rw-r--r--include/asm-arm/pgalloc.h6
-rw-r--r--include/asm-arm/proc-armv/system.h81
-rw-r--r--include/asm-arm/types.h4
-rw-r--r--include/asm-i386/hardirq.h2
-rw-r--r--include/asm-i386/io.h20
-rw-r--r--include/asm-i386/mc146818rtc.h6
-rw-r--r--include/asm-i386/md.h13
-rw-r--r--include/asm-i386/mmu_context.h27
-rw-r--r--include/asm-i386/page.h2
-rw-r--r--include/asm-i386/pgalloc.h35
-rw-r--r--include/asm-i386/pgtable.h31
-rw-r--r--include/asm-i386/softirq.h126
-rw-r--r--include/asm-i386/spinlock.h1
-rw-r--r--include/asm-ia64/a.out.h38
-rw-r--r--include/asm-ia64/acpi-ext.h110
-rw-r--r--include/asm-ia64/atomic.h100
-rw-r--r--include/asm-ia64/bitops.h241
-rw-r--r--include/asm-ia64/break.h21
-rw-r--r--include/asm-ia64/bugs.h19
-rw-r--r--include/asm-ia64/byteorder.h40
-rw-r--r--include/asm-ia64/cache.h12
-rw-r--r--include/asm-ia64/checksum.h99
-rw-r--r--include/asm-ia64/current.h13
-rw-r--r--include/asm-ia64/delay.h90
-rw-r--r--include/asm-ia64/div64.h20
-rw-r--r--include/asm-ia64/dma.h36
-rw-r--r--include/asm-ia64/efi.h233
-rw-r--r--include/asm-ia64/elf.h89
-rw-r--r--include/asm-ia64/errno.h139
-rw-r--r--include/asm-ia64/fcntl.h73
-rw-r--r--include/asm-ia64/fpswa.h75
-rw-r--r--include/asm-ia64/fpu.h65
-rw-r--r--include/asm-ia64/hardirq.h76
-rw-r--r--include/asm-ia64/hdreg.h12
-rw-r--r--include/asm-ia64/ia32.h313
-rw-r--r--include/asm-ia64/ide.h115
-rw-r--r--include/asm-ia64/io.h446
-rw-r--r--include/asm-ia64/ioctl.h77
-rw-r--r--include/asm-ia64/ioctls.h87
-rw-r--r--include/asm-ia64/iosapic.h123
-rw-r--r--include/asm-ia64/ipc.h31
-rw-r--r--include/asm-ia64/ipcbuf.h28
-rw-r--r--include/asm-ia64/irq.h120
-rw-r--r--include/asm-ia64/kdbsupport.h252
-rw-r--r--include/asm-ia64/keyboard.h70
-rw-r--r--include/asm-ia64/linux_logo.h49
-rw-r--r--include/asm-ia64/machvec.h108
-rw-r--r--include/asm-ia64/machvec_dig.h22
-rw-r--r--include/asm-ia64/machvec_hpsim.h20
-rw-r--r--include/asm-ia64/machvec_init.h9
-rw-r--r--include/asm-ia64/machvec_sn1.h20
-rw-r--r--include/asm-ia64/mca.h143
-rw-r--r--include/asm-ia64/mca_asm.h299
-rw-r--r--include/asm-ia64/mman.h38
-rw-r--r--include/asm-ia64/mmu_context.h143
-rw-r--r--include/asm-ia64/msgbuf.h27
-rw-r--r--include/asm-ia64/namei.h17
-rw-r--r--include/asm-ia64/offsets.h37
-rw-r--r--include/asm-ia64/page.h134
-rw-r--r--include/asm-ia64/pal.h1324
-rw-r--r--include/asm-ia64/param.h45
-rw-r--r--include/asm-ia64/pci.h148
-rw-r--r--include/asm-ia64/pgalloc.h257
-rw-r--r--include/asm-ia64/pgtable.h390
-rw-r--r--include/asm-ia64/poll.h30
-rw-r--r--include/asm-ia64/posix_types.h121
-rw-r--r--include/asm-ia64/processor.h786
-rw-r--r--include/asm-ia64/ptrace.h240
-rw-r--r--include/asm-ia64/ptrace_offsets.h216
-rw-r--r--include/asm-ia64/resource.h48
-rw-r--r--include/asm-ia64/rse.h66
-rw-r--r--include/asm-ia64/sal.h486
-rw-r--r--include/asm-ia64/scatterlist.h21
-rw-r--r--include/asm-ia64/segment.h6
-rw-r--r--include/asm-ia64/semaphore.h330
-rw-r--r--include/asm-ia64/sembuf.h22
-rw-r--r--include/asm-ia64/serial.h135
-rw-r--r--include/asm-ia64/shmbuf.h38
-rw-r--r--include/asm-ia64/shmparam.h6
-rw-r--r--include/asm-ia64/sigcontext.h50
-rw-r--r--include/asm-ia64/siginfo.h202
-rw-r--r--include/asm-ia64/signal.h162
-rw-r--r--include/asm-ia64/smp.h100
-rw-r--r--include/asm-ia64/smplock.h54
-rw-r--r--include/asm-ia64/socket.h49
-rw-r--r--include/asm-ia64/sockios.h18
-rw-r--r--include/asm-ia64/softirq.h152
-rw-r--r--include/asm-ia64/spinlock.h97
-rw-r--r--include/asm-ia64/stat.h28
-rw-r--r--include/asm-ia64/statfs.h27
-rw-r--r--include/asm-ia64/string.h15
-rw-r--r--include/asm-ia64/system.h471
-rw-r--r--include/asm-ia64/termbits.h179
-rw-r--r--include/asm-ia64/termios.h112
-rw-r--r--include/asm-ia64/timex.h23
-rw-r--r--include/asm-ia64/types.h94
-rw-r--r--include/asm-ia64/uaccess.h366
-rw-r--r--include/asm-ia64/unaligned.h123
-rw-r--r--include/asm-ia64/unistd.h305
-rw-r--r--include/asm-ia64/unwind.h77
-rw-r--r--include/asm-ia64/user.h57
-rw-r--r--include/asm-ia64/vga.h22
-rw-r--r--include/asm-m68k/mmu_context.h3
-rw-r--r--include/asm-m68k/pgalloc.h5
-rw-r--r--include/asm-mips/hardirq.h3
-rw-r--r--include/asm-mips/irq.h8
-rw-r--r--include/asm-mips/md.h14
-rw-r--r--include/asm-mips/mmu_context.h6
-rw-r--r--include/asm-mips/pgalloc.h5
-rw-r--r--include/asm-mips/pgtable.h7
-rw-r--r--include/asm-mips/softirq.h104
-rw-r--r--include/asm-mips64/hardirq.h3
-rw-r--r--include/asm-mips64/mmu_context.h6
-rw-r--r--include/asm-mips64/offset.h44
-rw-r--r--include/asm-mips64/pgalloc.h4
-rw-r--r--include/asm-mips64/pgtable.h7
-rw-r--r--include/asm-mips64/softirq.h88
-rw-r--r--include/asm-ppc/bitops.h6
-rw-r--r--include/asm-ppc/bootinfo.h8
-rw-r--r--include/asm-ppc/feature.h23
-rw-r--r--include/asm-ppc/heathrow.h45
-rw-r--r--include/asm-ppc/irq.h5
-rw-r--r--include/asm-ppc/machdep.h2
-rw-r--r--include/asm-ppc/mediabay.h1
-rw-r--r--include/asm-ppc/mmu.h57
-rw-r--r--include/asm-ppc/mmu_context.h3
-rw-r--r--include/asm-ppc/ohare.h17
-rw-r--r--include/asm-ppc/pci.h52
-rw-r--r--include/asm-ppc/pgtable.h94
-rw-r--r--include/asm-ppc/processor.h5
-rw-r--r--include/asm-ppc/prom.h5
-rw-r--r--include/asm-ppc/semaphore.h96
-rw-r--r--include/asm-ppc/types.h4
-rw-r--r--include/asm-ppc/vga.h38
-rw-r--r--include/asm-sh/mmu_context.h3
-rw-r--r--include/asm-sh/pgtable.h4
-rw-r--r--include/asm-sparc/bitops.h12
-rw-r--r--include/asm-sparc/hardirq.h5
-rw-r--r--include/asm-sparc/mmu_context.h4
-rw-r--r--include/asm-sparc/pci.h85
-rw-r--r--include/asm-sparc/pgalloc.h6
-rw-r--r--include/asm-sparc/pgtable.h19
-rw-r--r--include/asm-sparc/softirq.h155
-rw-r--r--include/asm-sparc64/bitops.h12
-rw-r--r--include/asm-sparc64/hardirq.h5
-rw-r--r--include/asm-sparc64/io.h35
-rw-r--r--include/asm-sparc64/mmu_context.h6
-rw-r--r--include/asm-sparc64/pgalloc.h22
-rw-r--r--include/asm-sparc64/posix_types.h3
-rw-r--r--include/asm-sparc64/softirq.h113
-rw-r--r--include/linux/acpi.h9
-rw-r--r--include/linux/adfs_fs.h163
-rw-r--r--include/linux/adfs_fs_i.h8
-rw-r--r--include/linux/adfs_fs_sb.h41
-rw-r--r--include/linux/affs_fs.h6
-rw-r--r--include/linux/affs_fs_i.h1
-rw-r--r--include/linux/agp_backend.h1
-rw-r--r--include/linux/atm.h48
-rw-r--r--include/linux/atm_eni.h10
-rw-r--r--include/linux/atm_idt77105.h40
-rw-r--r--include/linux/atm_nicstar.h3
-rw-r--r--include/linux/atm_tcp.h10
-rw-r--r--include/linux/atm_zatm.h3
-rw-r--r--include/linux/atmapi.h29
-rw-r--r--include/linux/atmarp.h3
-rw-r--r--include/linux/atmdev.h38
-rw-r--r--include/linux/atmioc.h4
-rw-r--r--include/linux/atmlec.h21
-rw-r--r--include/linux/atmmpc.h13
-rw-r--r--include/linux/atmsap.h21
-rw-r--r--include/linux/atmsvc.h13
-rw-r--r--include/linux/auto_fs.h45
-rw-r--r--include/linux/auto_fs4.h47
-rw-r--r--include/linux/bfs_fs.h1
-rw-r--r--include/linux/bfs_fs_sb.h8
-rw-r--r--include/linux/blkdev.h39
-rw-r--r--include/linux/bootmem.h3
-rw-r--r--include/linux/coda.h38
-rw-r--r--include/linux/coda_fs_i.h2
-rw-r--r--include/linux/coda_linux.h4
-rw-r--r--include/linux/coda_psdev.h1
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/efs_fs.h2
-rw-r--r--include/linux/ext2_fs.h7
-rw-r--r--include/linux/fs.h70
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/hfs_fs_i.h1
-rw-r--r--include/linux/hpfs_fs_i.h1
-rw-r--r--include/linux/i2c-id.h45
-rw-r--r--include/linux/i2c.h109
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/interrupt.h235
-rw-r--r--include/linux/iso_fs.h6
-rw-r--r--include/linux/kbd_kern.h4
-rw-r--r--include/linux/linkage.h2
-rw-r--r--include/linux/lp.h2
-rw-r--r--include/linux/mc146818rtc.h15
-rw-r--r--include/linux/md.h300
-rw-r--r--include/linux/minix_fs.h5
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/msdos_fs.h5
-rw-r--r--include/linux/msdos_fs_i.h2
-rw-r--r--include/linux/netdevice.h166
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/ntfs_fs_i.h1
-rw-r--r--include/linux/openpic.h6
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/pci.h16
-rw-r--r--include/linux/pm.h24
-rw-r--r--include/linux/pmu.h3
-rw-r--r--include/linux/ppp_channel.h5
-rw-r--r--include/linux/proc_fs.h1
-rw-r--r--include/linux/qnx4_fs.h1
-rw-r--r--include/linux/qnx4_fs_i.h1
-rw-r--r--include/linux/raid/linear.h32
-rw-r--r--include/linux/raid/md.h94
-rw-r--r--include/linux/raid/md_compatible.h160
-rw-r--r--include/linux/raid/md_k.h341
-rw-r--r--include/linux/raid/md_p.h161
-rw-r--r--include/linux/raid/md_u.h115
-rw-r--r--include/linux/raid/raid0.h33
-rw-r--r--include/linux/raid0.h27
-rw-r--r--include/linux/rtc.h27
-rw-r--r--include/linux/rtnetlink.h17
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/shm.h1
-rw-r--r--include/linux/smb_fs.h1
-rw-r--r--include/linux/sonet.h22
-rw-r--r--include/linux/spinlock.h3
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/svcsock.h1
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/sysv_fs.h4
-rw-r--r--include/linux/timer.h20
-rw-r--r--include/linux/tqueue.h3
-rw-r--r--include/linux/udf_167.h3
-rw-r--r--include/linux/ufs_fs.h4
-rw-r--r--include/linux/umsdos_fs.h1
-rw-r--r--include/linux/vt_buffer.h2
-rw-r--r--include/net/atmclip.h6
-rw-r--r--include/net/dsfield.h4
-rw-r--r--include/net/irda/nsc-ircc.h3
-rw-r--r--include/net/irda/smc-ircc.h3
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/pkt_sched.h66
-rw-r--r--include/net/snmp.h2
-rw-r--r--include/net/sock.h12
-rw-r--r--include/net/tcp.h34
-rw-r--r--include/scsi/sg.h390
-rw-r--r--include/video/macmodes.h1
283 files changed, 15278 insertions, 3135 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index c9e7e7aee..c590d9e51 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -76,8 +76,8 @@ extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_set_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -101,8 +101,8 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -126,8 +126,8 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_change_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -149,9 +149,9 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_bit(int nr, volatile void * addr)
+extern __inline__ int test_bit(int nr, volatile void * addr)
{
- return 1UL & (((const int *) addr)[nr >> 5] >> (nr & 31));
+ return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h
index f9796c1d5..927aa2ea7 100644
--- a/include/asm-alpha/core_apecs.h
+++ b/include/asm-alpha/core_apecs.h
@@ -1,7 +1,6 @@
#ifndef __ALPHA_APECS__H__
#define __ALPHA_APECS__H__
-#include <linux/config.h>
#include <linux/types.h>
#include <asm/compiler.h>
@@ -66,19 +65,10 @@
for most other things they are identical. It didn't seem reasonable to
make the AVANTI support pay for the limitations of the XL. It is true,
however, that an XL kernel will run on an AVANTI without problems.
-*/
-#define APECS_XL_DMA_WIN1_BASE (64UL*1024*1024)
-#define APECS_XL_DMA_WIN1_SIZE (64UL*1024*1024)
-#define APECS_XL_DMA_WIN1_SIZE_PARANOID (48UL*1024*1024)
-#define APECS_XL_DMA_WIN2_BASE (1UL*1024*1024*1024)
-#define APECS_XL_DMA_WIN2_SIZE (1UL*1024*1024*1024)
-
-
-/* These are for normal APECS family machines, AVANTI/MUSTANG/EB64/PC64. */
-
-#define APECS_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define APECS_DMA_WIN_SIZE (1UL*1024*1024*1024)
+ %%% All of this should be obviated by the ability to route
+ everything through the iommu.
+*/
/*
* 21071-DA Control and Status registers.
@@ -370,64 +360,6 @@ struct el_apecs_procdata
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-/*
- * NOTE: we fudge the window 1 maximum as 48Mb instead of 64Mb, to prevent
- * virt_to_bus() from returning an address in the first window, for a
- * data area that goes beyond the 64Mb first DMA window. Sigh...
- * This MUST match with <asm/dma.h> MAX_DMA_ADDRESS for consistency, but
- * we can't just use that here, because of header file looping... :-(
- */
-
-__EXTERN_INLINE unsigned long apecs_virt_to_bus(void * address)
-{
- unsigned long paddr = virt_to_phys(address);
- return paddr + APECS_DMA_WIN_BASE;
-}
-
-static inline unsigned long apecs_xl_virt_to_bus(void * address)
-{
- unsigned long paddr = virt_to_phys(address);
- if (paddr < APECS_XL_DMA_WIN1_SIZE_PARANOID)
- return paddr + APECS_XL_DMA_WIN1_BASE;
- else
- return paddr + APECS_XL_DMA_WIN2_BASE;
-}
-
-__EXTERN_INLINE void * apecs_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < APECS_DMA_WIN_BASE)
- return 0;
- return phys_to_virt(address - APECS_DMA_WIN_BASE);
-}
-
-static inline void * apecs_xl_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < APECS_XL_DMA_WIN1_BASE)
- return 0;
- else if (address < (APECS_XL_DMA_WIN1_BASE + APECS_XL_DMA_WIN1_SIZE))
- address -= APECS_XL_DMA_WIN1_BASE;
- else /* should be more checking here, maybe? */
- address -= APECS_XL_DMA_WIN2_BASE;
- return phys_to_virt(address);
-}
-
-/*
* I/O functions:
*
* Unlike Jensen, the APECS machines have no concept of local
@@ -579,14 +511,6 @@ __EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#ifdef CONFIG_ALPHA_XL
-#define virt_to_bus apecs_xl_virt_to_bus
-#define bus_to_virt apecs_xl_bus_to_virt
-#else
-#define virt_to_bus apecs_virt_to_bus
-#define bus_to_virt apecs_bus_to_virt
-#endif
-
#define __inb apecs_inb
#define __inw apecs_inw
#define __inl apecs_inl
diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h
index 34b2f03b8..418aa3492 100644
--- a/include/asm-alpha/core_cia.h
+++ b/include/asm-alpha/core_cia.h
@@ -77,20 +77,6 @@
#define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define CIA_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define CIA_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-/* Window 0 at 1GB size 1GB mapping to 0. */
-#define CIA_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-/* Window 1 at 2GB size 1GB mapping to 1GB. */
-#define CIA_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define CIA_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/*
* 21171-CA Control and Status Registers (p4-1)
*/
@@ -289,21 +275,6 @@ struct el_CIA_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long cia_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + CIA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * cia_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - CIA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
@@ -491,8 +462,6 @@ __EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus cia_virt_to_bus
-#define bus_to_virt cia_bus_to_virt
#define __inb cia_inb
#define __inw cia_inw
#define __inl cia_inl
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
index ae9aaaa85..5c30feb37 100644
--- a/include/asm-alpha/core_irongate.h
+++ b/include/asm-alpha/core_irongate.h
@@ -22,13 +22,8 @@
* and I/O address space. Memory address space resides in the lower
* half of the physical address space (PA[43]=0) and I/O address space
* resides in the upper half of the physical address space (PA[43]=1).
- *
*/
-#define IRONGATE_DMA_WIN_BASE (0UL)
-#define IRONGATE_DMA_WIN_SIZE (0UL)
-
-
/*
* Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
* through the routines given is 32-bit.
@@ -393,21 +388,6 @@ struct el_IRONGATE_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long irongate_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + IRONGATE_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * irongate_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - IRONGATE_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
@@ -513,9 +493,6 @@ __EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus irongate_virt_to_bus
-#define bus_to_virt irongate_bus_to_virt
-
#define __inb irongate_inb
#define __inw irongate_inw
#define __inl irongate_inl
diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h
index 644512cba..6ba2bbd83 100644
--- a/include/asm-alpha/core_lca.h
+++ b/include/asm-alpha/core_lca.h
@@ -55,10 +55,6 @@
* ugh).
*/
-#define LCA_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define LCA_DMA_WIN_SIZE (1UL*1024*1024*1024)
-
-
/*
* Memory Controller registers:
*/
@@ -209,29 +205,6 @@ union el_lca {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long lca_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + LCA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * lca_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < LCA_DMA_WIN_BASE)
- return 0;
- return phys_to_virt(address - LCA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* Unlike Jensen, the Noname machines have no concept of local
@@ -387,8 +360,6 @@ __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus lca_virt_to_bus
-#define bus_to_virt lca_bus_to_virt
#define __inb lca_inb
#define __inw lca_inw
#define __inl lca_inl
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 577704129..fb100e508 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -72,13 +72,10 @@
*
*/
-#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
-
-#define MCPCIA_DMA_WIN_BASE (2UL*1024*1024*1024)
-#define MCPCIA_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
#define MCPCIA_MID(m) ((unsigned long)(m) << 33)
+#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
+
/*
* Memory spaces:
*/
@@ -198,21 +195,6 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long mcpcia_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + MCPCIA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * mcpcia_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - MCPCIA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164)
@@ -452,9 +434,6 @@ __EXTERN_INLINE void mcpcia_writeq(unsigned long b, unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus mcpcia_virt_to_bus
-#define bus_to_virt mcpcia_bus_to_virt
-
#define __inb mcpcia_inb
#define __inw mcpcia_inw
#define __inl mcpcia_inl
diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h
index da53edc22..626b24a22 100644
--- a/include/asm-alpha/core_polaris.h
+++ b/include/asm-alpha/core_polaris.h
@@ -36,14 +36,6 @@
#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4)
#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6)
-/* No HAE address. Polaris has no concept of an HAE, since it
- * supports transfers of all sizes in dense space.
- */
-
-#define POLARIS_DMA_WIN_BASE 0x80000000UL /* fixed, 2G @ 2G */
-#define POLARIS_DMA_WIN_SIZE 0x80000000UL /* fixed, 2G @ 2G */
-
-
/*
* Data structure for handling POLARIS machine checks:
*/
@@ -61,16 +53,6 @@ struct el_POLARIS_sysdata_mcheck {
#define __IO_EXTERN_INLINE
#endif
-__EXTERN_INLINE unsigned long polaris_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + POLARIS_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * polaris_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - POLARIS_DMA_WIN_BASE);
-}
-
/*
* I/O functions:
*
@@ -188,9 +170,6 @@ __EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus polaris_virt_to_bus
-#define bus_to_virt polaris_bus_to_virt
-
#define __inb polaris_inb
#define __inw polaris_inw
#define __inl polaris_inl
diff --git a/include/asm-alpha/core_pyxis.h b/include/asm-alpha/core_pyxis.h
index 2850a949c..18b523ef4 100644
--- a/include/asm-alpha/core_pyxis.h
+++ b/include/asm-alpha/core_pyxis.h
@@ -71,20 +71,6 @@
#define PYXIS_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define PYXIS_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define PYXIS_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-/* Window 0 at 1GB size 1GB mapping 0 */
-#define PYXIS_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-/* Window 0 at 2GB size 1GB mapping 1GB */
-#define PYXIS_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define PYXIS_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/*
* General Registers
*/
@@ -272,22 +258,6 @@ struct el_PYXIS_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long pyxis_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * pyxis_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - PYXIS_DMA_WIN_BASE);
-}
-
-
-/*
* I/O functions:
*
* PYXIS, the 21174 PCI/memory support chipset for the EV56 (21164A)
@@ -430,9 +400,6 @@ __EXTERN_INLINE int pyxis_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus pyxis_virt_to_bus
-#define bus_to_virt pyxis_bus_to_virt
-
#define __inb pyxis_inb
#define __inw pyxis_inw
#define __inl pyxis_inl
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index 933be1714..d11506cd5 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -21,9 +21,6 @@
#define T2_MEM_R1_MASK 0x03ffffff /* Mem sparse region 1 mask is 26 bits */
-#define T2_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define T2_DMA_WIN_SIZE (1UL*1024*1024*1024)
-
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
#define _GAMMA_BIAS 0x8000000000UL
@@ -322,21 +319,6 @@ struct el_t2_frame_corrected {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long t2_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + T2_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * t2_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - T2_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* T2 (the core logic PCI/memory support chipset for the SABLE
@@ -534,8 +516,6 @@ __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus t2_virt_to_bus
-#define bus_to_virt t2_bus_to_virt
#define __inb t2_inb
#define __inw t2_inw
#define __inl t2_inl
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
index 5bfa4d658..ffbcc5c34 100644
--- a/include/asm-alpha/core_tsunami.h
+++ b/include/asm-alpha/core_tsunami.h
@@ -15,26 +15,6 @@
*
*/
-/*
- * We must actually use 2 windows to direct-map the 2GB space, because
- * of an "idiot-syncracy" of the CYPRESS chip used on DS20 and others.
- * It may respond to a PCI bus address in the last 1MB of the 4GB
- * address range, and that is where real memory may appear.
- *
- * Sigh...
- */
-#define TSUNAMI_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-#define TSUNAMI_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-#define TSUNAMI_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/* XXX: Do we need to conditionalize on this? */
#ifdef USE_48_BIT_KSEG
#define TS_BIAS 0x80000000000UL
@@ -309,20 +289,6 @@ struct el_TSUNAMI_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-__EXTERN_INLINE unsigned long tsunami_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + TSUNAMI_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * tsunami_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - TSUNAMI_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264)
@@ -439,9 +405,6 @@ __EXTERN_INLINE void tsunami_writeq(unsigned long b, unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus tsunami_virt_to_bus
-#define bus_to_virt tsunami_bus_to_virt
-
#define __inb tsunami_inb
#define __inw tsunami_inw
#define __inl tsunami_inl
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 67544e13d..86e895b83 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -23,6 +23,8 @@ extern int __local_irq_count;
(local_irq_count(__cpu) + local_bh_count(__cpu)) != 0; \
})
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 33c7b0006..59eeb9545 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -51,7 +51,7 @@ static inline void set_hae(unsigned long new_hae)
/*
* Change virtual addresses to physical addresses and vv.
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(void *address)
{
return (unsigned long)address - IDENT_ADDR;
}
@@ -61,6 +61,36 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
+/*
+ * Change addresses as seen by the kernel (virtual) to addresses as
+ * seen by a device (bus), and vice versa.
+ *
+ * Note that this only works for a limited range of kernel addresses,
+ * and very well may not span all memory. Consider this interface
+ * deprecated in favour of the mapping functions in <asm/pci.h>.
+ */
+extern unsigned long __direct_map_base;
+extern unsigned long __direct_map_size;
+
+static inline unsigned long virt_to_bus(void *address)
+{
+ unsigned long phys = virt_to_phys(address);
+ unsigned long bus = phys + __direct_map_base;
+ return phys <= __direct_map_size ? bus : 0;
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+ void *virt;
+
+ /* This check is a sanity check but also ensures that bus address 0
+ maps to virtual address 0 which is useful to detect null pointers
+ (the NCR driver is much simpler if NULL pointers are preserved). */
+ address -= __direct_map_base;
+ virt = phys_to_virt(address);
+ return (long)address <= 0 ? NULL : virt;
+}
+
#else /* !__KERNEL__ */
/*
@@ -82,9 +112,6 @@ extern void _sethae (unsigned long addr); /* cached version */
/* In a generic kernel, we always go through the machine vector. */
-# define virt_to_bus(a) alpha_mv.mv_virt_to_bus(a)
-# define bus_to_virt(a) alpha_mv.mv_bus_to_virt(a)
-
# define __inb alpha_mv.mv_inb
# define __inw alpha_mv.mv_inw
# define __inl alpha_mv.mv_inl
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
index 826dfc54d..4ebbb2102 100644
--- a/include/asm-alpha/jensen.h
+++ b/include/asm-alpha/jensen.h
@@ -80,24 +80,6 @@
#endif
/*
- * Change virtual addresses to bus addresses and vv.
- *
- * NOTE! On the Jensen, the physical address is the same
- * as the bus address, but this is not necessarily true on
- * other alpha hardware.
- */
-__EXTERN_INLINE unsigned long jensen_virt_to_bus(void * address)
-{
- return virt_to_phys(address);
-}
-
-__EXTERN_INLINE void * jensen_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address);
-}
-
-
-/*
* Handle the "host address register". This needs to be set
* to the high 7 bits of the EISA address. This is also needed
* for EISA IO addresses, which are only 16 bits wide (the
@@ -306,8 +288,6 @@ __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus jensen_virt_to_bus
-#define bus_to_virt jensen_bus_to_virt
#define __inb jensen_inb
#define __inw jensen_inw
#define __inl jensen_inl
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 2c2f82bbd..ccbed15fc 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -21,6 +21,7 @@ struct vm_area_struct;
struct linux_hose_info;
struct pci_dev;
struct pci_ops;
+struct pci_controler;
struct alpha_machine_vector
{
@@ -39,8 +40,8 @@ struct alpha_machine_vector
unsigned long min_io_address;
unsigned long min_mem_address;
- unsigned long (*mv_virt_to_bus)(void *);
- void * (*mv_bus_to_virt)(unsigned long);
+ void (*mv_pci_tbi)(struct pci_controler *hose,
+ dma_addr_t start, dma_addr_t end);
unsigned int (*mv_inb)(unsigned long);
unsigned int (*mv_inw)(unsigned long);
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 18e316751..1d150d523 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -22,6 +22,10 @@
#include <asm/io.h>
#endif
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
extern inline unsigned long
__reload_thread(struct thread_struct *pcb)
{
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index cc4ecb4bb..f5a9e09b8 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -1,27 +1,49 @@
#ifndef __ALPHA_PCI_H
#define __ALPHA_PCI_H
+#include <linux/spinlock.h>
+#include <asm/scatterlist.h>
#include <asm/machvec.h>
/*
* The following structure is used to manage multiple PCI busses.
*/
+struct pci_dev;
struct pci_bus;
struct resource;
+/* A PCI IOMMU allocation arena. There are typically two of these
+ regions per bus. */
+/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
+ lives directly on the host bridge (no tlb?). We don't support this
+ machine, but if we ever did, we'd need to parameterize all this quite
+ a bit further. Probably with per-bus operation tables. */
+
+struct pci_iommu_arena
+{
+ spinlock_t lock;
+ unsigned long *ptes;
+ dma_addr_t dma_base;
+ unsigned int size;
+ unsigned int alloc_hint;
+};
+
+/* A controller.  Used to manage multiple PCI busses. */
+
struct pci_controler {
- /* Mandated. */
struct pci_controler *next;
struct pci_bus *bus;
struct resource *io_space;
struct resource *mem_space;
- /* Alpha specific. */
unsigned long config_space;
unsigned int index;
unsigned int first_busno;
unsigned int last_busno;
+
+ struct pci_iommu_arena *sg_pci;
+ struct pci_iommu_arena *sg_isa;
};
/* Override the logic in pci_scan_bus for skipping already-configured
@@ -32,5 +54,83 @@ struct pci_controler {
#define PCIBIOS_MIN_IO alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
-#endif /* __ALPHA_PCI_H */
+/* IOMMU controls. */
+
+/* Allocate and map kernel buffer using consistent mode DMA for PCI
+ device. Returns non-NULL cpu-view pointer to the buffer if
+ successful and sets *DMA_ADDRP to the pci side dma address as well,
+ else DMA_ADDRP is undefined. */
+
+extern void *pci_alloc_consistent(struct pci_dev *, long, dma_addr_t *);
+
+/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
+   be values that were returned from pci_alloc_consistent.  SIZE must
+   be the same as what was passed into pci_alloc_consistent.
+   References to the memory and mappings associated with CPU_ADDR or
+   DMA_ADDR past this call are illegal. */
+
+extern void pci_free_consistent(struct pci_dev *, long, void *, dma_addr_t);
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming
+ mode. The 32-bit PCI bus mastering address to use is returned.
+ Once the device is given the dma address, the device owns this memory
+ until either pci_unmap_single or pci_sync_single is performed. */
+
+extern dma_addr_t pci_map_single(struct pci_dev *, void *, long);
+
+/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
+ SIZE must match what was provided for in a previous pci_map_single
+ call. All other usages are undefined. After this call, reads by
+   the cpu to the buffer are guaranteed to see whatever the device
+ wrote there. */
+extern void pci_unmap_single(struct pci_dev *, dma_addr_t, long);
+
+/* Map a set of buffers described by scatterlist in streaming mode for
+   PCI DMA.  This is the scatter-gather version of the above
+ pci_map_single interface. Here the scatter gather list elements
+ are each tagged with the appropriate PCI dma address and length.
+ They are obtained via sg_dma_{address,length}(SG).
+
+ NOTE: An implementation may be able to use a smaller number of DMA
+ address/length pairs than there are SG table elements. (for
+ example via virtual mapping capabilities) The routine returns the
+ number of addr/length pairs actually used, at most nents.
+
+ Device ownership issues as mentioned above for pci_map_single are
+ the same here. */
+
+extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int);
+
+/* Unmap a set of streaming mode DMA translations. Again, cpu read
+ rules concerning calls here are the same as for pci_unmap_single()
+ above. */
+
+extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int);
+
+/* Make physical memory consistent for a single streaming mode DMA
+ translation after a transfer.
+
+ If you perform a pci_map_single() but wish to interrogate the
+ buffer using the cpu, yet do not wish to teardown the PCI dma
+ mapping, you must call this function before doing so. At the next
+ point you give the PCI dma address back to the card, the device
+ again owns the buffer. */
+
+extern inline void
+pci_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size)
+{
+ /* Nothing to do. */
+}
+
+/* Make physical memory consistent for a set of streaming mode DMA
+ translations after a transfer. The same as pci_dma_sync_single but
+ for a scatter-gather list, same rules and usage. */
+
+extern inline void
+pci_sync_sg(struct pci_dev *dev, struct scatterlist *sg, int size)
+{
+ /* Nothing to do. */
+}
+
+#endif /* __ALPHA_PCI_H */
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index 5ea0193be..ae10466b8 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -145,6 +145,18 @@ static inline void flush_tlb_range(struct mm_struct *mm,
flush_tlb_mm(mm);
}
+/*
+ * Flush a specified range of user mapping page tables
+ * from TLB.
+ * Although Alpha uses VPTE caches, this can be a nop, as Alpha does
+ * not have finegrained tlb flushing, so it will flush VPTE stuff
+ * during next flush_tlb_range.
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
#else /* __SMP__ */
extern void flush_tlb_all(void);
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
index 98718797b..b7dad47f3 100644
--- a/include/asm-alpha/scatterlist.h
+++ b/include/asm-alpha/scatterlist.h
@@ -1,13 +1,20 @@
#ifndef _ALPHA_SCATTERLIST_H
#define _ALPHA_SCATTERLIST_H
+#include <linux/types.h>
+
struct scatterlist {
- char * address; /* Location data is to be transferred to */
- char * alt_address; /* Location of actual if address is a
- * dma indirect buffer. NULL otherwise */
- unsigned int length;
+ char *address; /* Source/target vaddr. */
+ char *alt_address; /* Location of actual if address is a
+ dma indirect buffer, else NULL. */
+ dma_addr_t dma_address;
+ unsigned int length;
+ unsigned int dma_length;
};
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->dma_length)
+
#define ISA_DMA_THRESHOLD (~0UL)
#endif /* !(_ALPHA_SCATTERLIST_H) */
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index 255888e8a..6cf9873f5 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -5,7 +5,7 @@
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996 Richard Henderson
+ * (C) Copyright 1996, 2000 Richard Henderson
*/
#include <asm/current.h>
@@ -42,7 +42,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+extern inline void sema_init(struct semaphore *sem, int val)
{
/*
* Logically,
@@ -110,8 +110,8 @@ extern inline void down(struct semaphore * sem)
__asm__ __volatile__ (
"/* semaphore down operation */\n"
"1: ldl_l $24,%1\n"
+ " subl $24,1,$28\n"
" subl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%1\n"
" beq $28,2f\n"
" blt $24,3f\n"
@@ -146,8 +146,8 @@ extern inline int down_interruptible(struct semaphore * sem)
__asm__ __volatile__ (
"/* semaphore down interruptible operation */\n"
"1: ldl_l $24,%2\n"
+ " subl $24,1,$28\n"
" subl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%2\n"
" beq $28,2f\n"
" blt $24,3f\n"
@@ -247,11 +247,10 @@ extern inline void up(struct semaphore * sem)
"/* semaphore up operation */\n"
" mb\n"
"1: ldl_l $24,%1\n"
+ " addl $24,1,$28\n"
" addl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%1\n"
" beq $28,2f\n"
- " mb\n"
" ble $24,3f\n"
"4:\n"
".section .text2,\"ax\"\n"
@@ -266,4 +265,292 @@ extern inline void up(struct semaphore * sem)
: "$24", "$28", "memory");
}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * The lock is initialized to BIAS. This way, a writer
+ * subtracts BIAS and gets 0 for the case of an uncontended
+ * lock. Readers decrement by 1 and see a positive value
+ * when uncontended, negative if there are writers waiting
+ * (in which case it goes to sleep).
+ *
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that subtracting
+ * BIAS once per CPU will result in the int remaining
+ * negative.
+ * In terms of fairness, this should result in the lock
+ * flopping back and forth between readers and writers
+ * under heavy use.
+ *
+ * -ben
+ *
+ * Once we start supporting machines with more than 128 CPUs,
+ * we should go for using a 64bit atomic type instead of 32bit
+ * as counter. We shall probably go for bias 0x80000000 then,
+ * so that single sethi can set it.
+ *
+ * -jj
+ */
+
+#define RW_LOCK_BIAS 0x01000000
+
+struct rw_semaphore {
+ int count;
+ /* bit 0 means read bias granted;
+ bit 1 means write bias granted. */
+ unsigned granted;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+ { (count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) \
+ __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) \
+ __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) \
+ __DECLARE_RWSEM_GENERIC(name, 0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RW_LOCK_BIAS;
+ sem->granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+/* All have custom assembly linkages. */
+extern void __down_read_failed(struct rw_semaphore *sem);
+extern void __down_write_failed(struct rw_semaphore *sem);
+extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __down_read_failed anyway, reuse them in
+ the atomic operation as well.
+
+ __down_read_failed takes the semaphore address in $24, the count
+	   we read in $25, and its return address in $28.  The pv is loaded
+ as usual. The gp is clobbered (in the module case) as usual. */
+
+ /* This little bit of silliness is to get the GP loaded for
+ a function that ordinarily wouldn't. Otherwise we could
+	   have it done by the macro directly, which can be optimized by
+	   the linker. */
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __down_read_failed;
+ __asm__ __volatile__(
+ "/* semaphore down_read operation */\n"
+ "1: ldl_l $24,%1\n"
+ " subl $24,1,$28\n"
+ " subl $24,1,$25\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " blt $25,3f\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " jsr $28,($27),__down_read_failed\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv)
+ : "$24", "$25", "$28", "memory");
+
+#if WAITQUEUE_DEBUG
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __down_write_failed anyway, reuse them in
+ the atomic operation as well.
+
+ __down_write_failed takes the semaphore address in $24, the count
+	   we read in $25, and its return address in $28.  The pv is loaded
+ as usual. The gp is clobbered (in the module case) as usual. */
+
+ /* This little bit of silliness is to get the GP loaded for
+ a function that ordinarily wouldn't. Otherwise we could
+	   have it done by the macro directly, which can be optimized by
+	   the linker. */
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __down_write_failed;
+ __asm__ __volatile__(
+ "/* semaphore down_write operation */\n"
+ "1: ldl_l $24,%1\n"
+ " ldah $28,%3($24)\n"
+ " ldah $25,%3($24)\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " bne $25,3f\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " jsr $28,($27),__down_write_failed\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv), "i"(-(RW_LOCK_BIAS >> 16))
+ : "$24", "$25", "$28", "memory");
+
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->granted & 3)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant case is when
+   there was a writer waiting, and we've bumped the count to 0: we
+   must wake the writer up. */
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __rwsem_wake anyway, reuse them in
+ the atomic operation as well.
+
+ __rwsem_wake takes the semaphore address in $24, the
+	   number of waiting readers in $25, and its return address
+ in $28. The pv is loaded as usual. The gp is clobbered
+ (in the module case) as usual. */
+
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+
+ pv = __rwsem_wake;
+ __asm__ __volatile__(
+ "/* semaphore up_read operation */\n"
+ " mb\n"
+ "1: ldl_l $24,%1\n"
+ " addl $24,1,$28\n"
+ " addl $24,1,$24\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " beq $24,3f\n"
+ "4:\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " mov 0,$25\n"
+ " jsr $28,($27),__rwsem_wake\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv)
+ : "$24", "$25", "$28", "memory");
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void up_write(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __rwsem_wake anyway, reuse them in
+ the atomic operation as well.
+
+ __rwsem_wake takes the semaphore address in $24, the
+	   number of waiting readers in $25, and its return address
+ in $28. The pv is loaded as usual. The gp is clobbered
+ (in the module case) as usual. */
+
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 3)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+
+ pv = __rwsem_wake;
+ __asm__ __volatile__(
+ "/* semaphore up_write operation */\n"
+ " mb\n"
+ "1: ldl_l $24,%1\n"
+ " ldah $28,%3($24)\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " blt $24,3f\n"
+ "4:\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: ldah $25,%3($24)\n"
+ /* Only do the wake if we're no longer negative. */
+ " blt $25,4b\n"
+ " lda $24,%1\n"
+ " jsr $28,($27),__rwsem_wake\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv), "i"(RW_LOCK_BIAS >> 16)
+ : "$24", "$25", "$28", "memory");
+}
+
#endif
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index e85061736..e342d9bd0 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -42,7 +42,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
-extern int cpu_number_map[NR_CPUS];
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index d49064790..8b2713ed6 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -24,132 +24,9 @@ extern inline void cpu_bh_enable(int cpu)
local_bh_count(cpu)--;
}
-extern inline int cpu_bh_trylock(int cpu)
-{
- return local_bh_count(cpu) ? 0 : (local_bh_count(cpu) = 1);
-}
-
-extern inline void cpu_bh_endlock(int cpu)
-{
- local_bh_count(cpu) = 0;
-}
-
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-
-static inline void clear_active_bhs(unsigned long x)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " bic %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".section .text2,\"ax\"\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (bh_active)
- :"Ir" (x), "m" (bh_active));
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- wmb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifdef __SMP__
-
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern atomic_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the irq's testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (cpu_bh_trylock(cpu)) {
- if (!test_and_set_bit(0, &global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0)
- return 1;
- clear_bit(0, &global_bh_count);
- }
- cpu_bh_endlock(cpu);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- cpu_bh_enable(cpu);
- clear_bit(0, &global_bh_count);
-}
-
-#else
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
-}
-
-extern inline void end_bh_atomic(void)
-{
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) cpu_bh_trylock(cpu)
-#define softirq_endlock(cpu) cpu_bh_endlock(cpu)
-#define synchronize_bh() barrier()
-
-#endif /* SMP */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#endif /* _ALPHA_SOFTIRQ_H */
diff --git a/include/asm-alpha/types.h b/include/asm-alpha/types.h
index 381d5f044..78ef3dda4 100644
--- a/include/asm-alpha/types.h
+++ b/include/asm-alpha/types.h
@@ -25,23 +25,9 @@ typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
-/*
- * There are 32-bit compilers for the alpha out there..
- */
-#if ((~0UL) == 0xffffffff)
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
-#else
-
typedef __signed__ long __s64;
typedef unsigned long __u64;
-#endif
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
@@ -56,22 +42,15 @@ typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
-/*
- * There are 32-bit compilers for the alpha out there..
- */
-#if ((~0UL) == 0xffffffff)
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-#define BITS_PER_LONG 32
-
-#else
-
typedef signed long s64;
typedef unsigned long u64;
+
#define BITS_PER_LONG 64
-#endif
+/* PCI dma addresses are 32-bits wide. Ignore PCI64 for now, since
+ we'll typically be sending it all through iommu tables anyway. */
+
+typedef u32 dma_addr_t;
#endif /* __KERNEL__ */
#endif /* _ALPHA_TYPES_H */
diff --git a/include/asm-arm/arch-arc/time.h b/include/asm-arm/arch-arc/time.h
index 5e7f3c863..95c8ae14f 100644
--- a/include/asm-arm/arch-arc/time.h
+++ b/include/asm-arm/arch-arc/time.h
@@ -1,196 +1,30 @@
/*
* linux/include/asm-arm/arch-arc/time.h
*
- * Copyright (c) 1996 Russell King.
+ * Copyright (c) 1996-2000 Russell King.
*
* Changelog:
* 24-Sep-1996 RMK Created
* 10-Oct-1996 RMK Brought up to date with arch-sa110eval
* 04-Dec-1997 RMK Updated for new arch/arm/time.c
*/
-#include <asm/ioc.h>
-
-static long last_rtc_update = 0; /* last time the cmos clock got updated */
-
-extern __inline__ unsigned long gettimeoffset (void)
-{
- unsigned int count1, count2, status1, status2;
- unsigned long offset = 0;
-
- status1 = inb(IOC_IRQREQA);
- barrier ();
- outb (0, IOC_T0LATCH);
- barrier ();
- count1 = inb(IOC_T0CNTL) | (inb(IOC_T0CNTH) << 8);
- barrier ();
- status2 = inb(IOC_IRQREQA);
- barrier ();
- outb (0, IOC_T0LATCH);
- barrier ();
- count2 = inb(IOC_T0CNTL) | (inb(IOC_T0CNTH) << 8);
-
- if (count2 < count1) {
- /*
- * This means that we haven't just had an interrupt
- * while reading into status2.
- */
- if (status2 & (1 << 5))
- offset = tick;
- count1 = count2;
- } else if (count2 > count1) {
- /*
- * We have just had another interrupt while reading
- * status2.
- */
- offset += tick;
- count1 = count2;
- }
-
- count1 = LATCH - count1;
- /*
- * count1 = number of clock ticks since last interrupt
- */
- offset += count1 * tick / LATCH;
- return offset;
-}
-
-extern int iic_control (unsigned char, int, char *, int);
-
-static int set_rtc_time(unsigned long nowtime)
-{
- char buf[5], ctrl;
-
- if (iic_control(0xa1, 0, &ctrl, 1) != 0)
- printk("RTC: failed to read control reg\n");
-
- /*
- * Reset divider
- */
- ctrl |= 0x80;
-
- if (iic_control(0xa0, 0, &ctrl, 1) != 0)
- printk("RTC: failed to stop the clock\n");
-
- /*
- * We only set the time - we don't set the date.
- * This means that there is the possibility once
- * a day for the correction to disrupt the date.
- * We really ought to write the time and date, or
- * nothing at all.
- */
- buf[0] = 0;
- buf[1] = nowtime % 60; nowtime /= 60;
- buf[2] = nowtime % 60; nowtime /= 60;
- buf[3] = nowtime % 24;
-
- BIN_TO_BCD(buf[1]);
- BIN_TO_BCD(buf[2]);
- BIN_TO_BCD(buf[3]);
-
- if (iic_control(0xa0, 1, buf, 4) != 0)
- printk("RTC: Failed to set the time\n");
-
- /*
- * Re-enable divider
- */
- ctrl &= ~0x80;
-
- if (iic_control(0xa0, 0, &ctrl, 1) != 0)
- printk("RTC: failed to start the clock\n");
-
- return 0;
-}
-
-extern __inline__ unsigned long get_rtc_time(void)
-{
- unsigned int year, i;
- char buf[8];
-
- /*
- * The year is not part of the RTC counter
- * registers, and is stored in RAM. This
- * means that it will not be automatically
- * updated.
- */
- if (iic_control(0xa1, 0xc0, buf, 1) != 0)
- printk("RTC: failed to read the year\n");
-
- /*
- * If the year is before 1970, then the year
- * is actually 100 in advance. This gives us
- * a year 2070 bug...
- */
- year = 1900 + buf[0];
- if (year < 1970)
- year += 100;
-
- /*
- * Read the time and date in one go - this
- * will ensure that we don't get any effects
- * due to carry (the RTC latches the counters
- * during a read).
- */
- if (iic_control(0xa1, 2, buf, 5) != 0) {
- printk("RTC: failed to read the time and date\n");
- memset(buf, 0, sizeof(buf));
- }
-
- /*
- * The RTC combines years with date and weekday
- * with month. We need to mask off this extra
- * information before converting the date to
- * binary.
- */
- buf[4] &= 0x1f;
- buf[3] &= 0x3f;
-
- for (i = 0; i < 5; i++)
- BCD_TO_BIN(buf[i]);
-
- return mktime(year, buf[4], buf[3], buf[2], buf[1], buf[0]);
-}
+extern void ioctime_init(void);
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
-
- /* If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec >= 50000 - (tick >> 1) &&
- xtime.tv_usec < 50000 + (tick >> 1)) {
- if (set_rtc_time(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
- if (!user_mode(regs))
- do_profile(instruction_pointer(regs));
+ do_set_rtc();
+ do_profile(regs);
}
-static struct irqaction timerirq = {
- timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
/*
- * Set up timer interrupt, and return the current time in seconds.
+ * Set up timer interrupt.
*/
extern __inline__ void setup_timer(void)
{
- outb(LATCH & 255, IOC_T0LTCHL);
- outb(LATCH >> 8, IOC_T0LTCHH);
- outb(0, IOC_T0GO);
+ ioctime_init();
- xtime.tv_sec = get_rtc_time();
+ timer_irq.handler = timer_interrupt;
- setup_arm_irq(IRQ_TIMER, &timerirq);
+ setup_arm_irq(IRQ_TIMER, &timer_irq);
}
diff --git a/include/asm-arm/arch-cl7500/time.h b/include/asm-arm/arch-cl7500/time.h
index 19b388682..59cb2d6fd 100644
--- a/include/asm-arm/arch-cl7500/time.h
+++ b/include/asm-arm/arch-cl7500/time.h
@@ -1,88 +1,19 @@
/*
* linux/include/asm-arm/arch-cl7500/time.h
*
- * Copyright (c) 1996 Russell King.
- * Copyright (C) 1999 Nexus Electronics Ltd.
+ * Copyright (c) 1996-2000 Russell King.
*
* Changelog:
* 24-Sep-1996 RMK Created
* 10-Oct-1996 RMK Brought up to date with arch-sa110eval
* 04-Dec-1997 RMK Updated for new arch/arm/time.c
- * 10-Aug-1999 PJB Converted for CL7500
*/
-#include <asm/iomd.h>
-
-static long last_rtc_update = 0; /* last time the cmos clock got updated */
-
-extern __inline__ unsigned long gettimeoffset (void)
-{
- unsigned long offset = 0;
- unsigned int count1, count2, status1, status2;
-
- status1 = IOMD_IRQREQA;
- barrier ();
- outb(0, IOMD_T0LATCH);
- barrier ();
- count1 = inb(IOMD_T0CNTL) | (inb(IOMD_T0CNTH) << 8);
- barrier ();
- status2 = inb(IOMD_IRQREQA);
- barrier ();
- outb(0, IOMD_T0LATCH);
- barrier ();
- count2 = inb(IOMD_T0CNTL) | (inb(IOMD_T0CNTH) << 8);
-
- if (count2 < count1) {
- /*
- * This means that we haven't just had an interrupt
- * while reading into status2.
- */
- if (status2 & (1 << 5))
- offset = tick;
- count1 = count2;
- } else if (count2 > count1) {
- /*
- * We have just had another interrupt while reading
- * status2.
- */
- offset += tick;
- count1 = count2;
- }
-
- count1 = LATCH - count1;
- /*
- * count1 = number of clock ticks since last interrupt
- */
- offset += count1 * tick / LATCH;
- return offset;
-}
-
-extern __inline__ unsigned long get_rtc_time(void)
-{
- return mktime(1976, 06, 24, 0, 0, 0);
-}
-
-static int set_rtc_time(unsigned long nowtime)
-{
- return 0;
-}
+extern void ioctime_init(void);
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
-
- /* If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec >= 50000 - (tick >> 1) &&
- xtime.tv_usec < 50000 + (tick >> 1)) {
- if (set_rtc_time(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
+ do_set_rtc();
{
/* Twinkle the lights. */
@@ -95,29 +26,17 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
}
- if (!user_mode(regs))
- do_profile(instruction_pointer(regs));
+ do_profile(regs);
}
-static struct irqaction timerirq = {
- timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
/*
- * Set up timer interrupt, and return the current time in seconds.
+ * Set up timer interrupt.
*/
extern __inline__ void setup_timer(void)
{
- outb(LATCH & 255, IOMD_T0LTCHL);
- outb(LATCH >> 8, IOMD_T0LTCHH);
- outb(0, IOMD_T0GO);
+ ioctime_init();
- xtime.tv_sec = get_rtc_time();
+ timer_irq.handler = timer_interrupt;
- setup_arm_irq(IRQ_TIMER, &timerirq);
+ setup_arm_irq(IRQ_TIMER, &timer_irq);
}
diff --git a/include/asm-arm/arch-ebsa110/time.h b/include/asm-arm/arch-ebsa110/time.h
index dfb62984c..dbddeb1b9 100644
--- a/include/asm-arm/arch-ebsa110/time.h
+++ b/include/asm-arm/arch-ebsa110/time.h
@@ -12,11 +12,8 @@
* 28-Dec-1998 APH Made leds code optional
*/
-#include <linux/config.h>
#include <asm/leds.h>
-#define IRQ_TIMER IRQ_EBSA110_TIMER0
-
#define MCLK_47_8
#if defined(MCLK_42_3)
@@ -32,52 +29,27 @@
#define PIT1_COUNT 0x85A1
#define DIVISOR 2
#endif
-
-extern __inline__ unsigned long gettimeoffset (void)
-{
- return 0;
-}
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
*PIT_T1 = (PIT1_COUNT) & 0xff;
*PIT_T1 = (PIT1_COUNT) >> 8;
-#ifdef CONFIG_LEDS
- {
- static int count = 50;
- if (--count == 0) {
- count = 50;
- leds_event(led_timer);
- }
- }
-#endif
-
- {
#ifdef DIVISOR
+ {
static unsigned int divisor;
- if (divisor-- == 0) {
- divisor = DIVISOR - 1;
-#else
- {
-#endif
- do_timer(regs);
- }
+ if (divisor--)
+ return;
+ divisor = DIVISOR - 1;
}
+#endif
+ do_leds();
+ do_timer(regs);
}
-static struct irqaction timerirq = {
- timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
/*
- * Set up timer interrupt, and return the current time in seconds.
+ * Set up timer interrupt.
*/
extern __inline__ void setup_timer(void)
{
@@ -93,12 +65,9 @@ extern __inline__ void setup_timer(void)
*PIT_T1 = (PIT1_COUNT) & 0xff;
*PIT_T1 = (PIT1_COUNT) >> 8;
- /*
- * Default the date to 1 Jan 1970 0:0:0
- * You will have to run a time daemon to set the
- * clock correctly at bootup
- */
- xtime.tv_sec = mktime(1970, 1, 1, 0, 0, 0);
+ timer_irq.handler = timer_interrupt;
- setup_arm_irq(IRQ_TIMER, &timerirq);
+ setup_arm_irq(IRQ_EBSA110_TIMER0, &timer_irq);
}
+
+
diff --git a/include/asm-arm/arch-ebsa285/time.h b/include/asm-arm/arch-ebsa285/time.h
index 3c5bcca3b..b10bdd324 100644
--- a/include/asm-arm/arch-ebsa285/time.h
+++ b/include/asm-arm/arch-ebsa285/time.h
@@ -17,7 +17,6 @@
#define RTC_PORT(x) (rtc_base+(x))
#define RTC_ALWAYS_BCD 0
-#include <linux/config.h>
#include <linux/mc146818rtc.h>
#include <asm/dec21285.h>
@@ -25,32 +24,6 @@
#include <asm/system.h>
static int rtc_base;
-static unsigned long (*gettimeoffset)(void);
-static int (*set_rtc_mmss)(unsigned long nowtime);
-static long last_rtc_update = 0; /* last time the cmos clock got updated */
-
-#ifdef CONFIG_LEDS
-static void do_leds(void)
-{
- static unsigned int count = 50;
- static int last_pid;
-
- if (current->pid != last_pid) {
- last_pid = current->pid;
- if (last_pid)
- leds_event(led_idle_end);
- else
- leds_event(led_idle_start);
- }
-
- if (--count == 0) {
- count = 50;
- leds_event(led_timer);
- }
-}
-#else
-#define do_leds()
-#endif
#define mSEC_10_from_14 ((14318180 + 100) / 200)
@@ -101,34 +74,10 @@ static void isa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_leds();
do_timer(regs);
-
- /* If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec > 50000 - (tick >> 1) &&
- xtime.tv_usec < 50000 + (tick >> 1)) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
- if (!user_mode(regs))
- do_profile(instruction_pointer(regs));
+ do_set_rtc();
+ do_profile(regs);
}
-static struct irqaction isa_timer_irq = {
- isa_timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
static unsigned long __init get_isa_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
@@ -175,11 +124,12 @@ static unsigned long __init get_isa_cmos_time(void)
}
static int
-set_isa_cmos_time(unsigned long nowtime)
+set_isa_cmos_time(void)
{
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
+ unsigned long nowtime = xtime.tv_sec;
save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -228,60 +178,31 @@ set_isa_cmos_time(unsigned long nowtime)
-static unsigned long __ebsa285_text timer1_gettimeoffset (void)
+static unsigned long timer1_gettimeoffset (void)
{
unsigned long value = LATCH - *CSR_TIMER1_VALUE;
return (tick * value) / LATCH;
}
-static void __ebsa285_text timer1_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static void timer1_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
*CSR_TIMER1_CLR = 0;
/* Do the LEDs things */
do_leds();
-
do_timer(regs);
-
- /* If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec > 50000 - (tick >> 1) &&
- xtime.tv_usec < 50000 + (tick >> 1)) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
- if (!user_mode(regs))
- do_profile(instruction_pointer(regs));
-}
-
-static struct irqaction __ebsa285_data timer1_irq = {
- timer1_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
-static int
-set_dummy_time(unsigned long secs)
-{
- return 1;
+ do_set_rtc();
+ do_profile(regs);
}
/*
- * Set up timer interrupt, and return the current time in seconds.
+ * Set up timer interrupt.
*/
extern __inline__ void setup_timer(void)
{
+ int irq;
+
if (machine_is_co285())
/*
* Add-in 21285s shouldn't access the RTC
@@ -321,18 +242,11 @@ extern __inline__ void setup_timer(void)
printk(KERN_WARNING "RTC: *** warning: CMOS battery bad\n");
xtime.tv_sec = get_isa_cmos_time();
- set_rtc_mmss = set_isa_cmos_time;
+ set_rtc = set_isa_cmos_time;
} else
rtc_base = 0;
}
- if (!rtc_base) {
- /*
- * Default the date to 1 Jan 1970 0:0:0
- */
- xtime.tv_sec = mktime(1970, 1, 1, 0, 0, 0);
- set_rtc_mmss = set_dummy_time;
- }
if (machine_is_ebsa285() || machine_is_co285()) {
gettimeoffset = timer1_gettimeoffset;
@@ -340,7 +254,8 @@ extern __inline__ void setup_timer(void)
*CSR_TIMER1_LOAD = LATCH;
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16;
- setup_arm_irq(IRQ_TIMER1, &timer1_irq);
+ timer_irq.handler = timer1_interrupt;
+ irq = IRQ_TIMER1;
} else {
/* enable PIT timer */
/* set for periodic (4) and LSB/MSB write (0x30) */
@@ -349,7 +264,8 @@ extern __inline__ void setup_timer(void)
outb((mSEC_10_from_14/6) >> 8, 0x40);
gettimeoffset = isa_gettimeoffset;
-
- setup_arm_irq(IRQ_ISA_TIMER, &isa_timer_irq);
+ timer_irq.handler = isa_timer_interrupt;
+ irq = IRQ_ISA_TIMER;
}
+ setup_arm_irq(IRQ_ISA_TIMER, &timer_irq);
}
diff --git a/include/asm-arm/arch-nexuspci/time.h b/include/asm-arm/arch-nexuspci/time.h
index 17f0ae472..fbf53e887 100644
--- a/include/asm-arm/arch-nexuspci/time.h
+++ b/include/asm-arm/arch-nexuspci/time.h
@@ -10,13 +10,6 @@
#define UART_BASE 0xfff00000
#define INTCONT 0xffe00000
-#define update_rtc()
-
-extern __inline__ unsigned long gettimeoffset (void)
-{
- return 0;
-}
-
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
static int count = 50;
@@ -40,15 +33,6 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_timer(regs);
}
-static struct irqaction timerirq = {
- timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
extern __inline__ void setup_timer(void)
{
int tick = 3686400 / 16 / 2 / 100;
@@ -58,12 +42,7 @@ extern __inline__ void setup_timer(void)
writeb(0x80, UART_BASE + 8);
writeb(0x10, UART_BASE + 0x14);
- /*
- * Default the date to 1 Jan 1970 0:0:0
- * You will have to run a time daemon to set the
- * clock correctly at bootup
- */
- xtime.tv_sec = mktime(1970, 1, 1, 0, 0, 0);
+ timer_irq.handler = timer_interrupt;
- setup_arm_irq(IRQ_TIMER, &timerirq);
+ setup_arm_irq(IRQ_TIMER, &timer_irq);
}
diff --git a/include/asm-arm/arch-rpc/time.h b/include/asm-arm/arch-rpc/time.h
index 0ac40356a..94974241f 100644
--- a/include/asm-arm/arch-rpc/time.h
+++ b/include/asm-arm/arch-rpc/time.h
@@ -1,204 +1,30 @@
/*
* linux/include/asm-arm/arch-rpc/time.h
*
- * Copyright (c) 1996 Russell King.
+ * Copyright (c) 1996-2000 Russell King.
*
* Changelog:
* 24-Sep-1996 RMK Created
* 10-Oct-1996 RMK Brought up to date with arch-sa110eval
* 04-Dec-1997 RMK Updated for new arch/arm/time.c
*/
-#include <asm/iomd.h>
-
-static long last_rtc_update = 0; /* last time the cmos clock got updated */
-
-extern __inline__ unsigned long gettimeoffset (void)
-{
- unsigned long offset = 0;
- unsigned int count1, count2, status1, status2;
-
- status1 = IOMD_IRQREQA;
- barrier ();
- outb(0, IOMD_T0LATCH);
- barrier ();
- count1 = inb(IOMD_T0CNTL) | (inb(IOMD_T0CNTH) << 8);
- barrier ();
- status2 = inb(IOMD_IRQREQA);
- barrier ();
- outb(0, IOMD_T0LATCH);
- barrier ();
- count2 = inb(IOMD_T0CNTL) | (inb(IOMD_T0CNTH) << 8);
-
- if (count2 < count1) {
- /*
- * This means that we haven't just had an interrupt
- * while reading into status2.
- */
- if (status2 & (1 << 5))
- offset = tick;
- count1 = count2;
- } else if (count2 > count1) {
- /*
- * We have just had another interrupt while reading
- * status2.
- */
- offset += tick;
- count1 = count2;
- }
-
- count1 = LATCH - count1;
- /*
- * count1 = number of clock ticks since last interrupt
- */
- offset += count1 * tick / LATCH;
- return offset;
-}
-
-extern int iic_control(unsigned char, int, char *, int);
-
-static int set_rtc_time(unsigned long nowtime)
-{
- char buf[5], ctrl;
-
- if (iic_control(0xa1, 0, &ctrl, 1) != 0)
- printk("RTC: failed to read control reg\n");
-
- /*
- * Reset divider
- */
- ctrl |= 0x80;
-
- if (iic_control(0xa0, 0, &ctrl, 1) != 0)
- printk("RTC: failed to stop the clock\n");
-
- /*
- * We only set the time - we don't set the date.
- * This means that there is the possibility once
- * a day for the correction to disrupt the date.
- * We really ought to write the time and date, or
- * nothing at all.
- */
- buf[0] = 0;
- buf[1] = nowtime % 60; nowtime /= 60;
- buf[2] = nowtime % 60; nowtime /= 60;
- buf[3] = nowtime % 24;
-
- BIN_TO_BCD(buf[1]);
- BIN_TO_BCD(buf[2]);
- BIN_TO_BCD(buf[3]);
-
- if (iic_control(0xa0, 1, buf, 4) != 0)
- printk("RTC: Failed to set the time\n");
-
- /*
- * Re-enable divider
- */
- ctrl &= ~0x80;
-
- if (iic_control(0xa0, 0, &ctrl, 1) != 0)
- printk("RTC: failed to start the clock\n");
-
- return 0;
-}
-
-extern __inline__ unsigned long get_rtc_time(void)
-{
- unsigned int year, i;
- char buf[8];
-
- /*
- * The year is not part of the RTC counter
- * registers, and is stored in RAM. This
- * means that it will not be automatically
- * updated.
- */
- if (iic_control(0xa1, 0xc0, buf, 1) != 0)
- printk("RTC: failed to read the year\n");
-
- /*
- * If the year is before 1970, then the year
- * is actually 100 in advance. This gives us
- * a year 2070 bug...
- */
- year = 1900 + buf[0];
- if (year < 1970)
- year += 100;
-
- /*
- * Read the time and date in one go - this
- * will ensure that we don't get any effects
- * due to carry (the RTC latches the counters
- * during a read).
- */
- if (iic_control(0xa1, 2, buf, 5) != 0) {
- printk("RTC: failed to read the time and date\n");
- memset(buf, 0, sizeof(buf));
- }
-
- /*FIXME:
- * This doesn't seem to work. Does RISC OS
- * actually use the RTC year? It doesn't
- * seem to. In that case, how does it update
- * the CMOS year?
- */
- /*year += (buf[3] >> 6) & 3;*/
-
- /*
- * The RTC combines years with date and weekday
- * with month. We need to mask off this extra
- * information before converting the date to
- * binary.
- */
- buf[4] &= 0x1f;
- buf[3] &= 0x3f;
-
- for (i = 0; i < 5; i++)
- BCD_TO_BIN(buf[i]);
-
- return mktime(year, buf[4], buf[3], buf[2], buf[1], buf[0]);
-}
+extern void ioctime_init(void);
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
-
- /* If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec >= 50000 - (tick >> 1) &&
- xtime.tv_usec < 50000 + (tick >> 1)) {
- if (set_rtc_time(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
- if (!user_mode(regs))
- do_profile(instruction_pointer(regs));
+ do_set_rtc();
+ do_profile(regs);
}
-static struct irqaction timerirq = {
- timer_interrupt,
- 0,
- 0,
- "timer",
- NULL,
- NULL
-};
-
/*
- * Set up timer interrupt, and return the current time in seconds.
+ * Set up timer interrupt.
*/
extern __inline__ void setup_timer(void)
{
- outb(LATCH & 255, IOMD_T0LTCHL);
- outb(LATCH >> 8, IOMD_T0LTCHH);
- outb(0, IOMD_T0GO);
+ ioctime_init();
- xtime.tv_sec = get_rtc_time();
+ timer_irq.handler = timer_interrupt;
- setup_arm_irq(IRQ_TIMER, &timerirq);
+ setup_arm_irq(IRQ_TIMER, &timer_irq);
}
diff --git a/include/asm-arm/arch-sa1100/system.h b/include/asm-arm/arch-sa1100/system.h
index 49b4bdca2..3ec5c9074 100644
--- a/include/asm-arm/arch-sa1100/system.h
+++ b/include/asm-arm/arch-sa1100/system.h
@@ -41,6 +41,7 @@
" b 1f @ Seems we must align the next \n" \
" .align 5 @ instruction on a cache line \n" \
"1: mcr p15, 0, %0, c15, c8, 2 @ Wait for interrupts \n" \
+" mov r0, r0 @ insert NOP to ensure SA1100 re-awakes\n" \
" mcr p15, 0, %0, c15, c1, 2 @ Reenable clock switching \n" \
: : "r" (&ICIP) : "cc" ); \
} while (0)
diff --git a/include/asm-arm/checksum.h b/include/asm-arm/checksum.h
index 5f1e0f695..9d048a3ba 100644
--- a/include/asm-arm/checksum.h
+++ b/include/asm-arm/checksum.h
@@ -55,9 +55,9 @@ ip_fast_csum(unsigned char * iph, unsigned int ihl)
unsigned int sum, tmp1;
__asm__ __volatile__(
- "sub %2, %2, #5 @ ip_fast_csum
- ldr %0, [%1], #4
+ "ldr %0, [%1], #4 @ ip_fast_csum
ldr %3, [%1], #4
+ sub %2, %2, #5
adds %0, %0, %3
ldr %3, [%1], #4
adcs %0, %0, %3
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 4b8125cba..6ebf76a6c 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -17,6 +17,9 @@
#define destroy_context(mm) do { } while(0)
#define init_new_context(tsk,mm) do { } while(0)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched.
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index 841855ef8..12b2f3390 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -14,6 +14,12 @@
*/
#include <asm/proc/cache.h>
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+}
+
/*
* Page table cache stuff
*/
diff --git a/include/asm-arm/proc-armv/system.h b/include/asm-arm/proc-armv/system.h
index a7551ec93..d0e334813 100644
--- a/include/asm-arm/proc-armv/system.h
+++ b/include/asm-arm/proc-armv/system.h
@@ -16,7 +16,6 @@ extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int
switch (size) {
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
break;
- case 2: abort ();
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
break;
default: arm_invalidptr(xchg_str, size);
@@ -25,11 +24,9 @@ extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int
}
#define set_cr(x) \
- do { \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0 @ set CR" \
- : : "r" (x)); \
- } while (0)
+ : : "r" (x))
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
@@ -42,76 +39,66 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
* Save the current interrupt enable state & disable IRQs
*/
#define __save_flags_cli(x) \
- do { \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %1, cpsr @ save_flags_cli\n" \
-" and %0, %1, #192\n" \
-" orr %1, %1, #128\n" \
-" msr cpsr, %1" \
- : "=r" (x), "=r" (temp) \
- : \
- : "memory"); \
- } while (0)
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ save_flags_cli\n" \
+" orr %1, %0, #128\n" \
+" msr cpsr_c, %1" \
+ : "=r" (x), "=r" (temp) \
+ : \
+ : "memory"); \
+ })
/*
* Enable IRQs
*/
#define __sti() \
- do { \
- unsigned long temp; \
- __asm__ __volatile__( \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
"mrs %0, cpsr @ sti\n" \
" bic %0, %0, #128\n" \
-" msr cpsr, %0" \
- : "=r" (temp) \
- : \
- : "memory"); \
- } while(0)
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory"); \
+ })
/*
* Disable IRQs
*/
#define __cli() \
- do { \
- unsigned long temp; \
- __asm__ __volatile__( \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
"mrs %0, cpsr @ cli\n" \
" orr %0, %0, #128\n" \
-" msr cpsr, %0" \
- : "=r" (temp) \
- : \
- : "memory"); \
- } while(0)
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory"); \
+ })
/*
* save current IRQ & FIQ state
*/
#define __save_flags(x) \
- do { \
- __asm__ __volatile__( \
+ __asm__ __volatile__( \
"mrs %0, cpsr @ save_flags\n" \
-" and %0, %0, #192" \
: "=r" (x) \
: \
- : "memory"); \
- } while (0)
+ : "memory")
/*
* restore saved IRQ & FIQ state
*/
#define __restore_flags(x) \
- do { \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ restore_flags\n" \
-" bic %0, %0, #192\n" \
-" orr %0, %0, %1\n" \
-" msr cpsr, %0" \
- : "=&r" (temp) \
- : "r" (x) \
- : "memory"); \
- } while (0)
+ __asm__ __volatile__( \
+ "msr cpsr_c, %0 @ restore_flags\n" \
+ : \
+ : "r" (x) \
+ : "memory")
/* For spinlocks etc */
#define local_irq_save(x) __save_flags_cli(x)
diff --git a/include/asm-arm/types.h b/include/asm-arm/types.h
index a1e76285a..39d5290f5 100644
--- a/include/asm-arm/types.h
+++ b/include/asm-arm/types.h
@@ -41,6 +41,10 @@ typedef unsigned long long u64;
#define BITS_PER_LONG 32
+/* Dma addresses are 32-bits wide. */
+
+typedef u32 dma_addr_t;
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index fc8ff9016..610db5633 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -12,6 +12,8 @@ extern unsigned int local_irq_count[NR_CPUS];
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 7a4e9facc..4ec380c2d 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -195,15 +195,15 @@ extern void iounmap(void *addr);
*/
#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+#define isa_readb(a) readb(__ISA_IO_base + (unsigned long)(a))
+#define isa_readw(a) readw(__ISA_IO_base + (unsigned long)(a))
+#define isa_readl(a) readl(__ISA_IO_base + (unsigned long)(a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (unsigned long)(a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (unsigned long)(a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (unsigned long)(a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (unsigned long)(a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (unsigned long)(b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (unsigned long)(a),(b),(c))
/*
@@ -211,7 +211,7 @@ extern void iounmap(void *addr);
*/
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (unsigned long)(b)),(c),(d))
static inline int check_signature(unsigned long io_addr,
const unsigned char *signature, int length)
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
index 31eddb2da..07f4149a4 100644
--- a/include/asm-i386/mc146818rtc.h
+++ b/include/asm-i386/mc146818rtc.h
@@ -1,8 +1,8 @@
/*
* Machine dependent access functions for RTC registers.
*/
-#ifndef __ASM_I386_MC146818RTC_H
-#define __ASM_I386_MC146818RTC_H
+#ifndef _I386_MC146818RTC_H
+#define _I386_MC146818RTC_H
#include <asm/io.h>
@@ -24,4 +24,4 @@ outb_p((addr),RTC_PORT(0)); \
outb_p((val),RTC_PORT(1)); \
})
-#endif /* __ASM_I386_MC146818RTC_H */
+#endif /* _I386_MC146818RTC_H */
diff --git a/include/asm-i386/md.h b/include/asm-i386/md.h
deleted file mode 100644
index 0a2c5dd01..000000000
--- a/include/asm-i386/md.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* $Id: md.h,v 1.1 1997/12/15 15:11:57 jj Exp $
- * md.h: High speed xor_block operation for RAID4/5
- *
- */
-
-#ifndef __ASM_MD_H
-#define __ASM_MD_H
-
-/* #define HAVE_ARCH_XORBLOCK */
-
-#define MD_XORBLOCK_ALIGNMENT sizeof(long)
-
-#endif /* __ASM_MD_H */
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index fb3af63ae..1d9248632 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -1,6 +1,7 @@
#ifndef __I386_MMU_CONTEXT_H
#define __I386_MMU_CONTEXT_H
+#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
@@ -12,30 +13,46 @@
#define init_new_context(tsk,mm) do { } while (0)
#ifdef __SMP__
-extern unsigned int cpu_tlbbad[NR_CPUS];
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+ if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
#endif
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
+ set_bit(cpu, &next->cpu_vm_mask);
if (prev != next) {
/*
* Re-load LDT if necessary
*/
if (prev->segments != next->segments)
load_LDT(next);
-
+#ifdef CONFIG_SMP
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ cpu_tlbstate[cpu].active_mm = next;
+#endif
/* Re-load page tables */
asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
clear_bit(cpu, &prev->cpu_vm_mask);
}
#ifdef __SMP__
else {
- if(cpu_tlbbad[cpu])
+ int old_state = cpu_tlbstate[cpu].state;
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ if(cpu_tlbstate[cpu].active_mm != next)
+ BUG();
+ if(old_state == TLBSTATE_OLD)
local_flush_tlb();
}
- cpu_tlbbad[cpu] = 0;
+
#endif
- set_bit(cpu, &next->cpu_vm_mask);
}
#define activate_mm(prev, next) \
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index d6f199989..67855d163 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -37,10 +37,12 @@
typedef struct { unsigned long long pte; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
+#define PTE_MASK (~(unsigned long long) (PAGE_SIZE-1))
#else
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
+#define PTE_MASK PAGE_MASK
#endif
typedef struct { unsigned long pgprot; } pgprot_t;
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index fd01afad1..5cb20763d 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -187,6 +187,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
@@ -220,11 +221,6 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#else
-/*
- * We aren't very clever about this yet - SMP could certainly
- * avoid some global flushes..
- */
-
#include <asm/smp.h>
#define local_flush_tlb() \
@@ -242,23 +238,24 @@ static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, u
flush_tlb_mm(mm);
}
-extern volatile unsigned long smp_invalidate_needed;
-extern unsigned int cpu_tlbbad[NR_CPUS];
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+#define TLBSTATE_OLD 3
-static inline void do_flush_tlb_local(void)
+struct tlb_state
{
- unsigned long cpu = smp_processor_id();
- struct mm_struct *mm = current->mm;
-
- clear_bit(cpu, &smp_invalidate_needed);
- if (mm) {
- set_bit(cpu, &mm->cpu_vm_mask);
- local_flush_tlb();
- } else {
- cpu_tlbbad[cpu] = 1;
- }
-}
+ struct mm_struct *active_mm;
+ int state;
+};
+extern struct tlb_state cpu_tlbstate[NR_CPUS];
+
#endif
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* i386 does not keep any page table caches in TLB */
+}
+
#endif /* _I386_PGALLOC_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e4f8afcf3..ef491587f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -29,12 +29,13 @@ extern pgd_t swapper_pg_dir[1024];
#define __flush_tlb() \
do { \
- __asm__ __volatile__ \
- ("movl %0, %%cr3;" \
- : \
- : "r" __pa(current->active_mm->pgd) \
- : "memory" \
- ); \
+ unsigned int tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ : "=r" (tmpreg) \
+ :: "memory"); \
} while (0)
/*
@@ -43,14 +44,16 @@ extern pgd_t swapper_pg_dir[1024];
*/
#define __flush_tlb_global() \
do { \
+ unsigned int tmpreg; \
+ \
__asm__ __volatile__( \
- "movl %0, %%cr4; # turn off PGE \n" \
- "mov %2, %%cr3; # flush TLB \n" \
- "mov %1, %%cr4; # turn PGE back on \n" \
- : \
- : "r" (mmu_cr4_features), \
- "r" (mmu_cr4_features & ~X86_CR4_PGE), \
- "r" (__pa(current->active_mm->pgd)) \
+ "movl %1, %%cr4; # turn off PGE \n" \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ "movl %2, %%cr4; # turn PGE back on \n" \
+ : "=r" (tmpreg) \
+ : "r" (mmu_cr4_features & ~X86_CR4_PGE), \
+ "r" (mmu_cr4_features) \
: "memory"); \
} while (0)
@@ -151,7 +154,7 @@ extern unsigned long empty_zero_page[1024];
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index 6eb68524a..9964ba5bc 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -9,133 +9,9 @@ extern unsigned int local_bh_count[NR_CPUS];
#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
-
-extern spinlock_t i386_bh_lock;
-
-#ifdef __SMP__
-
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern atomic_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (cpu_bh_trylock(cpu)) {
- if (!test_and_set_bit(0,&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0)
- return 1;
- clear_bit(0,&global_bh_count);
- }
- cpu_bh_endlock(cpu);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- cpu_bh_enable(cpu);
- clear_bit(0,&global_bh_count);
-}
-
-#else
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
-
-#endif /* SMP */
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- unsigned long flags;
-
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
-
- spin_lock_irqsave(&i386_bh_lock, flags);
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&i386_bh_lock, flags);
-}
-
-extern inline void remove_bh(int nr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i386_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- spin_unlock_irqrestore(&i386_bh_lock, flags);
-
- synchronize_bh();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i386_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- spin_unlock_irqrestore(&i386_bh_lock, flags);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i386_bh_lock, flags);
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&i386_bh_lock, flags);
-}
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* __ASM_SOFTIRQ_H */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 44a2e59e5..4ce58066c 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -44,6 +44,7 @@ typedef struct {
*/
#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
+#define spin_is_locked(x) ((x)->lock != 0)
#define spin_lock_string \
"\n1:\t" \
diff --git a/include/asm-ia64/a.out.h b/include/asm-ia64/a.out.h
new file mode 100644
index 000000000..2de8a2d0d
--- /dev/null
+++ b/include/asm-ia64/a.out.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_A_OUT_H
+#define _ASM_IA64_A_OUT_H
+
+/*
+ * No a.out format has been (or should be) defined so this file is
+ * just a dummy that allows us to get binfmt_elf compiled. It
+ * probably would be better to clean up binfmt_elf.c so it does not
+ * necessarily depend on there being a.out support.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/types.h>
+
+struct exec
+{
+ unsigned long a_info;
+ unsigned long a_text;
+ unsigned long a_data;
+ unsigned long a_bss;
+ unsigned long a_entry;
+};
+
+#define N_TXTADDR(x) 0
+#define N_DATADDR(x) 0
+#define N_BSSADDR(x) 0
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+#define N_TXTOFF(x) 0
+
+#ifdef __KERNEL__
+# define STACK_TOP 0xa000000000000000UL
+# define IA64_RBS_BOT (STACK_TOP - 0x80000000L) /* bottom of register backing store */
+#endif
+
+#endif /* _ASM_IA64_A_OUT_H */
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
new file mode 100644
index 000000000..c3999f148
--- /dev/null
+++ b/include/asm-ia64/acpi-ext.h
@@ -0,0 +1,110 @@
+#ifndef _ASM_IA64_ACPI_EXT_H
+#define _ASM_IA64_ACPI_EXT_H
+
+/*
+ * Advanced Configuration and Power Infterface
+ * Based on 'ACPI Specification 1.0b' Febryary 2, 1999
+ * and 'IA-64 Extensions to the ACPI Specification' Rev 0.6
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ */
+
+#include <linux/types.h>
+
+#define ACPI_RSDP_SIG "RSD PTR " /* Trailing space required */
+#define ACPI_RSDP_SIG_LEN 8
+typedef struct {
+ char signature[8];
+ u8 checksum;
+ char oem_id[6];
+ char reserved; /* Must be 0 */
+ struct acpi_rsdt *rsdt;
+} acpi_rsdp_t;
+
+typedef struct {
+ char signature[4];
+ u32 length;
+ u8 revision;
+ u8 checksum;
+ char oem_id[6];
+ char oem_table_id[8];
+ u32 oem_revision;
+ u32 creator_id;
+ u32 creator_revision;
+ char reserved[4];
+} acpi_desc_table_hdr_t;
+
+#define ACPI_RSDT_SIG "RSDT"
+#define ACPI_RSDT_SIG_LEN 4
+typedef struct acpi_rsdt {
+ acpi_desc_table_hdr_t header;
+ unsigned long entry_ptrs[1]; /* Not really . . . */
+} acpi_rsdt_t;
+
+#define ACPI_SAPIC_SIG "SPIC"
+#define ACPI_SAPIC_SIG_LEN 4
+typedef struct {
+ acpi_desc_table_hdr_t header;
+ unsigned long interrupt_block;
+} acpi_sapic_t;
+
+/* SAPIC structure types */
+#define ACPI_ENTRY_LOCAL_SAPIC 0
+#define ACPI_ENTRY_IO_SAPIC 1
+#define ACPI_ENTRY_INT_SRC_OVERRIDE 2
+#define ACPI_ENTRY_PLATFORM_INT_SOURCE 3 /* Unimplemented */
+
+/* Local SAPIC flags */
+#define LSAPIC_ENABLED (1<<0)
+#define LSAPIC_PERFORMANCE_RESTRICTED (1<<1)
+#define LSAPIC_PRESENT (1<<2)
+
+typedef struct {
+ u8 type;
+ u8 length;
+ u16 acpi_processor_id;
+ u16 flags;
+ u8 id;
+ u8 eid;
+} acpi_entry_lsapic_t;
+
+typedef struct {
+ u8 type;
+ u8 length;
+ u16 reserved;
+ u32 irq_base; /* start of IRQ's this IOSAPIC is responsible for. */
+ unsigned long address; /* Address of this IOSAPIC */
+} acpi_entry_iosapic_t;
+
+/* Defines legacy IRQ->pin mapping */
+typedef struct {
+ u8 type;
+ u8 length;
+ u8 bus; /* Constant 0 == ISA */
+ u8 isa_irq; /* ISA IRQ # */
+ u8 pin; /* called vector in spec; really IOSAPIC pin number */
+ u32 flags; /* Edge/Level trigger & High/Low active */
+ u8 reserved[6];
+} acpi_entry_int_override_t;
+#define INT_OVERRIDE_ACTIVE_LOW 0x03
+#define INT_OVERRIDE_LEVEL_TRIGGER 0x0d
+
+typedef struct {
+ u8 type;
+ u8 length;
+ u32 flags;
+ u8 int_type;
+ u8 id;
+ u8 eid;
+ u8 iosapic_vector;
+ unsigned long reserved;
+ unsigned long global_vector;
+} acpi_entry_platform_src_t;
+
+extern int acpi_parse(acpi_rsdp_t *);
+extern const char *acpi_get_sysname (void);
+
+extern void (*acpi_idle) (void); /* power-management idle function, if any */
+
+#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
new file mode 100644
index 000000000..a1ec41e3b
--- /dev/null
+++ b/include/asm-ia64/atomic.h
@@ -0,0 +1,100 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below! The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/system.h>
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
+
+/*
+ * On IA-64, counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+
+#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old + i;
+ } while (ia64_cmpxchg(v, old, old + i, sizeof(atomic_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old - i;
+ } while (ia64_cmpxchg(v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+ return ia64_atomic_add(i, v) < 0;
+}
+
+#define atomic_add_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
+ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
+ ? ia64_fetch_and_add(i, &(v)->counter) \
+ : ia64_atomic_add(i, v))
+
+#define atomic_sub_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
+ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
+ ? ia64_fetch_and_add(-(i), &(v)->counter) \
+ : ia64_atomic_sub(i, v))
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_add(i,v) atomic_add_return((i), (v))
+#define atomic_sub(i,v) atomic_sub_return((i), (v))
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
new file mode 100644
index 000000000..c10d745e7
--- /dev/null
+++ b/include/asm-ia64/bitops.h
@@ -0,0 +1,241 @@
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 02/04/00 D. Mosberger Require 64-bit alignment for bitops, per suggestion from davem
+ */
+
+#include <asm/system.h>
+
+/*
+ * These operations need to be atomic. The address must be (at least)
+ * 32-bit aligned. Note that there are drivers (e.g., eepro100) which
+ * use these operations to operate on hw-defined data-structures, so
+ * we can't easily change these operations to force a bigger
+ * alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+extern __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg(m, old, new) != old);
+}
+
+extern __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg(m, old, new) != old);
+}
+
+extern __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg(m, old, new) != old);
+}
+
+extern __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+extern __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg(m, old, new) != old);
+ return (old & ~mask) != 0;
+}
+
+extern __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+extern __inline__ int
+test_bit (int nr, volatile void *addr)
+{
+ return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+extern inline unsigned long
+ffz (unsigned long x)
+{
+ unsigned long result;
+
+ __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
+ return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Find the most significant bit that is set (undefined if no bit is
+ * set).
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+ double d = x;
+ long exp;
+
+ __asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
+ return exp - 0xffff;
+}
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+#define ffs(x) __builtin_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+extern __inline__ unsigned long
+hweight64 (unsigned long x)
+{
+ unsigned long result;
+ __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
+ return result;
+}
+
+#define hweight32(x) hweight64 ((x) & 0xfffffffful)
+#define hweight16(x) hweight64 ((x) & 0xfffful)
+#define hweight8(x) hweight64 ((x) & 0xfful)
+
+#endif /* __KERNEL__ */
+
+/*
+ * Find next zero bit in a bitmap reasonably efficiently..
+ */
+extern inline int
+find_next_zero_bit (void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+ unsigned long result = offset & ~63UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 63UL;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (64-offset);
+ if (size < 64)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 64;
+ result += 64;
+ }
+ while (size & ~63UL) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += 64;
+ size -= 64;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+ tmp |= ~0UL << size;
+found_middle:
+ return result + ffz(tmp);
+}
+
+/*
+ * The optimizer actually does good code for this case..
+ */
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#ifdef __KERNEL__
+
+#define ext2_set_bit test_and_set_bit
+#define ext2_clear_bit test_and_clear_bit
+#define ext2_test_bit test_bit
+#define ext2_find_first_zero_bit find_first_zero_bit
+#define ext2_find_next_zero_bit find_next_zero_bit
+
+/* Bitmap functions for the minix filesystem. */
+#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h
new file mode 100644
index 000000000..97c7b2d79
--- /dev/null
+++ b/include/asm-ia64/break.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB 0x80100
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL 0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff --git a/include/asm-ia64/bugs.h b/include/asm-ia64/bugs.h
new file mode 100644
index 000000000..c74d2261c
--- /dev/null
+++ b/include/asm-ia64/bugs.h
@@ -0,0 +1,19 @@
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/processor.h>
+
+/*
+ * I don't know of any ia-64 bugs yet..
+ */
+static void
+check_bugs (void)
+{
+}
diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h
new file mode 100644
index 000000000..4d55d41b8
--- /dev/null
+++ b/include/asm-ia64/byteorder.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/types.h>
+
+static __inline__ __const__ __u64
+__ia64_swab64 (__u64 x)
+{
+ __u64 result;
+
+ __asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x));
+ return result;
+}
+
+static __inline__ __const__ __u32
+__ia64_swab32 (__u32 x)
+{
+ return __ia64_swab64 (x) >> 32;
+}
+
+static __inline__ __const__ __u16
+__ia64_swab16(__u16 x)
+{
+ return __ia64_swab64 (x) >> 48;
+}
+
+#define __arch__swab64(x) __ia64_swab64 (x)
+#define __arch__swab32(x) __ia64_swab32 (x)
+#define __arch__swab16(x) __ia64_swab16 (x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
new file mode 100644
index 000000000..aa1040e13
--- /dev/null
+++ b/include/asm-ia64/cache.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* Bytes per L1 (data) cache line. */
+#define L1_CACHE_BYTES 64
+
+#endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-ia64/checksum.h b/include/asm-ia64/checksum.h
new file mode 100644
index 000000000..21325d8d0
--- /dev/null
+++ b/include/asm-ia64/checksum.h
@@ -0,0 +1,99 @@
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum);
+
+extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial (const unsigned char * buff, int len,
+ unsigned int sum);
+
+/*
+ * Same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or
+ * even better 64-bit) boundary.
+ */
+extern unsigned int csum_partial_copy (const char *src, char *dst, int len,
+ unsigned int sum);
+
+/*
+ * The same as csum_partial, but copies from user space (but on the
+ * ia-64 we have just one address space, so this is identical to the
+ * above).
+ *
+ * This is obsolete and will go away.
+ */
+#define csum_partial_copy_fromuser csum_partial_copy
+
+/*
+ * This is a new version of the above that records errors it finds in
+ * *errp, but continues and zeros the rest of the buffer.
+ */
+extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
+ int len, unsigned int sum,
+ int *errp);
+
+extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
+ int len, unsigned int sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern unsigned short ip_compute_csum (unsigned char *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline unsigned short
+csum_fold (unsigned int sum)
+{
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return ~sum;
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+extern unsigned short int csum_ipv6_magic (struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u16 len,
+ unsigned short proto,
+ unsigned int sum);
+
+#endif /* _ASM_IA64_CHECKSUM_H */
diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h
new file mode 100644
index 000000000..29aa54e6c
--- /dev/null
+++ b/include/asm-ia64/current.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* In kernel mode, thread pointer (r13) is used to point to the
+ current task structure. */
+register struct task_struct *current asm ("r13");
+
+#endif /* _ASM_IA64_CURRENT_H */
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
new file mode 100644
index 000000000..cca4ecdf6
--- /dev/null
+++ b/include/asm-ia64/delay.h
@@ -0,0 +1,90 @@
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+
+extern __inline__ void
+ia64_set_itm (unsigned long val)
+{
+ __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
+}
+
+extern __inline__ unsigned long
+ia64_get_itm (void)
+{
+ unsigned long result;
+
+ __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
+ return result;
+}
+
+extern __inline__ void
+ia64_set_itv (unsigned char vector, unsigned char masked)
+{
+ if (masked > 1)
+ masked = 1;
+
+ __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;"
+ :: "r"((masked << 16) | vector) : "memory");
+}
+
+extern __inline__ void
+ia64_set_itc (unsigned long val)
+{
+ __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
+}
+
+extern __inline__ unsigned long
+ia64_get_itc (void)
+{
+ unsigned long result;
+
+ __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+ return result;
+}
+
+extern __inline__ void
+__delay (unsigned long loops)
+{
+ unsigned long saved_ar_lc;
+
+ if (loops < 1)
+ return;
+
+ __asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
+ __asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1));
+ __asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
+ __asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
+}
+
+extern __inline__ void
+udelay (unsigned long usecs)
+{
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
+ while (usecs--)
+ ;
+#else
+ unsigned long start = ia64_get_itc();
+ unsigned long cycles = usecs*my_cpu_data.cyc_per_usec;
+
+ while (ia64_get_itc() - start < cycles)
+ /* skip */;
+#endif /* CONFIG_IA64_SOFTSDV_HACKS */
+}
+
+#endif /* _ASM_IA64_DELAY_H */
diff --git a/include/asm-ia64/div64.h b/include/asm-ia64/div64.h
new file mode 100644
index 000000000..08c03f672
--- /dev/null
+++ b/include/asm-ia64/div64.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_DIV64_H
+#define _ASM_IA64_DIV64_H
+
+/*
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * vsprintf uses this to divide a 64-bit integer N by a small integer BASE.
+ * This is incredibly hard on IA-64...
+ */
+
+#define do_div(n,base) \
+({ \
+ int _res; \
+ _res = ((unsigned long) (n)) % (unsigned) (base); \
+ (n) = ((unsigned long) (n)) / (unsigned) (base); \
+ _res; \
+})
+
+#endif /* _ASM_IA64_DIV64_H */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
new file mode 100644
index 000000000..3e6185064
--- /dev/null
+++ b/include/asm-ia64/dma.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/io.h> /* need byte IO */
+#include <linux/config.h>
+#include <linux/spinlock.h> /* And spinlocks */
+#include <linux/delay.h>
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+#define MAX_DMA_CHANNELS 8
+#define MAX_DMA_ADDRESS (~0UL) /* no limits on DMAing, for now */
+
+extern spinlock_t dma_spin_lock;
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h
new file mode 100644
index 000000000..a3549e0c4
--- /dev/null
+++ b/include/asm-ia64/efi.h
@@ -0,0 +1,233 @@
+#ifndef _ASM_IA64_EFI_H
+#define _ASM_IA64_EFI_H
+
+/*
+ * Extensible Firmware Interface
+ * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+#include <asm/system.h>
+
+#define EFI_SUCCESS 0
+#define EFI_INVALID_PARAMETER 2
+#define EFI_UNSUPPORTED 3
+#define EFI_BUFFER_TOO_SMALL 4
+
+typedef unsigned long efi_status_t;
+typedef u8 efi_bool_t;
+typedef u16 efi_char16_t; /* UNICODE character */
+
+typedef struct {
+ u32 data1;
+ u16 data2;
+ u16 data3;
+ u8 data4[8];
+} efi_guid_t;
+
+/*
+ * Generic EFI table header
+ */
+typedef struct {
+ u64 signature;
+ u32 revision;
+ u32 headersize;
+ u32 crc32;
+ u32 reserved;
+} efi_table_hdr_t;
+
+/*
+ * Memory map descriptor:
+ */
+
+/* Memory types: */
+#define EFI_RESERVED_TYPE 0
+#define EFI_LOADER_CODE 1
+#define EFI_LOADER_DATA 2
+#define EFI_BOOT_SERVICES_CODE 3
+#define EFI_BOOT_SERVICES_DATA 4
+#define EFI_RUNTIME_SERVICES_CODE 5
+#define EFI_RUNTIME_SERVICES_DATA 6
+#define EFI_CONVENTIONAL_MEMORY 7
+#define EFI_UNUSABLE_MEMORY 8
+#define EFI_ACPI_RECLAIM_MEMORY 9
+#define EFI_ACPI_MEMORY_NVS 10
+#define EFI_MEMORY_MAPPED_IO 11
+#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
+#define EFI_PAL_CODE 13
+#define EFI_MAX_MEMORY_TYPE 14
+
+/* Attribute values: */
+#define EFI_MEMORY_UC 0x0000000000000001 /* uncached */
+#define EFI_MEMORY_WC 0x0000000000000002 /* write-coalescing */
+#define EFI_MEMORY_WT 0x0000000000000004 /* write-through */
+#define EFI_MEMORY_WB 0x0000000000000008 /* write-back */
+#define EFI_MEMORY_WP 0x0000000000001000 /* write-protect */
+#define EFI_MEMORY_RP 0x0000000000002000 /* read-protect */
+#define EFI_MEMORY_XP 0x0000000000004000 /* execute-protect */
+#define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */
+#define EFI_MEMORY_DESCRIPTOR_VERSION 1
+
+typedef struct {
+ u32 type;
+ u32 pad;
+ u64 phys_addr;
+ u64 virt_addr;
+ u64 num_pages;
+ u64 attribute;
+} efi_memory_desc_t;
+
+typedef int efi_freemem_callback_t (u64 start, u64 end, void *arg);
+
+/*
+ * Types and defines for Time Services
+ */
+#define EFI_TIME_ADJUST_DAYLIGHT 0x1
+#define EFI_TIME_IN_DAYLIGHT 0x2
+#define EFI_UNSPECIFIED_TIMEZONE 0x07ff
+
+typedef struct {
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 pad1;
+ u32 nanosecond;
+ s16 timezone;
+ u8 daylight;
+ u8 pad2;
+} efi_time_t;
+
+typedef struct {
+ u32 resolution;
+ u32 accuracy;
+ u8 sets_to_zero;
+} efi_time_cap_t;
+
+/*
+ * Types and defines for EFI ResetSystem
+ */
+#define EFI_RESET_COLD 0
+#define EFI_RESET_WARM 1
+
+/*
+ * EFI Runtime Services table
+ */
+#define EFI_RUNTIME_SERVICES_SIGNATURE 0x5652453544e5552
+#define EFI_RUNTIME_SERVICES_REVISION 0x00010000
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 get_time;
+ u64 set_time;
+ u64 get_wakeup_time;
+ u64 set_wakeup_time;
+ u64 set_virtual_address_map;
+ u64 convert_pointer;
+ u64 get_variable;
+ u64 get_next_variable;
+ u64 set_variable;
+ u64 get_next_high_mono_count;
+ u64 reset_system;
+} efi_runtime_services_t;
+
+typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
+typedef efi_status_t efi_set_time_t (efi_time_t *tm);
+typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
+ efi_time_t *tm);
+typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm);
+typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data);
+typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
+ efi_guid_t *vendor);
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 attr,
+ unsigned long data_size, void *data);
+typedef efi_status_t efi_get_next_high_mono_count_t (u64 *count);
+typedef void efi_reset_system_t (int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data);
+
+/*
+ * EFI Configuration Table and GUID definitions
+ */
+
+#define MPS_TABLE_GUID \
+ ((efi_guid_t) { 0xeb9d2d2f, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
+
+#define ACPI_TABLE_GUID \
+ ((efi_guid_t) { 0xeb9d2d30, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
+
+#define SMBIOS_TABLE_GUID \
+ ((efi_guid_t) { 0xeb9d2d31, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
+
+#define SAL_SYSTEM_TABLE_GUID \
+ ((efi_guid_t) { 0xeb9d2d32, 0x2d88, 0x11d3, { 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }})
+
+typedef struct {
+ efi_guid_t guid;
+ u64 table;
+} efi_config_table_t;
+
+#define EFI_SYSTEM_TABLE_SIGNATURE 0x5453595320494249
+#define EFI_SYSTEM_TABLE_REVISION ((0 << 16) | (91))
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ u64 con_in_handle;
+ u64 con_in;
+ u64 con_out_handle;
+ u64 con_out;
+ u64 stderr_handle;
+ u64 stderr;
+ u64 runtime;
+ u64 boottime;
+ u64 nr_tables;
+ u64 tables;
+} efi_system_table_t;
+
+/*
+ * All runtime access to EFI goes through this structure:
+ */
+extern struct efi {
+ efi_system_table_t *systab; /* EFI system table */
+ void *mps; /* MPS table */
+ void *acpi; /* ACPI table */
+ void *smbios; /* SM BIOS table */
+ void *sal_systab; /* SAL system table */
+ void *boot_info; /* boot info table */
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+} efi;
+
+extern inline int
+efi_guidcmp (efi_guid_t left, efi_guid_t right)
+{
+ return memcmp(&left, &right, sizeof (efi_guid_t));
+}
+
+extern void efi_init (void);
+extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+extern void efi_gettimeofday (struct timeval *tv);
+extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+
+#endif /* _ASM_IA64_EFI_H */
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
new file mode 100644
index 000000000..8176841d1
--- /dev/null
+++ b/include/asm-ia64/elf.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_IA64_ELF_H
+#define _ASM_IA64_ELF_H
+
+/*
+ * ELF architecture specific definitions.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x) == EM_IA_64)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_IA_64
+
+#define USE_ELF_CORE_DUMP
+
+/* always align to 64KB to allow for future page sizes of up to 64KB: */
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader. We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
+
+/*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+ * used to map the register backing store. I don't see any better
+ * place to do this, but we should discuss this with Linus once we can
+ * talk to him...
+ */
+extern void ia64_init_addr_space (void);
+#define ELF_PLAT_INIT(_r) ia64_init_addr_space()
+
+/* ELF register definitions. This is needed for core dump support. */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ * r0-r31
+ * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+ * predicate registers (p0-p63)
+ * b0-b7
+ * ip cfm psr
+ * ar.rsc ar.bsp ar.bspstore ar.rnat
+ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
+ */
+#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
+#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia64_fpreg elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+struct pt_regs; /* forward declaration... */
+extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
+#define ELF_CORE_COPY_REGS(_dest,_regs) ia64_elf_core_copy_regs(_regs, _dest);
+
+/* This macro yields a bitmask that programs can use to figure out
+ what instruction set this CPU supports. */
+#define ELF_HWCAP 0
+
+/* This macro yields a string that ld.so will use to load
+ implementation specific libraries for optimization. Not terribly
+ relevant until we have real hardware to play with... */
+#define ELF_PLATFORM 0
+
+#ifdef __KERNEL__
+# define SET_PERSONALITY(EX,IBCS2) \
+ (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
+#endif
+
+#endif /* _ASM_IA64_ELF_H */
diff --git a/include/asm-ia64/errno.h b/include/asm-ia64/errno.h
new file mode 100644
index 000000000..39233bd14
--- /dev/null
+++ b/include/asm-ia64/errno.h
@@ -0,0 +1,139 @@
+#ifndef _ASM_IA64_ERRNO_H
+#define _ASM_IA64_ERRNO_H
+
+/*
+ * This is derived from the Linux/x86 version.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif /* _ASM_IA64_ERRNO_H */
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
new file mode 100644
index 000000000..4cf46bee2
--- /dev/null
+++ b/include/asm-ia64/fcntl.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_IA64_FCNTL_H
+#define _ASM_IA64_FCNTL_H
+/*
+ * This is mostly compatible with Linux/x86.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * open/fcntl - O_SYNC is only implemented on blocks devices and on
+ * files located on an ext2 file system
+ */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
+#define O_LARGEFILE 0100000
+#define O_DIRECTORY 0200000 /* must be a directory */
+#define O_NOFOLLOW 0400000 /* don't follow links */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get f_flags */
+#define F_SETFD 2 /* set f_flags */
+#define F_GETFL 3 /* more flags (cloexec) */
+#define F_SETFL 4
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+#endif /* _ASM_IA64_FCNTL_H */
diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h
new file mode 100644
index 000000000..a6facbd8c
--- /dev/null
+++ b/include/asm-ia64/fpswa.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_IA64_FPSWA_H
+#define _ASM_IA64_FPSWA_H
+
+/*
+ * Floating-point Software Assist
+ *
+ * Copyright (C) 1999 Intel Corporation.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
+ */
+
+#if 0
+#define FPSWA_BUG
+#endif
+
+typedef struct {
+ /* 4 * 128 bits */
+ unsigned long fp_lp[4*2];
+} fp_state_low_preserved_t;
+
+typedef struct {
+ /* 10 * 128 bits */
+ unsigned long fp_lv[10 * 2];
+} fp_state_low_volatile_t;
+
+typedef struct {
+ /* 16 * 128 bits */
+ unsigned long fp_hp[16 * 2];
+} fp_state_high_preserved_t;
+
+typedef struct {
+ /* 96 * 128 bits */
+ unsigned long fp_hv[96 * 2];
+} fp_state_high_volatile_t;
+
+/**
+ * floating point state to be passed to the FP emulation library by
+ * the trap/fault handler
+ */
+typedef struct {
+ unsigned long bitmask_low64;
+ unsigned long bitmask_high64;
+ fp_state_low_preserved_t *fp_state_low_preserved;
+ fp_state_low_volatile_t *fp_state_low_volatile;
+ fp_state_high_preserved_t *fp_state_high_preserved;
+ fp_state_high_volatile_t *fp_state_high_volatile;
+} fp_state_t;
+
+typedef struct {
+ unsigned long status;
+ unsigned long err0;
+ unsigned long err1;
+ unsigned long err2;
+} fpswa_ret_t;
+
+/**
+ * function header for the Floating Point software assist
+ * library. This function is invoked by the Floating point software
+ * assist trap/fault handler.
+ */
+typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
+ unsigned long *fsr, unsigned long *isr, unsigned long *preds,
+ unsigned long *ifs, fp_state_t *fp_state);
+
+/**
+ * This is the FPSWA library interface as defined by EFI. We need to pass a
+ * pointer to the interface itself on a call to the assist library
+ */
+typedef struct {
+ unsigned int revision;
+ unsigned int reserved;
+ efi_fpswa_t fpswa;
+} fpswa_interface_t;
+
+#endif /* _ASM_IA64_FPSWA_H */
diff --git a/include/asm-ia64/fpu.h b/include/asm-ia64/fpu.h
new file mode 100644
index 000000000..0dedf74b8
--- /dev/null
+++ b/include/asm-ia64/fpu.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_IA64_FPU_H
+#define _ASM_IA64_FPU_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/types.h>
+
+/* floating point status register: */
+#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
+#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
+#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
+#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
+#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
+#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
+#define FPSR_S0(x) ((x) << 6)
+#define FPSR_S1(x) ((x) << 19)
+#define FPSR_S2(x) (__IA64_UL(x) << 32)
+#define FPSR_S3(x) (__IA64_UL(x) << 45)
+
+/* floating-point status field controls: */
+#define FPSF_FTZ (1 << 0) /* flush-to-zero */
+#define FPSF_WRE (1 << 1) /* widest-range exponent */
+#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
+#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
+#define FPSF_TD (1 << 6) /* trap disabled */
+
+/* floating-point status field flags: */
+#define FPSF_V (1 << 7) /* invalid operation flag */
+#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
+#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
+#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
+#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
+#define FPSF_I (1 << 12) /* inexact (IEEE) flag) */
+
+/* floating-point rounding control: */
+#define FPRC_NEAREST 0x0
+#define FPRC_NEGINF 0x1
+#define FPRC_POSINF 0x2
+#define FPRC_TRUNC 0x3
+
+#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
+
+/* This default value is the same as HP-UX uses. Don't change it
+ without a very good reason. */
+#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
+ | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
+ | FPSR_S0 (FPSF_DEFAULT) \
+ | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
+ | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
+ | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
+
+# ifndef __ASSEMBLY__
+
+struct ia64_fpreg {
+ union {
+ unsigned long bits[2];
+ } u;
+} __attribute__ ((aligned (16)));
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_FPU_H */
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
new file mode 100644
index 000000000..95523854e
--- /dev/null
+++ b/include/asm-ia64/hardirq.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_IA64_HARDIRQ_H
+#define _ASM_IA64_HARDIRQ_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+
+extern unsigned int local_irq_count[NR_CPUS];
+extern unsigned long hardirq_no[NR_CPUS];
+
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+
+#define in_interrupt() \
+({ \
+ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu]) != 0; \
+})
+
+#ifndef CONFIG_SMP
+# define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+# define hardirq_endlock(cpu) ((void) 0)
+
+# define hardirq_enter(cpu, irq) (local_irq_count[cpu]++)
+# define hardirq_exit(cpu, irq) (local_irq_count[cpu]--)
+
+# define synchronize_irq() barrier()
+#else
+
+#include <linux/spinlock.h>
+
+#include <asm/atomic.h>
+#include <asm/smp.h>
+
+extern int global_irq_holder;
+extern spinlock_t global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == cpu) {
+ global_irq_holder = NO_PROC_ID;
+ spin_unlock(&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu, int irq)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu, int irq)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !local_irq_count[cpu] && !test_bit(0,&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) ((void)0)
+
+extern void synchronize_irq(void);
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/include/asm-ia64/hdreg.h b/include/asm-ia64/hdreg.h
new file mode 100644
index 000000000..62e422faf
--- /dev/null
+++ b/include/asm-ia64/hdreg.h
@@ -0,0 +1,12 @@
+/*
+ * linux/include/asm-ia64/hdreg.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+#ifndef __ASM_IA64_HDREG_H
+#define __ASM_IA64_HDREG_H
+
+typedef unsigned short ide_ioreg_t;
+
+#endif /* __ASM_IA64_HDREG_H */
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
new file mode 100644
index 000000000..d76ce8b58
--- /dev/null
+++ b/include/asm-ia64/ia32.h
@@ -0,0 +1,313 @@
+#ifndef _ASM_IA64_IA32_H
+#define _ASM_IA64_IA32_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_IA32_SUPPORT
+
+/*
+ * 32 bit structures for IA32 support.
+ */
+
+/* 32bit compatibility types */
+typedef unsigned int __kernel_size_t32;
+typedef int __kernel_ssize_t32;
+typedef int __kernel_ptrdiff_t32;
+typedef int __kernel_time_t32;
+typedef int __kernel_clock_t32;
+typedef int __kernel_pid_t32;
+typedef unsigned short __kernel_ipc_pid_t32;
+typedef unsigned short __kernel_uid_t32;
+typedef unsigned short __kernel_gid_t32;
+typedef unsigned short __kernel_dev_t32;
+typedef unsigned int __kernel_ino_t32;
+typedef unsigned short __kernel_mode_t32;
+typedef unsigned short __kernel_umode_t32;
+typedef short __kernel_nlink_t32;
+typedef int __kernel_daddr_t32;
+typedef int __kernel_off_t32;
+typedef unsigned int __kernel_caddr_t32;
+typedef long __kernel_loff_t32;
+typedef __kernel_fsid_t __kernel_fsid_t32;
+
+#define IA32_PAGE_SHIFT 12 /* 4KB pages */
+#define IA32_PAGE_SIZE (1ULL << IA32_PAGE_SHIFT)
+
+/* fcntl.h */
+struct flock32 {
+ short l_type;
+ short l_whence;
+ __kernel_off_t32 l_start;
+ __kernel_off_t32 l_len;
+ __kernel_pid_t32 l_pid;
+ short __unused;
+};
+
+
+/* sigcontext.h */
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the
+ * normal i387 hardware setup, the extra "status"
+ * word is used to save the coprocessor status word
+ * before entering the handler.
+ */
+struct _fpreg_ia32 {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _fpstate_ia32 {
+ unsigned int cw,
+ sw,
+ tag,
+ ipoff,
+ cssel,
+ dataoff,
+ datasel;
+ struct _fpreg_ia32 _st[8];
+ unsigned int status;
+};
+
+struct sigcontext_ia32 {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int esp;
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int trapno;
+ unsigned int err;
+ unsigned int eip;
+ unsigned short cs, __csh;
+ unsigned int eflags;
+ unsigned int esp_at_signal;
+ unsigned short ss, __ssh;
+ struct _fpstate_ia32 * fpstate;
+ unsigned int oldmask;
+ unsigned int cr2;
+};
+
+/* signal.h */
+#define _IA32_NSIG 64
+#define _IA32_NSIG_BPW 32
+#define _IA32_NSIG_WORDS (_IA32_NSIG / _IA32_NSIG_BPW)
+
+typedef struct {
+ unsigned int sig[_IA32_NSIG_WORDS];
+} sigset32_t;
+
+struct sigaction32 {
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ sigset32_t sa_mask; /* A 32 bit mask */
+};
+
+struct ucontext_ia32 {
+ unsigned long uc_flags;
+ struct ucontext_ia32 *uc_link;
+ stack_t uc_stack;
+ struct sigcontext_ia32 uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct stat32 {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned int st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned int st_size;
+ unsigned int st_blksize;
+ unsigned int st_blocks;
+ unsigned int st_atime;
+ unsigned int __unused1;
+ unsigned int st_mtime;
+ unsigned int __unused2;
+ unsigned int st_ctime;
+ unsigned int __unused3;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+struct statfs32 {
+ int f_type;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ __kernel_fsid_t32 f_fsid;
+ int f_namelen; /* SunOS ignores this field. */
+ int f_spare[6];
+};
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define _ASM_IA64_ELF_H /* Don't include elf.h */
+
+#include <linux/sched.h>
+#include <asm/processor.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x) == EM_386)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_386
+
+#define IA32_PAGE_OFFSET 0xc0000000
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader. We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
+
+void ia64_elf32_init(struct pt_regs *regs);
+#define ELF_PLAT_INIT(_r) ia64_elf32_init(_r)
+
+#define elf_addr_t u32
+#define elf_caddr_t u32
+
+/* ELF register definitions. This is needed for core dump support. */
+
+#define ELF_NGREG 128 /* XXX fix me */
+#define ELF_NFPREG 128 /* XXX fix me */
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct {
+ unsigned long w0;
+ unsigned long w1;
+} elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* This macro yields a bitmask that programs can use to figure out
+ what instruction set this CPU supports. */
+#define ELF_HWCAP 0
+
+/* This macro yields a string that ld.so will use to load
+ implementation specific libraries for optimization. Not terribly
+ relevant until we have real hardware to play with... */
+#define ELF_PLATFORM 0
+
+#ifdef __KERNEL__
+# define SET_PERSONALITY(EX,IBCS2) \
+ (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
+#endif
+
+#define IA32_EFLAG 0x200
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define __USER_CS 0x23
+#define __USER_DS 0x2B
+
+#define SEG_LIM 32
+#define SEG_TYPE 52
+#define SEG_SYS 56
+#define SEG_DPL 57
+#define SEG_P 59
+#define SEG_DB 62
+#define SEG_G 63
+
+#define FIRST_TSS_ENTRY 6
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+
+#define IA64_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, segdb, granularity) \
+ ((base) | \
+ (limit << SEG_LIM) | \
+ (segtype << SEG_TYPE) | \
+ (nonsysseg << SEG_SYS) | \
+ (dpl << SEG_DPL) | \
+ (segpresent << SEG_P) | \
+ (segdb << SEG_DB) | \
+ (granularity << SEG_G))
+
+#define IA32_SEG_BASE 16
+#define IA32_SEG_TYPE 40
+#define IA32_SEG_SYS 44
+#define IA32_SEG_DPL 45
+#define IA32_SEG_P 47
+#define IA32_SEG_HIGH_LIMIT 48
+#define IA32_SEG_AVL 52
+#define IA32_SEG_DB 54
+#define IA32_SEG_G 55
+#define IA32_SEG_HIGH_BASE 56
+
+#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, granularity) \
+	((limit & 0xFFFF) | \
+	 ((base & 0xFFFFFF) << IA32_SEG_BASE) | /* base[23:0] at bit 16; & binds looser than <<, so the mask needs parens */ \
+	 (segtype << IA32_SEG_TYPE) | \
+	 (nonsysseg << IA32_SEG_SYS) | \
+	 (dpl << IA32_SEG_DPL) | \
+	 (segpresent << IA32_SEG_P) | \
+	 (((limit >> 16) & 0xF) << IA32_SEG_HIGH_LIMIT) | \
+	 (avl << IA32_SEG_AVL) | \
+	 (segdb << IA32_SEG_DB) | \
+	 (granularity << IA32_SEG_G) | \
+	 (((base >> 24) & 0xFF) << IA32_SEG_HIGH_BASE))
+
+#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
+#define IA32_CR4 0 /* No architectural extensions */
+
+/*
+ * IA32 floating point control registers starting values
+ */
+
+#define IA32_FSR_DEFAULT 0x555500000 /* set all tag bits */
+#define IA32_FCR_DEFAULT 0x33f /* single precision, all masks */
+
+#define ia32_start_thread(regs,new_ip,new_sp) do { \
+ set_fs(USER_DS); \
+ ia64_psr(regs)->cpl = 3; /* set user mode */ \
+ ia64_psr(regs)->ri = 0; /* clear return slot number */ \
+ ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
+ regs->cr_iip = new_ip; \
+ regs->r12 = new_sp; \
+ regs->ar_rnat = 0; \
+ regs->loadrs = 0; \
+} while (0)
+
+extern void ia32_gdt_init (void);
+extern long ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs);
+extern void ia32_init_addr_space (struct pt_regs *regs);
+extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
+
+#endif /* !CONFIG_IA32_SUPPORT */
+
+#endif /* _ASM_IA64_IA32_H */
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
new file mode 100644
index 000000000..6ec01ecd4
--- /dev/null
+++ b/include/asm-ia64/ide.h
@@ -0,0 +1,115 @@
+/*
+ * linux/include/asm-ia64/ide.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+/*
+ * This file contains the ia64 architecture specific IDE code.
+ */
+
+#ifndef __ASM_IA64_IDE_H
+#define __ASM_IA64_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 10
+#endif
+
+#define ide__sti() __sti()
+
+static __inline__ int
+ide_default_irq (ide_ioreg_t base)
+{
+ switch (base) {
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+ case 0x1e8: return 11;
+ case 0x168: return 10;
+ case 0x1e0: return 8;
+ case 0x160: return 12;
+ default:
+ return 0;
+ }
+}
+
+static __inline__ ide_ioreg_t
+ide_default_io_base (int index)
+{
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ case 4: return 0x1e0;
+ case 5: return 0x160;
+ default:
+ return 0;
+ }
+}
+
+static __inline__ void
+ide_init_hwif_ports (hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
+{
+ ide_ioreg_t reg = data_port;
+ int i;
+
+ for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+ hw->io_ports[i] = reg;
+ reg += 1;
+ }
+ if (ctrl_port) {
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ } else {
+ hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
+ }
+ if (irq != NULL)
+ *irq = 0;
+}
+
+static __inline__ void
+ide_init_default_hwifs (void)
+{
+#ifndef CONFIG_BLK_DEV_IDEPCI
+ hw_regs_t hw;
+ int index;
+
+ for(index = 0; index < MAX_HWIFS; index++) {
+ ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
+ hw.irq = ide_default_irq(ide_default_io_base(index));
+ ide_register_hw(&hw, NULL);
+ }
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+}
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
+
+#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
+#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
+#define ide_check_region(from,extent) check_region((from), (extent))
+#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
+#define ide_release_region(from,extent) release_region((from), (extent))
+
+/*
+ * The following are not needed for the non-m68k ports
+ */
+#define ide_ack_intr(hwif) (1)
+#define ide_fix_driveid(id) do {} while (0)
+#define ide_release_lock(lock) do {} while (0)
+#define ide_get_lock(lock, hdlr, data) do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_IA64_IDE_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
new file mode 100644
index 000000000..ad532b26b
--- /dev/null
+++ b/include/asm-ia64/io.h
@@ -0,0 +1,446 @@
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that makes gcc able to optimize it as
+ * well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO do { } while (0)
+#define SLOW_DOWN_IO do { } while (0)
+
+#define __IA64_UNCACHED_OFFSET 0xc000000000000000 /* region 6 */
+
+#define IO_SPACE_LIMIT 0xffff
+
+# ifdef __KERNEL__
+
+#include <asm/page.h>
+#include <asm/system.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+ return (unsigned long) address - PAGE_OFFSET;
+}
+
+static inline void*
+phys_to_virt(unsigned long address)
+{
+ return (void *) (address + PAGE_OFFSET);
+}
+
+#define bus_to_virt phys_to_virt
+#define virt_to_bus virt_to_phys
+
+# else /* !KERNEL */
+# endif /* !KERNEL */
+
+/*
+ * Memory fence w/accept. This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
+
+extern inline const unsigned long
+__ia64_get_io_port_base (void)
+{
+ unsigned long addr;
+
+ __asm__ ("mov %0=ar.k0;;" : "=r"(addr));
+ return __IA64_UNCACHED_OFFSET | addr;
+}
+
+extern inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+ const unsigned long io_base = __ia64_get_io_port_base();
+ unsigned long addr;
+
+ addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
+ return (void *) addr;
+}
+
+/*
+ * For the in/out instructions, we need to do:
+ *
+ * o "mf" _before_ doing the I/O access to ensure that all prior
+ * accesses to memory occur before the I/O access
+ * o "mf.a" _after_ doing the I/O access to ensure that the access
+ * has completed before we're doing any other I/O accesses
+ *
+ * The former is necessary because we might be doing normal (cached) memory
+ * accesses, e.g., to set up a DMA descriptor table and then do an "outX()"
+ * to tell the DMA controller to start the DMA operation. The "mf" ahead
+ * of the I/O operation ensures that the DMA table is correct when the I/O
+ * access occurs.
+ *
+ * The mf.a is necessary to ensure that all I/O access occur in program
+ * order. --davidm 99/12/07
+ */
+
+extern inline unsigned int
+__inb (unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+ unsigned char ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+extern inline unsigned int
+__inw (unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+ unsigned short ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+extern inline unsigned int
+__inl (unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+ unsigned int ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+extern inline void
+__insb (unsigned long port, void *dst, unsigned long count)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+ unsigned char *dp = dst;
+
+ __ia64_mf_a();
+ while (count--) {
+ *dp++ = *addr;
+ }
+ __ia64_mf_a();
+ return;
+}
+
+extern inline void
+__insw (unsigned long port, void *dst, unsigned long count)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+ unsigned short *dp = dst;
+
+ __ia64_mf_a();
+ while (count--) {
+ *dp++ = *addr;
+ }
+ __ia64_mf_a();
+ return;
+}
+
+extern inline void
+__insl (unsigned long port, void *dst, unsigned long count)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+ unsigned int *dp = dst;
+
+ __ia64_mf_a();
+ while (count--) {
+ *dp++ = *addr;
+ }
+ __ia64_mf_a();
+ return;
+}
+
+extern inline void
+__outb (unsigned char val, unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+extern inline void
+__outw (unsigned short val, unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+extern inline void
+__outl (unsigned int val, unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+extern inline void
+__outsb (unsigned long port, const void *src, unsigned long count)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+ const unsigned char *sp = src;
+
+ while (count--) {
+ *addr = *sp++;
+ }
+ __ia64_mf_a();
+ return;
+}
+
+extern inline void
+__outsw (unsigned long port, const void *src, unsigned long count)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+ const unsigned short *sp = src;
+
+ while (count--) {
+ *addr = *sp++;
+ }
+ __ia64_mf_a();
+ return;
+}
+
+extern inline void
+__outsl (unsigned long port, const void *src, unsigned long count)
+{
+	volatile unsigned int *addr = __ia64_mk_io_addr(port);
+	const unsigned int *sp = src;
+
+	while (count--) {
+		*addr = *sp++;
+	}
+	__ia64_mf_a();
+	return;
+}
+
+#define inb __inb
+#define inw __inw
+#define inl __inl
+#define insb __insb
+#define insw __insw
+#define insl __insl
+#define outb __outb
+#define outw __outw
+#define outl __outl
+#define outsb __outsb
+#define outsw __outsw
+#define outsl __outsl
+
+/*
+ * The address passed to these functions are ioremap()ped already.
+ */
+extern inline unsigned long
+__readb (unsigned long addr)
+{
+ return *(volatile unsigned char *)addr;
+}
+
+extern inline unsigned long
+__readw (unsigned long addr)
+{
+ return *(volatile unsigned short *)addr;
+}
+
+extern inline unsigned long
+__readl (unsigned long addr)
+{
+ return *(volatile unsigned int *) addr;
+}
+
+extern inline unsigned long
+__readq (unsigned long addr)
+{
+ return *(volatile unsigned long *) addr;
+}
+
+extern inline void
+__writeb (unsigned char val, unsigned long addr)
+{
+ *(volatile unsigned char *) addr = val;
+}
+
+extern inline void
+__writew (unsigned short val, unsigned long addr)
+{
+ *(volatile unsigned short *) addr = val;
+}
+
+extern inline void
+__writel (unsigned int val, unsigned long addr)
+{
+ *(volatile unsigned int *) addr = val;
+}
+
+extern inline void
+__writeq (unsigned long val, unsigned long addr)
+{
+ *(volatile unsigned long *) addr = val;
+}
+
+#define readb __readb
+#define readw __readw
+#define readl __readl
+#define readq __readq
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define __raw_readq readq
+#define writeb __writeb
+#define writew __writew
+#define writel __writel
+#define writeq __writeq
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writeq writeq
+
+#ifndef inb_p
+# define inb_p inb
+#endif
+#ifndef inw_p
+# define inw_p inw
+#endif
+#ifndef inl_p
+# define inl_p inl
+#endif
+
+#ifndef outb_p
+# define outb_p outb
+#endif
+#ifndef outw_p
+# define outw_p outw
+#endif
+#ifndef outl_p
+# define outl_p outl
+#endif
+
+/*
+ * An "address" in IO memory space is not clearly either an integer
+ * or a pointer. We will accept both, thus the casts.
+ *
+ * On ia-64, we access the physical I/O memory space through the
+ * uncached kernel region.
+ */
+static inline void *
+ioremap (unsigned long offset, unsigned long size)
+{
+ return (void *) (__IA64_UNCACHED_OFFSET | (offset));
+}
+
+static inline void
+iounmap (void *addr)
+{
+}
+
+#define ioremap_nocache(o,s) ioremap(o,s)
+
+# ifdef __KERNEL__
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void __ia64_memcpy_fromio (void *, unsigned long, long);
+extern void __ia64_memcpy_toio (unsigned long, void *, long);
+extern void __ia64_memset_c_io (unsigned long, unsigned long, long);
+
+#define memcpy_fromio(to,from,len) \
+ __ia64_memcpy_fromio((to),(unsigned long)(from),(len))
+#define memcpy_toio(to,from,len) \
+ __ia64_memcpy_toio((unsigned long)(to),(from),(len))
+#define memset_io(addr,c,len) \
+ __ia64_memset_c_io((unsigned long)(addr),0x0101010101010101UL*(u8)(c),(len))
+
+#define __HAVE_ARCH_MEMSETW_IO
+#define memsetw_io(addr,c,len) \
+  __ia64_memset_c_io((unsigned long)(addr),0x0001000100010001UL*(u16)(c),(len))
+
+/*
+ * XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
+ * just copy it. The net code will then do the checksum later. Presently
+ * only used by some shared memory 8390 Ethernet cards anyway.
+ */
+
+#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
+
+#if 0
+
+/*
+ * XXX this is the kind of legacy stuff we want to get rid of with IA-64... --davidm 99/12/02
+ */
+
+/*
+ * This is used for checking BIOS signatures. It's not clear at all
+ * why this is here. This implementation seems to be the same on
+ * all architectures. Strange.
+ */
+static inline int
+check_signature (unsigned long io_addr, const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 0
+
+#endif
+
+/*
+ * The caches on some architectures aren't DMA-coherent and have need
+ * to handle this in software. There are two types of operations that
+ * can be applied to dma buffers.
+ *
+ * - dma_cache_inv(start, size) invalidates the affected parts of the
+ * caches. Dirty lines of the caches may be written back or simply
+ * be discarded. This operation is necessary before dma operations
+ * to the memory.
+ *
+ * - dma_cache_wback(start, size) makes caches and memory coherent
+ * by writing the content of the caches back to memory, if necessary
+ * (cache flush).
+ *
+ * - dma_cache_wback_inv(start, size) Like dma_cache_wback() but the
+ * function also invalidates the affected part of the caches as
+ * necessary before DMA transfers from outside to memory.
+ *
+ * Fortunately, the IA-64 architecture mandates cache-coherent DMA, so
+ * these functions can be implemented as no-ops.
+ */
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+# endif /* __KERNEL__ */
+#endif /* _ASM_IA64_IO_H */
diff --git a/include/asm-ia64/ioctl.h b/include/asm-ia64/ioctl.h
new file mode 100644
index 000000000..69b01d8d8
--- /dev/null
+++ b/include/asm-ia64/ioctl.h
@@ -0,0 +1,77 @@
+#ifndef _ASM_IA64_IOCTL_H
+#define _ASM_IA64_IOCTL_H
+
+/*
+ * This is mostly derived from the Linux/x86 version.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The ia64 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASM_IA64_IOCTL_H */
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h
new file mode 100644
index 000000000..9cd897f1b
--- /dev/null
+++ b/include/asm-ia64/ioctls.h
@@ -0,0 +1,87 @@
+#ifndef _ASM_IA64_IOCTLS_H
+#define _ASM_IA64_IOCTLS_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
new file mode 100644
index 000000000..95934da1e
--- /dev/null
+++ b/include/asm-ia64/iosapic.h
@@ -0,0 +1,123 @@
+#ifndef __ASM_IA64_IOSAPIC_H
+#define __ASM_IA64_IOSAPIC_H
+
+#include <linux/config.h>
+
+#define IO_SAPIC_DEFAULT_ADDR 0xFEC00000
+
+#define IO_SAPIC_REG_SELECT 0x0
+#define IO_SAPIC_WINDOW 0x10
+#define IO_SAPIC_EOI 0x40
+
+#define IO_SAPIC_VERSION 0x1
+
+/*
+ * Redirection table entry
+ */
+
/*
 * Each redirection table entry occupies two 32-bit words: entry i's low
 * word lives at register offset 0x10 + 2*i, the high word at 0x11 + 2*i.
 * The argument is parenthesized so expression arguments such as
 * IO_SAPIC_RTE_LOW(pin + 1) expand correctly.
 */
#define IO_SAPIC_RTE_LOW(i)	(0x10 + (i)*2)
#define IO_SAPIC_RTE_HIGH(i)	(0x11 + (i)*2)
+
+
+#define IO_SAPIC_DEST_SHIFT 16
+
+/*
+ * Delivery mode
+ */
+
+#define IO_SAPIC_DELIVERY_SHIFT 8
+#define IO_SAPIC_FIXED 0x0
+#define IO_SAPIC_LOWEST_PRIORITY 0x1
+#define IO_SAPIC_PMI 0x2
+#define IO_SAPIC_NMI 0x4
+#define IO_SAPIC_INIT 0x5
+#define IO_SAPIC_EXTINT 0x7
+
+/*
+ * Interrupt polarity
+ */
+
+#define IO_SAPIC_POLARITY_SHIFT 13
+#define IO_SAPIC_POL_HIGH 0
+#define IO_SAPIC_POL_LOW 1
+
+/*
+ * Trigger mode
+ */
+
+#define IO_SAPIC_TRIGGER_SHIFT 15
+#define IO_SAPIC_EDGE 0
+#define IO_SAPIC_LEVEL 1
+
+/*
+ * Mask bit
+ */
+
#define IO_SAPIC_MASK_SHIFT	16
#define IO_SAPIC_UNMASK		0
#define IO_SAPIC_MASK		1
/* Historical misspelling kept as an alias so existing users still build. */
#define IO_SAPIC_MSAK		IO_SAPIC_MASK
+
+/*
+ * Bus types
+ */
+#define BUS_ISA 0 /* ISA Bus */
+#define BUS_PCI 1 /* PCI Bus */
+
+#ifndef CONFIG_IA64_PCI_FIRMWARE_IRQ
+struct intr_routing_entry {
+ unsigned char srcbus;
+ unsigned char srcbusno;
+ unsigned char srcbusirq;
+ unsigned char iosapic_pin;
+ unsigned char dstiosapic;
+ unsigned char mode;
+ unsigned char trigger;
+ unsigned char polarity;
+};
+
+extern struct intr_routing_entry intr_routing[];
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/irq.h>
+
+/*
+ * IOSAPIC Version Register return 32 bit structure like:
+ * {
+ * unsigned int version : 8;
+ * unsigned int reserved1 : 8;
+ * unsigned int pins : 8;
+ * unsigned int reserved2 : 8;
+ * }
+ */
+extern unsigned int iosapic_version(unsigned long);
+extern void iosapic_init(unsigned long);
+
+struct iosapic_vector {
+ unsigned long iosapic_base; /* IOSAPIC Base address */
+ char pin; /* IOSAPIC pin (-1 == No data) */
+ unsigned char bus; /* Bus number */
+ unsigned char baseirq; /* Base IRQ handled by this IOSAPIC */
+ unsigned char bustype; /* Bus type (ISA, PCI, etc) */
+ unsigned int busdata; /* Bus specific ID */
+ /* These bitfields use the values defined above */
+ unsigned char dmode : 3;
+ unsigned char polarity : 1;
+ unsigned char trigger : 1;
+ unsigned char UNUSED : 3;
+};
+extern struct iosapic_vector iosapic_vector[NR_IRQS];
+
+#define iosapic_addr(v) iosapic_vector[v].iosapic_base
+#define iosapic_pin(v) iosapic_vector[v].pin
+#define iosapic_bus(v) iosapic_vector[v].bus
+#define iosapic_baseirq(v) iosapic_vector[v].baseirq
+#define iosapic_bustype(v) iosapic_vector[v].bustype
+#define iosapic_busdata(v) iosapic_vector[v].busdata
+#define iosapic_dmode(v) iosapic_vector[v].dmode
+#define iosapic_trigger(v) iosapic_vector[v].trigger
+#define iosapic_polarity(v) iosapic_vector[v].polarity
+
+# endif /* !__ASSEMBLY__ */
+#endif /* __ASM_IA64_IOSAPIC_H */
diff --git a/include/asm-ia64/ipc.h b/include/asm-ia64/ipc.h
new file mode 100644
index 000000000..36f43063a
--- /dev/null
+++ b/include/asm-ia64/ipc.h
@@ -0,0 +1,31 @@
+#ifndef __i386_IPC_H__
+#define __i386_IPC_H__
+
+/*
+ * These are used to wrap system calls on x86.
+ *
+ * See arch/i386/kernel/sys_i386.c for ugly details..
+ */
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+/* Used by the DIPC package, try and avoid reusing it */
+#define DIPC 25
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif
diff --git a/include/asm-ia64/ipcbuf.h b/include/asm-ia64/ipcbuf.h
new file mode 100644
index 000000000..079899ae7
--- /dev/null
+++ b/include/asm-ia64/ipcbuf.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_IPCBUF_H
+#define _ASM_IA64_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit seq
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned short seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_IPCBUF_H */
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
new file mode 100644
index 000000000..137670219
--- /dev/null
+++ b/include/asm-ia64/irq.h
@@ -0,0 +1,120 @@
+#ifndef _ASM_IA64_IRQ_H
+#define _ASM_IA64_IRQ_H
+
+/*
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 11/24/98 S.Eranian updated TIMER_IRQ and irq_cannonicalize
+ * 01/20/99 S.Eranian added keyboard interrupt
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+
+#include <asm/ptrace.h>
+
+#define NR_IRQS 256
+#define NR_ISA_IRQS 16
+
+/*
+ * 0 special
+ *
+ * 1,3-14 are reserved from firmware
+ *
+ * 16-255 (vectored external interrupts) are available
+ *
+ * 15 spurious interrupt (see IVR)
+ *
+ * 16 lowest priority, 255 highest priority
+ *
+ * 15 classes of 16 interrupts each.
+ */
+#define IA64_MIN_VECTORED_IRQ 16
+#define IA64_MAX_VECTORED_IRQ 255
+
#define IA64_SPURIOUS_INT	0x0f
#define PERFMON_IRQ		0x28	/* performance monitor interrupt vector */
#define TIMER_IRQ		0xef	/* use highest-prio group 15 interrupt for timer */
#define	IPI_IRQ			0xfe	/* inter-processor interrupt vector */
#define CMC_IRQ			0xff	/* correctable machine-check interrupt vector */
+
+#define IA64_MIN_VECTORED_IRQ 16
+#define IA64_MAX_VECTORED_IRQ 255
+
+extern __u8 irq_to_vector_map[IA64_MIN_VECTORED_IRQ];
+#define map_legacy_irq(x) (((x) < IA64_MIN_VECTORED_IRQ) ? irq_to_vector_map[(x)] : (x))
+
+#define IRQ_INPROGRESS (1 << 0) /* irq handler active */
+#define IRQ_ENABLED (1 << 1) /* irq enabled */
+#define IRQ_PENDING (1 << 2) /* irq pending */
+#define IRQ_REPLAY (1 << 3) /* irq has been replayed but not acked yet */
+#define IRQ_AUTODETECT (1 << 4) /* irq is being autodetected */
+#define IRQ_WAITING (1 << 5) /* used for autodetection: irq not yet seen yet */
+
+struct hw_interrupt_type {
+ const char *typename;
+ void (*init) (unsigned long addr);
+ void (*startup) (unsigned int irq);
+ void (*shutdown) (unsigned int irq);
+ int (*handle) (unsigned int irq, struct pt_regs *regs);
+ void (*enable) (unsigned int irq);
+ void (*disable) (unsigned int irq);
+};
+
+extern struct hw_interrupt_type irq_type_default; /* dummy interrupt controller */
+extern struct hw_interrupt_type irq_type_ia64_internal; /* CPU-internal interrupt controller */
+
+struct irq_desc {
+ unsigned int type; /* type of interrupt (level vs. edge triggered) */
+ unsigned int status; /* see above */
+ unsigned int depth; /* disable depth for nested irq disables */
+ struct hw_interrupt_type *handler;
+ struct irqaction *action; /* irq action list */
+};
+
+extern struct irq_desc irq_desc[NR_IRQS];
+
+extern spinlock_t irq_controller_lock;
+
+/* IA64 inter-cpu interrupt related definitions */
+
+/* Delivery modes for inter-cpu interrupts */
+enum {
+ IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
+ IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
+ IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
+ IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
+ IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
+};
+
+#define IA64_BUS_ID(cpu) (cpu >> 8)
+#define IA64_LOCAL_ID(cpu) (cpu & 0xff)
+
/*
 * Canonicalize an IRQ number the legacy way: on an 8259 PIC, IRQ 2 is
 * the cascade line and interrupts arriving there are reported as IRQ 9.
 * All other IRQ numbers are returned unchanged.
 */
static __inline__ int
irq_cannonicalize (int irq)
{
	if (irq == 2)
		return 9;	/* 8259 cascade: IRQ 2 shows up as IRQ 9 */
	return irq;
}
+
+extern int invoke_irq_handlers (unsigned int irq, struct pt_regs *regs, struct irqaction *action);
+extern void disable_irq (unsigned int);
+extern void disable_irq_nosync (unsigned int);
+extern void enable_irq (unsigned int);
+extern void ipi_send (int cpu, int vector, int delivery_mode);
+
+#ifdef CONFIG_SMP
+ extern void irq_enter(int cpu, int irq);
+ extern void irq_exit(int cpu, int irq);
+ extern void handle_IPI(int irq, void *dev_id, struct pt_regs *regs);
+#else
+# define irq_enter(cpu, irq) (++local_irq_count[cpu])
+# define irq_exit(cpu, irq) (--local_irq_count[cpu])
+#endif
+
+#endif /* _ASM_IA64_IRQ_H */
diff --git a/include/asm-ia64/kdbsupport.h b/include/asm-ia64/kdbsupport.h
new file mode 100644
index 000000000..beb846f79
--- /dev/null
+++ b/include/asm-ia64/kdbsupport.h
@@ -0,0 +1,252 @@
+#ifndef _ASM_IA64_KDBSUPPORT_H
+#define _ASM_IA64_KDBSUPPORT_H
+
+/*
+ * Kernel Debugger Breakpoint Handler
+ *
+ * Copyright 1999, Silicon Graphics, Inc.
+ *
+ * Written March 1999 by Scott Lurndal at Silicon Graphics, Inc.
+ */
+
+#include <asm/ptrace.h>
+
+ /*
+ * This file provides definitions for functions that
+ * are dependent upon the product into which kdb is
+ * linked.
+ *
+ * This version is for linux.
+ */
+typedef void (*handler_t)(struct pt_regs *);
+typedef unsigned long k_machreg_t;
+
+unsigned long show_cur_stack_frame(struct pt_regs *, int, unsigned long *) ;
+
+extern char* kbd_getstr(char *, size_t, char *);
+extern int kdbinstalltrap(int, handler_t, handler_t*);
+extern int kdbinstalldbreg(kdb_bp_t *);
+extern void kdbremovedbreg(kdb_bp_t *);
+extern void kdb_initbptab(void);
+extern int kdbgetregcontents(const char *, struct pt_regs *, unsigned long *);
+extern int kdbsetregcontents(const char *, struct pt_regs *, unsigned long);
+extern int kdbdumpregs(struct pt_regs *, const char *, const char *);
+
+typedef int kdbintstate_t;
+
+extern void kdb_disableint(kdbintstate_t *);
+extern void kdb_restoreint(kdbintstate_t *);
+
+extern k_machreg_t kdb_getpc(struct pt_regs *);
+extern int kdb_setpc(struct pt_regs *, k_machreg_t);
+
+extern int kdb_putword(unsigned long, unsigned long);
+extern int kdb_getcurrentframe(struct pt_regs *);
+
+/*
+ * kdb_db_trap is a processor dependent routine invoked
+ * from kdb() via the #db trap handler. It handles breakpoints involving
+ * the processor debug registers and handles single step traps
+ * using the processor trace flag.
+ */
+
+#define KDB_DB_BPT 0 /* Straight breakpoint */
+#define KDB_DB_SS 1 /* Single Step trap */
+#define KDB_DB_SSB 2 /* Single Step, caller should continue */
+
+extern int kdb_db_trap(struct pt_regs *, int);
+
+extern int kdb_allocdbreg(kdb_bp_t *);
+extern void kdb_freedbreg(kdb_bp_t *);
+extern void kdb_initdbregs(void);
+
+extern void kdb_setsinglestep(struct pt_regs *);
+
+ /*
+ * Support for ia32 architecture debug registers.
+ */
+#define KDB_DBREGS 4
+extern k_machreg_t dbregs[];
+
+#define DR6_BT 0x00008000
+#define DR6_BS 0x00004000
+#define DR6_BD 0x00002000
+
+#define DR6_B3 0x00000008
+#define DR6_B2 0x00000004
+#define DR6_B1 0x00000002
+#define DR6_B0 0x00000001
+
+#define DR7_RW_VAL(dr, drnum) \
+ (((dr) >> (16 + (4 * (drnum)))) & 0x3)
+
+#define DR7_RW_SET(dr, drnum, rw) \
+ do { \
+ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \
+ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \
+ } while (0)
+
+#define DR7_RW0(dr) DR7_RW_VAL(dr, 0)
+#define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw)
+#define DR7_RW1(dr) DR7_RW_VAL(dr, 1)
+#define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw)
+#define DR7_RW2(dr) DR7_RW_VAL(dr, 2)
+#define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw)
+#define DR7_RW3(dr) DR7_RW_VAL(dr, 3)
+#define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw)
+
+
+#define DR7_LEN_VAL(dr, drnum) \
+ (((dr) >> (18 + (4 * (drnum)))) & 0x3)
+
+#define DR7_LEN_SET(dr, drnum, rw) \
+ do { \
+ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \
+ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \
+ } while (0)
+
+#define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0)
+#define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len)
+#define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1)
+#define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len)
+#define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2)
+#define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len)
+#define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3)
+#define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len)
+
+#define DR7_G0(dr) (((dr)>>1)&0x1)
+#define DR7_G0SET(dr) ((dr) |= 0x2)
+#define DR7_G0CLR(dr) ((dr) &= ~0x2)
+#define DR7_G1(dr) (((dr)>>3)&0x1)
+#define DR7_G1SET(dr) ((dr) |= 0x8)
+#define DR7_G1CLR(dr) ((dr) &= ~0x8)
+#define DR7_G2(dr) (((dr)>>5)&0x1)
+#define DR7_G2SET(dr) ((dr) |= 0x20)
+#define DR7_G2CLR(dr) ((dr) &= ~0x20)
+#define DR7_G3(dr) (((dr)>>7)&0x1)
+#define DR7_G3SET(dr) ((dr) |= 0x80)
+#define DR7_G3CLR(dr) ((dr) &= ~0x80)
+
+#define DR7_L0(dr) (((dr))&0x1)
+#define DR7_L0SET(dr) ((dr) |= 0x1)
+#define DR7_L0CLR(dr) ((dr) &= ~0x1)
+#define DR7_L1(dr) (((dr)>>2)&0x1)
+#define DR7_L1SET(dr) ((dr) |= 0x4)
+#define DR7_L1CLR(dr) ((dr) &= ~0x4)
+#define DR7_L2(dr) (((dr)>>4)&0x1)
+#define DR7_L2SET(dr) ((dr) |= 0x10)
+#define DR7_L2CLR(dr) ((dr) &= ~0x10)
+#define DR7_L3(dr) (((dr)>>6)&0x1)
+#define DR7_L3SET(dr) ((dr) |= 0x40)
+#define DR7_L3CLR(dr) ((dr) &= ~0x40)
+
+#define DR7_GD 0x00002000 /* General Detect Enable */
+#define DR7_GE 0x00000200 /* Global exact */
+#define DR7_LE 0x00000100 /* Local exact */
+
+extern k_machreg_t kdb_getdr6(void);
+extern void kdb_putdr6(k_machreg_t);
+
+extern k_machreg_t kdb_getdr7(void);
+extern void kdb_putdr7(k_machreg_t);
+
+extern k_machreg_t kdb_getdr(int);
+extern void kdb_putdr(int, k_machreg_t);
+
+extern k_machreg_t kdb_getcr(int);
+
+extern void kdb_bp_install(void);
+extern void kdb_bp_remove(void);
+
+/*
+ * Support for setjmp/longjmp
+ */
+#define JB_BX 0
+#define JB_SI 1
+#define JB_DI 2
+#define JB_BP 3
+#define JB_SP 4
+#define JB_PC 5
+
+typedef struct __kdb_jmp_buf {
+ unsigned long regs[6];
+} kdb_jmp_buf;
+
+extern int kdb_setjmp(kdb_jmp_buf *);
+extern void kdb_longjmp(kdb_jmp_buf *, int);
+
+extern kdb_jmp_buf kdbjmpbuf;
+
/*
 * Pointer to the preserved-register frame (struct switch_stack) saved
 * immediately below the pt_regs frame.  The argument is parenthesized
 * so any pointer expression may be passed safely.
 */
#define getprsregs(regs)	(((struct switch_stack *) (regs)) - 1)

/* Shift `value` up to bit position `bp`; both arguments parenthesized
 * so expression arguments expand correctly. */
#define BITMASK(bp,value)	((value) << (bp))
+
+/* bkpt support using break inst instead of IBP reg */
+
+/*
+ * Define certain specific instructions
+ */
+#define BREAK_INSTR (0x00000080100L << 11)
+#define INST_SLOT0_MASK (0x1ffffffffffL << 5)
+
+#if 0
+#define MAX_BREAKPOINTS 40
+#define PSR_SS 40
+#endif
+
+/**
+ * IA-64 instruction format structures
+ */
+typedef union bundle {
+ struct {
+ long low8;
+ long high8;
+ } lform;
+ struct {
+ int low_low4;
+ int low_high4;
+ long high8;
+ } iform;
+} bundle_t;
+
+#define BKPTMODE_DATAR 3
+#define BKPTMODE_IO 2
+#define BKPTMODE_DATAW 1
+#define BKPTMODE_INST 0
+
+/* Some of the fault registers needed by kdb but not passed with
+ * regs or switch stack.
+ */
+typedef struct fault_regs {
+ unsigned long isr ;
+ unsigned long ifa ;
+ unsigned long iim ;
+ unsigned long itir ;
+} fault_regs_t ;
+
+/*
+ * State of kdb
+ */
+
+typedef struct kdb_state {
+ int cmd_given ;
+ int reason_for_entry ;
+ int bkpt_handling_state ;
+ int kdb_action ;
+} kdb_state_t ;
+
+#define BKPTSTATE_NOT_HANDLED 0
+#define BKPTSTATE_HANDLED 1
+
+#define CMDGIVEN_UNKNOWN 0
+#define CMDGIVEN_SSTEP 1
+#define CMDGIVEN_GO 2
+
+#define ENTRYREASON_GO 0
+#define ENTRYREASON_SSTEP 1
+
+#define ACTION_UNKNOWN 0
+#define ACTION_NOBPINSTALL 1
+#define ACTION_NOPROMPT 2
+
+#endif /* _ASM_IA64_KDBSUPPORT_H */
diff --git a/include/asm-ia64/keyboard.h b/include/asm-ia64/keyboard.h
new file mode 100644
index 000000000..c77324377
--- /dev/null
+++ b/include/asm-ia64/keyboard.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_KEYBOARD_H
+#define _ASM_IA64_KEYBOARD_H
+
+/*
+ * This file contains the ia-64 architecture specific keyboard
+ * definitions.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+# ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#define KEYBOARD_IRQ 1
+#define DISABLE_KBD_DURING_INTERRUPTS 0
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern unsigned char pckbd_sysrq_xlate[128];
+
+#define kbd_setkeycode pckbd_setkeycode
+#define kbd_getkeycode pckbd_getkeycode
+#define kbd_pretranslate pckbd_pretranslate
+#define kbd_translate pckbd_translate
+#define kbd_unexpected_up pckbd_unexpected_up
+#define kbd_leds pckbd_leds
+#define kbd_init_hw pckbd_init_hw
+#define kbd_sysrq_xlate pckbd_sysrq_xlate
+
+#define INIT_KBD
+
+#define SYSRQ_KEY 0x54
+#if defined(CONFIG_KDB)
+#define E1_PAUSE 119 /* PAUSE key */
+#endif
+
+/* resource allocation */
+#define kbd_request_region()
+#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, "keyboard", NULL)
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+
+/* Some stoneage hardware needs delays after some operations. */
+#define kbd_pause() do { } while(0)
+
+/*
+ * Machine specific bits for the PS/2 driver
+ */
+
+#define AUX_IRQ 12
+
+#define aux_request_irq(hand, dev_id) \
+ request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id)
+
+#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
+
+# endif /* __KERNEL__ */
+#endif /* _ASM_IA64_KEYBOARD_H */
diff --git a/include/asm-ia64/linux_logo.h b/include/asm-ia64/linux_logo.h
new file mode 100644
index 000000000..61a3e7fe0
--- /dev/null
+++ b/include/asm-ia64/linux_logo.h
@@ -0,0 +1,49 @@
+/* $Id: linux_logo.h,v 1.6 1998/07/30 16:30:20 jj Exp $
+ * include/asm-ia64/linux_logo.h: This is a linux logo
+ * to be displayed on boot.
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1998 David Mosberger (davidm@hpl.hp.com)
+ *
+ * You can put anything here, but:
+ * LINUX_LOGO_COLORS has to be less than 224
+ * image size has to be 80x80
+ * values have to start from 0x20
+ * (i.e. RGB(linux_logo_red[0],
+ * linux_logo_green[0],
+ * linux_logo_blue[0]) is color 0x20)
+ * BW image has to be 80x80 as well, with MS bit
+ * on the left
+ * Serial_console ascii image can be any size,
+ * but should contain %s to display the version
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+
+#define linux_logo_banner "Linux/ia64 version " UTS_RELEASE
+
+#define LINUX_LOGO_COLORS 214
+
+#ifdef INCLUDE_LINUX_LOGO_DATA
+
+#define INCLUDE_LINUX_LOGOBW
+#define INCLUDE_LINUX_LOGO16
+
+#include <linux/linux_logo.h>
+
+#else
+
+/* prototypes only */
+extern unsigned char linux_logo_red[];
+extern unsigned char linux_logo_green[];
+extern unsigned char linux_logo_blue[];
+extern unsigned char linux_logo[];
+extern unsigned char linux_logo_bw[];
+extern unsigned char linux_logo16_red[];
+extern unsigned char linux_logo16_green[];
+extern unsigned char linux_logo16_blue[];
+extern unsigned char linux_logo16[];
+
+#endif
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
new file mode 100644
index 000000000..4b369dc4c
--- /dev/null
+++ b/include/asm-ia64/machvec.h
@@ -0,0 +1,108 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/* forward declarations: */
+struct hw_interrupt_type;
+struct irq_desc;
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+struct timeval;
+struct vm_area_struct;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_irq_init_t (struct irq_desc *);
+typedef void ia64_mv_pci_fixup_t (void);
+typedef unsigned long ia64_mv_map_nr_t (unsigned long);
+typedef void ia64_mv_mca_init_t (void);
+typedef void ia64_mv_mca_handler_t (void);
+typedef void ia64_mv_cmci_handler_t (int, void *, struct pt_regs *);
+typedef void ia64_mv_log_print_t (void);
+
+# if defined (CONFIG_IA64_HP_SIM)
+# include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+# include <asm/machvec_dig.h>
+# elif defined (CONFIG_IA64_SGI_SN1_SIM)
+# include <asm/machvec_sgi_sn1_SIM.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+struct ia64_machine_vector {
+ const char *name;
+ ia64_mv_setup_t *setup;
+ ia64_mv_irq_init_t *irq_init;
+ ia64_mv_pci_fixup_t *pci_fixup;
+ ia64_mv_map_nr_t *map_nr;
+ ia64_mv_mca_init_t *mca_init;
+ ia64_mv_mca_handler_t *mca_handler;
+ ia64_mv_cmci_handler_t *cmci_handler;
+ ia64_mv_log_print_t *log_print;
+};
+
+#define MACHVEC_INIT(name) \
+{ \
+ #name, \
+ platform_setup, \
+ platform_irq_init, \
+ platform_pci_fixup, \
+ platform_map_nr, \
+ platform_mca_init, \
+ platform_mca_handler, \
+ platform_cmci_handler, \
+ platform_log_print \
+}
+
# ifndef MACHVEC_INHIBIT_RENAMING
/*
 * For a generic kernel, route the platform_* names through the runtime
 * machine vector.  platform_pci_fixup was missing from this list even
 * though the vector struct and MACHVEC_INIT both carry a pci_fixup
 * entry; it is added here so generic code dispatches it too.
 */
#  define platform_name		ia64_mv.name
#  define platform_setup	ia64_mv.setup
#  define platform_irq_init	ia64_mv.irq_init
#  define platform_pci_fixup	ia64_mv.pci_fixup
#  define platform_map_nr	ia64_mv.map_nr
#  define platform_mca_init	ia64_mv.mca_init
#  define platform_mca_handler	ia64_mv.mca_handler
#  define platform_cmci_handler	ia64_mv.cmci_handler
#  define platform_log_print	ia64_mv.log_print
# endif
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_noop (void);
+
+# else
+# error Unknown configuration. Update asm-ia64/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
#ifndef platform_setup
# define platform_setup		((ia64_mv_setup_t *) machvec_noop)
#endif
#ifndef platform_irq_init
# define platform_irq_init	((ia64_mv_irq_init_t *) machvec_noop)
#endif
/* pci_fixup previously had no fallback, so platforms that don't define
   platform_pci_fixup (e.g. hpsim) broke MACHVEC_INIT expansion. */
#ifndef platform_pci_fixup
# define platform_pci_fixup	((ia64_mv_pci_fixup_t *) machvec_noop)
#endif
#ifndef platform_mca_init
# define platform_mca_init	((ia64_mv_mca_init_t *) machvec_noop)
#endif
#ifndef platform_mca_handler
# define platform_mca_handler	((ia64_mv_mca_handler_t *) machvec_noop)
#endif
#ifndef platform_cmci_handler
# define platform_cmci_handler	((ia64_mv_cmci_handler_t *) machvec_noop)
#endif
#ifndef platform_log_print
# define platform_log_print	((ia64_mv_log_print_t *) machvec_noop)
#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h
new file mode 100644
index 000000000..a63e586c8
--- /dev/null
+++ b/include/asm-ia64/machvec_dig.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_h
+#define _ASM_IA64_MACHVEC_DIG_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_irq_init_t dig_irq_init;
+extern ia64_mv_pci_fixup_t dig_pci_fixup;
+extern ia64_mv_map_nr_t map_nr_dense;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "dig"
+#define platform_setup dig_setup
+#define platform_irq_init dig_irq_init
+#define platform_pci_fixup dig_pci_fixup
+#define platform_map_nr map_nr_dense
+
+#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/include/asm-ia64/machvec_hpsim.h b/include/asm-ia64/machvec_hpsim.h
new file mode 100644
index 000000000..9afb30cb5
--- /dev/null
+++ b/include/asm-ia64/machvec_hpsim.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_MACHVEC_HPSIM_h
+#define _ASM_IA64_MACHVEC_HPSIM_h
+
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+extern ia64_mv_map_nr_t map_nr_dense;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "hpsim"
+#define platform_setup hpsim_setup
+#define platform_irq_init hpsim_irq_init
+#define platform_map_nr map_nr_dense
+
+#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h
new file mode 100644
index 000000000..60859418a
--- /dev/null
+++ b/include/asm-ia64/machvec_init.h
@@ -0,0 +1,9 @@
+#define MACHVEC_INHIBIT_RENAMING
+
+#include <asm/machvec.h>
+
+#define MACHVEC_HELPER(name) \
+ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
+ = MACHVEC_INIT(name);
+
+#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
diff --git a/include/asm-ia64/machvec_sn1.h b/include/asm-ia64/machvec_sn1.h
new file mode 100644
index 000000000..bbee99952
--- /dev/null
+++ b/include/asm-ia64/machvec_sn1.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_MACHVEC_SN1_h
+#define _ASM_IA64_MACHVEC_SN1_h
+
+extern ia64_mv_setup_t sn1_setup;
+extern ia64_mv_irq_init_t sn1_irq_init;
+extern ia64_mv_map_nr_t sn1_map_nr;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "sn1"
+#define platform_setup sn1_setup
+#define platform_irq_init sn1_irq_init
+#define platform_map_nr sn1_map_nr
+
+#endif /* _ASM_IA64_MACHVEC_SN1_h */
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
new file mode 100644
index 000000000..0b9df0dcd
--- /dev/null
+++ b/include/asm-ia64/mca.h
@@ -0,0 +1,143 @@
+/*
+ * File: mca.h
+ * Purpose: Machine check handling specific defines
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
+ */
+#ifndef _ASM_IA64_MCA_H
+#define _ASM_IA64_MCA_H
+
+#include <linux/types.h>
+#include <asm/param.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+
+/* These are the return codes from all the IA64_MCA specific interfaces */
+typedef int ia64_mca_return_code_t;
+
+enum {
+ IA64_MCA_SUCCESS = 0,
+ IA64_MCA_FAILURE = 1
+};
+
+#define IA64_MCA_RENDEZ_TIMEOUT (100 * HZ) /* 100 * HZ jiffies = 100 seconds (old comment said 1000 ms -- verify intended value) */
+
+/* Interrupt vectors reserved for MC handling. */
+#define IA64_MCA_RENDEZ_INT_VECTOR 0xF3 /* Rendez interrupt */
+#define IA64_MCA_WAKEUP_INT_VECTOR 0x12 /* Wakeup interrupt */
+#define IA64_MCA_CMC_INT_VECTOR 0xF2 /* Correctable machine check interrupt */
+
+#define IA64_CMC_INT_DISABLE 0
+#define IA64_CMC_INT_ENABLE 1
+
+
+typedef u32 int_vector_t;
+typedef u64 millisec_t;
+
+typedef union cmcv_reg_u {
+ u64 cmcv_regval;
+ struct {
+ u64 cmcr_vector : 8;
+ u64 cmcr_ignored1 : 47;
+ u64 cmcr_mask : 1;
+ u64 cmcr_reserved1 : 3;
+ u64 cmcr_ignored2 : 1;
+ u64 cmcr_reserved2 : 4;
+ } cmcv_reg_s;
+
+} cmcv_reg_t;
+
+#define cmcv_mask cmcv_reg_s.cmcr_mask
+#define cmcv_vector cmcv_reg_s.cmcr_vector
+
+
+#define IA64_MCA_UCMC_HANDLER_SIZE 0x10
+#define IA64_INIT_HANDLER_SIZE 0x10
+
+enum {
+ IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
+ IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1
+};
+
+#define IA64_MAXCPUS 64 /* Need to do something about this */
+
+/* Information maintained by the MC infrastructure */
+typedef struct ia64_mc_info_s {
+ u64 imi_mca_handler;
+ size_t imi_mca_handler_size;
+ u64 imi_monarch_init_handler;
+ size_t imi_monarch_init_handler_size;
+ u64 imi_slave_init_handler;
+ size_t imi_slave_init_handler_size;
+ u8 imi_rendez_checkin[IA64_MAXCPUS];
+
+} ia64_mc_info_t;
+
+/* Possible rendez states passed from SAL to OS during MCA
+ * handoff
+ */
+enum {
+ IA64_MCA_RENDEZ_NOT_RQD = 0x0,
+ IA64_MCA_RENDEZ_DONE_WITHOUT_INIT = 0x1,
+ IA64_MCA_RENDEZ_DONE_WITH_INIT = 0x2,
+ IA64_MCA_RENDEZ_FAILURE = -1
+};
+
+typedef struct ia64_mca_sal_to_os_state_s {
+ u64 imsto_os_gp; /* GP of the os registered with the SAL */
+ u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */
+ u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */
+ u64 imsto_sal_gp; /* GP of the SAL - physical */
+ u64 imsto_rendez_state; /* Rendez state information */
+ u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going
+ * back to SAL from OS after MCA handling.
+ */
+} ia64_mca_sal_to_os_state_t;
+
+enum {
+ IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
+ IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system need from SAL */
+ IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system need from SAL */
+ IA64_MCA_HALT = -3 /* System to be halted by SAL */
+};
+
+typedef struct ia64_mca_os_to_sal_state_s {
+ u64 imots_os_status; /* OS status to SAL as to what happened
+ * with the MCA handling.
+ */
+ u64 imots_sal_gp; /* GP of the SAL - physical */
+ u64 imots_new_min_state; /* Pointer to structure containing
+ * new values of registers in the min state
+ * save area.
+ */
+ u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going
+ * back to SAL from OS after MCA handling.
+ */
+} ia64_mca_os_to_sal_state_t;
+
+typedef int (*prfunc_t)(const char * fmt, ...);
+
+extern void mca_init(void);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(void);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_rendez_int_handler(int,void *,struct pt_regs *);
+extern void ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *);
+extern void ia64_mca_cmc_int_handler(int,void *,struct pt_regs *);
+extern void ia64_log_print(int,int,prfunc_t);
+
+#define PLATFORM_CALL(fn, args) printk("Platform call TBD\n")
+
+#undef MCA_TEST
+
+#if defined(MCA_TEST)
+# define MCA_DEBUG printk
+#else
+# define MCA_DEBUG
+#endif
+
+#endif /* _ASM_IA64_MCA_H */
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
new file mode 100644
index 000000000..97f36e587
--- /dev/null
+++ b/include/asm-ia64/mca_asm.h
@@ -0,0 +1,299 @@
+/*
+ * File: mca_asm.h
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#define PSR_IC 13
+#define PSR_I 14
+#define PSR_DT 17
+#define PSR_RT 27
+#define PSR_IT 36
+#define PSR_BN 44
+
+/*
+ * This macro converts an instruction virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define INST_VA_TO_PA(addr) \
+ dep addr = 0, addr, 61, 3;
+/*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr) \
+ dep addr = 0, addr, 61, 3;
+/*
+ * This macro converts a data physical address to a virtual address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Put 0x7 in bits 61 thru 63.
+ */
+#define DATA_PA_TO_VA(addr,temp) \
+ mov temp = 0x7 ; \
+ dep addr = temp, addr, 61, 3;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ * 1. Save the current psr
+ * 2. Make sure that all the upper 32 bits are off
+ *
+ * 3. Clear the interrupt enable and interrupt state collection bits
+ * in the psr before updating the ipsr and iip.
+ *
+ * 4. Turn off the instruction, data and rse translation bits of the psr
+ * and store the new value into ipsr
+ * Also make sure that the interrupts are disabled.
+ * Ensure that we are in little endian mode.
+ * [psr.{rt, it, dt, i, be} = 0]
+ *
+ * 5. Get the physical address corresponding to the virtual address
+ * of the next instruction bundle and put it in iip.
+ * (Using magic numbers 24 and 40 in the deposit instruction since
+ * the IA64_SDK code directly maps to the lower 24 bits as the physical
+ * address from a virtual address).
+ *
+ * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov old_psr = psr; \
+ ;; \
+ dep old_psr = 0, old_psr, 32, 32; \
+ \
+ mov ar##.##rsc = r0 ; \
+ ;; \
+ mov temp2 = ar##.##bspstore; \
+ ;; \
+ DATA_VA_TO_PA(temp2); \
+ ;; \
+ mov temp1 = ar##.##rnat; \
+ ;; \
+ mov ar##.##bspstore = temp2; \
+ ;; \
+ mov ar##.##rnat = temp1; \
+ mov temp1 = psr; \
+ mov temp2 = psr; \
+ ;; \
+ \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr##.##l = temp2; \
+ \
+ dep temp1 = 0, temp1, 32, 32; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_I, 1; \
+ ;; \
+ movl temp2 = start_addr; \
+ mov cr##.##ipsr = temp1; \
+ ;; \
+ INST_VA_TO_PA(temp2); \
+ mov cr##.##iip = temp2; \
+ mov cr##.##ifs = r0; \
+ DATA_VA_TO_PA(sp) \
+ DATA_VA_TO_PA(gp) \
+ ;; \
+ srlz##.##i; \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ nop 2; \
+ rfi; \
+ ;;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ * 1. Get the old saved psr
+ *
+ * 2. Clear the interrupt enable and interrupt state collection bits
+ * in the current psr.
+ *
+ * 3. Set the instruction translation bit back in the old psr
+ * Note we have to do this since we are right now saving only the
+ * lower 32-bits of old psr.(Also the old psr has the data and
+ * rse translation bits on)
+ *
+ * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ * 5. Set iip to the virtual address of the next instruction bundle.
+ *
+ * 6. Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov temp2 = psr; \
+ ;; \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr##.##l = temp2; \
+ mov ar##.##rsc = r0 ; \
+ ;; \
+ mov temp2 = ar##.##bspstore; \
+ ;; \
+ DATA_PA_TO_VA(temp2,temp1); \
+ ;; \
+ mov temp1 = ar##.##rnat; \
+ ;; \
+ mov ar##.##bspstore = temp2; \
+ ;; \
+ mov ar##.##rnat = temp1; \
+ ;; \
+ mov temp1 = old_psr; \
+ ;; \
+ mov temp2 = 1 ; \
+ dep temp1 = temp2, temp1, PSR_I, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IC, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_BN, 1; \
+ ;; \
+ \
+ mov cr##.##ipsr = temp1; \
+ movl temp2 = start_addr; \
+ ;; \
+ mov cr##.##iip = temp2; \
+ DATA_PA_TO_VA(sp, temp1); \
+ DATA_PA_TO_VA(gp, temp1); \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ rfi; \
+ ;;
+
+/*
+ * The following offsets capture the order in which the
+ * RSE related registers from the old context are
+ * saved onto the new stack frame.
+ *
+ * +-----------------------+
+ * |NDIRTY [BSP - BSPSTORE]|
+ * +-----------------------+
+ * | RNAT |
+ * +-----------------------+
+ * | BSPSTORE |
+ * +-----------------------+
+ * | IFS |
+ * +-----------------------+
+ * | PFS |
+ * +-----------------------+
+ * | RSC |
+ * +-----------------------+ <-------- Bottom of new stack frame
+ */
+#define rse_rsc_offset 0
+#define rse_pfs_offset (rse_rsc_offset+0x08)
+#define rse_ifs_offset (rse_pfs_offset+0x08)
+#define rse_bspstore_offset (rse_ifs_offset+0x08)
+#define rse_rnat_offset (rse_bspstore_offset+0x08)
+#define rse_ndirty_offset (rse_rnat_offset+0x08)
+
+/*
+ * rse_switch_context
+ *
+ * 1. Save old RSC onto the new stack frame
+ * 2. Save PFS onto new stack frame
+ * 3. Cover the old frame and start a new frame.
+ * 4. Save IFS onto new stack frame
+ * 5. Save the old BSPSTORE on the new stack frame
+ * 6. Save the old RNAT on the new stack frame
+ * 7. Write BSPSTORE with the new backing store pointer
+ * 8. Read and save the new BSP to calculate the #dirty registers
+ * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
+ */
+#define rse_switch_context(temp,p_stackframe,p_bspstore) \
+ ;; \
+ mov temp=ar##.##rsc;; \
+ st8 [p_stackframe]=temp,8;; \
+ mov temp=ar##.##pfs;; \
+ st8 [p_stackframe]=temp,8; \
+ cover ;; \
+ mov temp=cr##.##ifs;; \
+ st8 [p_stackframe]=temp,8;; \
+ mov temp=ar##.##bspstore;; \
+ st8 [p_stackframe]=temp,8;; \
+ mov temp=ar##.##rnat;; \
+ st8 [p_stackframe]=temp,8; \
+ mov ar##.##bspstore=p_bspstore;; \
+ mov temp=ar##.##bsp;; \
+ sub temp=temp,p_bspstore;; \
+ st8 [p_stackframe]=temp,8
+
+/*
+ * rse_return_context
+ * 1. Allocate a zero-sized frame
+ * 2. Store the number of dirty registers in the RSC.loadrs field
+ * 3. Issue a loadrs to ensure that any registers from the interrupted
+ * context which were saved on the new stack frame have been loaded
+ * back into the stacked registers
+ * 4. Restore BSPSTORE
+ * 5. Restore RNAT
+ * 6. Restore PFS
+ * 7. Restore IFS
+ * 8. Restore RSC
+ * 9. Issue an RFI
+ */
+#define rse_return_context(psr_mask_reg,temp,p_stackframe) \
+ ;; \
+ alloc temp=ar.pfs,0,0,0,0; \
+ add p_stackframe=rse_ndirty_offset,p_stackframe;; \
+ ld8 temp=[p_stackframe];; \
+ shl temp=temp,16;; \
+ mov ar##.##rsc=temp;; \
+ loadrs;; \
+ add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
+ ld8 temp=[p_stackframe];; \
+ mov ar##.##bspstore=temp;; \
+ add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
+ ld8 temp=[p_stackframe];; \
+ mov ar##.##rnat=temp;; \
+ add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \
+ ld8 temp=[p_stackframe];; \
+ mov ar##.##pfs=temp; \
+ add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \
+ ld8 temp=[p_stackframe];; \
+ mov cr##.##ifs=temp; \
+ add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \
+ ld8 temp=[p_stackframe];; \
+ mov ar##.##rsc=temp ; \
+ add p_stackframe=-rse_rsc_offset,p_stackframe; \
+ mov temp=cr.ipsr;; \
+ st8 [p_stackframe]=temp,8; \
+ mov temp=cr.iip;; \
+ st8 [p_stackframe]=temp,-8; \
+ mov temp=psr;; \
+ or temp=temp,psr_mask_reg;; \
+ mov cr.ipsr=temp;; \
+ mov temp=ip;; \
+ add temp=0x30,temp;; \
+ mov cr.iip=temp;; \
+ rfi;; \
+ ld8 temp=[p_stackframe],8;; \
+ mov cr.ipsr=temp;; \
+ ld8 temp=[p_stackframe];; \
+ mov cr.iip=temp
+
+#endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
new file mode 100644
index 000000000..93e73d77b
--- /dev/null
+++ b/include/asm-ia64/mman.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_MMAN_H
+#define _ASM_IA64_MMAN_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_NONE 0x0 /* page can not be accessed */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_GROWSUP 0x0200 /* register stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+/* compatibility flags */
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FILE 0
+
+#endif /* _ASM_IA64_MMAN_H */
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
new file mode 100644
index 000000000..b775d0a9a
--- /dev/null
+++ b/include/asm-ia64/mmu_context.h
@@ -0,0 +1,143 @@
+#ifndef _ASM_IA64_MMU_CONTEXT_H
+#define _ASM_IA64_MMU_CONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+
+/*
+ * Routines to manage the allocation of task context numbers. Task
+ * context numbers are used to reduce or eliminate the need to perform
+ * TLB flushes due to context switches. Context numbers are
+ * implemented using ia-64 region ids. Since ia-64 TLBs do not
+ * guarantee that the region number is checked when performing a TLB
+ * lookup, we need to assign a unique region id to each region in a
+ * process. We use the least significant three bits in a region id
+ * for this purpose. On processors where the region number is checked
+ * in TLB lookups, we can get back those three bits by defining
+ * CONFIG_IA64_TLB_CHECKS_REGION_NUMBER. The macro
+ * IA64_REGION_ID_BITS gives the number of bits in a region id. The
+ * architecture manual guarantees this number to be in the range
+ * 18-24.
+ *
+ * A context number has the following format:
+ *
+ * +--------------------+---------------------+
+ * | generation number | region id |
+ * +--------------------+---------------------+
+ *
+ * A context number of 0 is considered "invalid".
+ *
+ * The generation number is incremented whenever we end up having used
+ * up all available region ids. At that point we flush the entire
+ * TLB and reuse the first region id. The new generation number
+ * ensures that when we context switch back to an old process, we do
+ * not inadvertently end up using its possibly reused region id.
+ * Instead, we simply allocate a new region id for that process.
+ *
+ * Copyright (C) 1998 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
+
+#define IA64_REGION_ID_BITS 18
+
+#ifdef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
+# define IA64_HW_CONTEXT_BITS IA64_REGION_ID_BITS
+#else
+# define IA64_HW_CONTEXT_BITS (IA64_REGION_ID_BITS - 3)
+#endif
+
+#define IA64_HW_CONTEXT_MASK ((1UL << IA64_HW_CONTEXT_BITS) - 1)
+
+extern unsigned long ia64_next_context;
+
+extern void get_new_mmu_context (struct mm_struct *mm);
+
+extern inline unsigned long
+ia64_rid (unsigned long context, unsigned long region_addr)
+{
+# ifdef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
+ return context;
+# else
+ return context << 3 | (region_addr >> 61);
+# endif
+}
+
+extern inline void
+get_mmu_context (struct mm_struct *mm)
+{
+ /* check if our ASN is of an older generation and thus invalid: */
+ if (((mm->context ^ ia64_next_context) & ~IA64_HW_CONTEXT_MASK) != 0) {
+ get_new_mmu_context(mm);
+ }
+}
+
+extern inline void
+init_new_context (struct task_struct *p, struct mm_struct *mm)
+{
+ mm->context = 0;
+}
+
+extern inline void
+destroy_context (struct mm_struct *mm)
+{
+ /* Nothing to do. */
+}
+
+extern inline void
+reload_context (struct mm_struct *mm)
+{
+ unsigned long rid;
+ unsigned long rid_incr = 0;
+ unsigned long rr0, rr1, rr2, rr3, rr4;
+
+ rid = (mm->context & IA64_HW_CONTEXT_MASK);
+
+#ifndef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
+ rid <<= 3; /* make space for encoding the region number */
+ rid_incr = 1 << 8;
+#endif
+
+ /* encode the region id, preferred page size, and VHPT enable bit: */
+ rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
+ rr1 = rr0 + 1*rid_incr;
+ rr2 = rr0 + 2*rid_incr;
+ rr3 = rr0 + 3*rid_incr;
+ rr4 = rr0 + 4*rid_incr;
+ ia64_set_rr(0x0000000000000000, rr0);
+ ia64_set_rr(0x2000000000000000, rr1);
+ ia64_set_rr(0x4000000000000000, rr2);
+ ia64_set_rr(0x6000000000000000, rr3);
+ ia64_set_rr(0x8000000000000000, rr4);
+ ia64_insn_group_barrier();
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+ ia64_insn_group_barrier();
+}
+
+/*
+ * Switch from address space PREV to address space NEXT. Note that
+ * TSK may be NULL.
+ */
+static inline void
+switch_mm (struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+{
+ /*
+ * We may get interrupts here, but that's OK because interrupt
+ * handlers cannot touch user-space.
+ */
+ __asm__ __volatile__ ("mov ar.k7=%0" :: "r"(__pa(next->pgd)));
+ get_mmu_context(next);
+ reload_context(next);
+}
+
+#define activate_mm(prev,next) \
+ switch_mm((prev), (next), NULL, smp_processor_id())
+
+#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/include/asm-ia64/msgbuf.h b/include/asm-ia64/msgbuf.h
new file mode 100644
index 000000000..8dfa06dd0
--- /dev/null
+++ b/include/asm-ia64/msgbuf.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_MSGBUF_H
+#define _ASM_IA64_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_MSGBUF_H */
diff --git a/include/asm-ia64/namei.h b/include/asm-ia64/namei.h
new file mode 100644
index 000000000..74e195253
--- /dev/null
+++ b/include/asm-ia64/namei.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_IA64_NAMEI_H
+#define _ASM_IA64_NAMEI_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+#define __prefix_lookup_dentry(name, lookup_flags) \
+ do {} while (0)
+
+#endif /* _ASM_IA64_NAMEI_H */
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
new file mode 100644
index 000000000..9639a9e40
--- /dev/null
+++ b/include/asm-ia64/offsets.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_OFFSETS_H
+#define _ASM_IA64_OFFSETS_H
+
+/*
+ * DO NOT MODIFY
+ *
+ * This file was generated by arch/ia64/tools/print_offsets.
+ *
+ */
+
+#define PF_PTRACED_BIT 4
+
+#define IA64_TASK_SIZE 2752 /* 0xac0 */
+#define IA64_PT_REGS_SIZE 400 /* 0x190 */
+#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
+#define IA64_SIGINFO_SIZE 136 /* 0x88 */
+
+#define IA64_TASK_FLAGS_OFFSET 8 /* 0x8 */
+#define IA64_TASK_SIGPENDING_OFFSET 16 /* 0x10 */
+#define IA64_TASK_NEED_RESCHED_OFFSET 40 /* 0x28 */
+#define IA64_TASK_THREAD_OFFSET 912 /* 0x390 */
+#define IA64_TASK_THREAD_KSP_OFFSET 912 /* 0x390 */
+#define IA64_TASK_PID_OFFSET 188 /* 0xbc */
+#define IA64_TASK_MM_OFFSET 88 /* 0x58 */
+#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
+#define IA64_PT_REGS_R12_OFFSET 112 /* 0x70 */
+#define IA64_PT_REGS_R8_OFFSET 144 /* 0x90 */
+#define IA64_PT_REGS_R16_OFFSET 176 /* 0xb0 */
+#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */
+#define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0 /* 0x0 */
+#define IA64_SIGCONTEXT_AR_BSP_OFFSET 72 /* 0x48 */
+#define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80 /* 0x50 */
+#define IA64_SIGCONTEXT_FLAGS_OFFSET 0 /* 0x0 */
+#define IA64_SIGCONTEXT_CFM_OFFSET 48 /* 0x30 */
+#define IA64_SIGCONTEXT_FR6_OFFSET 560 /* 0x230 */
+
+#endif /* _ASM_IA64_OFFSETS_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
new file mode 100644
index 000000000..64d044599
--- /dev/null
+++ b/include/asm-ia64/page.h
@@ -0,0 +1,134 @@
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+
+#include <asm/types.h>
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT 12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT 13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT 14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT 16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#ifdef __ASSEMBLY__
+# define __pa(x) ((x) - PAGE_OFFSET)
+# define __va(x) ((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY__ */
+# ifdef __KERNEL__
+# define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+# ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+# else /* !STRICT_MM_TYPECHECKS */
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+# endif /* !STRICT_MM_TYPECHECKS */
+
+/*
+ * Note: the MAP_NR() macro can't use __pa() because MAP_NR(X) MUST
+ * map to something >= max_mapnr if X is outside the identity mapped
+ * kernel space.
+ */
+
+/*
+ * The dense variant can be used as long as the size of memory holes isn't
+ * very big.
+ */
+#define MAP_NR_DENSE(addr) (((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/*
+ * This variant works well for the SGI SN1 architecture (which does have huge
+ * holes in the memory address space).
+ */
+#define MAP_NR_SN1(addr) (((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+#ifdef CONFIG_IA64_GENERIC
+# define MAP_NR(addr) platform_map_nr(addr)
+#elif defined (CONFIG_IA64_SN_SN1_SIM)
+# define MAP_NR(addr) MAP_NR_SN1(addr)
+#else
+# define MAP_NR(addr) MAP_NR_DENSE(addr)
+#endif
+
+# endif /* __KERNEL__ */
+
+typedef union ia64_va {
+ struct {
+ unsigned long off : 61; /* intra-region offset */
+ unsigned long reg : 3; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero. They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
+#define PAGE_BUG(page) do { BUG(); } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#define PAGE_OFFSET 0xe000000000000000
+
+#endif /* _ASM_IA64_PAGE_H */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
new file mode 100644
index 000000000..1ebcba0f4
--- /dev/null
+++ b/include/asm-ia64/pal.h
@@ -0,0 +1,1324 @@
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on version 2.4 of the manual "Enhanced Mode Processor
+ * Abstraction Layer".
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ *
+ * 99/10/01 davidm Make sure we pass zero for reserved parameters.
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH 1 /* flush i/d cache */
+#define PAL_CACHE_INFO 2 /* get detailed i/d cache info */
+#define PAL_CACHE_INIT 3 /* initialize i/d cache */
+#define PAL_CACHE_SUMMARY 4 /* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB 5 /* list supported memory attributes */
+#define PAL_PTCE_INFO 6 /* purge TLB info */
+#define PAL_VM_INFO 7 /* return supported virtual memory features */
+#define PAL_VM_SUMMARY 8 /* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES 9 /* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES 10 /* set processor bus features */
+#define PAL_DEBUG_INFO 11 /* get number of debug registers */
+#define PAL_FIXED_ADDR 12 /* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE 13 /* base frequency of the platform */
+#define PAL_FREQ_RATIOS 14 /* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO 15 /* return performance monitor info */
+#define PAL_PLATFORM_ADDR 16 /* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES 17 /* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES 18 /* enable/disable configurable processor features */
+#define PAL_RSE_INFO 19 /* return rse information */
+#define PAL_VERSION 20 /* return version of PAL code */
+#define PAL_MC_CLEAR_LOG 21 /* clear all processor log info */
+#define PAL_MC_DRAIN 22 /* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED 23 /* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE 24 /* get processor dynamic state */
+#define PAL_MC_ERROR_INFO 25 /* get processor MCA info and static state */
+#define PAL_MC_RESUME 26 /* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM 27 /* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT 28 /* enter the low power HALT state */
+#define PAL_HALT_LIGHT 29 /* enter the low power light halt state*/
+#define PAL_COPY_INFO 30 /* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT 31 /* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT 32 /* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV 33 /* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST 37 /* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */
+#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
+#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
+
+#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
+#define PAL_TEST_PROC 258 /* perform late processor self-test */
+#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ 261 /* read contents of translation register */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64 pal_status_t;
+
+#define PAL_STATUS_SUCCESS 0 /* No error */
+#define PAL_STATUS_UNIMPLEMENTED -1 /* Unimplemented procedure */
+#define PAL_STATUS_EINVAL -2 /* Invalid argument */
+#define PAL_STATUS_ERROR -3 /* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL -4 /* Could not initialize the
+ * specified level and type of
+ * cache without side effects
+ * and "restrict" was 1
+ */
+
+/* Processor cache level in the hierarchy */
+typedef u64 pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0 0 /* L0 */
+#define PAL_CACHE_LEVEL_L1 1 /* L1 */
+#define PAL_CACHE_LEVEL_L2 2 /* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64 pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */
+#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_NO_INVALIDATE 0 /* Don't invalidate clean lines */
+#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */
+
+/* Processor cache line size in bytes */
+typedef int pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64 pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
+
+typedef struct pal_freq_ratio {
+ u64 den : 32, num : 32; /* numerator & denominator */
+} itc_ratio, proc_ratio;
+
+typedef union pal_cache_config_info_1_s {
+ struct {
+ u64 u : 1, /* 0 Unified cache ? */
+ reserved : 5, /* 7-3 Reserved */
+ at : 2, /* 2-1 Cache mem attr*/
+ associativity : 8, /* 16-8 Associativity*/
+ line_size : 8, /* 23-17 Line size */
+ stride : 8, /* 31-24 Stride */
+ store_latency : 8, /*39-32 Store latency*/
+ load_latency : 8, /* 47-40 Load latency*/
+ store_hints : 8, /* 55-48 Store hints*/
+ load_hints : 8; /* 63-56 Load hints */
+ } pcci1_bits;
+ u64 pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef union pal_cache_config_info_2_s {
+ struct {
+ u64 cache_size : 32, /*cache size in bytes*/
+
+
+ alias_boundary : 8, /* 39-32 aliased addr
+ * separation for max
+ * performance.
+ */
+ tag_ls_bit : 8, /* 47-40 LSb of addr*/
+ tag_ms_bit : 8, /* 55-48 MSb of addr*/
+ reserved : 8; /* 63-56 Reserved */
+ } pcci2_bits;
+ u64 pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+ pal_status_t pcci_status;
+ pal_cache_config_info_1_t pcci_info_1;
+ pal_cache_config_info_2_t pcci_info_2;
+ u64 pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hint pcci_info_1.pcci1.load_hints
+#define pcci_st_hint pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride pcci_info_1.pcci1_bits.stride
+#define pcci_line_size pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr pcci_info_1.pcci1_bits.at
+#define pcci_unified pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT 0 /* Write through cache */
+#define PAL_CACHE_ATTR_WB 1 /* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write
+ * back depending on TLB
+ * memory attributes
+ */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */
+
+/* Processor cache protection information */
+typedef union pal_cache_protection_element_u {
+ u32 pcpi_data;
+ struct {
+ u32 data_bits : 8, /* # data bits covered by
+ * each unit of protection
+ */
+
+ tagprot_lsb : 6, /* Least -do- */
+ tagprot_msb : 6, /* Most Sig. tag address
+ * bit that this
+ * protection covers.
+ */
+ prot_bits : 6, /* # of protection bits */
+ method : 4, /* Protection method */
+ t_d : 2; /* Indicates which part
+ * of the cache this
+ * protection encoding
+ * applies.
+ */
+ } pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part pcp_info.t_d
+#define pcpi_prot_method pcp_info.method
+#define pcpi_prot_bits pcp_info.prot_bits
+#define pcpi_tagprot_msb pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb pcp_info.tagprot_lsb
+#define pcpi_data_bits pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */
+#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_MAX 6
+
+
+typedef struct pal_cache_protection_info_s {
+ pal_status_t pcpi_status;
+ pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+ u64 pclid_data;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ reserved : 32; /* 63-32 is reserved*/
+ } pclid_info_read;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ mesi : 8, /* 39-32 cache line
+ * state
+ */
+ start : 8, /* 47-40 lsb of data to
+ * invert
+ */
+ length : 8, /* 55-48 #bits to
+ * invert
+ */
+ trigger : 8; /* 63-56 Trigger error
+ * by doing a load
+ * after the write
+ */
+
+ } pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part pclid_info_read.part
+#define pclid_read_way pclid_info_read.way
+#define pclid_read_level pclid_info_read.level
+#define pclid_read_cache_type pclid_info_read.cache_type
+
+#define pclid_write_trigger pclid_info_write.trigger
+#define pclid_write_length pclid_info_write.length
+#define pclid_write_start pclid_info_write.start
+#define pclid_write_mesi pclid_info_write.mesi
+#define pclid_write_part pclid_info_write.part
+#define pclid_write_way pclid_info_write.way
+#define pclid_write_level pclid_info_write.level
+#define pclid_write_cache_type pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag
+ * protection
+ */
+typedef struct pal_cache_line_info_s {
+ pal_status_t pcli_status; /* Return status of the read cache line
+ * info call.
+ */
+ u64 pcli_data; /* 64-bit data, tag, protection bits .. */
+ u64 pcli_data_len; /* data length in bits */
+ pal_cache_line_state_t pcli_cache_line_state; /* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine Check related definitions */
+
+/* Pending event status bits */
+typedef u64 pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA (1 << 0)
+#define PAL_MC_PENDING_INIT (1 << 1)
+
+/* Error information type */
+typedef u64 pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR 0 /* Processor */
+#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */
+#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */
+#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */
+#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation
+ * dependent
+ */
+
+
+typedef struct pal_process_state_info_s {
+ u64 reserved1 : 2,
+ rz : 1, /* PAL_CHECK processor
+ * rendezvous
+ * successful.
+ */
+
+ ra : 1, /* PAL_CHECK attempted
+ * a rendezvous.
+ */
+ me : 1, /* Distinct multiple
+ * errors occurred
+ */
+
+ mn : 1, /* Min. state save
+ * area has been
+ * registered with PAL
+ */
+
+ sy : 1, /* Storage integrity
+ * synched
+ */
+
+
+ co : 1, /* Continuable */
+ ci : 1, /* MC isolated */
+ us : 1, /* Uncontained storage
+ * damage.
+ */
+
+
+ hd : 1, /* Non-essential hw
+ * lost (no loss of
+ * functionality)
+ * causing the
+ * processor to run in
+ * degraded mode.
+ */
+
+ tl : 1, /* 1 => MC occurred
+ * after an instr was
+ * executed but before
+ * the trap that
+ * resulted from instr
+ * execution was
+ * generated.
+ * (Trap Lost )
+ */
+ op : 3, /* Operation that
+ * caused the machine
+ * check
+ */
+
+ dy : 1, /* Processor dynamic
+ * state valid
+ */
+
+
+ in : 1, /* 0 = MC, 1 = INIT */
+ rs : 1, /* RSE valid */
+ cm : 1, /* MC corrected */
+ ex : 1, /* MC is expected */
+ cr : 1, /* Control regs valid*/
+ pc : 1, /* Perf cntrs valid */
+ dr : 1, /* Debug regs valid */
+ tr : 1, /* Translation regs
+ * valid
+ */
+ rr : 1, /* Region regs valid */
+ ar : 1, /* App regs valid */
+ br : 1, /* Branch regs valid */
+ pr : 1, /* Predicate registers
+ * valid
+ */
+
+ fp : 1, /* fp registers valid*/
+ b1 : 1, /* Preserved bank one
+ * general registers
+ * are valid
+ */
+ b0 : 1, /* Preserved bank zero
+ * general registers
+ * are valid
+ */
+ gr : 1, /* General registers
+ * are valid
+ * (excl. banked regs)
+ */
+ dsize : 16, /* size of dynamic
+ * state returned
+ * by the processor
+ */
+
+ reserved2 : 12,
+ cc : 1, /* Cache check */
+ tc : 1, /* TLB check */
+ bc : 1, /* Bus check */
+ uc : 1; /* Unknown check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+ u64 reserved1 : 16,
+ way : 5, /* Way in which the
+ * error occurred
+ */
+ reserved2 : 1,
+ mc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+
+ wv : 1, /* Way field valid */
+ op : 3, /* Type of cache
+ * operation that
+ * caused the machine
+ * check.
+ */
+
+ dl : 1, /* Failure in data part
+ * of cache line
+ */
+ tl : 1, /* Failure in tag part
+ * of cache line
+ */
+ dc : 1, /* Failure in dcache */
+ ic : 1, /* Failure in icache */
+ index : 24, /* Cache line index */
+ mv : 1, /* mesi valid */
+ mesi : 3, /* Cache line state */
+ level : 4; /* Cache level */
+
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+ u64 tr_slot : 8, /* Slot# of TR where
+ * error occurred
+ */
+ reserved2 : 8,
+ dtr : 1, /* Fail in data TR */
+ itr : 1, /* Fail in inst TR */
+ dtc : 1, /* Fail in data TC */
+ itc : 1, /* Fail in inst. TC */
+ mc : 1, /* Machine check corrected */
+ reserved1 : 43;
+
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+ u64 size : 5, /* Xaction size*/
+ ib : 1, /* Internal bus error */
+ eb : 1, /* External bus error */
+ cc : 1, /* Error occurred
+ * during cache-cache
+ * transfer.
+ */
+ type : 8, /* Bus xaction type*/
+ sev : 5, /* Bus error severity*/
+ tv : 1, /* Targ addr valid */
+ rp : 1, /* Resp addr valid */
+ rq : 1, /* Req addr valid */
+ bsi : 8, /* Bus error status
+ * info
+ */
+ mc : 1, /* Machine check corrected */
+ reserved1 : 31;
+} pal_bus_check_info_t;
+
+typedef union pal_mc_error_info_u {
+ u64 pmei_data;
+ pal_processor_state_info_t pme_processor;
+ pal_cache_check_info_t pme_cache;
+ pal_tlb_check_info_t pme_tlb;
+ pal_bus_check_info_t pme_bus;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check pme_processor.uc
+#define pmci_proc_bus_check pme_processor.bc
+#define pmci_proc_tlb_check pme_processor.tc
+#define pmci_proc_cache_check pme_processor.cc
+#define pmci_proc_dynamic_state_size pme_processor.dsize
+#define pmci_proc_gpr_valid pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid pme_processor.b1
+#define pmci_proc_fp_valid pme_processor.fp
+#define pmci_proc_predicate_regs_valid pme_processor.pr
+#define pmci_proc_branch_regs_valid pme_processor.br
+#define pmci_proc_app_regs_valid pme_processor.ar
+#define pmci_proc_region_regs_valid pme_processor.rr
+#define pmci_proc_translation_regs_valid pme_processor.tr
+#define pmci_proc_debug_regs_valid pme_processor.dr
+#define pmci_proc_perf_counters_valid pme_processor.pc
+#define pmci_proc_control_regs_valid pme_processor.cr
+#define pmci_proc_machine_check_expected pme_processor.ex
+#define pmci_proc_machine_check_corrected pme_processor.cm
+#define pmci_proc_rse_valid pme_processor.rs
+#define pmci_proc_machine_check_or_init pme_processor.in
+#define pmci_proc_dynamic_state_valid pme_processor.dy
+#define pmci_proc_operation pme_processor.op
+#define pmci_proc_trap_lost pme_processor.tl
+#define pmci_proc_hardware_damage pme_processor.hd
+#define pmci_proc_uncontained_storage_damage pme_processor.us
+#define pmci_proc_machine_check_isolated pme_processor.ci
+#define pmci_proc_continuable pme_processor.co
+#define pmci_proc_storage_intergrity_synced pme_processor.sy
+#define pmci_proc_min_state_save_area_regd pme_processor.mn
+#define pmci_proc_distinct_multiple_errors pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete pme_processor.rz
+
+
+#define pmci_cache_level pme_cache.level
+#define pmci_cache_line_state pme_cache.mesi
+#define pmci_cache_line_state_valid pme_cache.mv
+#define pmci_cache_line_index pme_cache.index
+#define pmci_cache_instr_cache_fail pme_cache.ic
+#define pmci_cache_data_cache_fail pme_cache.dc
+#define pmci_cache_line_tag_fail pme_cache.tl
+#define pmci_cache_line_data_fail pme_cache.dl
+#define pmci_cache_operation pme_cache.op
+#define pmci_cache_way_valid pme_cache.wv
+#define pmci_cache_target_address_valid pme_cache.tv
+#define pmci_cache_way pme_cache.way
+#define pmci_cache_mc pme_cache.mc
+
+#define pmci_tlb_instr_translation_cache_fail pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot pme_tlb.tr_slot
+#define pmci_tlb_mc pme_tlb.mc
+
+#define pmci_bus_status_info pme_bus.bsi
+#define pmci_bus_req_address_valid pme_bus.rq
+#define pmci_bus_resp_address_valid pme_bus.rp
+#define pmci_bus_target_address_valid pme_bus.tv
+#define pmci_bus_error_severity pme_bus.sev
+#define pmci_bus_transaction_type pme_bus.type
+#define pmci_bus_cache_cache_transfer pme_bus.cc
+#define pmci_bus_transaction_size pme_bus.size
+#define pmci_bus_internal_error pme_bus.ib
+#define pmci_bus_external_error pme_bus.eb
+#define pmci_bus_mc pme_bus.mc
+
+
+typedef struct pal_min_state_area_s {
+ u64 pmsa_reserved[26];
+ u64 pmsa_xfs;
+ u64 pmsa_xpsr;
+ u64 pmsa_xip;
+ u64 pmsa_rsc;
+ u64 pmsa_br0;
+ u64 pmsa_pr;
+ u64 pmsa_bank0_gr[16];
+ u64 pmsa_gr[16];
+ u64 pmsa_nat_bits;
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates reason of call failure.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ s64 status;
+ u64 v0;
+ u64 v1;
+ u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed. Reserved parameters are not optional
+ * parameters.
+ */
+#ifdef __GCC_MULTIREG_RETVALS__
+ extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
+ /*
+ * If multi-register return values are returned according to the
+ * ia-64 calling convention, we can call ia64_pal_call_static
+ * directly.
+ */
+# define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0,a1, a2, a3)
+#else
+ extern void ia64_pal_call_static (struct ia64_pal_retval *, u64, u64, u64, u64);
+ /*
+ * If multi-register return values are returned through an aggregate
+ * allocated in the caller, we need to use the stub implemented in
+ * sal-stub.S.
+ */
+# define PAL_CALL(iprv,a0,a1,a2,a3) ia64_pal_call_static(&iprv, a0, a1, a2, a3)
+#endif
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern ia64_pal_handler ia64_pal;
+
+extern pal_cache_config_info_t l0d_cache_config_info;
+extern pal_cache_config_info_t l0i_cache_config_info;
+extern pal_cache_config_info_t l1_cache_config_info;
+extern pal_cache_config_info_t l2_cache_config_info;
+
+extern pal_cache_protection_info_t l0d_cache_protection_info;
+extern pal_cache_protection_info_t l0i_cache_protection_info;
+extern pal_cache_protection_info_t l1_cache_protection_info;
+extern pal_cache_protection_info_t l2_cache_protection_info;
+
+extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+
+extern void pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+ u64 pal_bus_features_val;
+ struct {
+ u64 pbf_reserved1 : 29;
+ u64 pbf_req_bus_parking : 1;
+ u64 pbf_bus_lock_mask : 1;
+ u64 pbf_enable_half_xfer_rate : 1;
+ u64 pbf_reserved2 : 22;
+ u64 pbf_disable_xaction_queueing : 1;
+ u64 pbf_disable_resp_err_check : 1;
+ u64 pbf_disable_berr_check : 1;
+ u64 pbf_disable_bus_req_internal_err_signal : 1;
+ u64 pbf_disable_bus_req_berr_signal : 1;
+ u64 pbf_disable_bus_init_event_check : 1;
+ u64 pbf_disable_bus_init_event_signal : 1;
+ u64 pbf_disable_bus_addr_err_check : 1;
+ u64 pbf_disable_bus_addr_err_signal : 1;
+ u64 pbf_disable_bus_data_err_check : 1;
+ } pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+extern inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+ pal_bus_features_u_t *features_status,
+ pal_bus_features_u_t *features_control)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+ if (features_avail)
+ features_avail->pal_bus_features_val = iprv.v0;
+ if (features_status)
+ features_status->pal_bus_features_val = iprv.v1;
+ if (features_control)
+ features_control->pal_bus_features_val = iprv.v2;
+ return iprv.status;
+}
+/* Enables/disables specific processor bus features */
+extern inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+ return iprv.status;
+}
+
+/* Flush the processor instruction or data caches */
+extern inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 plat_ack)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, plat_ack);
+ return iprv.status;
+}
+
+
+/* Initialize the processor controlled caches */
+extern inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict);
+ return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+extern inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+ return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+extern inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
+ return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+extern inline s64
+ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+ if (cache_levels)
+ *cache_levels = iprv.v0;
+ if (unique_caches)
+ *unique_caches = iprv.v1;
+ return iprv.status;
+}
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+extern inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
+ return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+extern inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+ u64 *buffer_size, u64 *buffer_align)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+ if (buffer_size)
+ *buffer_size = iprv.v0;
+ if (buffer_align)
+ *buffer_align = iprv.v1;
+ return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+extern inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+ if (pal_proc_offset)
+ *pal_proc_offset = iprv.v0;
+ return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+extern inline s64
+ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+ if (inst_regs)
+ *inst_regs = iprv.v0;
+ if (data_regs)
+ *data_regs = iprv.v1;
+
+ return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+extern inline s64
+ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+ return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+extern inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+ if (global_unique_addr)
+ *global_unique_addr = iprv.v0;
+ return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+extern inline s64
+ia64_pal_freq_base (u64 *platform_base_freq)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+ if (platform_base_freq)
+ *platform_base_freq = iprv.v0;
+ return iprv.status;
+}
+
+/*
+ * Get the ratios for processor frequency, bus frequency and interval timer to
+ * to base frequency of the platform
+ */
+extern inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+ struct pal_freq_ratio *itc_ratio)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+ if (proc_ratio)
+ *(u64 *)proc_ratio = iprv.v0;
+ if (bus_ratio)
+ *(u64 *)bus_ratio = iprv.v1;
+ if (itc_ratio)
+ *(u64 *)itc_ratio = iprv.v2;
+ return iprv.status;
+}
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+extern inline s64
+ia64_pal_halt (u64 halt_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+ return iprv.status;
+}
+typedef union pal_power_mgmt_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 exit_latency : 16,
+ entry_latency : 16,
+ power_consumption : 32;
+ } pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about processor's optional power management capabilities. */
+extern inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+ return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+extern inline s64
+ia64_pal_halt_light (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+extern inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+ if (pending_vector)
+ *pending_vector = iprv.v0;
+ return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+extern inline s64
+ia64_pal_mc_drain (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+extern inline s64
+ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+ if (size)
+ *size = iprv.v0;
+ if (pds)
+ *pds = iprv.v1;
+ return iprv.status;
+}
+
+/* Return processor machine check information */
+extern inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+ if (size)
+ *size = iprv.v0;
+ if (error_info)
+ *error_info = iprv.v1;
+ return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+extern inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+ if (previous)
+ *previous = iprv.v0;
+ return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+extern inline s64
+ia64_pal_mc_register_mem (u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+ return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+extern inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+ return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+extern inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+ if (mem_attrib)
+ *mem_attrib = iprv.v0;
+ return iprv.status;
+}
+
+/* Return the amount of memory needed for second phase of processor
+ * self-test and the required alignment of memory.
+ */
+extern inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+ if (bytes_needed)
+ *bytes_needed = iprv.v0;
+ if (alignment)
+ *alignment = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 generic : 8,
+ width : 8,
+ cycles : 8,
+ retired : 8,
+ reserved : 32;
+ } pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+extern inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+ if (pm_info)
+ pm_info->ppmi_data = iprv.v0;
+ return iprv.status;
+}
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+extern inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+ return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+extern inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+ return iprv.status;
+}
+
+#ifdef TBD
+struct pal_features_s;
+/* Provide information about configurable processor features */
+extern inline s64
+ia64_pal_proc_get_features (struct pal_features_s *features_avail,
+ struct pal_features_s *features_status,
+ struct pal_features_s *features_control)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
+ return iprv.status;
+}
+/* Enable/disable processor dependent features */
+extern inline s64
+ia64_pal_proc_set_features (feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+ return iprv.status;
+}
+
+#endif
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+ u64 base;
+ u32 count[2];
+ u32 stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+extern inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+ struct ia64_pal_retval iprv;
+
+ if (!ptce)
+ return -1;
+
+ PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+ if (iprv.status == 0) {
+ ptce->base = iprv.v0;
+ ptce->count[0] = iprv.v1 >> 32;
+ ptce->count[1] = iprv.v1 & 0xffffffff;
+ ptce->stride[0] = iprv.v2 >> 32;
+ ptce->stride[1] = iprv.v2 & 0xffffffff;
+ }
+ return iprv.status;
+}
+
+/* Return info about implemented application and control registers. */
+extern inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+ if (reg_info_1)
+ *reg_info_1 = iprv.v0;
+ if (reg_info_2)
+ *reg_info_2 = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_hints_u {
+ u64 ph_data;
+ struct {
+ u64 si : 1,
+ li : 1,
+ reserved : 62;
+ } pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+extern inline s64
+ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+ if (num_phys_stacked)
+ *num_phys_stacked = iprv.v0;
+ if (hints)
+ hints->ph_data = iprv.v1;
+ return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cause cache and TLB coherency to be maintained.
+ * This is usually called in IA-32 mode.
+ */
+extern inline s64
+ia64_pal_shutdown (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+extern inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+ if (self_test_state)
+ *self_test_state = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_version_u {
+ u64 pal_version_val;
+ struct {
+ u64 pv_pal_b_rev : 8;
+ u64 pv_pal_b_model : 8;
+ u64 pv_reserved1 : 8;
+ u64 pv_pal_vendor : 8;
+ u64 pv_pal_a_rev : 8;
+ u64 pv_pal_a_model : 8;
+ u64 pv_reserved2 : 16;
+ } pal_version_s;
+} pal_version_u_t;
+
+
+/* Return PAL version information */
+extern inline s64
+ia64_pal_version (pal_version_u_t *pal_version)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VERSION, 0, 0, 0);
+ if (pal_version)
+ pal_version->pal_version_val = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_tc_info_u {
+ u64 pti_val;
+ struct {
+ u64 num_sets : 8,
+ associativity : 8,
+ num_entries : 16,
+ pf : 1,
+ unified : 1,
+ reduce_tr : 1,
+ reserved : 29;
+ } pal_tc_info_s;
+} pal_tc_info_u_t;
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+extern inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+ if (tc_info)
+ tc_info->pti_val = iprv.v0;
+ if (tc_pages)
+ *tc_pages = iprv.v1;
+ return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+extern inline s64
+ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+ if (tr_pages)
+ *tr_pages = iprv.v0;
+ if (vw_pages)
+ *vw_pages = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+ u64 pvi1_val;
+ struct {
+ u64 vw : 1,
+ phys_add_size : 7,
+ key_size : 16,
+ max_pkr : 8,
+ hash_tag_id : 8,
+ max_dtr_entry : 8,
+ max_itr_entry : 8,
+ max_unique_tcs : 8,
+ num_tc_levels : 8;
+ } pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+typedef union pal_vm_info_2_u {
+ u64 pvi2_val;
+ struct {
+ u64 impl_va_msb : 8,
+ rid_size : 8,
+ reserved : 48;
+ } pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+extern inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+ if (vm_info_1)
+ vm_info_1->pvi1_val = iprv.v0;
+ if (vm_info_2)
+ vm_info_2->pvi2_val = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+ u64 piv_val;
+ struct {
+ u64 access_rights_valid : 1,
+ priv_level_valid : 1,
+ dirty_bit_valid : 1,
+ mem_attr_valid : 1,
+ reserved : 60;
+ } pal_itr_valid_s;
+} pal_itr_valid_u_t;
+
+/* Read a translation register */
+extern inline s64
+ia64_pal_vm_tr_read (u64 reg_num, u64 tr_type, u64 tr_buffer, pal_itr_valid_u_t *itr_valid)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_TR_READ, reg_num, tr_type, tr_buffer);
+ if (itr_valid)
+ itr_valid->piv_val = iprv.v0;
+ return iprv.status;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
new file mode 100644
index 000000000..e93d4756a
--- /dev/null
+++ b/include/asm-ia64/param.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_IA64_PARAM_H
+#define _ASM_IA64_PARAM_H
+
+/*
+ * Fundamental kernel parameters.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_IA64_HP_SIM
+/*
+ * Yeah, simulating stuff is slow, so let us catch some breath between
+ * timer interrupts...
+ */
+# define HZ 20
+#endif
+
+#ifdef CONFIG_IA64_DIG
+# ifdef CONFIG_IA64_SOFTSDV_HACKS
+# define HZ 20
+# else
+# define HZ 100
+# endif
+#endif
+
+#ifndef HZ
+# define HZ 1024
+#endif
+
+#define EXEC_PAGESIZE 65536
+
+#ifndef NGROUPS
+# define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+# define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _ASM_IA64_PARAM_H */
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
new file mode 100644
index 000000000..bb3e4fb21
--- /dev/null
+++ b/include/asm-ia64/pci.h
@@ -0,0 +1,148 @@
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes or
+ * architectures with incomplete PCI setup by the loader.
+ */
+#define pcibios_assign_all_busses() 0
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/*
+ * Dynamic DMA mapping API.
+ * IA-64 has everything mapped statically.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+struct pci_dev;
+
+/*
+ * Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+extern void *pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
+
+/*
+ * Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what as passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent (struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern inline dma_addr_t
+pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size)
+{
+ return virt_to_bus(ptr);
+}
+
+/*
+ * Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern inline void
+pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size)
+{
+ /* Nothing to do */
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern inline int
+pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
+{
+ return nents;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern inline void
+pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
+{
+ /* Nothing to do */
+}
+
+/*
+ * Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern inline void
+pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size)
+{
+ /* Nothing to do */
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA
+ * translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern inline void
+pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems)
+{
+ /* Nothing to do */
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg) ((sg)->length)
+
+#endif /* _ASM_IA64_PCI_H */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
new file mode 100644
index 000000000..becc7422f
--- /dev/null
+++ b/include/asm-ia64/pgalloc.h
@@ -0,0 +1,257 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
+ */
+
+#include <linux/config.h>
+
+#include <linux/threads.h>
+
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+/*
+ * Very stupidly, we used to get new pgd's and pmd's, init their contents
+ * to point to the NULL versions of the next level page table, later on
+ * completely re-init them the same way, then free them up. This wasted
+ * a lot of work and caused unnecessary memory traffic. How broken...
+ * We fix this by caching them.
+ */
+#define pgd_quicklist (my_cpu_data.pgd_quick)
+#define pmd_quicklist (my_cpu_data.pmd_quick)
+#define pte_quicklist (my_cpu_data.pte_quick)
+#define pgtable_cache_size (my_cpu_data.pgtable_cache_sz)
+
+extern __inline__ pgd_t*
+get_pgd_slow (void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (ret)
+ clear_page(ret);
+ return ret;
+}
+
+extern __inline__ pgd_t*
+get_pgd_fast (void)
+{
+ unsigned long *ret = pgd_quicklist;
+
+ if (ret != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ --pgtable_cache_size;
+ }
+ return (pgd_t *)ret;
+}
+
+extern __inline__ pgd_t*
+pgd_alloc (void)
+{
+ pgd_t *pgd;
+
+ pgd = get_pgd_fast();
+ if (!pgd)
+ pgd = get_pgd_slow();
+ return pgd;
+}
+
+extern __inline__ void
+free_pgd_fast (pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ ++pgtable_cache_size;
+}
+
+extern __inline__ pmd_t *
+get_pmd_slow (void)
+{
+ pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
+
+ if (pmd)
+ clear_page(pmd);
+ return pmd;
+}
+
+extern __inline__ pmd_t *
+get_pmd_fast (void)
+{
+ unsigned long *ret = (unsigned long *)pmd_quicklist;
+
+ if (ret != NULL) {
+ pmd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ --pgtable_cache_size;
+ }
+ return (pmd_t *)ret;
+}
+
+extern __inline__ void
+free_pmd_fast (pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
+ pmd_quicklist = (unsigned long *) pmd;
+ ++pgtable_cache_size;
+}
+
+extern __inline__ void
+free_pmd_slow (pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow (pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *
+get_pte_fast (void)
+{
+ unsigned long *ret = (unsigned long *)pte_quicklist;
+
+ if (ret != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ --pgtable_cache_size;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void
+free_pte_fast (pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ ++pgtable_cache_size;
+}
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
+#define pmd_free(pmd) free_pmd_fast(pmd)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+
+extern __inline__ pte_t*
+pte_alloc (pmd_t *pmd, unsigned long vmaddr)
+{
+ unsigned long offset;
+
+ offset = (vmaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t *pte_page = get_pte_fast();
+
+ if (!pte_page)
+ return get_pte_slow(pmd, offset);
+ pmd_set(pmd, pte_page);
+ return pte_page + offset;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+extern __inline__ pmd_t*
+pmd_alloc (pgd_t *pgd, unsigned long vmaddr)
+{
+ unsigned long offset;
+
+ offset = (vmaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+ if (pgd_none(*pgd)) {
+ pmd_t *pmd_page = get_pmd_fast();
+
+ if (!pmd_page)
+ pmd_page = get_pmd_slow();
+ if (pmd_page) {
+ if (pgd_none(*pgd)) {
+ pgd_set(pgd, pmd_page);
+ return pmd_page + offset;
+ } else
+ free_pmd_fast(pmd_page);
+ } else
+ return NULL;
+ }
+ if (pgd_bad(*pgd)) {
+ __handle_bad_pgd(pgd);
+ return NULL;
+ }
+ return (pmd_t *) pgd_page(*pgd) + offset;
+}
+
+#define pte_alloc_kernel(pmd, addr) pte_alloc(pmd, addr)
+#define pmd_alloc_kernel(pgd, addr) pmd_alloc(pgd, addr)
+
+extern int do_check_pgt_cache (int, int);
+
+/*
+ * This establishes kernel virtual mappings (e.g., as a result of a
+ * vmalloc call). Since ia-64 uses a separate kernel page table,
+ * there is nothing to do here... :)
+ */
+#define set_pgdir(vmaddr, entry) do { } while(0)
+
+/*
+ * Now for some TLB flushing routines. This is the kind of stuff that
+ * can be very expensive, so try to avoid them whenever possible.
+ */
+
+/*
+ * Flush everything (kernel mapping may also have changed due to
+ * vmalloc/vfree).
+ */
+extern void __flush_tlb_all (void);
+
+#ifdef CONFIG_SMP
+ extern void smp_flush_tlb_all (void);
+# define flush_tlb_all() smp_flush_tlb_all()
+#else
+# define flush_tlb_all() __flush_tlb_all()
+#endif
+
+/*
+ * Serialize usage of ptc.g:
+ */
+extern spinlock_t ptcg_lock;
+
+/*
+ * Flush a specified user mapping
+ */
+extern __inline__ void
+flush_tlb_mm (struct mm_struct *mm)
+{
+ if (mm) {
+ mm->context = 0;
+ if (mm == current->active_mm) {
+ /* This is called, e.g., as a result of exec(). */
+ get_new_mmu_context(mm);
+ reload_context(mm);
+ }
+ }
+}
+
+extern void flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end);
+
+/*
+ * Page-granular tlb flush.
+ *
+ * do a tbisd (type = 2) normally, and a tbis (type = 3)
+ * if it is an executable mapping. We want to avoid the
+ * itlb flush, because that potentially also does a
+ * icache flush.
+ */
+static __inline__ void
+flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
+{
+ flush_tlb_range(vma->vm_mm, addr, addr + PAGE_SIZE);
+}
+
+#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
new file mode 100644
index 000000000..2defe7cdf
--- /dev/null
+++ b/include/asm-ia64/pgtable.h
@@ -0,0 +1,390 @@
+#ifndef _ASM_IA64_PGTABLE_H
+#define _ASM_IA64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ia-64 page table tree.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/types.h>
+
+/* Size of physical address space: */
+#define IA64_PHYS_BITS 50 /* EAS2.5 defines 50 bits of ppn */
+#define IA64_PHYS_SIZE (__IA64_UL(1) << IA64_PHYS_BITS)
+
+/* Is ADDR a valid kernel address? */
+#define kern_addr_valid(addr) ((addr) >= TASK_SIZE)
+
+/* Is ADDR a valid physical address? */
+#define phys_addr_valid(addr) ((addr) < IA64_PHYS_SIZE)
+
+/*
+ * First, define the various bits in a PTE. Note that the PTE format
+ * matches the VHPT short format, the first doubleword of the VHPD long
+ * format, and the first doubleword of the TLB insertion format.
+ */
+#define _PAGE_P (1 << 0) /* page present bit */
+#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
+#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
+#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
+#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
+#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
+#define _PAGE_MA_MASK (0x7 << 2)
+#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
+#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
+#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
+#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
+#define _PAGE_PL_MASK (3 << 7)
+#define _PAGE_AR_R (0 << 9) /* read only */
+#define _PAGE_AR_RX (1 << 9) /* read & execute */
+#define _PAGE_AR_RW (2 << 9) /* read & write */
+#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
+#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
+#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
+#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
+#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
+#define _PAGE_AR_MASK (7 << 9)
+#define _PAGE_AR_SHIFT 9
+#define _PAGE_A (1 << 5) /* page accessed bit */
+#define _PAGE_D (1 << 6) /* page dirty bit */
+#define _PAGE_PPN_MASK ((IA64_PHYS_SIZE - 1) & ~0xfffUL)
+#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
+#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
+
+#define _PFN_MASK _PAGE_PPN_MASK
+#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D)
+
+#define _PAGE_SIZE_4K 12
+#define _PAGE_SIZE_8K 13
+#define _PAGE_SIZE_16K 14
+#define _PAGE_SIZE_64K 16
+#define _PAGE_SIZE_256K 18
+#define _PAGE_SIZE_1M 20
+#define _PAGE_SIZE_4M 22
+#define _PAGE_SIZE_16M 24
+#define _PAGE_SIZE_64M 26
+#define _PAGE_SIZE_256M 28
+
+#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
+#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
+#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD (__IA64_UL(1) << (PAGE_SHIFT-3))
+#define USER_PTRS_PER_PGD PTRS_PER_PGD
+
+/*
+ * Definitions for second level:
+ *
+ * PMD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SIZE (__IA64_UL(1) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PTRS_PER_PMD (__IA64_UL(1) << (PAGE_SHIFT-3))
+
+/*
+ * Definitions for third level:
+ */
+#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
+
+/* Number of pointers that fit on a page: this will go away. */
+#define PTRS_PER_PAGE (__IA64_UL(1) << (PAGE_SHIFT-3))
+
+# ifndef __ASSEMBLY__
+
+#include <asm/bitops.h>
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time
+ * they are used, the page is accessed. They are cleared only by the
+ * page-out routines
+ */
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
+#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RW)
+
+/*
+ * Next come the mappings that determine how mmap() protection bits
+ * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The
+ * _P version gets used for a private shared memory segment, the _S
+ * version gets used for a shared memory segment with MAP_SHARED on.
+ * In a private shared memory segment, we do a copy-on-write if a task
+ * attempts to write to the page.
+ */
+ /* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
+#define __P011 PAGE_READONLY /* ditto */
+#define __P100 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P111 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
+#define __S011 PAGE_SHARED
+#define __S100 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111 __pgprot(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_PL_3 | _PAGE_AR_RWX)
+
+#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page
+ * addresses:
+ */
+
+/*
+ * Given a pointer to an mem_map[] entry, return the kernel virtual
+ * address corresponding to that page.
+ */
+#define page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+
+/*
+ * Given a PTE, return the index of the mem_map[] entry corresponding
+ * to the page frame referenced by the PTE.
+ */
+#define pte_pagenr(x) ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT))
+
+/*
+ * Now for some cache flushing routines. This is the kind of stuff
+ * that can be very expensive, so try to avoid them whenever possible.
+ */
+
+/* Caches aren't brain-dead on the ia-64. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+extern void ia64_flush_icache_page (unsigned long addr);
+
+#define flush_icache_page(pg) ia64_flush_icache_page(page_address(pg))
+
+/*
+ * Now come the defines and routines to manage and access the three-level
+ * page table.
+ */
+
+/*
+ * On some architectures, special things need to be done when setting
+ * the PTE in a page table. Nothing special needs to be on ia-64.
+ */
+#define set_pte(ptep, pteval) (*(ptep) = (pteval))
+
+#define VMALLOC_START (0xa000000000000000+2*PAGE_SIZE)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END 0xbfffffffffffffff
+
+/*
+ * BAD_PAGETABLE is used when we need a bogus page-table, while
+ * BAD_PAGE is used for a bogus page.
+ *
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern pte_t ia64_bad_page (void);
+extern pmd_t *ia64_bad_pagetable (void);
+
+#define BAD_PAGETABLE ia64_bad_pagetable()
+#define BAD_PAGE ia64_bad_page()
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page,pgprot) \
+({ \
+ pte_t __pte; \
+ \
+ pte_val(__pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot); \
+ __pte; \
+})
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+
+#define page_pte_prot(page,prot) mk_pte(page, prot)
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
+#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
+/* pte_page() returns the "struct page *" corresponding to the PTE: */
+#define pte_page(pte) (mem_map + pte_pagenr(pte))
+
+#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!phys_addr_valid(pmd_val(pmd)))
+#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
+#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+
+#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = __pa(pmdp))
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (!phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
+#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) < 4)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_D)
+#define pte_young(pte) (pte_val(pte) & _PAGE_A)
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the
+ * 2nd bit in the access rights:
+ */
+#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+
+#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
+
+#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+
+/* The offset in the 1-level directory is given by the 3 region bits
+ (61..63) and the seven level-1 bits (33-39). */
+extern __inline__ pgd_t*
+pgd_offset (struct mm_struct *mm, unsigned long address)
+{
+ unsigned long region = address >> 61;
+ unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+ return mm->pgd + ((region << (PAGE_SHIFT - 6)) | l1index);
+}
+
+/* In the kernel's mapped region we have a full 43 bit space available and completely
+   ignore the region number (since we know it's in region number 5). */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,addr) \
+ ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/* Find an entry in the third-level page table.. */
+#define pte_offset(dir,addr) \
+ ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+
+extern void __handle_bad_pgd (pgd_t *pgd);
+extern void __handle_bad_pmd (pmd_t *pmd);
+
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain
+ * all the necessary information. However, we can use this macro
+ * to pre-install (override) a PTE that we know is needed anyhow.
+ *
+ * Asit says that on Itanium, it is generally faster to let the VHPT
+ * walker pick up a newly installed PTE (and VHPT misses should be
+ * extremely rare compared to normal misses). Also, since
+ * pre-installing the PTE has the problem that we may evict another
+ * TLB entry needlessly because we don't know for sure whether we need
+ * to update the iTLB or dTLB, I tend to prefer this solution, too.
+ * Also, this avoids nasty issues with forward progress (what if the
+ * newly installed PTE gets replaced before we return to the previous
+ * execution context?).
+ *
+ */
+#if 0
+# define update_mmu_cache(vma,address,pte)
+#else
+# define update_mmu_cache(vma,address,pte) \
+do { \
+ /* \
+ * XXX fix me!! \
+ * \
+ * It's not clear this is a win. We may end up pollute the \
+ * dtlb with itlb entries and vice versa (e.g., consider stack \
+ * pages that are normally marked executable). It would be \
+ * better to insert the TLB entry for the TLB cache that we \
+ * know needs the new entry. However, the update_mmu_cache() \
+ * arguments don't tell us whether we got here through a data \
+ * access or through an instruction fetch. Talk to Linus to \
+ * fix this. \
+ * \
+ * If you re-enable this code, you must disable the ptc code in \
+ * Entry 20 of the ivt. \
+ */ \
+ unsigned long flags; \
+ \
+ ia64_clear_ic(flags); \
+ ia64_itc((vma->vm_flags & PROT_EXEC) ? 0x3 : 0x2, address, pte_val(pte), PAGE_SHIFT); \
+ __restore_flags(flags); \
+} while (0)
+#endif
+
+#define SWP_TYPE(entry) (((entry).val >> 1) & 0xff)
+#define SWP_OFFSET(entry) ((entry).val >> 9)
+#define SWP_ENTRY(type,offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 9) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define module_map vmalloc
+#define module_unmap vfree
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+
+#define io_remap_page_range remap_page_range /* XXX is this right? */
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+
+# endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h
new file mode 100644
index 000000000..c4ecb0e9a
--- /dev/null
+++ b/include/asm-ia64/poll.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_POLL_H
+#define _ASM_IA64_POLL_H
+
+/*
+ * poll(2) bit definitions. Chosen to be compatible with Linux/x86.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM 0x0100
+#define POLLWRBAND 0x0200
+#define POLLMSG 0x0400
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif /* _ASM_IA64_POLL_H */
diff --git a/include/asm-ia64/posix_types.h b/include/asm-ia64/posix_types.h
new file mode 100644
index 000000000..b3fcdd8cd
--- /dev/null
+++ b/include/asm-ia64/posix_types.h
@@ -0,0 +1,121 @@
+#ifndef _ASM_IA64_POSIX_TYPES_H
+#define _ASM_IA64_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ *
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+typedef unsigned int __kernel_dev_t;
+typedef unsigned int __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef unsigned int __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef long long __kernel_loff_t;
+typedef int __kernel_pid_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+typedef __kernel_uid_t __kernel_old_uid_t;
+typedef __kernel_gid_t __kernel_old_gid_t;
+typedef __kernel_uid_t __kernel_uid32_t;
+typedef __kernel_gid_t __kernel_gid32_t;
+
+# ifdef __KERNEL__
+
+# ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
+#define __FD_ZERO(set) \
+ ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
+
+# else /* __GNUC__ */
+
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ return;
+
+ case 8:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ return;
+
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+# endif /* !__GNUC__ */
+# endif /* __KERNEL__ */
+#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
new file mode 100644
index 000000000..5a49bf2c0
--- /dev/null
+++ b/include/asm-ia64/processor.h
@@ -0,0 +1,786 @@
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ *
+ * 11/24/98 S.Eranian added ia64_set_iva()
+ * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
+ */
+
+#include <linux/config.h>
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+#define IA64_NUM_DBG_REGS 8
+
+/*
+ * TASK_SIZE really is misnamed. It really is the maximum user
+ * space address (plus one). On ia-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 8TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE 0xa000000000000000
+
+#ifdef CONFIG_IA32_SUPPORT
+# define TASK_UNMAPPED_BASE 0x40000000 /* XXX fix me! */
+#else
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE 0x2000000000000000
+#endif
+
+/*
+ * Bus types
+ */
+#define EISA_bus 0
+#define EISA_bus__is_a_macro /* for versions in ksyms.c */
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT 1
+#define IA64_PSR_UP_BIT 2
+#define IA64_PSR_AC_BIT 3
+#define IA64_PSR_MFL_BIT 4
+#define IA64_PSR_MFH_BIT 5
+#define IA64_PSR_IC_BIT 13
+#define IA64_PSR_I_BIT 14
+#define IA64_PSR_PK_BIT 15
+#define IA64_PSR_DT_BIT 17
+#define IA64_PSR_DFL_BIT 18
+#define IA64_PSR_DFH_BIT 19
+#define IA64_PSR_SP_BIT 20
+#define IA64_PSR_PP_BIT 21
+#define IA64_PSR_DI_BIT 22
+#define IA64_PSR_SI_BIT 23
+#define IA64_PSR_DB_BIT 24
+#define IA64_PSR_LP_BIT 25
+#define IA64_PSR_TB_BIT 26
+#define IA64_PSR_RT_BIT 27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_IS_BIT 34
+#define IA64_PSR_MC_BIT 35
+#define IA64_PSR_IT_BIT 36
+#define IA64_PSR_ID_BIT 37
+#define IA64_PSR_DA_BIT 38
+#define IA64_PSR_DD_BIT 39
+#define IA64_PSR_SS_BIT 40
+#define IA64_PSR_RI_BIT 41
+#define IA64_PSR_ED_BIT 43
+#define IA64_PSR_BN_BIT 44
+
+#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
+
+/* User mask bits: */
+#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
+#define IA64_DCR_BE_BIT 1 /* big-endian default */
+#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
+#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
+#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
+#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
+#define IA64_DCR_DR_BIT 12 /* defer access right faults */
+#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
+#define IA64_DCR_DD_BIT 14 /* defer debug faults */
+
+#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT 32 /* execute access */
+#define IA64_ISR_W_BIT 33 /* write access */
+#define IA64_ISR_R_BIT 34 /* read access */
+#define IA64_ISR_NA_BIT 35 /* non-access */
+#define IA64_ISR_SP_BIT 36 /* speculative load exception */
+#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
+
+#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
+#define IA64_KERNEL_DEATH (__IA64_UL(1) << 63) /* used for die_if_kernel() recursion detection */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/fpu.h>
+#include <asm/offsets.h>
+#include <asm/page.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+
+/* like above but expressed as bitfields for more efficient access: */
+struct ia64_psr {
+ __u64 reserved0 : 1;
+ __u64 be : 1;
+ __u64 up : 1;
+ __u64 ac : 1;
+ __u64 mfl : 1;
+ __u64 mfh : 1;
+ __u64 reserved1 : 7;
+ __u64 ic : 1;
+ __u64 i : 1;
+ __u64 pk : 1;
+ __u64 reserved2 : 1;
+ __u64 dt : 1;
+ __u64 dfl : 1;
+ __u64 dfh : 1;
+ __u64 sp : 1;
+ __u64 pp : 1;
+ __u64 di : 1;
+ __u64 si : 1;
+ __u64 db : 1;
+ __u64 lp : 1;
+ __u64 tb : 1;
+ __u64 rt : 1;
+ __u64 reserved3 : 4;
+ __u64 cpl : 2;
+ __u64 is : 1;
+ __u64 mc : 1;
+ __u64 it : 1;
+ __u64 id : 1;
+ __u64 da : 1;
+ __u64 dd : 1;
+ __u64 ss : 1;
+ __u64 ri : 2;
+ __u64 ed : 1;
+ __u64 bn : 1;
+ __u64 reserved4 : 19;
+};
+
+/*
+ * This shift should be large enough to be able to represent
+ * 1000000/itc_freq with good accuracy while being small enough to fit
+ * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
+ */
+#define IA64_USEC_PER_CYC_SHIFT 41
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state.
+ */
+struct cpuinfo_ia64 {
+ __u64 *pgd_quick;
+ __u64 *pmd_quick;
+ __u64 *pte_quick;
+ __u64 pgtable_cache_sz;
+ /* CPUID-derived information: */
+ __u64 ppn;
+ __u64 features;
+ __u8 number;
+ __u8 revision;
+ __u8 model;
+ __u8 family;
+ __u8 archrev;
+ char vendor[16];
+ __u64 itc_freq; /* frequency of ITC counter */
+ __u64 proc_freq; /* frequency of processor */
+ __u64 cyc_per_usec; /* itc_freq/1000000 */
+ __u64 usec_per_cyc; /* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
+#ifdef CONFIG_SMP
+ __u64 loops_per_sec;
+ __u64 ipi_count;
+ __u64 prof_counter;
+ __u64 prof_multiplier;
+#endif
+};
+
+#define my_cpu_data cpu_data[smp_processor_id()]
+
+#ifdef CONFIG_SMP
+# define loops_per_sec() my_cpu_data.loops_per_sec
+#else
+# define loops_per_sec() loops_per_sec
+#endif
+
+extern struct cpuinfo_ia64 cpu_data[NR_CPUS];
+
+extern void identify_cpu (struct cpuinfo_ia64 *);
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+ __u64 ksp; /* kernel stack pointer */
+ unsigned long flags; /* various flags */
+ struct ia64_fpreg fph[96]; /* saved/loaded on demand */
+ __u64 dbr[IA64_NUM_DBG_REGS];
+ __u64 ibr[IA64_NUM_DBG_REGS];
+#ifdef CONFIG_IA32_SUPPORT
+ __u64 fsr; /* IA32 floating pt status reg */
+ __u64 fcr; /* IA32 floating pt control reg */
+ __u64 fir; /* IA32 fp except. instr. reg */
+ __u64 fdr; /* IA32 fp except. data reg */
+# define INIT_THREAD_IA32 , 0, 0, 0, 0
+#else
+# define INIT_THREAD_IA32
+#endif /* CONFIG_IA32_SUPPORT */
+};
+
+#define INIT_MMAP { \
+ &init_mm, PAGE_OFFSET, PAGE_OFFSET + 0x10000000, NULL, PAGE_SHARED, \
+ VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL \
+}
+
+#define INIT_THREAD { \
+ 0, /* ksp */ \
+ 0, /* flags */ \
+ {{{{0}}}, }, /* fph */ \
+ {0, }, /* dbr */ \
+ {0, } /* ibr */ \
+ INIT_THREAD_IA32 \
+}
+
+#define start_thread(regs,new_ip,new_sp) do { \
+ set_fs(USER_DS); \
+ ia64_psr(regs)->cpl = 3; /* set user mode */ \
+ ia64_psr(regs)->ri = 0; /* clear return slot number */ \
+ regs->cr_iip = new_ip; \
+ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+ regs->ar_bspstore = IA64_RBS_BOT; \
+ regs->ar_rnat = 0; \
+ regs->loadrs = 0; \
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread (struct task_struct *);
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE 1: Only a kernel-only process (ie the swapper or direct
+ * descendants who haven't done an "execve()") should use this: it
+ * will work within a system call from a "real" process, but the
+ * process memory space will not be free'd until both the parent and
+ * the child have exited.
+ *
+ * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
+ * into trouble in init/main.c when the child thread returns to
+ * do_basic_setup() and the timing is such that free_initmem() has
+ * been called already.
+ */
+extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+
+/* Get wait channel for task P. */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK. */
+#define KSTK_EIP(tsk) \
+ ({ \
+ struct pt_regs *_regs = ia64_task_regs(tsk); \
+ _regs->cr_iip + ia64_psr(_regs)->ri; \
+ })
+
+/* Return stack pointer of blocked task TSK. */
+#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
+
+static inline struct task_struct *
+ia64_get_fpu_owner (void)
+{
+ struct task_struct *t;
+ __asm__ ("mov %0=ar.k5" : "=r"(t));
+ return t;
+}
+
+static inline void
+ia64_set_fpu_owner (struct task_struct *t)
+{
+ __asm__ __volatile__ ("mov ar.k5=%0" :: "r"(t));
+}
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+
+#define ia64_fph_enable() __asm__ __volatile__ (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
+#define ia64_fph_disable() __asm__ __volatile__ (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+ ia64_fph_enable();
+ __ia64_init_fpu();
+ ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_save_fpu(fph);
+ ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_load_fpu(fph);
+ ia64_fph_disable();
+}
+
+extern inline void
+ia64_fc (void *addr)
+{
+ __asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory");
+}
+
+extern inline void
+ia64_sync_i (void)
+{
+ __asm__ __volatile__ (";; sync.i" ::: "memory");
+}
+
+extern inline void
+ia64_srlz_i (void)
+{
+ __asm__ __volatile__ (";; srlz.i ;;" ::: "memory");
+}
+
+extern inline void
+ia64_srlz_d (void)
+{
+ __asm__ __volatile__ (";; srlz.d" ::: "memory");
+}
+
+extern inline void
+ia64_set_rr (__u64 reg_bits, __u64 rr_val)
+{
+ __asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
+}
+
+extern inline __u64
+ia64_get_dcr (void)
+{
+ __u64 r;
+ __asm__ ("mov %0=cr.dcr" : "=r"(r));
+ return r;
+}
+
+extern inline void
+ia64_set_dcr (__u64 val)
+{
+ __asm__ __volatile__ ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
+ ia64_srlz_d();
+}
+
+extern inline __u64
+ia64_get_lid (void)
+{
+ __u64 r;
+ __asm__ ("mov %0=cr.lid" : "=r"(r));
+ return r;
+}
+
+extern inline void
+ia64_invala (void)
+{
+ __asm__ __volatile__ ("invala" ::: "memory");
+}
+
+/*
+ * Save the processor status flags in FLAGS and then clear the
+ * interrupt collection and interrupt enable bits.
+ */
+#define ia64_clear_ic(flags) \
+ __asm__ __volatile__ ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" \
+ : "=r"(flags) :: "memory");
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+extern inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+ __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ __asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
+ __asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ if (target_mask & 0x1)
+ __asm__ __volatile__ ("itr.i itr[%0]=%1"
+ :: "r"(tr_num), "r"(pte) : "memory");
+ if (target_mask & 0x2)
+ __asm__ __volatile__ (";;itr.d dtr[%0]=%1"
+ :: "r"(tr_num), "r"(pte) : "memory");
+}
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+extern inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ __asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
+ __asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ /* as per EAS2.6, itc must be the last instruction in an instruction group */
+ if (target_mask & 0x1)
+ __asm__ __volatile__ ("itc.i %0;;" :: "r"(pte) : "memory");
+ if (target_mask & 0x2)
+ __asm__ __volatile__ (";;itc.d %0;;" :: "r"(pte) : "memory");
+}
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+extern inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+ if (target_mask & 0x1)
+ __asm__ __volatile__ ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+ if (target_mask & 0x2)
+ __asm__ __volatile__ ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+}
+
+/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
+extern inline void
+ia64_set_iva (void *ivt_addr)
+{
+ __asm__ __volatile__ ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
+}
+
+/* Set the page table address and control bits. */
+extern inline void
+ia64_set_pta (__u64 pta)
+{
+ /* Note: srlz.i implies srlz.d */
+ __asm__ __volatile__ ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
+}
+
+extern inline __u64
+ia64_get_cpuid (__u64 regnum)
+{
+ __u64 r;
+
+ __asm__ ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
+ return r;
+}
+
+extern inline void
+ia64_eoi (void)
+{
+ __asm__ ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
+}
+
+extern __inline__ void
+ia64_set_lrr0 (__u8 vector, __u8 masked)
+{
+ if (masked > 1)
+ masked = 1;
+
+ __asm__ __volatile__ ("mov cr.lrr0=%0;; srlz.d"
+ :: "r"((masked << 16) | vector) : "memory");
+}
+
+
+extern __inline__ void
+ia64_set_lrr1 (__u8 vector, __u8 masked)
+{
+ if (masked > 1)
+ masked = 1;
+
+ __asm__ __volatile__ ("mov cr.lrr1=%0;; srlz.d"
+ :: "r"((masked << 16) | vector) : "memory");
+}
+
+extern __inline__ void
+ia64_set_pmv (__u64 val)
+{
+ __asm__ __volatile__ ("mov cr.pmv=%0" :: "r"(val) : "memory");
+}
+
+extern __inline__ __u64
+ia64_get_pmc (__u64 regnum)
+{
+ __u64 retval;
+
+ __asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
+ return retval;
+}
+
+extern __inline__ void
+ia64_set_pmc (__u64 regnum, __u64 value)
+{
+ __asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
+}
+
+extern __inline__ __u64
+ia64_get_pmd (__u64 regnum)
+{
+ __u64 retval;
+
+ __asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
+ return retval;
+}
+
+extern __inline__ void
+ia64_set_pmd (__u64 regnum, __u64 value)
+{
+ __asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
+}
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+extern inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+ return ((__u64) spill_addr >> 3) & 0x3f;
+}
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR. UNAT is the mask to be updated.
+ */
+extern inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+ __u64 bit = ia64_unat_pos(spill_addr);
+ __u64 mask = 1UL << bit;
+
+ *unat = (*unat & ~mask) | (nat << bit);
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ * Note that the only way T can block is through a call to schedule() -> switch_to().
+ */
+extern inline unsigned long
+thread_saved_pc (struct thread_struct *t)
+{
+ struct ia64_frame_info info;
+ /* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */
+ struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);
+
+ ia64_unwind_init_from_blocked_task(&info, p);
+ if (ia64_unwind_to_previous_frame(&info) < 0)
+ return 0;
+ return ia64_unwind_get_ip(&info);
+}
+
+/*
+ * Get the current instruction/program counter value.
+ */
+#define current_text_addr() \
+ ({ void *_pc; __asm__ ("mov %0=ip" : "=r" (_pc)); _pc; })
+
+#define THREAD_SIZE IA64_STK_OFFSET
+/* NOTE: The task struct and the stacks are allocated together. */
+#define alloc_task_struct() \
+ ((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
+#define free_task_struct(p) free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
+#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+
+#define init_task (init_task_union.task)
+#define init_stack (init_task_union.stack)
+
+/*
+ * Set the correctable machine check vector register
+ */
+extern __inline__ void
+ia64_set_cmcv (__u64 val)
+{
+ __asm__ __volatile__ ("mov cr.cmcv=%0" :: "r"(val) : "memory");
+}
+
+/*
+ * Read the correctable machine check vector register
+ */
+extern __inline__ __u64
+ia64_get_cmcv (void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
+ return val;
+}
+
+extern inline __u64
+ia64_get_ivr (void)
+{
+ __u64 r;
+ __asm__ __volatile__ ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
+ return r;
+}
+
+extern inline void
+ia64_set_tpr (__u64 val)
+{
+ __asm__ __volatile__ ("mov cr.tpr=%0" :: "r"(val));
+}
+
+extern inline __u64
+ia64_get_tpr (void)
+{
+ __u64 r;
+ __asm__ ("mov %0=cr.tpr" : "=r"(r));
+ return r;
+}
+
+extern __inline__ void
+ia64_set_irr0 (__u64 val)
+{
+ __asm__ __volatile__("mov cr.irr0=%0;;" :: "r"(val) : "memory");
+ ia64_srlz_d();
+}
+
+extern __inline__ __u64
+ia64_get_irr0 (void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=cr.irr0" : "=r"(val));
+ return val;
+}
+
+extern __inline__ void
+ia64_set_irr1 (__u64 val)
+{
+ __asm__ __volatile__("mov cr.irr1=%0;;" :: "r"(val) : "memory");
+ ia64_srlz_d();
+}
+
+extern __inline__ __u64
+ia64_get_irr1 (void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=cr.irr1" : "=r"(val));
+ return val;
+}
+
+extern __inline__ void
+ia64_set_irr2 (__u64 val)
+{
+ __asm__ __volatile__("mov cr.irr2=%0;;" :: "r"(val) : "memory");
+ ia64_srlz_d();
+}
+
+extern __inline__ __u64
+ia64_get_irr2 (void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=cr.irr2" : "=r"(val));
+ return val;
+}
+
+extern __inline__ void
+ia64_set_irr3 (__u64 val)
+{
+ __asm__ __volatile__("mov cr.irr3=%0;;" :: "r"(val) : "memory");
+ ia64_srlz_d();
+}
+
+extern __inline__ __u64
+ia64_get_irr3 (void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=cr.irr3" : "=r"(val));
+ return val;
+}
+
+extern __inline__ __u64
+ia64_get_gp(void)
+{
+ __u64 val;
+
+ __asm__ ("mov %0=gp" : "=r"(val));
+ return val;
+}
+
+/* XXX remove the handcoded version once we have a sufficiently clever compiler... */
+#ifdef SMART_COMPILER
+# define ia64_rotr(w,n) \
+ ({ \
+ __u64 _w = (w), _n = (n); \
+ \
+ (_w >> _n) | (_w << (64 - _n)); \
+ })
+#else
+# define ia64_rotr(w,n) \
+ ({ \
+ __u64 result; \
+ asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n)); \
+ result; \
+ })
+#endif
+
+#define ia64_rotl(w,n) ia64_rotr((w),(64)-(n))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
new file mode 100644
index 000000000..b2d0cc906
--- /dev/null
+++ b/include/asm-ia64/ptrace.h
@@ -0,0 +1,240 @@
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 12/07/98 S. Eranian added pt_regs & switch_stack
+ * 12/21/98 D. Mosberger updated to match latest code
+ * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
+ *
+ */
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ * +----------------------+ ------- IA64_STK_OFFSET
+ * | | ^
+ * | struct pt_regs | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * | memory stack | |
+ * | (growing downwards) | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * +----------------------+ |
+ * | struct switch_stack | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * | register stack | |
+ * | (growing upwards) | |
+ * | | |
+ * +----------------------+ | --- IA64_RBS_OFFSET
+ * | | | ^
+ * | struct task_struct | | |
+ * current -> | | | |
+ * +----------------------+ -------
+ *
+ * Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+#include <linux/config.h>
+
+#include <asm/fpu.h>
+#include <asm/offsets.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define IA64_TASK_STRUCT_LOG_NUM_PAGES 3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define IA64_TASK_STRUCT_LOG_NUM_PAGES 2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define IA64_TASK_STRUCT_LOG_NUM_PAGES 1
+#else
+# define IA64_TASK_STRUCT_LOG_NUM_PAGES 0
+#endif
+
+#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + 15) & ~15)
+#define IA64_STK_OFFSET ((1 << IA64_TASK_STRUCT_LOG_NUM_PAGES)*PAGE_SIZE)
+
+#define INIT_TASK_SIZE IA64_STK_OFFSET
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating point registers because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+struct pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b6; /* scratch */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+ unsigned long r14; /* scratch */
+ unsigned long r15; /* scratch */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ /* The following registers are saved by SAVE_REST: */
+
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+
+ unsigned long ar_ccv; /* compare/exchange value */
+ unsigned long ar_fpsr; /* floating point status*/
+
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long b7; /* scratch */
+ /*
+ * Floating point registers that the kernel considers
+ * scratch:
+ */
+ struct ia64_fpreg f6; /* scratch*/
+ struct ia64_fpreg f7; /* scratch*/
+ struct ia64_fpreg f8; /* scratch*/
+ struct ia64_fpreg f9; /* scratch*/
+};
+
+/*
+ * This structure contains the additional registers that need to
+ * be preserved across a context switch. This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+ unsigned long caller_unat; /* user NaT collection register (preserved) */
+ unsigned long ar_fpsr; /* floating-point status register */
+
+ struct ia64_fpreg f2; /* preserved */
+ struct ia64_fpreg f3; /* preserved */
+ struct ia64_fpreg f4; /* preserved */
+ struct ia64_fpreg f5; /* preserved */
+
+ struct ia64_fpreg f10; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f11; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f12; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f13; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f14; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f15; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f16; /* preserved */
+ struct ia64_fpreg f17; /* preserved */
+ struct ia64_fpreg f18; /* preserved */
+ struct ia64_fpreg f19; /* preserved */
+ struct ia64_fpreg f20; /* preserved */
+ struct ia64_fpreg f21; /* preserved */
+ struct ia64_fpreg f22; /* preserved */
+ struct ia64_fpreg f23; /* preserved */
+ struct ia64_fpreg f24; /* preserved */
+ struct ia64_fpreg f25; /* preserved */
+ struct ia64_fpreg f26; /* preserved */
+ struct ia64_fpreg f27; /* preserved */
+ struct ia64_fpreg f28; /* preserved */
+ struct ia64_fpreg f29; /* preserved */
+ struct ia64_fpreg f30; /* preserved */
+ struct ia64_fpreg f31; /* preserved */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+
+ unsigned long b0; /* so we can force a direct return in copy_thread */
+ unsigned long b1;
+ unsigned long b2;
+ unsigned long b3;
+ unsigned long b4;
+ unsigned long b5;
+
+ unsigned long ar_pfs; /* previous function state */
+ unsigned long ar_lc; /* loop counter (preserved) */
+ unsigned long ar_unat; /* NaT bits for r4-r7 */
+ unsigned long ar_rnat; /* RSE NaT collection register */
+ unsigned long ar_bspstore; /* RSE dirty base (preserved) */
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+};
+
+#ifdef __KERNEL__
+ /* given a pointer to a task_struct, return the user's pt_regs */
+# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+
+ struct task_struct; /* forward decl */
+
+ extern void show_regs (struct pt_regs *);
+ extern long ia64_peek (struct pt_regs *, struct task_struct *, unsigned long addr, long *val);
+ extern long ia64_poke (struct pt_regs *, struct task_struct *, unsigned long addr, long val);
+
+ /* get nat bits for r1-r31 such that bit N==1 iff rN is a NaT */
+ extern long ia64_get_nat_bits (struct pt_regs *pt, struct switch_stack *sw);
+ /* put nat bits for r1-r31 such that rN is a NaT iff bit N==1 */
+ extern void ia64_put_nat_bits (struct pt_regs *pt, struct switch_stack *sw, unsigned long nat);
+
+ extern void ia64_increment_ip (struct pt_regs *pt);
+ extern void ia64_decrement_ip (struct pt_regs *pt);
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The number chosen here is somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
+
+#endif /* _ASM_IA64_PTRACE_H */
diff --git a/include/asm-ia64/ptrace_offsets.h b/include/asm-ia64/ptrace_offsets.h
new file mode 100644
index 000000000..5965e826d
--- /dev/null
+++ b/include/asm-ia64/ptrace_offsets.h
@@ -0,0 +1,216 @@
+#ifndef _ASM_IA64_PTRACE_OFFSETS_H
+#define _ASM_IA64_PTRACE_OFFSETS_H
+
+/*
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
+ * virtual structure that would have the following definition:
+ *
+ * struct uarea {
+ * struct ia64_fpreg fph[96]; // f32-f127
+ * struct switch_stack sw;
+ * struct pt_regs pt;
+ *	unsigned long rsvd1[712];	// pads to PT_DBR == 0x2000
+ *	unsigned long dbr[8];
+ *	unsigned long rsvd2[504];	// pads to PT_IBR == 0x3000
+ *	unsigned long ibr[8];
+ * }
+ */
+
+/* fph: */
+#define PT_F32 0x0000
+#define PT_F33 0x0010
+#define PT_F34 0x0020
+#define PT_F35 0x0030
+#define PT_F36 0x0040
+#define PT_F37 0x0050
+#define PT_F38 0x0060
+#define PT_F39 0x0070
+#define PT_F40 0x0080
+#define PT_F41 0x0090
+#define PT_F42 0x00a0
+#define PT_F43 0x00b0
+#define PT_F44 0x00c0
+#define PT_F45 0x00d0
+#define PT_F46 0x00e0
+#define PT_F47 0x00f0
+#define PT_F48 0x0100
+#define PT_F49 0x0110
+#define PT_F50 0x0120
+#define PT_F51 0x0130
+#define PT_F52 0x0140
+#define PT_F53 0x0150
+#define PT_F54 0x0160
+#define PT_F55 0x0170
+#define PT_F56 0x0180
+#define PT_F57 0x0190
+#define PT_F58 0x01a0
+#define PT_F59 0x01b0
+#define PT_F60 0x01c0
+#define PT_F61 0x01d0
+#define PT_F62 0x01e0
+#define PT_F63 0x01f0
+#define PT_F64 0x0200
+#define PT_F65 0x0210
+#define PT_F66 0x0220
+#define PT_F67 0x0230
+#define PT_F68 0x0240
+#define PT_F69 0x0250
+#define PT_F70 0x0260
+#define PT_F71 0x0270
+#define PT_F72 0x0280
+#define PT_F73 0x0290
+#define PT_F74 0x02a0
+#define PT_F75 0x02b0
+#define PT_F76 0x02c0
+#define PT_F77 0x02d0
+#define PT_F78 0x02e0
+#define PT_F79 0x02f0
+#define PT_F80 0x0300
+#define PT_F81 0x0310
+#define PT_F82 0x0320
+#define PT_F83 0x0330
+#define PT_F84 0x0340
+#define PT_F85 0x0350
+#define PT_F86 0x0360
+#define PT_F87 0x0370
+#define PT_F88 0x0380
+#define PT_F89 0x0390
+#define PT_F90 0x03a0
+#define PT_F91 0x03b0
+#define PT_F92 0x03c0
+#define PT_F93 0x03d0
+#define PT_F94 0x03e0
+#define PT_F95 0x03f0
+#define PT_F96 0x0400
+#define PT_F97 0x0410
+#define PT_F98 0x0420
+#define PT_F99 0x0430
+#define PT_F100 0x0440
+#define PT_F101 0x0450
+#define PT_F102 0x0460
+#define PT_F103 0x0470
+#define PT_F104 0x0480
+#define PT_F105 0x0490
+#define PT_F106 0x04a0
+#define PT_F107 0x04b0
+#define PT_F108 0x04c0
+#define PT_F109 0x04d0
+#define PT_F110 0x04e0
+#define PT_F111 0x04f0
+#define PT_F112 0x0500
+#define PT_F113 0x0510
+#define PT_F114 0x0520
+#define PT_F115 0x0530
+#define PT_F116 0x0540
+#define PT_F117 0x0550
+#define PT_F118 0x0560
+#define PT_F119 0x0570
+#define PT_F120 0x0580
+#define PT_F121 0x0590
+#define PT_F122 0x05a0
+#define PT_F123 0x05b0
+#define PT_F124 0x05c0
+#define PT_F125 0x05d0
+#define PT_F126 0x05e0
+#define PT_F127 0x05f0
+/* switch stack: */
+#define PT_CALLER_UNAT 0x0600
+#define PT_KERNEL_FPSR 0x0608
+#define PT_F2 0x0610
+#define PT_F3 0x0620
+#define PT_F4 0x0630
+#define PT_F5 0x0640
+#define PT_F10 0x0650
+#define PT_F11 0x0660
+#define PT_F12 0x0670
+#define PT_F13 0x0680
+#define PT_F14 0x0690
+#define PT_F15 0x06a0
+#define PT_F16 0x06b0
+#define PT_F17 0x06c0
+#define PT_F18 0x06d0
+#define PT_F19 0x06e0
+#define PT_F20 0x06f0
+#define PT_F21 0x0700
+#define PT_F22 0x0710
+#define PT_F23 0x0720
+#define PT_F24 0x0730
+#define PT_F25 0x0740
+#define PT_F26 0x0750
+#define PT_F27 0x0760
+#define PT_F28 0x0770
+#define PT_F29 0x0780
+#define PT_F30 0x0790
+#define PT_F31 0x07a0
+#define PT_R4 0x07b0
+#define PT_R5 0x07b8
+#define PT_R6 0x07c0
+#define PT_R7 0x07c8
+#define PT_K_B0 0x07d0
+#define PT_B1 0x07d8
+#define PT_B2 0x07e0
+#define PT_B3 0x07e8
+#define PT_B4 0x07f0
+#define PT_B5 0x07f8
+#define PT_K_AR_PFS 0x0800
+#define PT_AR_LC 0x0808
+#define PT_K_AR_UNAT 0x0810
+#define PT_K_AR_RNAT 0x0818
+#define PT_K_AR_BSPSTORE 0x0820
+#define PT_K_PR 0x0828
+/* pt_regs */
+#define PT_CR_IPSR 0x0830
+#define PT_CR_IIP 0x0838
+#define PT_CR_IFS 0x0840
+#define PT_AR_UNAT 0x0848
+#define PT_AR_PFS		0x0850
+#define PT_AR_RSC		0x0858
+#define PT_AR_RNAT		0x0860
+#define PT_AR_BSPSTORE		0x0868
+#define PT_PR 0x0870
+#define PT_B6 0x0878
+#define PT_AR_BSP 0x0880
+#define PT_R1 0x0888
+#define PT_R2 0x0890
+#define PT_R3 0x0898
+#define PT_R12 0x08a0
+#define PT_R13 0x08a8
+#define PT_R14 0x08b0
+#define PT_R15 0x08b8
+#define PT_R8 0x08c0
+#define PT_R9 0x08c8
+#define PT_R10 0x08d0
+#define PT_R11 0x08d8
+#define PT_R16 0x08e0
+#define PT_R17 0x08e8
+#define PT_R18 0x08f0
+#define PT_R19 0x08f8
+#define PT_R20 0x0900
+#define PT_R21 0x0908
+#define PT_R22 0x0910
+#define PT_R23 0x0918
+#define PT_R24 0x0920
+#define PT_R25 0x0928
+#define PT_R26 0x0930
+#define PT_R27 0x0938
+#define PT_R28 0x0940
+#define PT_R29 0x0948
+#define PT_R30 0x0950
+#define PT_R31 0x0958
+#define PT_AR_CCV 0x0960
+#define PT_AR_FPSR 0x0968
+#define PT_B0 0x0970
+#define PT_B7 0x0978
+#define PT_F6 0x0980
+#define PT_F7 0x0990
+#define PT_F8 0x09a0
+#define PT_F9 0x09b0
+
+#define PT_DBR 0x2000 /* data breakpoint registers */
+#define PT_IBR 0x3000 /* instruction breakpoint registers */
+
+#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h
new file mode 100644
index 000000000..a8d75ecdd
--- /dev/null
+++ b/include/asm-ia64/resource.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_IA64_RESOURCE_H
+#define _ASM_IA64_RESOURCE_H
+
+/*
+ * Resource limits
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit */
+
+#define RLIM_NLIMITS 10
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
+# ifdef __KERNEL__
+
+#define INIT_RLIMITS \
+{ \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { _STK_LIM, RLIM_INFINITY }, \
+ { 0, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { 0, 0 }, \
+ { INR_OPEN, INR_OPEN }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+# endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/include/asm-ia64/rse.h b/include/asm-ia64/rse.h
new file mode 100644
index 000000000..b65b28421
--- /dev/null
+++ b/include/asm-ia64/rse.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_RSE_H
+#define _ASM_IA64_RSE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Register stack engine related helper functions. This file may be
+ * used in applications, so be careful about the name-space and give
+ * some consideration to non-GNU C compilers (though __inline__ is
+ * fine).
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long *addr)
+{
+ return (((unsigned long) addr) >> 3) & 0x3f;
+}
+
+/*
+ * Return TRUE if ADDR is the address of an RNAT slot.
+ */
+static __inline__ unsigned long
+ia64_rse_is_rnat_slot (unsigned long *addr)
+{
+ return ia64_rse_slot_num(addr) == 0x3f;
+}
+
+/*
+ * Returns the address of the RNAT slot that covers the slot at
+ * address SLOT_ADDR.
+ */
+static __inline__ unsigned long *
+ia64_rse_rnat_addr (unsigned long *slot_addr)
+{
+ return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
+}
+
+/*
+ * Calculate the number of registers in the dirty partition starting at
+ * BSPSTORE with a size of DIRTY bytes. This isn't simply DIRTY
+ * divided by eight because the 64th slot is used to store ar.rnat.
+ */
+static __inline__ unsigned long
+ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
+{
+ unsigned long slots = (bsp - bspstore);
+
+ return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
+}
+
+/*
+ * The inverse of the above: given bspstore and the number of
+ * registers, calculate ar.bsp.
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+
+ if (num_regs < 0)
+ delta -= 0x3e;
+ return addr + num_regs + delta/0x3f;
+}
+
+#endif /* _ASM_IA64_RSE_H */
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
new file mode 100644
index 000000000..e26091a94
--- /dev/null
+++ b/include/asm-ia64/sal.h
@@ -0,0 +1,486 @@
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ *
+ * 99/09/29 davidm Updated for SAL 2.6.
+ */
+
+#include <linux/config.h>
+
+#include <asm/pal.h>
+#include <asm/system.h>
+
+extern spinlock_t sal_lock;
+
+#ifdef __GCC_MULTIREG_RETVALS__
+ /* If multi-register return values are returned according to the
+ ia-64 calling convention, we can call ia64_sal directly. */
+# define __SAL_CALL(result,args...) result = (*ia64_sal)(args)
+#else
+ /* If multi-register return values are returned through an aggregate
+ allocated in the caller, we need to use the stub implemented in
+ sal-stub.S. */
+ extern struct ia64_sal_retval ia64_sal_stub (u64 index, ...);
+# define __SAL_CALL(result,args...) result = ia64_sal_stub(args)
+#endif
+
+#ifdef CONFIG_SMP
+# define SAL_CALL(result,args...) do { \
+ spin_lock(&sal_lock); \
+ __SAL_CALL(result,args); \
+ spin_unlock(&sal_lock); \
+} while (0)
+#else
+# define SAL_CALL(result,args...) __SAL_CALL(result,args)
+#endif
+
+#define SAL_SET_VECTORS 0x01000000
+#define SAL_GET_STATE_INFO 0x01000001
+#define SAL_GET_STATE_INFO_SIZE 0x01000002
+#define SAL_CLEAR_STATE_INFO 0x01000003
+#define SAL_MC_RENDEZ 0x01000004
+#define SAL_MC_SET_PARAMS 0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
+
+#define SAL_CACHE_FLUSH 0x01000008
+#define SAL_CACHE_INIT 0x01000009
+#define SAL_PCI_CONFIG_READ 0x01000010
+#define SAL_PCI_CONFIG_WRITE 0x01000011
+#define SAL_FREQ_BASE 0x01000012
+
+#define SAL_UPDATE_PAL 0x01000020
+
+struct ia64_sal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates reason of call failure.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ s64 status;
+ u64 v0;
+ u64 v1;
+ u64 v2;
+};
+
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+ SAL_FREQ_BASE_PLATFORM = 0,
+ SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+ SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors. The structure of these descriptors follows
+ * below.
+ */
+struct ia64_sal_systab {
+ char signature[4]; /* should be "SST_" */
+ int size; /* size of this table in bytes */
+ unsigned char sal_rev_minor;
+ unsigned char sal_rev_major;
+ unsigned short entry_count; /* # of entries in variable portion */
+ unsigned char checksum;
+ char ia32_bios_present;
+ unsigned short reserved1;
+ char oem_id[32]; /* ASCII NUL terminated OEM id
+ (terminating NUL is missing if
+ string is exactly 32 bytes long). */
+ char product_id[32]; /* ASCII product id */
+ char reserved2[16];
+};
+
+enum SAL_Systab_Entry_Type {
+ SAL_DESC_ENTRY_POINT = 0,
+ SAL_DESC_MEMORY = 1,
+ SAL_DESC_PLATFORM_FEATURE = 2,
+ SAL_DESC_TR = 3,
+ SAL_DESC_PTC = 4,
+ SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type: Size:
+ * 0 48
+ * 1 32
+ * 2 16
+ * 3 32
+ * 4 16
+ * 5 16
+ */
+#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]
+
+struct ia64_sal_desc_entry_point {
+ char type;
+ char reserved1[7];
+ s64 pal_proc;
+ s64 sal_proc;
+ s64 gp;
+ char reserved2[16];
+};
+
+struct ia64_sal_desc_memory {
+ char type;
+ char used_by_sal; /* needs to be mapped for SAL? */
+ char mem_attr; /* current memory attribute setting */
+ char access_rights; /* access rights set up by SAL */
+ char mem_attr_mask; /* mask of supported memory attributes */
+ char reserved1;
+ char mem_type; /* memory type */
+ char mem_usage; /* memory usage */
+ s64 addr; /* physical address of memory */
+ unsigned int length; /* length (multiple of 4KB pages) */
+ unsigned int reserved2;
+ char oem_reserved[8];
+};
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1 << 0)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1 << 1)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1 << 2)
+
+struct ia64_sal_desc_platform_feature {
+ char type;
+ unsigned char feature_mask;
+ char reserved1[14];
+};
+
+struct ia64_sal_desc_tr {
+ char type;
+ char tr_type; /* 0 == instruction, 1 == data */
+ char regnum; /* translation register number */
+ char reserved1[5];
+ s64 addr; /* virtual address of area covered */
+ s64 page_size; /* encoded page size */
+ char reserved2[8];
+};
+
+struct ia64_sal_desc_ptc {
+ char type;
+ char reserved1[3];
+ unsigned int num_domains; /* # of coherence domains */
+ long domain_info; /* physical address of domain info table */
+};
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+struct ia64_sal_desc_ap_wakeup {
+ char type;
+ char mechanism; /* 0 == external interrupt */
+ char reserved1[6];
+ long vector; /* interrupt vector in range 0x10-0xff */
+};
+
+extern ia64_sal_handler ia64_sal;
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+ SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
+ SAL_INFO_TYPE_INIT = 1, /* Init information */
+ SAL_INFO_TYPE_CMC = 2 /* Corrected machine check information */
+};
+
+/* Sub information type encodings */
+enum {
+ SAL_SUB_INFO_TYPE_PROCESSOR = 0, /* Processor information */
+ SAL_SUB_INFO_TYPE_PLATFORM = 1 /* Platform information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+ SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezevous interrupt */
+ SAL_MC_PARAM_RENDEZ_WAKEUP = 2 /* Wakeup */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+ SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
+ SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+ SAL_VECTOR_OS_MCA = 0,
+ SAL_VECTOR_OS_INIT = 1,
+ SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Definition of the SAL Error Log from the SAL spec */
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+
+typedef struct sal_log_timestamp_s {
+ u8 slh_century; /* Century (19, 20, 21, ...) */
+ u8 slh_year; /* Year (00..99) */
+ u8 slh_month; /* Month (1..12) */
+ u8 slh_day; /* Day (1..31) */
+ u8 slh_reserved;
+ u8 slh_hour; /* Hour (0..23) */
+ u8 slh_minute; /* Minute (0..59) */
+ u8 slh_second; /* Second (0..59) */
+} sal_log_timestamp_t;
+
+
+#define MAX_CACHE_ERRORS 6
+#define MAX_TLB_ERRORS 6
+#define MAX_BUS_ERRORS 1
+
+typedef struct sal_log_processor_info_s {
+ struct {
+ u64 slpi_psi : 1,
+ slpi_cache_check: MAX_CACHE_ERRORS,
+ slpi_tlb_check : MAX_TLB_ERRORS,
+ slpi_bus_check : MAX_BUS_ERRORS,
+ slpi_reserved2 : (31 - (MAX_TLB_ERRORS + MAX_CACHE_ERRORS
+ + MAX_BUS_ERRORS)),
+ slpi_minstate : 1,
+ slpi_bank1_gr : 1,
+ slpi_br : 1,
+ slpi_cr : 1,
+ slpi_ar : 1,
+ slpi_rr : 1,
+ slpi_fr : 1,
+ slpi_reserved1 : 25;
+ } slpi_valid;
+
+ pal_processor_state_info_t slpi_processor_state_info;
+
+ struct {
+ pal_cache_check_info_t slpi_cache_check;
+ u64 slpi_target_address;
+ } slpi_cache_check_info[MAX_CACHE_ERRORS];
+
+ pal_tlb_check_info_t slpi_tlb_check_info[MAX_TLB_ERRORS];
+
+ struct {
+ pal_bus_check_info_t slpi_bus_check;
+ u64 slpi_requestor_addr;
+ u64 slpi_responder_addr;
+ u64 slpi_target_addr;
+ } slpi_bus_check_info[MAX_BUS_ERRORS];
+
+ pal_min_state_area_t slpi_min_state_area;
+ u64 slpi_bank1_gr[16];
+ u64 slpi_bank1_nat_bits;
+ u64 slpi_br[8];
+ u64 slpi_cr[128];
+ u64 slpi_ar[128];
+ u64 slpi_rr[8];
+ u64 slpi_fr[128];
+} sal_log_processor_info_t;
+
+#define sal_log_processor_info_psi_valid		slpi_valid.slpi_psi
+#define sal_log_processor_info_cache_check_valid	slpi_valid.slpi_cache_check
+#define sal_log_processor_info_tlb_check_valid		slpi_valid.slpi_tlb_check
+#define sal_log_processor_info_bus_check_valid		slpi_valid.slpi_bus_check
+#define sal_log_processor_info_minstate_valid		slpi_valid.slpi_minstate
+#define sal_log_processor_info_bank1_gr_valid slpi_valid.slpi_bank1_gr
+#define sal_log_processor_info_br_valid slpi_valid.slpi_br
+#define sal_log_processor_info_cr_valid slpi_valid.slpi_cr
+#define sal_log_processor_info_ar_valid slpi_valid.slpi_ar
+#define sal_log_processor_info_rr_valid slpi_valid.slpi_rr
+#define sal_log_processor_info_fr_valid slpi_valid.slpi_fr
+
+typedef struct sal_log_header_s {
+ u64 slh_next_log; /* Offset of the next log from the
+ * beginning of this structure.
+ */
+ uint slh_log_len; /* Length of this error log in bytes */
+ ushort slh_log_type; /* Type of log (0 - cpu ,1 - platform) */
+ ushort slh_log_sub_type; /* SGI specific sub type */
+ sal_log_timestamp_t slh_log_timestamp; /* Timestamp */
+ u64 slh_log_dev_spec_info; /* For processor log this field will
+ * contain an area architected for all
+ * IA-64 processors. For platform log
+ * this field will contain information
+ * specific to the hardware
+ * implementation.
+ */
+} sal_log_header_t;
+
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+extern inline long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info)
+{
+ struct ia64_sal_retval isrv;
+
+ SAL_CALL(isrv, SAL_FREQ_BASE, which);
+ *ticks_per_second = isrv.v0;
+ *drift_info = isrv.v1;
+ return isrv.status;
+}
+
+/* Flush all the processor and platform level instruction and/or data caches */
+extern inline s64
+ia64_sal_cache_flush (u64 cache_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type);
+ return isrv.status;
+}
+
+
+
+/* Initialize all the processor and platform level instruction and data caches */
+extern inline s64
+ia64_sal_cache_init (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_CACHE_INIT);
+ return isrv.status;
+}
+
+/* Clear the processor and platform information logged by SAL with respect to the
+ * machine state at the time of MCA's, INITs or CMCs
+ */
+extern inline s64
+ia64_sal_clear_state_info (u64 sal_info_type, u64 sal_info_sub_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, sal_info_sub_type);
+ return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs or CMCs.
+ */
+extern inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 sal_info_sub_type, u64 *sal_info)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_GET_STATE_INFO, sal_info_type, sal_info_sub_type, sal_info);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
+/* Get the maximum size of the information logged by SAL with respect to the machine
+ * state at the time of MCAs, INITs or CMCs
+ */
+extern inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type, u64 sal_info_sub_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, sal_info_sub_type);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
+
+/* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup
+ * from the monarch processor.
+ */
+extern inline s64
+ia64_sal_mc_rendez (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_MC_RENDEZ);
+ return isrv.status;
+}
+
+/* Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ */
+extern inline s64
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, timeout);
+ return isrv.status;
+}
+
+/* Read from PCI configuration space */
+extern inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size);
+ if (value)
+ *value = isrv.v0;
+ return isrv.status;
+}
+
+/* Write to PCI configuration space */
+extern inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value)
+{
+ struct ia64_sal_retval isrv;
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED)
+ extern spinlock_t ivr_read_lock;
+ unsigned long flags;
+
+ /*
+ * Avoid PCI configuration read/write overwrite -- A0 Interrupt loss workaround
+ */
+ spin_lock_irqsave(&ivr_read_lock, flags);
+#endif
+ SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value);
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED)
+ spin_unlock_irqrestore(&ivr_read_lock, flags);
+#endif
+ return isrv.status;
+}
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL
+ * procedures are invoked in virtual mode.
+ */
+extern inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr);
+ return isrv.status;
+}
+
+/* Register software dependent code locations within SAL. These locations are handlers
+ * or entry points where SAL will pass control for the specified event. These event
+ * handlers are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+extern inline s64
+ia64_sal_set_vectors (u64 vector_type,
+ u64 handler_addr1, u64 gp1, u64 handler_len1,
+ u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+ handler_addr1, gp1, handler_len1,
+ handler_addr2, gp2, handler_len2);
+
+ return isrv.status;
+}
+/* Update the contents of PAL block in the non-volatile storage device */
+extern inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+ u64 *error_code, u64 *scratch_buf_size_needed)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size);
+ if (error_code)
+ *error_code = isrv.v0;
+ if (scratch_buf_size_needed)
+ *scratch_buf_size_needed = isrv.v1;
+ return isrv.status;
+}
+
+#endif /* _ASM_IA64_SAL_H */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
new file mode 100644
index 000000000..5a119b6c8
--- /dev/null
+++ b/include/asm-ia64/scatterlist.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SCATTERLIST_H
+#define _ASM_IA64_SCATTERLIST_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+struct scatterlist {
+ char *address; /* location data is to be transferred to */
+ /*
+ * Location of actual buffer if ADDRESS points to a DMA
+ * indirection buffer, NULL otherwise:
+ */
+ char *alt_address;
+ unsigned int length; /* buffer length */
+};
+
+#define ISA_DMA_THRESHOLD (~0UL)
+
+#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-ia64/segment.h b/include/asm-ia64/segment.h
new file mode 100644
index 000000000..63118c84b
--- /dev/null
+++ b/include/asm-ia64/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_SEGMENT_H
+#define _ASM_IA64_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
new file mode 100644
index 000000000..2c75056c5
--- /dev/null
+++ b/include/asm-ia64/semaphore.h
@@ -0,0 +1,330 @@
+#ifndef _ASM_IA64_SEMAPHORE_H
+#define _ASM_IA64_SEMAPHORE_H
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+
+struct semaphore {
+ atomic_t count;
+ int sleepers;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic; /* initialized by __SEM_DEBUG_INIT() */
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) , (long) &(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+{ \
+ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) \
+}
+
+#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
+
+extern inline void
+sema_init (struct semaphore *sem, int val)
+{
+ *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+}
+
+static inline void
+init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void
+init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+extern void __down (struct semaphore * sem);
+extern int __down_interruptible (struct semaphore * sem);
+extern int __down_trylock (struct semaphore * sem);
+extern void __up (struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
+/*
+ * Atomically decrement the semaphore's count. If it goes negative,
+ * block the calling thread in the TASK_UNINTERRUPTIBLE state.
+ */
+extern inline void
+down (struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_dec_return(&sem->count) < 0)
+ __down(sem);
+}
+
+/*
+ * Atomically decrement the semaphore's count. If it goes negative,
+ * block the calling thread in the TASK_INTERRUPTIBLE state.
+ */
+extern inline int
+down_interruptible (struct semaphore * sem)
+{
+ int ret = 0;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+extern inline int
+down_trylock (struct semaphore *sem)
+{
+ int ret = 0;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_trylock(sem);
+ return ret;
+}
+
+extern inline void
+up (struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_inc_return(&sem->count) <= 0)
+ __up(sem);
+}
+
+/*
+ * rw mutexes (should that be mutices? =) -- throw rw spinlocks and
+ * semaphores together, and this is what we end up with...
+ *
+ * The lock is initialized to BIAS. This way, a writer subtracts BIAS
+ * ands gets 0 for the case of an uncontended lock. Readers decrement
+ * by 1 and see a positive value when uncontended, negative if there
+ * are writers waiting (in which case it goes to sleep). BIAS must be
+ * chosen such that subtracting BIAS once per CPU will result either
+ * in zero (uncontended case) or in a negative value (contention
+ * case). On the other hand, BIAS must be at least as big as the
+ * number of processes in the system.
+ *
+ * On IA-64, we use a BIAS value of 0x100000000, which supports up to
+ * 2 billion (2^31) processors and 4 billion processes.
+ *
+ * In terms of fairness, when there is heavy use of the lock, we want
+ * to see the lock being passed back and forth between readers and
+ * writers (like in a producer/consumer style of communication).
+ *
+
+ For
+ * liveness, it would be necessary to process the blocked readers and
+ * writers in FIFO order. However, we don't do this (yet). I suppose
+ * if you have a lock that is _that_ heavily contested, you're in big
+ * trouble anyhow.
+ *
+ * -ben (with clarifications & IA-64 comments by davidm)
+ */
+#define RW_LOCK_BIAS 0x100000000ul
+
+struct rw_semaphore {
+ volatile long count;
+ volatile __u8 write_bias_granted;
+ volatile __u8 read_bias_granted;
+ __u16 pad1;
+ __u32 pad2;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+# define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+# define __RWSEM_DEBUG_INIT
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ \
+ (count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT \
+}
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS - 1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0)
+
+extern void __down_read_failed (struct rw_semaphore *sem, long count);
+extern void __down_write_failed (struct rw_semaphore *sem, long count);
+extern void __rwsem_wake (struct rw_semaphore *sem, long count);
+
+extern inline void
+init_rwsem (struct rw_semaphore *sem)
+{
+ sem->count = RW_LOCK_BIAS;
+ sem->read_bias_granted = 0;
+ sem->write_bias_granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+extern inline void
+down_read (struct rw_semaphore *sem)
+{
+ long count;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ count = ia64_fetch_and_add(-1, &sem->count);
+ if (count < 0)
+ __down_read_failed(sem, count);
+
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void
+down_write (struct rw_semaphore *sem)
+{
+ long old_count, new_count;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ do {
+ old_count = sem->count;
+ new_count = old_count - RW_LOCK_BIAS;
+ } while (cmpxchg(&sem->count, old_count, new_count) != old_count);
+
+ if (new_count != 0)
+ __down_write_failed(sem, new_count);
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+/*
+ * When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void
+__up_read (struct rw_semaphore *sem)
+{
+ long count;
+
+ count = ia64_fetch_and_add(1, &sem->count);
+ if (count == 0)
+ /*
+ * Other processes are blocked already; resolve
+ * contention by letting either a writer or a reader
+ * proceed...
+ */
+ __rwsem_wake(sem, count);
+}
+
+/*
+ * Releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void
+__up_write (struct rw_semaphore *sem)
+{
+ long old_count, new_count;
+
+ do {
+ old_count = sem->count;
+ new_count = old_count + RW_LOCK_BIAS;
+ } while (cmpxchg(&sem->count, old_count, new_count) != old_count);
+
+ /*
+ * Note: new_count <u RW_LOCK_BIAS <=> old_count < 0 && new_count >= 0.
+ * (where <u is "unsigned less-than").
+ */
+ if ((unsigned long) new_count < RW_LOCK_BIAS)
+ /* someone is blocked already, resolve contention... */
+ __rwsem_wake(sem, new_count);
+}
+
+extern inline void
+up_read (struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+ __up_read(sem);
+}
+
+extern inline void
+up_write (struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+ __up_write(sem);
+}
+
+#endif /* _ASM_IA64_SEMAPHORE_H */
diff --git a/include/asm-ia64/sembuf.h b/include/asm-ia64/sembuf.h
new file mode 100644
index 000000000..1f5ea49b4
--- /dev/null
+++ b/include/asm-ia64/sembuf.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_SEMBUF_H
+#define _ASM_IA64_SEMBUF_H
+
+/*
+ * The semid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_SEMBUF_H */
diff --git a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h
new file mode 100644
index 000000000..5f8042925
--- /dev/null
+++ b/include/asm-ia64/serial.h
@@ -0,0 +1,135 @@
+/*
+ * include/asm-ia64/serial.h
+ *
+ * Derived from the i386 version.
+ */
+
+#include <linux/config.h>
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#define CONFIG_SERIAL_DETECT_IRQ /* on IA-64, we always want to autodetect irqs */
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#else
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#endif
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define ACCENT_FLAGS 0
+#define BOCA_FLAGS 0
+#define HUB6_FLAGS 0
+#define RS_TABLE_SIZE 64
+#else
+#define RS_TABLE_SIZE
+#endif
+
+/*
+ * The following define the access methods for the HUB6 card. All
+ * access is through two ports for all 24 possible chips. The card is
+ * selected through the high 2 bits, the port on that card with the
+ * "middle" 3 bits, and the register on that port with the bottom
+ * 3 bits.
+ *
+ * While the access port and interrupt is configurable, the default
+ * port locations are 0x302 for the port control register, and 0x303
+ * for the data read/write register. Normally, the interrupt is at irq3
+ * but can be anything from 3 to 7 inclusive. Note that using 3 will
+ * require disabling com2.
+ */
+
+#define C_P(card,port) (((card)<<6|(port)<<3) + 1)
+
+#define STD_SERIAL_PORT_DEFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
+ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
+ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
+ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define EXTRA_SERIAL_PORT_DEFNS \
+ { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
+ { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
+ { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
+ { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \
+ { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \
+ { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \
+ { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \
+ { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
+ { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
+ { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
+ { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
+ { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
+ { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
+ { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \
+ { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \
+ { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \
+ { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \
+ { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \
+ { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \
+ { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \
+ { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \
+ { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \
+ { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \
+ { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
+ { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
+ { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
+#else
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif
+
+/* You can have up to four HUB6's in the system, but I've only
+ * included two cards here for a total of twelve ports.
+ */
+#if (defined(CONFIG_HUB6) && defined(CONFIG_SERIAL_MANY_PORTS))
+#define HUB6_SERIAL_PORT_DFNS \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,0) }, /* ttyS32 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,1) }, /* ttyS33 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,2) }, /* ttyS34 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,3) }, /* ttyS35 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,4) }, /* ttyS36 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,5) }, /* ttyS37 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,0) }, /* ttyS38 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,1) }, /* ttyS39 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,2) }, /* ttyS40 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,3) }, /* ttyS41 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,4) }, /* ttyS42 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,5) }, /* ttyS43 */
+#else
+#define HUB6_SERIAL_PORT_DFNS
+#endif
+
+#ifdef CONFIG_MCA
+#define MCA_SERIAL_PORT_DFNS \
+ { 0, BASE_BAUD, 0x3220, 3, STD_COM_FLAGS }, \
+ { 0, BASE_BAUD, 0x3228, 3, STD_COM_FLAGS }, \
+ { 0, BASE_BAUD, 0x4220, 3, STD_COM_FLAGS }, \
+ { 0, BASE_BAUD, 0x4228, 3, STD_COM_FLAGS }, \
+ { 0, BASE_BAUD, 0x5220, 3, STD_COM_FLAGS }, \
+ { 0, BASE_BAUD, 0x5228, 3, STD_COM_FLAGS },
+#else
+#define MCA_SERIAL_PORT_DFNS
+#endif
+
+#define SERIAL_PORT_DFNS \
+ STD_SERIAL_PORT_DEFNS \
+ EXTRA_SERIAL_PORT_DEFNS \
+ HUB6_SERIAL_PORT_DFNS \
+ MCA_SERIAL_PORT_DFNS
+
diff --git a/include/asm-ia64/shmbuf.h b/include/asm-ia64/shmbuf.h
new file mode 100644
index 000000000..6def08993
--- /dev/null
+++ b/include/asm-ia64/shmbuf.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_SHMBUF_H
+#define _ASM_IA64_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_IA64_SHMBUF_H */
diff --git a/include/asm-ia64/shmparam.h b/include/asm-ia64/shmparam.h
new file mode 100644
index 000000000..5bbea62b5
--- /dev/null
+++ b/include/asm-ia64/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_SHMPARAM_H
+#define _ASM_IA64_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_IA64_SHMPARAM_H */
diff --git a/include/asm-ia64/sigcontext.h b/include/asm-ia64/sigcontext.h
new file mode 100644
index 000000000..357e444e5
--- /dev/null
+++ b/include/asm-ia64/sigcontext.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_IA64_SIGCONTEXT_H
+#define _ASM_IA64_SIGCONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/fpu.h>
+
+#define IA64_SC_FLAG_ONSTACK_BIT		0	/* is handler running on signal stack? */
+#define IA64_SC_FLAG_IN_SYSCALL_BIT	1	/* did signal interrupt a syscall? */
+#define IA64_SC_FLAG_FPH_VALID_BIT	2	/* is state in f[32]-f[127] valid? */
+
+#define IA64_SC_FLAG_ONSTACK		(1 << IA64_SC_FLAG_ONSTACK_BIT)
+#define IA64_SC_FLAG_IN_SYSCALL	(1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
+#define IA64_SC_FLAG_FPH_VALID		(1 << IA64_SC_FLAG_FPH_VALID_BIT)
+
+# ifndef __ASSEMBLY__
+
+struct sigcontext {
+ unsigned long sc_flags; /* see manifest constants above */
+ unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */
+ stack_t sc_stack; /* previously active stack */
+
+ unsigned long sc_ip; /* instruction pointer */
+ unsigned long sc_cfm; /* current frame marker */
+ unsigned long sc_um; /* user mask bits */
+ unsigned long sc_ar_rsc; /* register stack configuration register */
+ unsigned long sc_ar_bsp; /* backing store pointer */
+ unsigned long sc_ar_rnat; /* RSE NaT collection register */
+ unsigned long sc_ar_ccv; /* compare and exchange compare value register */
+ unsigned long sc_ar_unat; /* ar.unat of interrupted context */
+ unsigned long sc_ar_fpsr; /* floating-point status register */
+ unsigned long sc_ar_pfs; /* previous function state */
+ unsigned long sc_ar_lc; /* loop count register */
+ unsigned long sc_pr; /* predicate registers */
+ unsigned long sc_br[8]; /* branch registers */
+ unsigned long sc_gr[32]; /* general registers (static partition) */
+ struct ia64_fpreg sc_fr[128]; /* floating-point registers */
+
+ /*
+ * The mask must come last so we can increase _NSIG_WORDS
+ * without breaking binary compatibility.
+ */
+ sigset_t sc_mask; /* signal mask to restore after handler returns */
+};
+
+# endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGCONTEXT_H */
diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h
new file mode 100644
index 000000000..0559f5f8b
--- /dev/null
+++ b/include/asm-ia64/siginfo.h
@@ -0,0 +1,202 @@
+#ifndef _ASM_IA64_SIGINFO_H
+#define _ASM_IA64_SIGINFO_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/types.h>
+
+typedef union sigval {
+ int sival_int;
+ void *sival_ptr;
+} sigval_t;
+
+#define SI_MAX_SIZE 128
+#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ unsigned int _timer1;
+ unsigned int _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void *_addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+
+/*
+ * si_code values
+ * Positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER -2 /* sent by timer expiration */
+#define SI_MESGQ -3 /* sent by real time mesq state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+
+#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
+#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC 1 /* illegal opcode */
+#define ILL_ILLOPN 2 /* illegal operand */
+#define ILL_ILLADR 3 /* illegal addressing mode */
+#define ILL_ILLTRP 4 /* illegal trap */
+#define ILL_PRVOPC 5 /* privileged opcode */
+#define ILL_PRVREG 6 /* privileged register */
+#define ILL_COPROC 7 /* coprocessor error */
+#define ILL_BADSTK 8 /* internal stack error */
+#define ILL_BADIADDR 9 /* Unimplemented instruction address */
+#define NSIGILL 9
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV 1 /* integer divide by zero */
+#define FPE_INTOVF 2 /* integer overflow */
+#define FPE_FLTDIV 3 /* floating point divide by zero */
+#define FPE_FLTOVF 4 /* floating point overflow */
+#define FPE_FLTUND 5 /* floating point underflow */
+#define FPE_FLTRES 6 /* floating point inexact result */
+#define FPE_FLTINV 7 /* floating point invalid operation */
+#define FPE_FLTSUB 8 /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR 1 /* address not mapped to object */
+#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN 1 /* invalid address alignment */
+#define BUS_ADRERR	2	/* non-existent physical address */
+#define BUS_OBJERR 3 /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT 1 /* process breakpoint */
+#define TRAP_TRACE 2 /* process trace trap */
+#define TRAP_BRANCH 3 /* process taken branch trap */
+#define NSIGTRAP 3
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED 1 /* child has exited */
+#define CLD_KILLED 2 /* child was killed */
+#define CLD_DUMPED 3 /* child terminated abnormally */
+#define CLD_TRAPPED 4 /* traced child has trapped */
+#define CLD_STOPPED 5 /* child has stopped */
+#define CLD_CONTINUED 6 /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN 1 /* data input available */
+#define POLL_OUT 2 /* output buffers available */
+#define POLL_MSG 3 /* input message available */
+#define POLL_ERR 4 /* i/o error */
+#define POLL_PRI 5 /* high priority input available */
+#define POLL_HUP 6 /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+
+#define SIGEV_MAX_SIZE 64
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+
+#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
new file mode 100644
index 000000000..f6a01d640
--- /dev/null
+++ b/include/asm-ia64/signal.h
@@ -0,0 +1,162 @@
+#ifndef _ASM_IA64_SIGNAL_H
+#define _ASM_IA64_SIGNAL_H
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
+ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifdef __KERNEL__
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_SHIRQ 0x04000000
+#define SA_LEGACY 0x02000000 /* installed via a legacy irq? */
+
+#endif /* __KERNEL__ */
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+# ifndef __ASSEMBLY__
+
+# include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+# include <asm/sigcontext.h>
+
+#endif /* __KERNEL__ */
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGNAL_H */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
new file mode 100644
index 000000000..1ad1a8149
--- /dev/null
+++ b/include/asm-ia64/smp.h
@@ -0,0 +1,100 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+
+#include <asm/ptrace.h>
+#include <asm/spinlock.h>
+#include <asm/io.h>
+
+#define IPI_DEFAULT_BASE_ADDR 0xfee00000
+#define XTP_OFFSET 0x1e0008
+
+#define smp_processor_id() (current->processor)
+
+extern unsigned long cpu_present_map;
+extern unsigned long cpu_online_map;
+extern unsigned long ipi_base_addr;
+extern int bootstrap_processor;
+extern volatile int cpu_number_map[NR_CPUS];
+extern volatile int __cpu_logical_map[NR_CPUS];
+
+#define cpu_logical_map(i) __cpu_logical_map[i]
+
+#if defined(CONFIG_KDB)
+extern volatile unsigned long smp_kdb_wait;
+#endif /* CONFIG_KDB */
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * XTP control functions:
+ * min_xtp : route all interrupts to this CPU
+ * normal_xtp: nominal XTP value
+ * raise_xtp : Route all interrupts away from this CPU
+ * max_xtp : never deliver interrupts to this CPU.
+ */
+
+/*
+ * This turns off XTP based interrupt routing. There is a bug in the handling of
+ * IRQ_INPROGRESS when the same vector appears on more than one CPU.
+ */
+extern int use_xtp;
+
+extern __inline void
+min_xtp(void)
+{
+ if (use_xtp)
+ writeb(0x80, ipi_base_addr | XTP_OFFSET); /* XTP to min */
+}
+
+extern __inline void
+normal_xtp(void)
+{
+ if (use_xtp)
+ writeb(0x8e, ipi_base_addr | XTP_OFFSET); /* XTP normal */
+}
+
+extern __inline void
+max_xtp(void)
+{
+ if (use_xtp)
+ writeb(0x8f, ipi_base_addr | XTP_OFFSET); /* Set XTP to max... */
+}
+
+extern __inline unsigned int
+hard_smp_processor_id(void)
+{
+ struct {
+ unsigned long reserved : 16;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ignored : 32;
+ } lid;
+
+ __asm__ __volatile__ ("mov %0=cr.lid" : "=r" (lid));
+
+ /*
+	 * Damn. IA64 CPU IDs are 16 bits long, but Linux expects the hard id to be
+ * in the range 0..31. So, return the low-order bits of the bus-local ID
+ * only and hope it's less than 32. This needs to be fixed...
+ */
+ return (lid.id & 0x0f);
+}
+
+#define NO_PROC_ID 0xffffffff
+#define PROC_CHANGE_PENALTY 20
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+#endif /* _ASM_IA64_SMP_H */
diff --git a/include/asm-ia64/smplock.h b/include/asm-ia64/smplock.h
new file mode 100644
index 000000000..136a777b9
--- /dev/null
+++ b/include/asm-ia64/smplock.h
@@ -0,0 +1,54 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+static __inline__ void
+release_kernel_lock(struct task_struct *task, int cpu)
+{
+ if (task->lock_depth >= 0)
+ spin_unlock(&kernel_flag);
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * Re-acquire the kernel lock
+ */
+static __inline__ void
+reacquire_kernel_lock(struct task_struct *task)
+{
+ if (task->lock_depth >= 0)
+ spin_lock(&kernel_flag);
+}
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void
+lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+static __inline__ void
+unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
new file mode 100644
index 000000000..8aeee3046
--- /dev/null
+++ b/include/asm-ia64/socket.h
@@ -0,0 +1,49 @@
+#ifndef _ASM_IA64_SOCKET_H
+#define _ASM_IA64_SOCKET_H
+
+/*
+ * Socket related defines. This mostly mirrors the Linux/x86 version.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#endif /* _ASM_IA64_SOCKET_H */
diff --git a/include/asm-ia64/sockios.h b/include/asm-ia64/sockios.h
new file mode 100644
index 000000000..d27ae3473
--- /dev/null
+++ b/include/asm-ia64/sockios.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_SOCKIOS_H
+#define _ASM_IA64_SOCKIOS_H
+
+/*
+ * Socket-level I/O control calls. This mostly mirrors the Linux/x86
+ * version.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif /* _ASM_IA64_SOCKIOS_H */
diff --git a/include/asm-ia64/softirq.h b/include/asm-ia64/softirq.h
new file mode 100644
index 000000000..8b92f4442
--- /dev/null
+++ b/include/asm-ia64/softirq.h
@@ -0,0 +1,152 @@
+#ifndef _ASM_IA64_SOFTIRQ_H
+#define _ASM_IA64_SOFTIRQ_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/config.h>
+#include <linux/stddef.h>
+
+#include <asm/system.h>
+#include <asm/hardirq.h>
+
+extern unsigned int local_bh_count[NR_CPUS];
+
+#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
+
+#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
+#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
+
+#define local_bh_disable() cpu_bh_disable(smp_processor_id())
+#define local_bh_enable() cpu_bh_enable(smp_processor_id())
+
+#define get_active_bhs() (bh_mask & bh_active)
+
+static inline void
+clear_active_bhs (unsigned long x)
+{
+ unsigned long old, new;
+ volatile unsigned long *bh_activep = (void *) &bh_active;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(bh_activep);
+ old = *bh_activep;
+ new = old & ~x;
+ } while (ia64_cmpxchg(bh_activep, old, new, 8) != old);
+}
+
+extern inline void
+init_bh (int nr, void (*routine)(void))
+{
+ bh_base[nr] = routine;
+ atomic_set(&bh_mask_count[nr], 0);
+ bh_mask |= 1 << nr;
+}
+
+extern inline void
+remove_bh (int nr)
+{
+ bh_mask &= ~(1 << nr);
+ mb();
+ bh_base[nr] = NULL;
+}
+
+extern inline void
+mark_bh (int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * The locking mechanism for base handlers, to prevent re-entrancy,
+ * is entirely private to an implementation, it should not be
+ * referenced at all outside of this file.
+ */
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
+
+extern void synchronize_bh(void);
+
+static inline void
+start_bh_atomic (void)
+{
+ atomic_inc(&global_bh_lock);
+ synchronize_bh();
+}
+
+static inline void
+end_bh_atomic (void)
+{
+ atomic_dec(&global_bh_lock);
+}
+
+/* These are for the irq's testing the lock */
+static inline int
+softirq_trylock (int cpu)
+{
+ if (cpu_bh_trylock(cpu)) {
+ if (!test_and_set_bit(0, &global_bh_count)) {
+ if (atomic_read(&global_bh_lock) == 0)
+ return 1;
+ clear_bit(0,&global_bh_count);
+ }
+ cpu_bh_endlock(cpu);
+ }
+ return 0;
+}
+
+static inline void
+softirq_endlock (int cpu)
+{
+ cpu_bh_enable(cpu);
+ clear_bit(0,&global_bh_count);
+}
+
+#else /* !CONFIG_SMP */
+
+extern inline void
+start_bh_atomic (void)
+{
+ local_bh_disable();
+ barrier();
+}
+
+extern inline void
+end_bh_atomic (void)
+{
+ barrier();
+ local_bh_enable();
+}
+
+/* These are for the irq's testing the lock */
+#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
+#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
+#define synchronize_bh() barrier()
+
+#endif /* !CONFIG_SMP */
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void
+disable_bh (int nr)
+{
+ bh_mask &= ~(1 << nr);
+ atomic_inc(&bh_mask_count[nr]);
+ synchronize_bh();
+}
+
+extern inline void
+enable_bh (int nr)
+{
+ if (atomic_dec_and_test(&bh_mask_count[nr]))
+ bh_mask |= 1 << nr;
+}
+
+#endif /* _ASM_IA64_SOFTIRQ_H */
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
new file mode 100644
index 000000000..ec10fb794
--- /dev/null
+++ b/include/asm-ia64/spinlock.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/atomic.h>
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define spin_lock_init(x) ((x)->lock = 0)
+
+/* Streamlined test_and_set_bit(0, (x)) */
+#define spin_lock(x) __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "mov r29 = 1\n" \
+ ";;\n" \
+ "1:\n" \
+ "ld4 r2 = [%0]\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0,r2\n" \
+ "(p7) br.cond.dptk.few 1b \n" \
+ "cmpxchg4.acq r2 = [%0], r29, ar.ccv\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0, r2\n" \
+ "(p7) br.cond.dptk.few 1b\n" \
+ ";;\n" \
+ :: "m" __atomic_fool_gcc((x)) : "r2", "r29")
+
+#define spin_unlock(x) __asm__ __volatile__ ("st4.rel [%0] = r0;;" : "=m" (__atomic_fool_gcc((x))))
+
+#define spin_trylock(x) (!test_and_set_bit(0, (x)))
+
+#define spin_unlock_wait(x) \
+ ({ do { barrier(); } while(((volatile spinlock_t *)x)->lock); })
+
+typedef struct {
+ volatile int read_counter:31;
+ volatile int write_lock:1;
+} rwlock_t;
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+#define read_lock(rw) \
+do { \
+ int tmp = 0; \
+ __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \
+ ";;\n" \
+ "tbit.nz p6,p0 = %0, 31\n" \
+ "(p6) br.cond.sptk.few 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\tfetchadd4.rel %0 = %1, -1\n" \
+ ";;\n" \
+ "3:\tld4.acq %0 = %1\n" \
+ ";;\n" \
+ "tbit.nz p6,p0 = %0, 31\n" \
+ "(p6) br.cond.sptk.few 3b\n" \
+ "br.cond.sptk.few 1b\n" \
+ ";;\n" \
+ ".previous\n": "=r" (tmp), "=m" (__atomic_fool_gcc(rw))); \
+} while(0)
+
+#define read_unlock(rw) \
+do { \
+ int tmp = 0; \
+ __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \
+ : "=r" (tmp) : "m" (__atomic_fool_gcc(rw))); \
+} while(0)
+
+/*
+ * These may need to be rewhacked in asm().
+ * XXX FIXME SDV - This may have a race on real hardware but is sufficient for SoftSDV
+ */
+#define write_lock(rw) \
+while(1) {\
+ do { \
+ } while (!test_and_set_bit(31, (rw))); \
+ if ((rw)->read_counter) { \
+ clear_bit(31, (rw)); \
+ while ((rw)->read_counter) \
+ ; \
+ } else { \
+ break; \
+ } \
+}
+
+#define write_unlock(x) (clear_bit(31, (x)))
+
+#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/stat.h b/include/asm-ia64/stat.h
new file mode 100644
index 000000000..c261a337e
--- /dev/null
+++ b/include/asm-ia64/stat.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_STAT_H
+#define _ASM_IA64_STAT_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * File status as returned by the stat(2) family of system calls.
+ * NOTE(review): the time fields are plain seconds values and the units
+ * of st_blocks are not stated here -- confirm against fs/stat.c.
+ */
+struct stat {
+	unsigned int st_dev;
+	unsigned int st_ino;
+	unsigned int st_mode;
+	unsigned int st_nlink;
+	unsigned int st_uid;
+	unsigned int st_gid;
+	unsigned int st_rdev;
+	unsigned int __pad1;
+	unsigned long st_size;
+	unsigned long st_atime;
+	unsigned long st_mtime;
+	unsigned long st_ctime;
+	unsigned int st_blksize;
+	int st_blocks;
+	unsigned int __unused1;
+	unsigned int __unused2;
+};
+
+#endif /* _ASM_IA64_STAT_H */
diff --git a/include/asm-ia64/statfs.h b/include/asm-ia64/statfs.h
new file mode 100644
index 000000000..4e684d5c8
--- /dev/null
+++ b/include/asm-ia64/statfs.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_STATFS_H
+#define _ASM_IA64_STATFS_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+# ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+ typedef __kernel_fsid_t fsid_t;
+# endif
+
+/* Filesystem statistics as filled in for statfs(2)/fstatfs(2). */
+struct statfs {
+	long f_type;
+	long f_bsize;
+	long f_blocks;
+	long f_bfree;
+	long f_bavail;
+	long f_files;
+	long f_ffree;
+	__kernel_fsid_t f_fsid;
+	long f_namelen;
+	long f_spare[6];
+};
+
+#endif /* _ASM_IA64_STATFS_H */
diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h
new file mode 100644
index 000000000..09a99daf4
--- /dev/null
+++ b/include/asm-ia64/string.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_IA64_STRING_H
+#define _ASM_IA64_STRING_H
+
+/*
+ * Here is where we want to put optimized versions of the string
+ * routines.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
+#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
+
+#endif /* _ASM_IA64_STRING_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
new file mode 100644
index 000000000..53dc2d2e5
--- /dev/null
+++ b/include/asm-ia64/system.h
@@ -0,0 +1,471 @@
+#ifndef _ASM_IA64_SYSTEM_H
+#define _ASM_IA64_SYSTEM_H
+
+/*
+ * System defines. Note that this is included both from .c and .S
+ * files, so it does only defines, not any C code. This is based
+ * on information published in the Processor Abstraction Layer
+ * and the System Abstraction Layer manual.
+ *
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#include <linux/config.h>
+
+#include <asm/page.h>
+
+#define KERNEL_START (PAGE_OFFSET + 0x500000)
+
+/*
+ * The following #defines must match with vmlinux.lds.S:
+ */
+#define IVT_END_ADDR (KERNEL_START + 0x8000)
+#define ZERO_PAGE_ADDR (IVT_END_ADDR + 0*PAGE_SIZE)
+#define SWAPPER_PGD_ADDR (IVT_END_ADDR + 1*PAGE_SIZE)
+
+#define GATE_ADDR (0xa000000000000000 + PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+struct pci_vector_struct {
+ __u16 bus; /* PCI Bus number */
+ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+ __u8 irq; /* IRQ assigned */
+};
+
+extern struct ia64_boot_param {
+ __u64 command_line; /* physical address of command line arguments */
+ __u64 efi_systab; /* physical address of EFI system table */
+ __u64 efi_memmap; /* physical address of EFI memory map */
+ __u64 efi_memmap_size; /* size of EFI memory map */
+ __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */
+ __u32 efi_memdesc_version; /* memory descriptor version */
+ struct {
+ __u16 num_cols; /* number of columns on console output device */
+ __u16 num_rows; /* number of rows on console output device */
+ __u16 orig_x; /* cursor's x position */
+ __u16 orig_y; /* cursor's y position */
+ } console_info;
+ __u16 num_pci_vectors; /* number of ACPI derived PCI IRQ's*/
+ __u64 pci_vectors; /* physical address of PCI data (pci_vector_struct)*/
+	__u64 fpswa;		/* physical address of the fpswa interface */
+} ia64_boot_param;
+
+/* Force an instruction-group break (";;") plus a compiler barrier. */
+extern inline void
+ia64_insn_group_barrier (void)
+{
+	__asm__ __volatile__ (";;" ::: "memory");
+}
+
+/*
+ * Macros to force memory ordering. In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ * wmb(): Guarantees that all preceding stores to memory-
+ * like regions are visible before any subsequent
+ * stores and that all following stores will be
+ * visible only after all previous stores.
+ * rmb(): Like wmb(), but for reads.
+ * mb(): wmb()/rmb() combo, i.e., all previous memory
+ * accesses are visible before all subsequent
+ * accesses and vice versa. This is also known as
+ * a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers. For that, mf.a needs to
+ * be used. However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb() __asm__ __volatile__ ("mf" ::: "memory")
+#define rmb() mb()
+#define wmb() mb()
+
+/*
+ * XXX check on these---I suspect what Linus really wants here is
+ * acquire vs release semantics but we can't discuss this stuff with
+ * Linus just yet. Grrr...
+ */
+#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_rmb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
+
+/*
+ * The group barrier in front of the rsm & ssm are necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+/* For spinlocks etc */
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+ extern unsigned long last_cli_ip;
+
+# define local_irq_save(x) \
+do { \
+ unsigned long ip, psr; \
+ \
+ __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
+ if (psr & (1UL << 14)) { \
+ __asm__ ("mov %0=ip" : "=r"(ip)); \
+ last_cli_ip = ip; \
+ } \
+ (x) = psr; \
+} while (0)
+
+# define local_irq_disable() \
+do { \
+ unsigned long ip, psr; \
+ \
+ __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
+ if (psr & (1UL << 14)) { \
+ __asm__ ("mov %0=ip" : "=r"(ip)); \
+ last_cli_ip = ip; \
+ } \
+} while (0)
+
+# define local_irq_restore(x) \
+do { \
+ unsigned long ip, old_psr, psr = (x); \
+ \
+ __asm__ __volatile__ ("mov %0=psr; mov psr.l=%1;; srlz.d" \
+ : "=&r" (old_psr) : "r" (psr) : "memory"); \
+ if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \
+ __asm__ ("mov %0=ip" : "=r"(ip)); \
+ last_cli_ip = ip; \
+ } \
+} while (0)
+
+#else /* !CONFIG_IA64_DEBUG_IRQ */
+ /* clearing of psr.i is implicitly serialized (visible by next insn) */
+# define local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" \
+ : "=r" (x) :: "memory")
+# define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
+/* (potentially) setting psr.i requires data serialization: */
+# define local_irq_restore(x) __asm__ __volatile__ ("mov psr.l=%0;; srlz.d" \
+ :: "r" (x) : "memory")
+#endif /* !CONFIG_IA64_DEBUG_IRQ */
+
+#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
+
+#define __cli() local_irq_disable ()
+#define __save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
+#define __save_and_cli(flags) local_irq_save(flags)
+#define save_and_cli(flags) __save_and_cli(flags)
+
+
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
+/*
+ * Yech. SoftSDV has a slight problem with psr.i and itc/itm. If
+ * PSR.i = 0 and ITC == ITM, you don't get the timer tick posted. So,
+ * I'll check if ITC is larger than ITM here and reset if necessary.
+ * I may miss a tick or two.
+ *
+ * Don't include asm/delay.h; it causes include loops that are
+ * mind-numbingly hard to follow.
+ */
+
+#define get_itc(x) __asm__ __volatile__("mov %0=ar.itc" : "=r"((x)) :: "memory")
+#define get_itm(x) __asm__ __volatile__("mov %0=cr.itm" : "=r"((x)) :: "memory")
+#define set_itm(x) __asm__ __volatile__("mov cr.itm=%0" :: "r"((x)) : "memory")
+
+#define __restore_flags(x) \
+do { \
+ unsigned long itc, itm; \
+ local_irq_restore(x); \
+ get_itc(itc); \
+ get_itm(itm); \
+ if (itc > itm) \
+ set_itm(itc + 10); \
+} while (0)
+
+#define __sti() \
+do { \
+ unsigned long itc, itm; \
+ local_irq_enable(); \
+ get_itc(itc); \
+ get_itm(itm); \
+ if (itc > itm) \
+ set_itm(itc + 10); \
+} while (0)
+
+#else /* !CONFIG_IA64_SOFTSDV_HACKS */
+
+#define __sti() local_irq_enable ()
+#define __restore_flags(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_IA64_SOFTSDV_HACKS */
+
+#ifdef CONFIG_SMP
+ extern void __global_cli (void);
+ extern void __global_sti (void);
+ extern unsigned long __global_save_flags (void);
+ extern void __global_restore_flags (unsigned long);
+# define cli() __global_cli()
+# define sti() __global_sti()
+# define save_flags(flags) ((flags) = __global_save_flags())
+# define restore_flags(flags) __global_restore_flags(flags)
+#else /* !CONFIG_SMP */
+# define cli() __cli()
+# define sti() __sti()
+# define save_flags(flags) __save_flags(flags)
+# define restore_flags(flags) __restore_flags(flags)
+#endif /* !CONFIG_SMP */
+
+/*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+ */
+extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
+extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
+
+/*
+ * Atomically add the constant n to *v with fetchaddN.rel, leaving the
+ * OLD value in tmp.  Only sizes 4 and 8 are supported; any other size
+ * produces a link error via __bad_size_for_ia64_fetch_and_add().
+ * n must be one of the immediates fetchadd accepts (see caller).
+ */
+#define IA64_FETCHADD(tmp,v,n,sz) \
+({ \
+	switch (sz) { \
+	      case 4: \
+		__asm__ __volatile__ ("fetchadd4.rel %0=%1,%3" \
+				      : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
+				      : "m" (__atomic_fool_gcc(v)), "i"(n)); \
+		break; \
+ \
+	      case 8: \
+		__asm__ __volatile__ ("fetchadd8.rel %0=%1,%3" \
+				      : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
+				      : "m" (__atomic_fool_gcc(v)), "i"(n)); \
+		break; \
+ \
+	      default: \
+		__bad_size_for_ia64_fetch_and_add(); \
+	} \
+})
+
+/*
+ * Atomically add i to *v and return the NEW value.  i must be one of
+ * the immediates the fetchadd instruction accepts (+/- 1, 4, 8, 16);
+ * anything else is a link error via
+ * __bad_increment_for_ia64_fetch_and_add().
+ */
+#define ia64_fetch_and_add(i,v) \
+({ \
+	__u64 _tmp; \
+	volatile __typeof__(*(v)) *_v = (v); \
+	switch (i) { \
+	      case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
+	      case -8: IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); break; \
+	      case -4: IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); break; \
+	      case -1: IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); break; \
+	      case 1: IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); break; \
+	      case 4: IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); break; \
+	      case 8: IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); break; \
+	      case 16: IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); break; \
+	      default: \
+		_tmp = __bad_increment_for_ia64_fetch_and_add(); \
+		break; \
+	} \
+	(__typeof__(*v)) (_tmp + (i)); /* return new value */ \
+})
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer (void);
+
+/*
+ * Atomically exchange x with the value at ptr, returning the old
+ * value.  size selects the 1/2/4/8-byte xchg instruction; any other
+ * size falls through to __xchg_called_with_bad_pointer(), which does
+ * not exist and therefore produces a link-time error.
+ */
+static __inline__ unsigned long
+__xchg (unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long result;
+
+	switch (size) {
+	      case 1:
+		__asm__ __volatile ("xchg1 %0=%1,%2" : "=r" (result)
+				    : "m" (*(char *) ptr), "r" (x) : "memory");
+		return result;
+
+	      case 2:
+		__asm__ __volatile ("xchg2 %0=%1,%2" : "=r" (result)
+				    : "m" (*(short *) ptr), "r" (x) : "memory");
+		return result;
+
+	      case 4:
+		__asm__ __volatile ("xchg4 %0=%1,%2" : "=r" (result)
+				    : "m" (*(int *) ptr), "r" (x) : "memory");
+		return result;
+
+	      case 8:
+		__asm__ __volatile ("xchg8 %0=%1,%2" : "=r" (result)
+				    : "m" (*(long *) ptr), "r" (x) : "memory");
+		return result;
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
+#define tas(ptr) (xchg ((ptr), 1))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long __cmpxchg_called_with_bad_pointer(void);
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) (*(struct __xchg_dummy *)(x))
+
+/*
+ * Compare-and-exchange: if *_p_ == old, store new; return the value
+ * that was in *_p_ (equal to old on success).  The expected value is
+ * staged in ar.ccv before the cmpxchgN.rel executes.
+ *
+ * Fix: the first switch ended with a bare "default:" label directly
+ * before the closing brace; a label must be attached to a statement,
+ * so that was invalid C.  Give the default case a statement (the bad
+ * size itself is diagnosed by the second switch below).
+ */
+#define ia64_cmpxchg(ptr,old,new,size) \
+({ \
+	__typeof__(ptr) _p_ = (ptr); \
+	__typeof__(new) _n_ = (new); \
+	__u64 _o_, _r_; \
+ \
+	switch (size) { \
+	      case 1: _o_ = (__u8 ) (old); break; \
+	      case 2: _o_ = (__u16) (old); break; \
+	      case 4: _o_ = (__u32) (old); break; \
+	      case 8: _o_ = (__u64) (old); break; \
+	      default: _o_ = 0; break;	/* bad size: error raised below */ \
+	} \
+	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "r"(_o_)); \
+	switch (size) { \
+	      case 1: \
+		__asm__ __volatile__ ("cmpxchg1.rel %0=%2,%3,ar.ccv" \
+				      : "=r"(_r_), "=m"(__xg(_p_)) \
+				      : "m"(__xg(_p_)), "r"(_n_)); \
+		break; \
+ \
+	      case 2: \
+		__asm__ __volatile__ ("cmpxchg2.rel %0=%2,%3,ar.ccv" \
+				      : "=r"(_r_), "=m"(__xg(_p_)) \
+				      : "m"(__xg(_p_)), "r"(_n_)); \
+		break; \
+ \
+	      case 4: \
+		__asm__ __volatile__ ("cmpxchg4.rel %0=%2,%3,ar.ccv" \
+				      : "=r"(_r_), "=m"(__xg(_p_)) \
+				      : "m"(__xg(_p_)), "r"(_n_)); \
+		break; \
+ \
+	      case 8: \
+		__asm__ __volatile__ ("cmpxchg8.rel %0=%2,%3,ar.ccv" \
+				      : "=r"(_r_), "=m"(__xg(_p_)) \
+				      : "m"(__xg(_p_)), "r"(_n_)); \
+		break; \
+ \
+	      default: \
+		_r_ = __cmpxchg_called_with_bad_pointer(); \
+		break; \
+	} \
+	(__typeof__(old)) _r_; \
+})
+
+#define cmpxchg(ptr,o,n) ia64_cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v) \
+ do { \
+ if (_cmpxchg_bugcheck_count-- <= 0) { \
+ void *ip; \
+ extern int printk(const char *fmt, ...); \
+ asm ("mov %0=ip" : "=r"(ip)); \
+ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
+ break; \
+ } \
+ } while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#ifdef __KERNEL__
+
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#define prepare_to_switch() do { } while(0)
+
+#ifdef CONFIG_IA32_SUPPORT
+# define TASK_TO_PTREGS(t) \
+ ((struct pt_regs *)(((unsigned long)(t) + IA64_STK_OFFSET - IA64_PT_REGS_SIZE)))
+# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
+# define IA32_FP_STATE(prev,next) \
+ if (IS_IA32_PROCESS(TASK_TO_PTREGS(prev))) { \
+ __asm__ __volatile__("mov %0=ar.fsr":"=r"((prev)->thread.fsr)); \
+ __asm__ __volatile__("mov %0=ar.fcr":"=r"((prev)->thread.fcr)); \
+ __asm__ __volatile__("mov %0=ar.fir":"=r"((prev)->thread.fir)); \
+ __asm__ __volatile__("mov %0=ar.fdr":"=r"((prev)->thread.fdr)); \
+ } \
+ if (IS_IA32_PROCESS(TASK_TO_PTREGS(next))) { \
+ __asm__ __volatile__("mov ar.fsr=%0"::"r"((next)->thread.fsr)); \
+ __asm__ __volatile__("mov ar.fcr=%0"::"r"((next)->thread.fcr)); \
+ __asm__ __volatile__("mov ar.fir=%0"::"r"((next)->thread.fir)); \
+ __asm__ __volatile__("mov ar.fdr=%0"::"r"((next)->thread.fdr)); \
+ }
+#else /* !CONFIG_IA32_SUPPORT */
+# define IA32_FP_STATE(prev,next)
+# define IS_IA32_PROCESS(regs) 0
+#endif /* CONFIG_IA32_SUPPORT */
+
+/*
+ * Context switch from one thread to another. If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+#define __switch_to(prev,next,last) do { \
+ ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
+ if ((prev)->thread.flags & IA64_THREAD_DBG_VALID) { \
+ ia64_save_debug_regs(&(prev)->thread.dbr[0]); \
+ } \
+ if ((next)->thread.flags & IA64_THREAD_DBG_VALID) { \
+ ia64_load_debug_regs(&(next)->thread.dbr[0]); \
+ } \
+ IA32_FP_STATE(prev,next); \
+ (last) = ia64_switch_to((next)); \
+} while (0)
+
+#ifdef CONFIG_SMP
+ /*
+ * In the SMP case, we save the fph state when context-switching
+ * away from a thread that owned and modified fph. This way, when
+ * the thread gets scheduled on another CPU, the CPU can pick up the
+ * state from task->thread.fph, avoiding the complication of having
+ * to fetch the latest fph state from another CPU. If the thread
+ * happens to be rescheduled on the same CPU later on and nobody
+ * else has touched the FPU in the meantime, the thread will fault
+ * upon the first access to fph but since the state in fph is still
+ * valid, no other overheads are incurred. In other words, CPU
+ * affinity is a Good Thing.
+ */
+# define switch_to(prev,next,last) do { \
+ if (ia64_get_fpu_owner() == (prev) && ia64_psr(ia64_task_regs(prev))->mfh) { \
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
+ __ia64_save_fpu((prev)->thread.fph); \
+ } \
+ __switch_to(prev,next,last); \
+ } while (0)
+#else
+# define switch_to(prev,next,last) __switch_to(prev,next,last)
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SYSTEM_H */
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h
new file mode 100644
index 000000000..7d85947b8
--- /dev/null
+++ b/include/asm-ia64/termbits.h
@@ -0,0 +1,179 @@
+#ifndef _ASM_IA64_TERMBITS_H
+#define _ASM_IA64_TERMBITS_H
+
+/*
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 99/01/28 Added new baudrates
+ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_IA64_TERMBITS_H */
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h
new file mode 100644
index 000000000..3252e9110
--- /dev/null
+++ b/include/asm-ia64/termios.h
@@ -0,0 +1,112 @@
+#ifndef _ASM_IA64_TERMIOS_H
+#define _ASM_IA64_TERMIOS_H
+
+/*
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 99/01/28 Added N_IRDA and N_SMSBLOCK
+ */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS msgs */
+#define N_HDLC 13 /* synchronous HDLC */
+#define N_SYNC_PPP 14 /* synchronous PPP */
+
+# ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+# endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_TERMIOS_H */
diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h
new file mode 100644
index 000000000..d8d60cf24
--- /dev/null
+++ b/include/asm-ia64/timex.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_IA64_TIMEX_H
+#define _ASM_IA64_TIMEX_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ XXX fix me! */
+
+typedef unsigned long cycles_t;
+extern cycles_t cacheflush_time;
+
+/* Read the interval time counter (ar.itc), the CPU cycle counter. */
+static inline cycles_t
+get_cycles (void)
+{
+	cycles_t ret;
+
+	__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
+	return ret;
+}
+
+#endif /* _ASM_IA64_TIMEX_H */
diff --git a/include/asm-ia64/types.h b/include/asm-ia64/types.h
new file mode 100644
index 000000000..a86d0a7f3
--- /dev/null
+++ b/include/asm-ia64/types.h
@@ -0,0 +1,94 @@
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid a name clashes.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x) x
+# define __IA64_UL_CONST(x) x
+#else
+# define __IA64_UL(x) ((unsigned long)x)
+# define __IA64_UL_CONST(x) x##UL
+#endif
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned int umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+/*
+ * There are 32-bit compilers for the ia-64 out there..
+ */
+# if ((~0UL) == 0xffffffff)
+# if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+# endif
+# else
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+# endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+# ifdef __KERNEL__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+/*
+ * There are 32-bit compilers for the ia-64 out there... (don't rely
+ * on cpp because that may cause su problem in a 32->64 bit
+ * cross-compilation environment).
+ */
+# ifdef __LP64__
+
+typedef signed long s64;
+typedef unsigned long u64;
+#define BITS_PER_LONG 64
+
+# else
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+#define BITS_PER_LONG 32
+
+# endif
+
+/* DMA addresses are 64-bits wide, in general. */
+
+typedef u64 dma_addr_t;
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_TYPES_H */
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
new file mode 100644
index 000000000..dffc5879f
--- /dev/null
+++ b/include/asm-ia64/uaccess.h
@@ -0,0 +1,366 @@
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary. This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses. Thus, we need to be careful not to let the user to
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible. This code is also fairly performance sensitive,
+ * so we want to spend as little time doing saftey checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros are sometimes
+ * also called from within the kernel itself, in which case the address
+ * validity check must be skipped. The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it. This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault. When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
+ * then resumes execution at the continuation point.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
+#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->addr_limit)
+#define set_fs(x) (current->addr_limit = (x))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area
+ * really is in user-level space. In order to do this efficiently, we
+ * make sure that the page at address TASK_SIZE is never valid (we do
+ * this by selecting VMALLOC_START as TASK_SIZE+PAGE_SIZE). This way,
+ * we can simply check whether the starting address is < TASK_SIZE
+ * and, if so, start accessing the memory. If the user specified bad
+ * length, we will fault on the NaT page and then return the
+ * appropriate error.
+ */
+#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg)
+#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
+
+extern inline int
+verify_area (int type, const void *addr, unsigned long size)
+{
+ return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * As the alpha uses the same address space for kernel and user
+ * data, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr) __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
+#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * The "xxx_ret" versions return constant specified in third argument, if
+ * something bad happens. These macros can be optimized for the
+ * case of just returning from the function xxx_ret is used.
+ */
+#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })
+#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })
+#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })
+#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
+
+extern void __get_user_unknown (void);
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ register long __gu_err __asm__ ("r8") = 0; \
+ register long __gu_val __asm__ ("r9") = 0; \
+ switch (size) { \
+ case 1: __get_user_8(ptr); break; \
+ case 2: __get_user_16(ptr); break; \
+ case 4: __get_user_32(ptr); break; \
+ case 8: __get_user_64(ptr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x,ptr,size,segment) \
+({ \
+ register long __gu_err __asm__ ("r8") = -EFAULT; \
+ register long __gu_val __asm__ ("r9") = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (__access_ok((long)__gu_addr,size,segment)) { \
+ __gu_err = 0; \
+ switch (size) { \
+ case 1: __get_user_8(__gu_addr); break; \
+ case 2: __get_user_16(__gu_addr); break; \
+ case 4: __get_user_32(__gu_addr); break; \
+ case 8: __get_user_64(__gu_addr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+ } \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+#define __get_user_64(addr) \
+ __asm__ ("\n1:\tld8 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 (2b-1b)|1\n" \
+ "\t.previous" \
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "m"(__m(addr)), "1"(__gu_err));
+
+#define __get_user_32(addr) \
+ __asm__ ("\n1:\tld4 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 (2b-1b)|1\n" \
+ "\t.previous" \
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "m"(__m(addr)), "1"(__gu_err));
+
+#define __get_user_16(addr) \
+ __asm__ ("\n1:\tld2 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 (2b-1b)|1\n" \
+ "\t.previous" \
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "m"(__m(addr)), "1"(__gu_err));
+
+#define __get_user_8(addr) \
+ __asm__ ("\n1:\tld1 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 (2b-1b)|1\n" \
+ "\t.previous" \
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "m"(__m(addr)), "1"(__gu_err));
+
+
+extern void __put_user_unknown (void);
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ register long __pu_err __asm__ ("r8") = 0; \
+ switch (size) { \
+ case 1: __put_user_8(x,ptr); break; \
+ case 2: __put_user_16(x,ptr); break; \
+ case 4: __put_user_32(x,ptr); break; \
+ case 8: __put_user_64(x,ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_check(x,ptr,size,segment) \
+({ \
+ register long __pu_err __asm__ ("r8") = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (__access_ok((long)__pu_addr,size,segment)) { \
+ __pu_err = 0; \
+ switch (size) { \
+ case 1: __put_user_8(x,__pu_addr); break; \
+ case 2: __put_user_16(x,__pu_addr); break; \
+ case 4: __put_user_32(x,__pu_addr); break; \
+ case 8: __put_user_64(x,__pu_addr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+ } \
+ __pu_err; \
+})
+
+/*
+ * The "__put_user_xx()" macros tell gcc they read from memory
+ * instead of writing: this is because they do not write to
+ * any memory gcc knows about, so there are no aliasing issues
+ */
+#define __put_user_64(x,addr) \
+ __asm__ __volatile__ ( \
+ "\n1:\tst8 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 2b-1b\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+
+#define __put_user_32(x,addr) \
+ __asm__ __volatile__ ( \
+ "\n1:\tst4 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 2b-1b\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+
+#define __put_user_16(x,addr) \
+ __asm__ __volatile__ ( \
+ "\n1:\tst2 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 2b-1b\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+
+#define __put_user_8(x,addr) \
+ __asm__ __volatile__ ( \
+ "\n1:\tst1 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "2:\n" \
+ "\t.section __ex_table,\"a\"\n" \
+ "\t\tdata4 @gprel(1b)\n" \
+ "\t\tdata4 2b-1b\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __copy_user (void *to, const void *from, unsigned long count);
+
+#define __copy_to_user(to,from,n) __copy_user((to), (from), (n))
+#define __copy_from_user(to,from,n) __copy_user((to), (from), (n))
+
+#define copy_to_user(to,from,n) __copy_tofrom_user((to), (from), (n), 1)
+#define copy_from_user(to,from,n) __copy_tofrom_user((to), (from), (n), 0)
+
+#define __copy_tofrom_user(to,from,n,check_to) \
+({ \
+ void *__cu_to = (to); \
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+ if (__access_ok((long) ((check_to) ? __cu_to : __cu_from), __cu_len, get_fs())) { \
+ __cu_len = __copy_user(__cu_to, __cu_from, __cu_len); \
+ } \
+ __cu_len; \
+})
+
+#define copy_to_user_ret(to,from,n,retval) \
+({ \
+ if (copy_to_user(to,from,n)) \
+ return retval; \
+})
+
+#define copy_from_user_ret(to,from,n,retval) \
+({ \
+ if (copy_from_user(to,from,n)) \
+ return retval; \
+})
+
+extern unsigned long __do_clear_user (void *, unsigned long);
+
+#define __clear_user(to,n) \
+({ \
+ __do_clear_user(to,n); \
+})
+
+#define clear_user(to,n) \
+({ \
+ unsigned long __cu_len = (n); \
+ if (__access_ok((long) to, __cu_len, get_fs())) { \
+ __cu_len = __do_clear_user(to, __cu_len); \
+ } \
+ __cu_len; \
+})
+
+
+/* Returns: -EFAULT if exception before terminator, N if the entire
+ buffer filled, else strlen. */
+
+extern long __strncpy_from_user (char *to, const char *from, long to_len);
+
+#define strncpy_from_user(to,from,n) \
+({ \
+ const char * __sfu_from = (from); \
+ long __sfu_ret = -EFAULT; \
+ if (__access_ok((long) __sfu_from, 0, get_fs())) \
+ __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
+ __sfu_ret; \
+})
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern unsigned long __strlen_user (const char *);
+
+#define strlen_user(str) \
+({ \
+ const char *__su_str = (str); \
+ unsigned long __su_ret = 0; \
+ if (__access_ok((long) __su_str, 0, get_fs())) \
+ __su_ret = __strlen_user(__su_str); \
+ __su_ret; \
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char *, long);
+
+#define strnlen_user(str, len) \
+({ \
+ const char *__su_str = (str); \
+ unsigned long __su_ret = 0; \
+ if (__access_ok((long) __su_str, 0, get_fs())) \
+ __su_ret = __strnlen_user(__su_str, len); \
+ __su_ret; \
+})
+
+struct exception_table_entry {
+ int addr; /* gp-relative address of insn this fixup is for */
+ int skip; /* number of bytes to skip to get to the continuation point.
+ Bit 0 tells us if r9 should be cleared to 0*/
+};
+
+extern const struct exception_table_entry *search_exception_table (unsigned long addr);
+
+#endif /* _ASM_IA64_UACCESS_H */
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
new file mode 100644
index 000000000..6dc82c87e
--- /dev/null
+++ b/include/asm-ia64/unaligned.h
@@ -0,0 +1,123 @@
+#ifndef _ASM_IA64_UNALIGNED_H
+#define _ASM_IA64_UNALIGNED_H
+
+/*
+ * The main single-value unaligned transfer routines. Derived from
+ * the Linux/Alpha version.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#define get_unaligned(ptr) \
+ ((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr))))
+
+#define put_unaligned(x,ptr) \
+ ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
+
+/*
+ * EGCS 1.1 knows about arbitrary unaligned loads. Define some
+ * packed structures to talk about such things with.
+ */
+struct __una_u64 { __u64 x __attribute__((packed)); };
+struct __una_u32 { __u32 x __attribute__((packed)); };
+struct __una_u16 { __u16 x __attribute__((packed)); };
+
+extern inline unsigned long
+__uldq (const unsigned long * r11)
+{
+ const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
+ return ptr->x;
+}
+
+extern inline unsigned long
+__uldl (const unsigned int * r11)
+{
+ const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
+ return ptr->x;
+}
+
+extern inline unsigned long
+__uldw (const unsigned short * r11)
+{
+ const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
+ return ptr->x;
+}
+
+extern inline void
+__ustq (unsigned long r5, unsigned long * r11)
+{
+ struct __una_u64 *ptr = (struct __una_u64 *) r11;
+ ptr->x = r5;
+}
+
+extern inline void
+__ustl (unsigned long r5, unsigned int * r11)
+{
+ struct __una_u32 *ptr = (struct __una_u32 *) r11;
+ ptr->x = r5;
+}
+
+extern inline void
+__ustw (unsigned long r5, unsigned short * r11)
+{
+ struct __una_u16 *ptr = (struct __una_u16 *) r11;
+ ptr->x = r5;
+}
+
+
+/*
+ * This function doesn't actually exist. The idea is that when
+ * someone uses the macros below with an unsupported size (datatype),
+ * the linker will alert us to the problem via an unresolved reference
+ * error.
+ */
+extern unsigned long ia64_bad_unaligned_access_length (void);
+
+#define ia64_get_unaligned(_ptr,size) \
+({ \
+ const void *ptr = (_ptr); \
+ unsigned long val; \
+ \
+ switch (size) { \
+ case 1: \
+ val = *(const unsigned char *) ptr; \
+ break; \
+ case 2: \
+ val = __uldw((const unsigned short *)ptr); \
+ break; \
+ case 4: \
+ val = __uldl((const unsigned int *)ptr); \
+ break; \
+ case 8: \
+ val = __uldq((const unsigned long *)ptr); \
+ break; \
+ default: \
+ val = ia64_bad_unaligned_access_length(); \
+ } \
+ val; \
+})
+
+#define ia64_put_unaligned(_val,_ptr,size) \
+do { \
+ const void *ptr = (_ptr); \
+ unsigned long val = (_val); \
+ \
+ switch (size) { \
+ case 1: \
+ *(unsigned char *)ptr = (val); \
+ break; \
+ case 2: \
+ __ustw(val, (unsigned short *)ptr); \
+ break; \
+ case 4: \
+ __ustl(val, (unsigned int *)ptr); \
+ break; \
+ case 8: \
+ __ustq(val, (unsigned long *)ptr); \
+ break; \
+ default: \
+ ia64_bad_unaligned_access_length(); \
+ } \
+} while (0)
+
+#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
new file mode 100644
index 000000000..b20e42686
--- /dev/null
+++ b/include/asm-ia64/unistd.h
@@ -0,0 +1,305 @@
+#ifndef _ASM_IA64_UNISTD_H
+#define _ASM_IA64_UNISTD_H
+
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/break.h>
+
+#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
+
+#define __NR_ni_syscall 1024
+#define __NR_exit 1025
+#define __NR_read 1026
+#define __NR_write 1027
+#define __NR_open 1028
+#define __NR_close 1029
+#define __NR_creat 1030
+#define __NR_link 1031
+#define __NR_unlink 1032
+#define __NR_execve 1033
+#define __NR_chdir 1034
+#define __NR_fchdir 1035
+#define __NR_utimes 1036
+#define __NR_mknod 1037
+#define __NR_chmod 1038
+#define __NR_chown 1039
+#define __NR_lseek 1040
+#define __NR_getpid 1041
+#define __NR_getppid 1042
+#define __NR_mount 1043
+#define __NR_umount 1044
+#define __NR_setuid 1045
+#define __NR_getuid 1046
+#define __NR_geteuid 1047
+#define __NR_ptrace 1048
+#define __NR_access 1049
+#define __NR_sync 1050
+#define __NR_fsync 1051
+#define __NR_fdatasync 1052
+#define __NR_kill 1053
+#define __NR_rename 1054
+#define __NR_mkdir 1055
+#define __NR_rmdir 1056
+#define __NR_dup 1057
+#define __NR_pipe 1058
+#define __NR_times 1059
+#define __NR_brk 1060
+#define __NR_setgid 1061
+#define __NR_getgid 1062
+#define __NR_getegid 1063
+#define __NR_acct 1064
+#define __NR_ioctl 1065
+#define __NR_fcntl 1066
+#define __NR_umask 1067
+#define __NR_chroot 1068
+#define __NR_ustat 1069
+#define __NR_dup2 1070
+#define __NR_setreuid 1071
+#define __NR_setregid 1072
+#define __NR_getresuid 1073
+#define __NR_setresuid 1074
+#define __NR_getresgid 1075
+#define __NR_setresgid 1076
+#define __NR_getgroups 1077
+#define __NR_setgroups 1078
+#define __NR_getpgid 1079
+#define __NR_setpgid 1080
+#define __NR_setsid 1081
+#define __NR_getsid 1082
+#define __NR_sethostname 1083
+#define __NR_setrlimit 1084
+#define __NR_getrlimit 1085
+#define __NR_getrusage 1086
+#define __NR_gettimeofday 1087
+#define __NR_settimeofday 1088
+#define __NR_select 1089
+#define __NR_poll 1090
+#define __NR_symlink 1091
+#define __NR_readlink 1092
+#define __NR_uselib 1093
+#define __NR_swapon 1094
+#define __NR_swapoff 1095
+#define __NR_reboot 1096
+#define __NR_truncate 1097
+#define __NR_ftruncate 1098
+#define __NR_fchmod 1099
+#define __NR_fchown 1100
+#define __NR_getpriority 1101
+#define __NR_setpriority 1102
+#define __NR_statfs 1103
+#define __NR_fstatfs 1104
+#define __NR_ioperm 1105
+#define __NR_semget 1106
+#define __NR_semop 1107
+#define __NR_semctl 1108
+#define __NR_msgget 1109
+#define __NR_msgsnd 1110
+#define __NR_msgrcv 1111
+#define __NR_msgctl 1112
+#define __NR_shmget 1113
+#define __NR_shmat 1114
+#define __NR_shmdt 1115
+#define __NR_shmctl 1116
+/* also known as klogctl() in GNU libc: */
+#define __NR_syslog 1117
+#define __NR_setitimer 1118
+#define __NR_getitimer 1119
+#define __NR_stat 1120
+#define __NR_lstat 1121
+#define __NR_fstat 1122
+#define __NR_vhangup 1123
+#define __NR_lchown 1124
+#define __NR_vm86 1125
+#define __NR_wait4 1126
+#define __NR_sysinfo 1127
+#define __NR_clone 1128
+#define __NR_setdomainname 1129
+#define __NR_uname 1130
+#define __NR_adjtimex 1131
+#define __NR_create_module 1132
+#define __NR_init_module 1133
+#define __NR_delete_module 1134
+#define __NR_get_kernel_syms 1135
+#define __NR_query_module 1136
+#define __NR_quotactl 1137
+#define __NR_bdflush 1138
+#define __NR_sysfs 1139
+#define __NR_personality 1140
+#define __NR_afs_syscall 1141
+#define __NR_setfsuid 1142
+#define __NR_setfsgid 1143
+#define __NR_getdents 1144
+#define __NR_flock 1145
+#define __NR_readv 1146
+#define __NR_writev 1147
+#define __NR_pread 1148
+#define __NR_pwrite 1149
+#define __NR__sysctl 1150
+#define __NR_mmap 1151
+#define __NR_munmap 1152
+#define __NR_mlock 1153
+#define __NR_mlockall 1154
+#define __NR_mprotect 1155
+#define __NR_mremap 1156
+#define __NR_msync 1157
+#define __NR_munlock 1158
+#define __NR_munlockall 1159
+#define __NR_sched_getparam 1160
+#define __NR_sched_setparam 1161
+#define __NR_sched_getscheduler 1162
+#define __NR_sched_setscheduler 1163
+#define __NR_sched_yield 1164
+#define __NR_sched_get_priority_max 1165
+#define __NR_sched_get_priority_min 1166
+#define __NR_sched_rr_get_interval 1167
+#define __NR_nanosleep 1168
+#define __NR_nfsservctl 1169
+#define __NR_prctl 1170
+#define __NR_getpagesize 1171
+#define __NR_mmap2 1172
+#define __NR_pciconfig_read 1173
+#define __NR_pciconfig_write 1174
+#define __NR_perfmonctl 1175
+#define __NR_sigaltstack 1176
+#define __NR_rt_sigaction 1177
+#define __NR_rt_sigpending 1178
+#define __NR_rt_sigprocmask 1179
+#define __NR_rt_sigqueueinfo 1180
+#define __NR_rt_sigreturn 1181
+#define __NR_rt_sigsuspend 1182
+#define __NR_rt_sigtimedwait 1183
+#define __NR_getcwd 1184
+#define __NR_capget 1185
+#define __NR_capset 1186
+#define __NR_sendfile 1187
+#define __NR_getpmsg 1188
+#define __NR_putpmsg 1189
+#define __NR_socket 1190
+#define __NR_bind 1191
+#define __NR_connect 1192
+#define __NR_listen 1193
+#define __NR_accept 1194
+#define __NR_getsockname 1195
+#define __NR_getpeername 1196
+#define __NR_socketpair 1197
+#define __NR_send 1198
+#define __NR_sendto 1199
+#define __NR_recv 1200
+#define __NR_recvfrom 1201
+#define __NR_shutdown 1202
+#define __NR_setsockopt 1203
+#define __NR_getsockopt 1204
+#define __NR_sendmsg 1205
+#define __NR_recvmsg 1206
+#define __NR_sys_pivot_root 1207
+
+#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
+
+extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
+
+#define _syscall0(type,name) \
+type \
+name (void) \
+{ \
+ register long dummy1 __asm__ ("out0"); \
+ register long dummy2 __asm__ ("out1"); \
+ register long dummy3 __asm__ ("out2"); \
+ register long dummy4 __asm__ ("out3"); \
+ register long dummy5 __asm__ ("out4"); \
+ \
+ return __ia64_syscall(dummy1, dummy2, dummy3, dummy4, dummy5, \
+ __NR_##name); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type \
+name (type1 arg1) \
+{ \
+ register long dummy2 __asm__ ("out1"); \
+ register long dummy3 __asm__ ("out2"); \
+ register long dummy4 __asm__ ("out3"); \
+ register long dummy5 __asm__ ("out4"); \
+ \
+ return __ia64_syscall((long) arg1, dummy2, dummy3, dummy4, \
+ dummy5, __NR_##name); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type \
+name (type1 arg1, type2 arg2) \
+{ \
+ register long dummy3 __asm__ ("out2"); \
+ register long dummy4 __asm__ ("out3"); \
+ register long dummy5 __asm__ ("out4"); \
+ \
+ return __ia64_syscall((long) arg1, (long) arg2, dummy3, dummy4, \
+ dummy5, __NR_##name); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type \
+name (type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ register long dummy4 __asm__ ("out3"); \
+ register long dummy5 __asm__ ("out4"); \
+ \
+ return __ia64_syscall((long) arg1, (long) arg2, (long) arg3, \
+ dummy4, dummy5, __NR_##name); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type \
+name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ register long dummy5 __asm__ ("out4"); \
+ \
+ return __ia64_syscall((long) arg1, (long) arg2, (long) arg3, \
+ (long) arg4, dummy5, __NR_##name); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type \
+name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+	return __ia64_syscall((long) arg1, (long) arg2, (long) arg3, \
+			      (long) arg4, (long) arg5, __NR_##name); \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall4(pid_t,wait4,pid_t,pid,int *,wait_stat,int,options,struct rusage*, rusage)
+static inline _syscall1(int,delete_module,const char *,name)
+static inline _syscall2(pid_t,clone,unsigned long,flags,void*,sp);
+
+#define __NR__exit __NR_exit
+static inline _syscall1(int,_exit,int,exitcode)
+
+static inline pid_t
+waitpid (int pid, int *wait_stat, int flags)
+{
+ return wait4(pid, wait_stat, flags, NULL);
+}
+
+static inline pid_t
+wait (int * wait_stat)
+{
+ return wait4(-1, wait_stat, 0, 0);
+}
+
+#endif /* __KERNEL_SYSCALLS__ */
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_UNISTD_H */
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
new file mode 100644
index 000000000..038edb798
--- /dev/null
+++ b/include/asm-ia64/unwind.h
@@ -0,0 +1,77 @@
+#ifndef _ASM_IA64_UNWIND_H
+#define _ASM_IA64_UNWIND_H
+
+/*
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full IA-64 unwind API (though
+ * it would of course be possible to implement the kernel API on top
+ * of it).
+ */
+
+struct task_struct; /* forward declaration */
+struct switch_stack; /* forward declaration */
+
+/*
+ * The following declarations are private to the unwind
+ * implementation:
+ */
+
+struct ia64_stack {
+ unsigned long *limit;
+ unsigned long *top;
+};
+
+/*
+ * No user of this module should ever access this structure directly
+ * as it is subject to change. It is declared here solely so we can
+ * use automatic variables.
+ */
+struct ia64_frame_info {
+ struct ia64_stack regstk;
+ unsigned long *bsp;
+ unsigned long top_rnat; /* RSE NaT collection at top of backing store */
+ unsigned long cfm;
+ unsigned long ip; /* instruction pointer */
+};
+
+/*
+ * The official API follows below:
+ */
+
+/*
+ * Prepare to unwind blocked task t.
+ */
+extern void ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info,
+ struct task_struct *t);
+
+/*
+ * Prepare to unwind the current task. For this to work, the kernel
+ * stack identified by REGS must look like this:
+ *
+ * // //
+ * | |
+ * | kernel stack |
+ * | |
+ * +=====================+
+ * | struct pt_regs |
+ * +---------------------+ <--- REGS
+ * | struct switch_stack |
+ * +---------------------+
+ */
+extern void ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *regs);
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int ia64_unwind_to_previous_frame (struct ia64_frame_info *info);
+
+#define ia64_unwind_get_ip(info) ((info)->ip)
+#define ia64_unwind_get_bsp(info) ((unsigned long) (info)->bsp)
+
+#endif /* _ASM_IA64_UNWIND_H */
diff --git a/include/asm-ia64/user.h b/include/asm-ia64/user.h
new file mode 100644
index 000000000..84fbe50fd
--- /dev/null
+++ b/include/asm-ia64/user.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_IA64_USER_H
+#define _ASM_IA64_USER_H
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd). The file contents are as
+ * follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused, we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/ptrace.h>
+
+#include <asm/page.h>
+
+#define EF_SIZE 3072 /* XXX fix me */
+
+struct user {
+ unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ struct regs * u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_IA64_USER_H */
diff --git a/include/asm-ia64/vga.h b/include/asm-ia64/vga.h
new file mode 100644
index 000000000..1f446d684
--- /dev/null
+++ b/include/asm-ia64/vga.h
@@ -0,0 +1,22 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * (c) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef __ASM_IA64_VGA_H_
+#define __ASM_IA64_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then access the
+ * videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif /* __ASM_IA64_VGA_H_ */
diff --git a/include/asm-m68k/mmu_context.h b/include/asm-m68k/mmu_context.h
index f54fbfa17..d481eb316 100644
--- a/include/asm-m68k/mmu_context.h
+++ b/include/asm-m68k/mmu_context.h
@@ -9,6 +9,9 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
extern inline void
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
diff --git a/include/asm-m68k/pgalloc.h b/include/asm-m68k/pgalloc.h
index 3d2a2b87d..93f342308 100644
--- a/include/asm-m68k/pgalloc.h
+++ b/include/asm-m68k/pgalloc.h
@@ -396,4 +396,9 @@ extern inline void flush_tlb_kernel_page(unsigned long addr)
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
#endif /* _M68K_PGALLOC_H */
diff --git a/include/asm-mips/hardirq.h b/include/asm-mips/hardirq.h
index 167818a8c..c9ffba132 100644
--- a/include/asm-mips/hardirq.h
+++ b/include/asm-mips/hardirq.h
@@ -1,4 +1,4 @@
-/* $Id: hardirq.h,v 1.5 1999/10/09 00:01:42 ralf Exp $
+/* $Id: hardirq.h,v 1.6 2000/02/04 07:40:53 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@ extern unsigned int local_irq_count[NR_CPUS];
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
#ifndef __SMP__
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index a17ea48cc..c7bafd3c8 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -1,4 +1,4 @@
-/* $Id: irq.h,v 1.5 1999/01/04 16:09:22 ralf Exp $
+/* $Id: irq.h,v 1.6 2000/01/26 00:07:45 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -7,8 +7,8 @@
* Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
*/
-#ifndef __ASM_MIPS_IRQ_H
-#define __ASM_MIPS_IRQ_H
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
#define NR_IRQS 64
@@ -25,4 +25,4 @@ extern void enable_irq(unsigned int);
/* Machine specific interrupt initialization */
extern void (*irq_setup)(void);
-#endif /* __ASM_MIPS_IRQ_H */
+#endif /* _ASM_IRQ_H */
diff --git a/include/asm-mips/md.h b/include/asm-mips/md.h
deleted file mode 100644
index aff87490d..000000000
--- a/include/asm-mips/md.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* $Id: md.h,v 1.1 1998/08/17 10:20:14 ralf Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_MD_H
-#define __ASM_MD_H
-
-/* #define HAVE_ARCH_XORBLOCK */
-
-#define MD_XORBLOCK_ALIGNMENT sizeof(long)
-
-#endif /* __ASM_MD_H */
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 68051accf..2e9809bba 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.6 1999/12/04 03:59:12 ralf Exp $
+/* $Id: mmu_context.h,v 1.7 2000/02/04 07:40:53 ralf Exp $
*
* Switch a MMU context.
*
@@ -31,6 +31,10 @@ extern pgd_t *current_pgd;
#endif
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index 75bc57669..aed22eedc 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -1,4 +1,4 @@
-/* $Id: pgalloc.h,v 1.1 2000/02/04 07:40:53 ralf Exp $
+/* $Id: pgalloc.h,v 1.2 2000/02/13 20:52:06 harald Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -39,6 +39,9 @@ extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index d28144839..89f6e132d 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.27 2000/01/29 01:42:28 ralf Exp $
+/* $Id: pgtable.h,v 1.28 2000/02/04 07:40:53 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -19,6 +19,11 @@
#include <asm/cachectl.h>
#include <linux/config.h>
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+}
+
+
/*
* - add_wired_entry() add a fixed TLB entry, and move wired register
*/
diff --git a/include/asm-mips/softirq.h b/include/asm-mips/softirq.h
index a7e7a074a..9248125eb 100644
--- a/include/asm-mips/softirq.h
+++ b/include/asm-mips/softirq.h
@@ -1,110 +1,26 @@
-/* $Id: softirq.h,v 1.9 1999/11/19 20:35:48 ralf Exp $
+/* $Id: softirq.h,v 1.10 2000/02/22 21:23:52 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1997, 1998, 1999 by Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1997, 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_SOFTIRQ_H
#define _ASM_SOFTIRQ_H
-#include <linux/config.h>
-
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t __mips_bh_counter;
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
extern unsigned int local_bh_count[NR_CPUS];
-#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-
-#define get_active_bhs() (bh_mask & bh_active)
-
-#if !defined(CONFIG_CPU_HAS_LLSC)
-
-#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
-
-#else
-
-static inline void clear_active_bhs(unsigned long x)
-{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
- "and\t%0,%2\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (bh_active)
- :"Ir" (~x),
- "m" (bh_active));
-
-}
-
-#endif
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
+#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
+#define local_bh_disable() cpu_bh_disable(smp_processor_id())
+#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* _ASM_SOFTIRQ_H */
diff --git a/include/asm-mips64/hardirq.h b/include/asm-mips64/hardirq.h
index 7f3adeb68..3bc56d38d 100644
--- a/include/asm-mips64/hardirq.h
+++ b/include/asm-mips64/hardirq.h
@@ -1,4 +1,4 @@
-/* $Id: hardirq.h,v 1.2 1999/12/04 03:59:12 ralf Exp $
+/* $Id: hardirq.h,v 1.3 2000/02/04 07:40:53 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@ extern unsigned int local_irq_count[NR_CPUS];
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
#ifndef __SMP__
diff --git a/include/asm-mips64/mmu_context.h b/include/asm-mips64/mmu_context.h
index d81818f88..ca41d022b 100644
--- a/include/asm-mips64/mmu_context.h
+++ b/include/asm-mips64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.2 1999/12/04 03:59:12 ralf Exp $
+/* $Id: mmu_context.h,v 1.3 2000/02/04 07:40:53 ralf Exp $
*
* Switch a MMU context.
*
@@ -22,6 +22,10 @@ extern pgd_t *current_pgd;
#define ASID_INC 0x1
#define ASID_MASK 0xff
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
diff --git a/include/asm-mips64/offset.h b/include/asm-mips64/offset.h
index d292080de..abe647224 100644
--- a/include/asm-mips64/offset.h
+++ b/include/asm-mips64/offset.h
@@ -52,30 +52,30 @@
#define TASK_COUNTER 56
#define TASK_PRIORITY 64
#define TASK_MM 80
-#define TASK_STRUCT_SIZE 1448
+#define TASK_STRUCT_SIZE 1456
/* MIPS specific thread_struct offsets. */
-#define THREAD_REG16 880
-#define THREAD_REG17 888
-#define THREAD_REG18 896
-#define THREAD_REG19 904
-#define THREAD_REG20 912
-#define THREAD_REG21 920
-#define THREAD_REG22 928
-#define THREAD_REG23 936
-#define THREAD_REG29 944
-#define THREAD_REG30 952
-#define THREAD_REG31 960
-#define THREAD_STATUS 968
-#define THREAD_FPU 976
-#define THREAD_BVADDR 1240
-#define THREAD_BUADDR 1248
-#define THREAD_ECODE 1256
-#define THREAD_TRAPNO 1264
-#define THREAD_MFLAGS 1272
-#define THREAD_CURDS 1280
-#define THREAD_TRAMP 1288
-#define THREAD_OLDCTX 1296
+#define THREAD_REG16 888
+#define THREAD_REG17 896
+#define THREAD_REG18 904
+#define THREAD_REG19 912
+#define THREAD_REG20 920
+#define THREAD_REG21 928
+#define THREAD_REG22 936
+#define THREAD_REG23 944
+#define THREAD_REG29 952
+#define THREAD_REG30 960
+#define THREAD_REG31 968
+#define THREAD_STATUS 976
+#define THREAD_FPU 984
+#define THREAD_BVADDR 1248
+#define THREAD_BUADDR 1256
+#define THREAD_ECODE 1264
+#define THREAD_TRAPNO 1272
+#define THREAD_MFLAGS 1280
+#define THREAD_CURDS 1288
+#define THREAD_TRAMP 1296
+#define THREAD_OLDCTX 1304
/* Linux mm_struct offsets. */
#define MM_USERS 32
diff --git a/include/asm-mips64/pgalloc.h b/include/asm-mips64/pgalloc.h
index 963a996f1..1e5de6c96 100644
--- a/include/asm-mips64/pgalloc.h
+++ b/include/asm-mips64/pgalloc.h
@@ -1,4 +1,4 @@
-/* $Id$
+/* $Id: pgalloc.h,v 1.1 2000/02/04 07:40:53 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -39,6 +39,8 @@ extern void (*flush_tlb_mm)(struct mm_struct *mm);
extern void (*flush_tlb_range)(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void (*flush_tlb_page)(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end);
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/include/asm-mips64/pgtable.h b/include/asm-mips64/pgtable.h
index 02b08b4c3..f0ba45505 100644
--- a/include/asm-mips64/pgtable.h
+++ b/include/asm-mips64/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.6 2000/01/29 01:42:28 ralf Exp $
+/* $Id: pgtable.h,v 1.10 2000/02/10 21:38:10 kanoj Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,6 +11,7 @@
#define _ASM_PGTABLE_H
#include <asm/addrspace.h>
+#include <asm/page.h>
#ifndef _LANGUAGE_ASSEMBLY
@@ -19,6 +20,10 @@
#include <linux/mmzone.h>
#include <asm/cachectl.h>
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+}
+
/* Basically we have the same two-level (which is the logical three level
* Linux page table layout folded) page tables as the i386. Some day
* when we have proper page coloring support we can have a 1% quicker
diff --git a/include/asm-mips64/softirq.h b/include/asm-mips64/softirq.h
index f827649ed..528b4f091 100644
--- a/include/asm-mips64/softirq.h
+++ b/include/asm-mips64/softirq.h
@@ -1,92 +1,26 @@
-/* $Id$
+/* $Id: softirq.h,v 1.3 2000/02/22 21:23:52 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1997, 1998, 1999 by Ralf Baechle
+ * Copyright (C) 1997, 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_SOFTIRQ_H
#define _ASM_SOFTIRQ_H
-extern unsigned int local_bh_count[NR_CPUS];
-
-#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-
-#define get_active_bhs() (bh_mask & bh_active)
-
-static inline void clear_active_bhs(unsigned long x)
-{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1:\tlld\t%0,%1\n\t"
- "and\t%0,%2\n\t"
- "scd\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (bh_active)
- :"Ir" (~x),
- "m" (bh_active));
-}
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+extern unsigned int local_bh_count[NR_CPUS];
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
+#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
+#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
+#define local_bh_disable() cpu_bh_disable(smp_processor_id())
+#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* _ASM_SOFTIRQ_H */
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
index 95b59cafc..c981d5fa0 100644
--- a/include/asm-ppc/bitops.h
+++ b/include/asm-ppc/bitops.h
@@ -1,5 +1,5 @@
/*
- * $Id: bitops.h,v 1.11 1999/01/03 20:16:48 cort Exp $
+ * $Id: bitops.h,v 1.12 2000/02/09 03:28:31 davem Exp $
* bitops.h: Bit string operations on the ppc
*/
@@ -88,11 +88,11 @@ extern __inline__ unsigned long change_bit(unsigned long nr, void *addr)
}
#endif
-extern __inline__ unsigned long test_bit(int nr, __const__ volatile void *addr)
+extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
__const__ unsigned int *p = (__const__ unsigned int *) addr;
- return (p[nr >> 5] >> (nr & 0x1f)) & 1UL;
+ return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
extern __inline__ int ffz(unsigned int x)
diff --git a/include/asm-ppc/bootinfo.h b/include/asm-ppc/bootinfo.h
index cc32d941f..af614d798 100644
--- a/include/asm-ppc/bootinfo.h
+++ b/include/asm-ppc/bootinfo.h
@@ -9,6 +9,12 @@
#ifndef _PPC_BOOTINFO_H
#define _PPC_BOOTINFO_H
+#include <linux/config.h>
+
+#if defined(CONFIG_APUS) && !defined(__BOOTER__)
+#include <asm-m68k/bootinfo.h>
+#else
+
struct bi_record {
unsigned long tag; /* tag ID */
unsigned long size; /* size of record (in bytes) */
@@ -23,5 +29,7 @@ struct bi_record {
#define BI_SYSMAP 0x1015
#define BI_MACHTYPE 0x1016
+#endif /* CONFIG_APUS */
+
#endif /* _PPC_BOOTINFO_H */
diff --git a/include/asm-ppc/feature.h b/include/asm-ppc/feature.h
index 07b10e8bc..318150dab 100644
--- a/include/asm-ppc/feature.h
+++ b/include/asm-ppc/feature.h
@@ -8,12 +8,13 @@
* for more details.
*
* Copyright (C) 1998 Paul Mackerras.
+ *
*/
#ifndef __ASM_PPC_FEATURE_H
#define __ASM_PPC_FEATURE_H
/*
- * The FCR bits for particular features vary somewhat between
+ * The FCR selector for particular features varies somewhat between
* different machines. So we abstract a list of features here
* and let the feature_* routines map them to the actual bits.
*/
@@ -25,19 +26,23 @@ enum system_feature {
FEATURE_Serial_IO_B,
FEATURE_SWIM3_enable,
FEATURE_MESH_enable,
- FEATURE_IDE_enable,
- FEATURE_VIA_enable,
- FEATURE_CD_power,
+ FEATURE_IDE0_enable, /* Internal IDE */
+ FEATURE_IDE0_reset, /* Internal IDE */
+ FEATURE_IOBUS_enable, /* Internal IDE */
FEATURE_Mediabay_reset,
- FEATURE_Mediabay_enable,
+ FEATURE_Mediabay_power,
FEATURE_Mediabay_PCI_enable,
- FEATURE_Mediabay_IDE_enable,
+ FEATURE_Mediabay_IDE_enable, /* Also IDE 1 */
+ FEATURE_Mediabay_IDE_reset, /* Also IDE 1 */
FEATURE_Mediabay_floppy_enable,
FEATURE_BMac_reset,
FEATURE_BMac_IO_enable,
- FEATURE_Modem_Reset,
- FEATURE_IDE_DiskPower,
- FEATURE_IDE_Reset,
+ FEATURE_Modem_power,
+ FEATURE_Slow_SCC_PCLK,
+ FEATURE_Sound_power,
+ FEATURE_Sound_CLK_enable,
+ FEATURE_IDE2_enable,
+ FEATURE_IDE2_reset,
FEATURE_last,
};
diff --git a/include/asm-ppc/heathrow.h b/include/asm-ppc/heathrow.h
new file mode 100644
index 000000000..4081e1237
--- /dev/null
+++ b/include/asm-ppc/heathrow.h
@@ -0,0 +1,45 @@
+/*
+ * heathrow.h: definitions for using the "Heathrow" I/O controller chip.
+ *
+ * Grabbed from Open Firmware definitions on a PowerBook G3 Series
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+
+/* offset from ohare base for feature control register */
+#define HEATHROW_FEATURE_REG 0x38
+
+/*
+ * Bits in feature control register.
+ * Bits postfixed with a _N are in inverse logic
+ */
+#define HRW_RESET_SCC 1 /* Named in_use_led in OF ??? */
+#define HRW_BAY_POWER_N 2
+#define HRW_BAY_PCI_ENABLE 4
+#define HRW_BAY_IDE_ENABLE 8
+#define HRW_BAY_FLOPPY_ENABLE 0x10
+#define HRW_IDE0_ENABLE 0x20
+#define HRW_IDE0_RESET_N 0x40
+#define HRW_BAY_RESET_N 0x80
+#define HRW_IOBUS_ENABLE 0x100 /* Internal IDE ? */
+#define HRW_SCC_ENABLE 0x200
+#define HRW_MESH_ENABLE 0x400
+#define HRW_SWIM_ENABLE 0x800
+#define HRW_SOUND_POWER_N 0x1000
+#define HRW_SOUND_CLK_ENABLE 0x2000
+#define HRW_SCCA_IO 0x4000
+#define HRW_SCCB_IO 0x8000
+#define HRW_PORT_OR_DESK_VIA_N 0x10000 /* This one is 0 on PowerBook */
+#define HRW_PWM_MON_ID_N 0x20000 /* ??? (0) */
+#define HRW_HOOK_MB_CNT_N 0x40000 /* ??? (0) */
+#define HRW_SWIM_CLONE_FLOPPY 0x80000 /* ??? (0) */
+#define HRW_AUD_RUN22 0x100000 /* ??? (1) */
+#define HRW_SCSI_LINK_MODE 0x200000 /* Read ??? (1) */
+#define HRW_ARB_BYPASS 0x400000 /* ??? (0 on main, 1 on gatwick) */
+#define HRW_IDE1_RESET_N 0x800000 /* Media bay */
+#define HRW_SLOW_SCC_PCLK 0x1000000 /* ??? (0) */
+#define HRW_MODEM_POWER_N 0x2000000 /* Used by internal modem on wallstreet */
+#define HRW_MFDC_CELL_ENABLE 0x4000000 /* ??? (0) */
+#define HRW_USE_MFDC 0x8000000 /* ??? (0) */
+#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */
+#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */
diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h
index 11a272bba..009229882 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-ppc/irq.h
@@ -129,10 +129,9 @@ static __inline__ int irq_cannonicalize(int irq)
/*
* this is the # irq's for all ppc arch's (pmac/chrp/prep)
- * so it is the max of them all - which happens to be powermac
- * at present (G3 powermacs have 64).
+ * so it is the max of them all
*/
-#define NR_IRQS 128
+#define NR_IRQS 256
#endif /* CONFIG_APUS */
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index eaf91b288..5e7b1b578 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -21,7 +21,7 @@ struct machdep_calls {
unsigned int (*irq_cannonicalize)(unsigned int irq);
void (*init_IRQ)(void);
int (*get_irq)(struct pt_regs *);
- void (*post_irq)( int );
+ void (*post_irq)( struct pt_regs *, int );
/* A general init function, called by ppc_init in init/main.c.
May be NULL. */
diff --git a/include/asm-ppc/mediabay.h b/include/asm-ppc/mediabay.h
index 04792d15e..a634d7f20 100644
--- a/include/asm-ppc/mediabay.h
+++ b/include/asm-ppc/mediabay.h
@@ -8,6 +8,7 @@
#define _PPC_MEDIABAY_H
#define MB_FD 0 /* media bay contains floppy drive */
+#define MB_FD1 1 /* media bay contains floppy drive */
#define MB_CD 3 /* media bay contains ATA drive such as CD */
#define MB_NO 7 /* media bay contains nothing */
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 81dadd22a..55f185d91 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -310,4 +310,61 @@ extern void _tlbia(void); /* invalidate all TLB entries */
* a processor working register during a tablewalk.
*/
#define M_TW 799
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively.
+ */
+
+#define PPC4XX_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion. On all architectures, the data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the 4xx-specific tlbre, tlbwr, and tlbsx
+ * instructions.
+ */
+
+#define TLB_LO 1
+#define TLB_HI 0
+
+#define TLB_DATA TLB_LO
+#define TLB_TAG TLB_HI
+
+/* Tag portion */
+
+#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
+
+/* Data portion */
+
+#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
#endif /* _PPC_MMU_H_ */
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index a7e0a5246..88d6970a5 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -19,6 +19,9 @@
-- Dan
*/
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
#ifdef CONFIG_8xx
#define NO_CONTEXT 16
#define LAST_CONTEXT 15
diff --git a/include/asm-ppc/ohare.h b/include/asm-ppc/ohare.h
index ffc4ef10b..1303e5869 100644
--- a/include/asm-ppc/ohare.h
+++ b/include/asm-ppc/ohare.h
@@ -2,6 +2,9 @@
* ohare.h: definitions for using the "O'Hare" I/O controller chip.
*
* Copyright (C) 1997 Paul Mackerras.
+ *
+ * BenH: Changed to match those of heathrow (but not all of them). Please
+ * check if I didn't break anything (especially the media bay).
*/
/* offset from ohare base for feature control register */
@@ -13,21 +16,21 @@
* and may differ for other machines.
*/
#define OH_SCC_RESET 1
-#define OH_BAY_RESET 2 /* a guess */
+#define OH_BAY_POWER_N 2 /* a guess */
#define OH_BAY_PCI_ENABLE 4 /* a guess */
#define OH_BAY_IDE_ENABLE 8
#define OH_BAY_FLOPPY_ENABLE 0x10
-#define OH_IDE_ENABLE 0x20
-#define OH_IDE_POWER 0x40 /* a guess */
-#define OH_BAY_ENABLE 0x80
-#define OH_IDE_RESET 0x100 /* 0-based, a guess */
+#define OH_IDE0_ENABLE 0x20
+#define OH_IDE0_RESET_N 0x40 /* a guess */
+#define OH_BAY_RESET_N 0x80
+#define OH_IOBUS_ENABLE 0x100 /* IOBUS seems to be IDE */
#define OH_SCC_ENABLE 0x200
#define OH_MESH_ENABLE 0x400
#define OH_FLOPPY_ENABLE 0x800
#define OH_SCCA_IO 0x4000
#define OH_SCCB_IO 0x8000
-#define OH_VIA_ENABLE 0x10000
-#define OH_IDECD_POWER 0x800000
+#define OH_VIA_ENABLE 0x10000 /* Is apparently wrong, to be verified */
+#define OH_IDE1_RESET_N 0x800000
/*
* Bits to set in the feature control register on PowerBooks.
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 698ecc518..92347e406 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -10,4 +10,56 @@
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
+/* Dynamic DMA Mapping stuff
+ * ++ajoshi
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size)
+{
+ return virt_to_bus(ptr);
+}
+extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size)
+{
+ /* nothing to do */
+}
+extern inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ return nents;
+}
+extern inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ /* nothing to do */
+}
+extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
+ dma_addr_t dma_handle,
+ size_t size)
+{
+ /* nothing to do */
+}
+extern inline void pci_dma_syng_sg(struct pci_dev *hwdev,
+ struct scatterlist *sg,
+ int nelems)
+{
+ /* nothing to do */
+}
+
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg) ((sg)->length)
+
#endif /* __PPC_PCI_H */
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 3949a7b5f..f1f0f578a 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -15,7 +15,7 @@ extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
+ unsigned long end);
extern inline void flush_hash_page(unsigned context, unsigned long va)
{ }
#elif defined(CONFIG_8xx)
@@ -48,6 +48,12 @@ extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* PPC has hw page tables. */
+}
+
/*
* No cache flushing is required when address mappings are
* changed, because the caches on PowerPCs are physically
@@ -102,6 +108,16 @@ extern unsigned long ioremap_bot, ioremap_base;
* copied to the MD_TWC before it gets loaded.
*/
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ */
+
/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -159,68 +175,19 @@ extern unsigned long ioremap_bot, ioremap_base;
*/
#if defined(CONFIG_4xx)
-/*
- * At present, all PowerPC 400-class processors share a similar TLB
- * architecture. The instruction and data sides share a unified, 64-entry,
- * fully-associative TLB which is maintained under software control. In
- * addition, the instruction side has a hardware-managed, 4-entry, fully-
- * associative TLB which serves as a first level to the shared TLB. These
- * two TLBs are known as the UTLB and ITLB, respectively.
- */
-
-#define PPC4XX_TLB_SIZE 64
-
-/*
- * TLB entries are defined by a "high" tag portion and a "low" data portion.
- * On all architectures, the data portion is 32-bits.
- */
-
-#define TLB_LO 1
-#define TLB_HI 0
-
-#define TLB_DATA TLB_LO
-#define TLB_TAG TLB_HI
-
-/* Tag portion */
-
-#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
-#define TLB_PAGESZ_MASK 0x00000380
-#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
-#define PAGESZ_1K 0
-#define PAGESZ_4K 1
-#define PAGESZ_16K 2
-#define PAGESZ_64K 3
-#define PAGESZ_256K 4
-#define PAGESZ_1M 5
-#define PAGESZ_4M 6
-#define PAGESZ_16M 7
-#define TLB_VALID 0x00000040 /* Entry is valid */
-
-/* Data portion */
-
-#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
-#define TLB_PERM_MASK 0x00000300
-#define TLB_EX 0x00000200 /* Instruction execution allowed */
-#define TLB_WR 0x00000100 /* Writes permitted */
-#define TLB_ZSEL_MASK 0x000000F0
-#define TLB_ZSEL(x) (((x) & 0xF) << 4)
-#define TLB_ATTR_MASK 0x0000000F
-#define TLB_W 0x00000008 /* Caching is write-through */
-#define TLB_I 0x00000004 /* Caching is inhibited */
-#define TLB_M 0x00000002 /* Memory is coherent */
-#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_USER 0x002 /* matches one of the PP bits */
-#define _PAGE_RW 0x004 /* software: user write access allowed */
-#define _PAGE_GUARDED 0x008
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
+/* Definitions for 4xx embedded chips. */
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_COHERENT	0x002	/* M: enforce memory coherence */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_PRESENT 0x040 /* software: PTE contains a translation */
+#define _PAGE_DIRTY 0x100 /* C: page changed */
+#define _PAGE_RW 0x200 /* Writes permitted */
+#define _PAGE_ACCESSED 0x400 /* R: page referenced */
+#define _PAGE_HWWRITE 0x800 /* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED 0
+
#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT 0x0001 /* Page is valid */
@@ -242,7 +209,8 @@ extern unsigned long ioremap_bot, ioremap_base;
* protection.
*/
#define _PAGE_HWWRITE _PAGE_DIRTY
-#else
+
+#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_USER 0x002 /* matches one of the PP bits */
diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h
index 9d32dd011..20e337f34 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-ppc/processor.h
@@ -174,7 +174,11 @@
#define HID0_DLOCK (1<<12) /* Data Cache Lock */
#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
#define HID0_DCI (1<<10) /* Data Cache Invalidate */
+#define HID0_SPD (1<<9) /* Speculative disable */
+#define HID0_SGE (1<<7) /* Store Gathering Enable */
#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
+#define HID0_BTIC (1<<5) /* Branch Target Instruction Cache Enable */
+#define HID0_ABE (1<<3) /* Address Broadcast Enable */
#define HID0_BHTE (1<<2) /* Branch History Table Enable */
#define HID0_BTCD (1<<1) /* Branch target cache disable */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
@@ -339,6 +343,7 @@
#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
+#define ICTC 1019
#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */
#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */
#define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h
index 2acd7b45a..40deb121b 100644
--- a/include/asm-ppc/prom.h
+++ b/include/asm-ppc/prom.h
@@ -63,7 +63,7 @@ typedef void (*prom_entry)(struct prom_args *);
/* Prototypes */
extern void abort(void);
-extern void prom_init(int, int, prom_entry);
+extern unsigned long prom_init(int, int, prom_entry);
extern void prom_print(const char *msg);
extern void relocate_nodes(void);
extern void finish_device_tree(void);
@@ -72,7 +72,10 @@ extern struct device_node *find_type_devices(const char *type);
extern struct device_node *find_path_device(const char *path);
extern struct device_node *find_compatible_devices(const char *type,
const char *compat);
+extern struct device_node *find_pci_device_OFnode(unsigned char bus,
+ unsigned char dev_fn);
extern struct device_node *find_phandle(phandle);
+extern struct device_node *find_all_nodes(void);
extern int device_is_compatible(struct device_node *device, const char *);
extern int machine_is_compatible(const char *compat);
extern unsigned char *get_property(struct device_node *node, const char *name,
diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h
index b73bd5961..461ac0360 100644
--- a/include/asm-ppc/semaphore.h
+++ b/include/asm-ppc/semaphore.h
@@ -4,6 +4,9 @@
/*
* Swiped from asm-sparc/semaphore.h and modified
* -- Cort (cort@cs.nmt.edu)
+ *
+ * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
+ * -- Ani Joshi (ajoshi@unixbox.com)
*/
#ifdef __KERNEL__
@@ -102,6 +105,99 @@ extern inline void up(struct semaphore * sem)
__up(sem);
}
+
+/* RW spinlock-based semaphores */
+
+struct rw_semaphore
+{
+ spinlock_t lock;
+ int rd, wr;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name, rd, wr) \
+{ \
+ SPIN_LOCK_UNLOCKED, \
+ (rd), (wr), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) \
+}
+
+#define __DECLARE_RWSEM_GENERIC(name, rd, wr) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name, rd, wr)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, 0, 0)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 1, 0)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0, 1)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ spin_lock_init(&sem->lock);
+ sem->rd = sem->wr = 0;
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+#endif
+}
+
+#ifndef CHECK_MAGIC
+#define CHECK_MAGIC(x)
+#endif
+
+extern void down_read_failed(struct rw_semaphore *);
+extern void down_write_failed(struct rw_semaphore *);
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ CHECK_MAGIC(sem->__magic);
+
+ spin_lock_irq(&sem->lock);
+ if (sem->wr)
+ down_read_failed(sem);
+ sem->rd++;
+ spin_unlock_irq(&sem->lock);
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ CHECK_MAGIC(sem->__magic);
+
+ spin_lock(&sem->lock);
+ if(sem->rd || sem->wr)
+ down_write_failed(sem);
+ sem->wr = 1;
+ spin_unlock(&sem->lock);
+}
+
+#define up_read(sem) \
+ do { \
+ unsigned long flags; \
+ \
+ CHECK_MAGIC((sem)->__magic); \
+ \
+ spin_lock_irqsave(&(sem)->lock, flags); \
+ if (!--(sem)->rd && waitqueue_active(&(sem)->wait)) \
+ wake_up(&(sem)->wait); \
+ spin_unlock_irqrestore(&(sem)->lock, flags); \
+ } while (0)
+
+#define up_write(sem) \
+ do { \
+ unsigned long flags; \
+ \
+ CHECK_MAGIC((sem)->__magic); \
+ \
+ spin_lock_irqsave(&(sem)->lock, flags); \
+ (sem)->wr = 0; \
+ if (waitqueue_active(&(sem)->wait)) \
+ wake_up(&(sem)->wait); \
+ spin_unlock_irqrestore(&(sem)->lock, flags); \
+ } while (0)
+
+
#endif /* __KERNEL__ */
#endif /* !(_PPC_SEMAPHORE_H) */
diff --git a/include/asm-ppc/types.h b/include/asm-ppc/types.h
index 86fa349d3..4c5e9766e 100644
--- a/include/asm-ppc/types.h
+++ b/include/asm-ppc/types.h
@@ -41,6 +41,10 @@ typedef unsigned long long u64;
#define BITS_PER_LONG 32
+/* DMA addresses are 32-bits wide */
+
+typedef u32 dma_addr_t;
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h
index 85c9ec8a9..e6a7f9a53 100644
--- a/include/asm-ppc/vga.h
+++ b/include/asm-ppc/vga.h
@@ -8,43 +8,33 @@
#define _LINUX_ASM_VGA_H_
#include <asm/io.h>
-#include <asm/processor.h>
#include <linux/config.h>
-#include <linux/console.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use little
+ * endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
extern inline void scr_writew(u16 val, u16 *addr)
{
- /* If using vgacon (not fbcon) byteswap the writes.
- * If non-vgacon assume fbcon and don't byteswap
- * just like include/linux/vt_buffer.h.
- * XXX: this is a performance loss so get rid of it
- * as soon as fbcon works on prep.
- * -- Cort
- */
-#ifdef CONFIG_FB
- if ( conswitchp != &vga_con )
- (*(addr) = (val));
- else
-#endif /* CONFIG_FB */
- st_le16(addr, val);
+ writew(val, (unsigned long)addr);
}
extern inline u16 scr_readw(const u16 *addr)
{
-#ifdef CONFIG_FB
- if ( conswitchp != &vga_con )
- return (*(addr));
- else
-#endif /* CONFIG_FB */
- return ld_le16((unsigned short *)addr);
+ return readw((unsigned long)addr);
}
-#define VT_BUF_HAVE_MEMCPYF
-#define scr_memcpyw_from memcpy
-#define scr_memcpyw_to memcpy
+#define VT_BUF_HAVE_MEMCPYW
+#define scr_memcpyw memcpy
+
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
extern unsigned long vgacon_remap_base;
#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index e16a7d775..b90be6fbd 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -11,6 +11,9 @@
(b) ASID (Address Space IDentifier)
*/
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
/*
* Cache of MMU context last used.
*/
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 97a9a3ad8..cd6dc704a 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -57,6 +57,10 @@ extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
/*
* Basically we have the same two-level (which is the logical three level
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index fbaa0f005..1139f58f7 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.54 1998/09/21 05:07:34 jj Exp $
+/* $Id: bitops.h,v 1.55 2000/02/09 03:28:32 davem Exp $
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -94,7 +94,7 @@ extern __inline__ void change_bit(unsigned long nr, void *addr)
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_set_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -116,7 +116,7 @@ extern __inline__ void set_bit(unsigned long nr, __SMPVOL void *addr)
(void) test_and_set_bit(nr, addr);
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -139,7 +139,7 @@ extern __inline__ void clear_bit(unsigned long nr, __SMPVOL void *addr)
(void) test_and_clear_bit(nr, addr);
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_change_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -165,9 +165,9 @@ extern __inline__ void change_bit(unsigned long nr, __SMPVOL void *addr)
#endif /* __KERNEL__ */
/* The following routine need not be atomic. */
-extern __inline__ unsigned long test_bit(int nr, __const__ __SMPVOL void *addr)
+extern __inline__ int test_bit(int nr, __const__ __SMPVOL void *addr)
{
- return 1UL & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0;
}
/* The easy/cheese version for now. */
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index ed47c7760..56fe88bba 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -26,6 +26,8 @@ extern unsigned int local_irq_count;
#define synchronize_irq() barrier()
+#define in_irq() (local_irq_count != 0)
+
#else
#include <asm/atomic.h>
@@ -45,6 +47,9 @@ extern atomic_t global_irq_count;
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+#define in_irq() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] != 0); })
+
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index ab2aeebef..604c447a9 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,6 +5,10 @@
#ifndef __ASSEMBLY__
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
/*
* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h
index f30a5bcc2..a2749a907 100644
--- a/include/asm-sparc/pci.h
+++ b/include/asm-sparc/pci.h
@@ -10,4 +10,89 @@
#define PCIBIOS_MIN_IO 0UL
#define PCIBIOS_MIN_MEM 0UL
+#ifdef __KERNEL__
+
+/* Dynamic DMA mapping stuff.
+ */
+
+#include <asm/scatterlist.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices.
+ */
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what as passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size);
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size);
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scather-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents);
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents);
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern void pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size);
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems);
+
+#endif /* __KERNEL__ */
+
#endif /* __SPARC_PCI_H */
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index ab87c0629..fc323280b 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -1,4 +1,4 @@
-/* $Id: pgalloc.h,v 1.2 2000/01/15 00:51:42 anton Exp $ */
+/* $Id: pgalloc.h,v 1.3 2000/02/03 10:13:31 jj Exp $ */
#ifndef _SPARC_PGALLOC_H
#define _SPARC_PGALLOC_H
@@ -69,6 +69,10 @@ BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+}
+
#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(mm,start,end) BTFIXUP_CALL(flush_tlb_range)(mm,start,end)
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 931e67169..8829d323c 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.87 1999/12/27 06:37:14 anton Exp $ */
+/* $Id: pgtable.h,v 1.88 2000/02/06 22:56:09 zaitcev Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H
@@ -50,16 +50,21 @@ BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct s
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
-/* mmu_map/unmap is provided by iommu/iounit; mmu_flush/inval probably belongs to CPU... */
+/*
+ * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
+ * mmu_flush/inval belong to CPU. Valid on IIep.
+ */
BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long va, __u32 addr, int len)
-BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long addr, int len)
-BTFIXUPDEF_CALL(void, mmu_inval_dma_area, unsigned long addr, int len)
-BTFIXUPDEF_CALL(void, mmu_flush_dma_area, unsigned long addr, int len)
+BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
+BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
+BTFIXUPDEF_CALL(void, mmu_inval_dma_area, unsigned long virt, int len)
+BTFIXUPDEF_CALL(void, mmu_flush_dma_area, unsigned long virt, int len)
#define mmu_map_dma_area(va, ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
-#define mmu_inval_dma_area(va,len) BTFIXUP_CALL(mmu_unmap_dma_area)(va,len)
-#define mmu_flush_dma_area(va,len) BTFIXUP_CALL(mmu_unmap_dma_area)(va,len)
+#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
+#define mmu_inval_dma_area(va,len) BTFIXUP_CALL(mmu_inval_dma_area)(va,len)
+#define mmu_flush_dma_area(va,len) BTFIXUP_CALL(mmu_flush_dma_area)(va,len)
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h
index c82a080ad..d61b56554 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -14,170 +14,21 @@
#include <asm/hardirq.h>
-#define get_active_bhs() (bh_mask & bh_active)
-
#ifdef __SMP__
extern unsigned int local_bh_count[NR_CPUS];
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern spinlock_t global_bh_count;
-extern spinlock_t sparc_bh_lock;
-
-extern void synchronize_bh(void);
-
-static inline void clear_active_bhs(unsigned int mask)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_active &= ~(mask);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void remove_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- bh_base[nr] = NULL;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void mark_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_active |= (1 << nr);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (spin_trylock(&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0 &&
- local_bh_count[cpu] == 0) {
- ++local_bh_count[cpu];
- return 1;
- }
- spin_unlock(&global_bh_count);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- local_bh_count[cpu]--;
- spin_unlock(&global_bh_count);
-}
-
#define local_bh_disable() (local_bh_count[smp_processor_id()]++)
#define local_bh_enable() (local_bh_count[smp_processor_id()]--)
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
+
#else
extern unsigned int local_bh_count;
-#define clear_active_bhs(x) (bh_active &= ~(x))
-#define mark_bh(nr) (bh_active |= (1 << (nr)))
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1))
-#define softirq_endlock(cpu) (local_bh_count = 0)
-#define synchronize_bh() barrier()
-
#define local_bh_disable() (local_bh_count++)
#define local_bh_enable() (local_bh_count--)
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_count++;
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_count--;
-}
+#define in_softirq() (local_bh_count != 0)
#endif /* SMP */
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 27820f265..6a6ec52b1 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.26 1999/01/07 14:14:15 jj Exp $
+/* $Id: bitops.h,v 1.27 2000/02/09 03:28:33 davem Exp $
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -20,7 +20,7 @@
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_set_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -60,7 +60,7 @@ extern __inline__ void set_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -100,7 +100,7 @@ extern __inline__ void clear_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_change_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -135,9 +135,9 @@ extern __inline__ void change_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_bit(int nr, __const__ void *addr)
+extern __inline__ int test_bit(int nr, __const__ void *addr)
{
- return 1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63));
+ return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL;
}
/* The easy/cheese version for now. */
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index 7df1d1346..daff61ac4 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -16,10 +16,13 @@ extern unsigned int local_irq_count;
/*
* Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * or hardware interrupt processing? On any cpu?
*/
#define in_interrupt() ((local_irq_count + local_bh_count) != 0)
+/* This tests only the local processors hw IRQ context disposition. */
+#define in_irq() (local_irq_count != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count == 0)
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index cb7fba53f..788e8dd18 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.30 2000/01/28 13:43:14 jj Exp $ */
+/* $Id: io.h,v 1.31 2000/02/08 05:11:38 jj Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
@@ -13,43 +13,10 @@
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
-#define NEW_PCI_DMA_MAP
-
-#ifndef NEW_PCI_DMA_MAP
-#define PCI_DVMA_HASHSZ 256
-
-extern unsigned long pci_dvma_v2p_hash[PCI_DVMA_HASHSZ];
-extern unsigned long pci_dvma_p2v_hash[PCI_DVMA_HASHSZ];
-
-#define pci_dvma_ahashfn(addr) (((addr) >> 24) & 0xff)
-
-extern __inline__ unsigned long virt_to_bus(volatile void *addr)
-{
- unsigned long vaddr = (unsigned long)addr;
- unsigned long off;
-
- /* Handle kernel variable pointers... */
- if (vaddr < PAGE_OFFSET)
- vaddr += PAGE_OFFSET - (unsigned long)&empty_zero_page;
-
- off = pci_dvma_v2p_hash[pci_dvma_ahashfn(vaddr - PAGE_OFFSET)];
- return vaddr + off;
-}
-
-extern __inline__ void *bus_to_virt(unsigned long addr)
-{
- unsigned long paddr = addr & 0xffffffffUL;
- unsigned long off;
-
- off = pci_dvma_p2v_hash[pci_dvma_ahashfn(paddr)];
- return (void *)(paddr + off);
-}
-#else
extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
#define virt_to_bus virt_to_bus_not_defined_use_pci_map
extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
#define bus_to_virt bus_to_virt_not_defined_use_pci_map
-#endif
/* Different PCI controllers we support have their PCI MEM space
* mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ae61e47a6..d6ef977d9 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.41 1999/09/10 15:39:03 jj Exp $ */
+/* $Id: mmu_context.h,v 1.42 2000/02/08 07:47:03 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -10,6 +10,10 @@
#include <asm/system.h>
#include <asm/spitfire.h>
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index c0e4a12a1..fe4d9e1fa 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -94,6 +94,28 @@ extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long
#endif /* ! __SMP__ */
+/* This will change for Cheetah and later chips. */
+#define VPTE_BASE 0xfffffffe00000000
+
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ /* Note the signed type. */
+ long s = start, e = end;
+ if (s > e)
+ /* Nobody should call us with start below VM hole and end above.
+ See if it is really true. */
+ BUG();
+#if 0
+ /* Currently free_pgtables guarantees this. */
+ s &= PMD_MASK;
+ e = (e + PMD_SIZE - 1) & PMD_MASK;
+#endif
+ flush_tlb_range(mm,
+ VPTE_BASE + (s >> (PAGE_SHIFT - 3)),
+ VPTE_BASE + (e >> (PAGE_SHIFT - 3)));
+}
+
/* Page table allocation/freeing. */
#ifdef __SMP__
/* Sliiiicck */
diff --git a/include/asm-sparc64/posix_types.h b/include/asm-sparc64/posix_types.h
index e2a024e3e..e486344ad 100644
--- a/include/asm-sparc64/posix_types.h
+++ b/include/asm-sparc64/posix_types.h
@@ -9,11 +9,12 @@
#if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8)
typedef unsigned long int __kernel_size_t;
+typedef long int __kernel_ssize_t;
#else
typedef unsigned long long __kernel_size_t;
+typedef long long __kernel_ssize_t;
#endif
-typedef long long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h
index b8e017d79..460c96633 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -19,117 +19,6 @@ extern unsigned int local_bh_count;
#define local_bh_disable() (local_bh_count++)
#define local_bh_enable() (local_bh_count--)
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(mask) \
- __asm__ __volatile__( \
-"1: ldx [%1], %%g7\n" \
-" andn %%g7, %0, %%g5\n" \
-" casx [%1], %%g7, %%g5\n" \
-" cmp %%g7, %%g5\n" \
-" bne,pn %%xcc, 1b\n" \
-" nop" \
- : /* no outputs */ \
- : "HIr" (mask), "r" (&bh_active) \
- : "g5", "g7", "cc", "memory")
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- membar("#StoreStore");
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifndef __SMP__
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_count++;
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_count--;
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1))
-#define softirq_endlock(cpu) (local_bh_count = 0)
-#define synchronize_bh() barrier()
-
-#else /* (__SMP__) */
-
-extern atomic_t global_bh_lock;
-extern spinlock_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (spin_trylock(&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0 &&
- cpu_data[cpu].bh_count == 0) {
- ++(cpu_data[cpu].bh_count);
- return 1;
- }
- spin_unlock(&global_bh_count);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- (cpu_data[cpu].bh_count)--;
- spin_unlock(&global_bh_count);
-}
-
-#endif /* (__SMP__) */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count != 0)
#endif /* !(__SPARC64_SOFTIRQ_H) */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5b1ad645c..6a9894bb0 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,15 +56,6 @@ enum
typedef int acpi_dstate_t;
-#ifdef __KERNEL__
-
-extern int acpi_active;
-
-extern void (*acpi_idle)(void);
-extern void (*acpi_power_off)(void);
-
-#endif /* __KERNEL__ */
-
/* RSDP location */
#define ACPI_BIOS_ROM_BASE (0x0e0000)
#define ACPI_BIOS_ROM_END (0x100000)
diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h
index f6ed2d3b1..ae1e69173 100644
--- a/include/linux/adfs_fs.h
+++ b/include/linux/adfs_fs.h
@@ -2,113 +2,43 @@
#define _ADFS_FS_H
#include <linux/types.h>
-/*
- * Structures of data on the disk
- */
/*
* Disc Record at disc address 0xc00
*/
struct adfs_discrecord {
- unsigned char log2secsize;
- unsigned char secspertrack;
- unsigned char heads;
- unsigned char density;
- unsigned char idlen;
- unsigned char log2bpmb;
- unsigned char skew;
- unsigned char bootoption;
- unsigned char lowsector;
- unsigned char nzones;
- unsigned short zone_spare;
- unsigned long root;
- unsigned long disc_size;
- unsigned short disc_id;
- unsigned char disc_name[10];
- unsigned long disc_type;
- unsigned long disc_size_high;
- unsigned char log2sharesize:4;
- unsigned char unused:4;
- unsigned char big_flag:1;
+ __u8 log2secsize;
+ __u8 secspertrack;
+ __u8 heads;
+ __u8 density;
+ __u8 idlen;
+ __u8 log2bpmb;
+ __u8 skew;
+ __u8 bootoption;
+ __u8 lowsector;
+ __u8 nzones;
+ __u16 zone_spare;
+ __u32 root;
+ __u32 disc_size;
+ __u16 disc_id;
+ __u8 disc_name[10];
+ __u32 disc_type;
+ __u32 disc_size_high;
+ __u8 log2sharesize:4;
+ __u8 unused40:4;
+ __u8 big_flag:1;
+ __u8 unused41:1;
+ __u8 nzones_high;
+ __u32 format_version;
+ __u32 root_size;
+ __u8 unused52[60 - 52];
};
#define ADFS_DISCRECORD (0xc00)
#define ADFS_DR_OFFSET (0x1c0)
#define ADFS_DR_SIZE 60
+#define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3)
#define ADFS_SUPER_MAGIC 0xadf5
-#define ADFS_FREE_FRAG 0
-#define ADFS_BAD_FRAG 1
-#define ADFS_ROOT_FRAG 2
-
-/*
- * Directory header
- */
-struct adfs_dirheader {
- unsigned char startmasseq;
- unsigned char startname[4];
-};
-
-#define ADFS_NEWDIR_SIZE 2048
-#define ADFS_OLDDIR_SIZE 1024
-#define ADFS_NUM_DIR_ENTRIES 77
-
-/*
- * Directory entries
- */
-struct adfs_direntry {
- char dirobname[10];
-#define ADFS_NAME_LEN 10
- __u8 dirload[4];
- __u8 direxec[4];
- __u8 dirlen[4];
- __u8 dirinddiscadd[3];
- __u8 newdiratts;
-#define ADFS_NDA_OWNER_READ (1 << 0)
-#define ADFS_NDA_OWNER_WRITE (1 << 1)
-#define ADFS_NDA_LOCKED (1 << 2)
-#define ADFS_NDA_DIRECTORY (1 << 3)
-#define ADFS_NDA_EXECUTE (1 << 4)
-#define ADFS_NDA_PUBLIC_READ (1 << 5)
-#define ADFS_NDA_PUBLIC_WRITE (1 << 6)
-};
-
-#define ADFS_MAX_NAME_LEN 255
-struct adfs_idir_entry {
- __u32 inode_no; /* Address */
- __u32 file_id; /* file id */
- __u32 name_len; /* name length */
- __u32 size; /* size */
- __u32 mtime; /* modification time */
- __u32 filetype; /* RiscOS file type */
- __u8 mode; /* internal mode */
- char name[ADFS_MAX_NAME_LEN]; /* file name */
-};
-
-/*
- * Directory tail
- */
-union adfs_dirtail {
- struct {
- unsigned char dirlastmask;
- char dirname[10];
- unsigned char dirparent[3];
- char dirtitle[19];
- unsigned char reserved[14];
- unsigned char endmasseq;
- unsigned char endname[4];
- unsigned char dircheckbyte;
- } old;
- struct {
- unsigned char dirlastmask;
- unsigned char reserved[2];
- unsigned char dirparent[3];
- char dirtitle[19];
- char dirname[10];
- unsigned char endmasseq;
- unsigned char endname[4];
- unsigned char dircheckbyte;
- } new;
-};
#ifdef __KERNEL__
/*
@@ -129,47 +59,8 @@ extern inline int adfs_checkbblk(unsigned char *ptr)
return (result & 0xff) != ptr[511];
}
-/* dir.c */
-extern unsigned int adfs_val (unsigned char *p, int len);
-extern int adfs_dir_read_parent (struct inode *inode, struct buffer_head **bhp);
-extern int adfs_dir_read (struct inode *inode, struct buffer_head **bhp);
-extern int adfs_dir_check (struct inode *inode, struct buffer_head **bhp,
- int buffers, union adfs_dirtail *dtp);
-extern void adfs_dir_free (struct buffer_head **bhp, int buffers);
-extern int adfs_dir_get (struct super_block *sb, struct buffer_head **bhp,
- int buffers, int pos, unsigned long parent_object_id,
- struct adfs_idir_entry *ide);
-extern int adfs_dir_find_entry (struct super_block *sb, struct buffer_head **bhp,
- int buffers, unsigned int index,
- struct adfs_idir_entry *ide);
-
-/* inode.c */
-extern int adfs_inode_validate (struct inode *inode);
-extern unsigned long adfs_inode_generate (unsigned long parent_id, int diridx);
-extern unsigned long adfs_inode_objid (struct inode *inode);
-extern unsigned int adfs_parent_bmap (struct inode *inode, int block);
-extern int adfs_bmap (struct inode *inode, int block);
-extern void adfs_read_inode (struct inode *inode);
-
-/* map.c */
-extern int adfs_map_lookup (struct super_block *sb, int frag_id, int offset);
-
-/* namei.c */
-extern struct dentry *adfs_lookup (struct inode *dir, struct dentry *dentry);
+#endif
-/* super.c */
extern int init_adfs_fs (void);
-extern void adfs_error (struct super_block *, const char *, const char *, ...);
-
-/*
- * Inodes and file operations
- */
-
-/* dir.c */
-extern struct inode_operations adfs_dir_inode_operations;
-
-/* file.c */
-extern struct inode_operations adfs_file_inode_operations;
-#endif
#endif
diff --git a/include/linux/adfs_fs_i.h b/include/linux/adfs_fs_i.h
index 831575165..94d5607bf 100644
--- a/include/linux/adfs_fs_i.h
+++ b/include/linux/adfs_fs_i.h
@@ -11,7 +11,13 @@
* adfs file system inode data in memory
*/
struct adfs_inode_info {
- unsigned long file_id; /* id of fragments containing actual data */
+ unsigned long mmu_private;
+ unsigned long parent_id; /* object id of parent */
+ __u32 loadaddr; /* RISC OS load address */
+ __u32 execaddr; /* RISC OS exec address */
+ unsigned int filetype; /* RISC OS file type */
+ unsigned int attr; /* RISC OS permissions */
+ int stamped:1; /* RISC OS file has date/time */
};
#endif
diff --git a/include/linux/adfs_fs_sb.h b/include/linux/adfs_fs_sb.h
index 649b61e45..30082c823 100644
--- a/include/linux/adfs_fs_sb.h
+++ b/include/linux/adfs_fs_sb.h
@@ -1,33 +1,38 @@
/*
* linux/include/linux/adfs_fs_sb.h
*
- * Copyright (C) 1997 Russell King
+ * Copyright (C) 1997-1999 Russell King
*/
#ifndef _ADFS_FS_SB
#define _ADFS_FS_SB
-#include <linux/adfs_fs.h>
+/*
+ * Forward-declare this
+ */
+struct adfs_discmap;
+struct adfs_dir_ops;
/*
- * adfs file system superblock data in memory
+ * ADFS file system superblock data in memory
*/
struct adfs_sb_info {
- struct buffer_head *s_sbh; /* buffer head containing disc record */
- struct adfs_discrecord *s_dr; /* pointer to disc record in s_sbh */
- uid_t s_uid; /* owner uid */
- gid_t s_gid; /* owner gid */
- int s_owner_mask; /* ADFS Owner perm -> unix perm */
- int s_other_mask; /* ADFS Other perm -> unix perm */
- __u16 s_zone_size; /* size of a map zone in bits */
- __u16 s_ids_per_zone; /* max. no ids in one zone */
- __u32 s_idlen; /* length of ID in map */
- __u32 s_map_size; /* size of a map */
- __u32 s_zonesize; /* zone size (in map bits) */
- __u32 s_map_block; /* block address of map */
- struct buffer_head **s_map; /* bh list containing map */
- __u32 s_root; /* root disc address */
- __s8 s_map2blk; /* shift left by this for map->sector */
+ struct adfs_discmap *s_map; /* bh list containing map */
+ struct adfs_dir_ops *s_dir; /* directory operations */
+
+ uid_t s_uid; /* owner uid */
+ gid_t s_gid; /* owner gid */
+ umode_t s_owner_mask; /* ADFS owner perm -> unix perm */
+ umode_t s_other_mask; /* ADFS other perm -> unix perm */
+
+ __u32 s_ids_per_zone; /* max. no ids in one zone */
+ __u32 s_idlen; /* length of ID in map */
+ __u32 s_map_size; /* sector size of a map */
+ unsigned long s_size; /* total size (in blocks) of this fs */
+ signed int s_map2blk; /* shift left by this for map->sector */
+ unsigned int s_log2sharesize;/* log2 share size */
+ unsigned int s_version; /* disc format version */
+ unsigned int s_namelen; /* maximum number of characters in name */
};
#endif
diff --git a/include/linux/affs_fs.h b/include/linux/affs_fs.h
index 342ac2648..90e5a3dd2 100644
--- a/include/linux/affs_fs.h
+++ b/include/linux/affs_fs.h
@@ -105,11 +105,11 @@ extern void affs_dir_truncate(struct inode *);
extern struct inode_operations affs_file_inode_operations;
extern struct inode_operations affs_file_inode_operations_ofs;
extern struct inode_operations affs_dir_inode_operations;
-extern struct inode_operations affs_symlink_inode_operations;
-extern struct inode_operations affs_chrdev_inode_operations;
-extern struct inode_operations affs_blkdev_inode_operations;
+extern struct address_space_operations affs_symlink_aops;
+extern struct address_space_operations affs_aops;
extern struct dentry_operations affs_dentry_operations;
extern struct dentry_operations affs_dentry_operations_intl;
+extern int affs_bmap(struct inode *, int);
#endif
diff --git a/include/linux/affs_fs_i.h b/include/linux/affs_fs_i.h
index beeabb0de..a55951fbf 100644
--- a/include/linux/affs_fs_i.h
+++ b/include/linux/affs_fs_i.h
@@ -29,6 +29,7 @@ struct ext_cache {
* affs fs inode data in memory
*/
struct affs_inode_info {
+ unsigned long mmu_private;
u32 i_protect; /* unused attribute bits */
s32 i_parent; /* parent ino */
s32 i_original; /* if != 0, this is the key of the original */
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index eaa8bd1be..3c8c7e01f 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -48,6 +48,7 @@ enum chipset_type {
VIA_GENERIC,
VIA_VP3,
VIA_MVP3,
+ VIA_MVP4,
VIA_APOLLO_PRO,
SIS_GENERIC,
AMD_GENERIC,
diff --git a/include/linux/atm.h b/include/linux/atm.h
index 67a8f3465..ed917611f 100644
--- a/include/linux/atm.h
+++ b/include/linux/atm.h
@@ -1,6 +1,6 @@
/* atm.h - general ATM declarations */
-/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/*
@@ -20,6 +20,7 @@
#include <linux/socket.h>
#include <linux/types.h>
#endif
+#include <linux/atmapi.h>
#include <linux/atmsap.h>
#include <linux/atmioc.h>
@@ -84,23 +85,6 @@
* please speak up ...
*/
-/* socket layer */
-#define SO_BCTXOPT __SO_ENCODE(SOL_SOCKET,16,struct atm_buffconst)
- /* not ATM specific - should go somewhere else */
-#define SO_BCRXOPT __SO_ENCODE(SOL_SOCKET,17,struct atm_buffconst)
-
-
-/* for SO_BCTXOPT and SO_BCRXOPT */
-
-struct atm_buffconst {
- unsigned long buf_fac; /* buffer alignment factor */
- unsigned long buf_off; /* buffer alignment offset */
- unsigned long size_fac; /* buffer size factor */
- unsigned long size_off; /* buffer size offset */
- unsigned long min_size; /* minimum size */
- unsigned long max_size; /* maximum size, 0 = unlimited */
-};
-
/* ATM cell header (for AAL0) */
@@ -154,12 +138,28 @@ struct atm_trafprm {
int min_pcr; /* minimum PCR in cells per second */
int max_cdv; /* maximum CDV in microseconds */
int max_sdu; /* maximum SDU in bytes */
+ /* extra params for ABR */
+ unsigned int icr; /* Initial Cell Rate (24-bit) */
+ unsigned int tbe; /* Transient Buffer Exposure (24-bit) */
+ unsigned int frtt : 24; /* Fixed Round Trip Time (24-bit) */
+ unsigned int rif : 4; /* Rate Increment Factor (4-bit) */
+ unsigned int rdf : 4; /* Rate Decrease Factor (4-bit) */
+ unsigned int nrm_pres :1; /* nrm present bit */
+ unsigned int trm_pres :1; /* rm present bit */
+ unsigned int adtf_pres :1; /* adtf present bit */
+ unsigned int cdf_pres :1; /* cdf present bit*/
+ unsigned int nrm :3; /* Max # of Cells for each forward RM cell (3-bit) */
+ unsigned int trm :3; /* Time between forward RM cells (3-bit) */
+ unsigned int adtf :10; /* ACR Decrease Time Factor (10-bit) */
+ unsigned int cdf :3; /* Cutoff Decrease Factor (3-bit) */
+ unsigned int spare :9; /* spare bits */
};
struct atm_qos {
struct atm_trafprm txtp; /* parameters in TX direction */
- struct atm_trafprm rxtp; /* parameters in RX direction */
- unsigned char aal;
+ struct atm_trafprm rxtp __ATM_API_ALIGN;
+ /* parameters in RX direction */
+ unsigned char aal __ATM_API_ALIGN;
};
/* PVC addressing */
@@ -177,7 +177,7 @@ struct sockaddr_atmpvc {
short itf; /* ATM interface */
short vpi; /* VPI (only 8 bits at UNI) */
int vci; /* VCI (only 16 bits at UNI) */
- } sap_addr; /* PVC address */
+ } sap_addr __ATM_API_ALIGN; /* PVC address */
};
/* SVC addressing */
@@ -209,7 +209,7 @@ struct sockaddr_atmsvc {
/* unused addresses must be bzero'ed */
char lij_type; /* role in LIJ call; one of ATM_LIJ* */
uint32_t lij_id; /* LIJ call identifier */
- } sas_addr; /* SVC address */
+ } sas_addr __ATM_API_ALIGN; /* SVC address */
};
@@ -236,10 +236,6 @@ struct atmif_sioc {
};
-#define ATM_CREATE_LEAF _IO('a',ATMIOC_SPECIAL+2)
- /* create a point-to-multipoint leaf socket */
-
-
#ifdef __KERNEL__
#include <linux/net.h> /* struct net_proto */
diff --git a/include/linux/atm_eni.h b/include/linux/atm_eni.h
index 7ed89d105..34f317972 100644
--- a/include/linux/atm_eni.h
+++ b/include/linux/atm_eni.h
@@ -1,7 +1,7 @@
/* atm_eni.h - Driver-specific declarations of the ENI driver (for use by
driver-specific utilities) */
-/* Written 1995-1997 by Werner Almesberger, EPFL LRC */
+/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef LINUX_ATM_ENI_H
@@ -9,7 +9,15 @@
#include <linux/atmioc.h>
+
+struct eni_multipliers {
+ int tx,rx; /* values are in percent and must be > 100 */
+};
+
+
#define ENI_MEMDUMP _IOW('a',ATMIOC_SARPRV,struct atmif_sioc)
/* printk memory map */
+#define ENI_SETMULT _IOW('a',ATMIOC_SARPRV+7,struct atmif_sioc)
+ /* set buffer multipliers */
#endif
diff --git a/include/linux/atm_idt77105.h b/include/linux/atm_idt77105.h
new file mode 100644
index 000000000..70e9e6e59
--- /dev/null
+++ b/include/linux/atm_idt77105.h
@@ -0,0 +1,40 @@
+/* atm_idt77105.h - Driver-specific declarations of the IDT77105 driver (for
+ * use by driver-specific utilities) */
+
+/* Written 1999 by Greg Banks <gnb@linuxfan.com>. Copied from atm_suni.h. */
+
+
+#ifndef LINUX_ATM_IDT77105_H
+#define LINUX_ATM_IDT77105_H
+
+#include <asm/types.h>
+#include <linux/atmioc.h>
+
+/*
+ * Structure for IDT77105_GETSTAT and IDT77105_GETSTATZ ioctls.
+ * Pointed to by `arg' in atmif_sioc.
+ */
+struct idt77105_stats {
+ __u32 symbol_errors; /* wire symbol errors */
+ __u32 tx_cells; /* cells transmitted */
+ __u32 rx_cells; /* cells received */
+ __u32 rx_hec_errors; /* Header Error Check errors on receive */
+};
+
+#define IDT77105_GETLOOP _IOW('a',ATMIOC_PHYPRV,struct atmif_sioc) /* get loopback mode */
+#define IDT77105_SETLOOP _IOW('a',ATMIOC_PHYPRV+1,struct atmif_sioc) /* set loopback mode */
+#define IDT77105_GETSTAT _IOW('a',ATMIOC_PHYPRV+2,struct atmif_sioc) /* get stats */
+#define IDT77105_GETSTATZ _IOW('a',ATMIOC_PHYPRV+3,struct atmif_sioc) /* get stats and zero */
+
+
+/*
+ * TODO: what we need is a global loopback mode get/set ioctl for
+ * all devices, not these device-specific hacks -- Greg Banks
+ */
+#define IDT77105_LM_NONE 0 /* no loopback */
+#define IDT77105_LM_DIAG 1 /* diagnostic (i.e. loop TX to RX)
+ * (a.k.a. local loopback) */
+#define IDT77105_LM_LOOP 2 /* line (i.e. loop RX to TX)
+ * (a.k.a. remote loopback) */
+
+#endif
diff --git a/include/linux/atm_nicstar.h b/include/linux/atm_nicstar.h
index 3b564d227..577b79f33 100644
--- a/include/linux/atm_nicstar.h
+++ b/include/linux/atm_nicstar.h
@@ -18,6 +18,7 @@
* sys/types.h for struct timeval
*/
+#include <linux/atmapi.h>
#include <linux/atmioc.h>
#define NS_GETPSTAT _IOWR('a',ATMIOC_SARPRV+1,struct atmif_sioc)
@@ -32,7 +33,7 @@ typedef struct buf_nr
unsigned min;
unsigned init;
unsigned max;
-} buf_nr;
+}buf_nr;
typedef struct pool_levels
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index 7e8eb77d6..a79f45bc3 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -1,12 +1,14 @@
/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by
driver-specific utilities) */
-/* Written 1997-1999 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef LINUX_ATM_TCP_H
#define LINUX_ATM_TCP_H
+#include <linux/atmapi.h>
+
#ifdef __KERNEL__
#include <linux/types.h>
#endif
@@ -33,12 +35,12 @@ struct atmtcp_hdr {
struct atmtcp_control {
struct atmtcp_hdr hdr; /* must be first */
- int type; /* message type; both directions */
- unsigned long vcc; /* both directions */
+ int type; /* message type; both directions */
+ atm_kptr_t vcc; /* both directions */
struct sockaddr_atmpvc addr; /* suggested value from kernel */
struct atm_qos qos; /* both directions */
int result; /* to kernel only */
-};
+} __ATM_API_ALIGN;
/*
* Field usage:
diff --git a/include/linux/atm_zatm.h b/include/linux/atm_zatm.h
index d8d85cb76..5ca80691c 100644
--- a/include/linux/atm_zatm.h
+++ b/include/linux/atm_zatm.h
@@ -1,7 +1,7 @@
/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by
driver-specific utilities) */
-/* Written 1995-1997 by Werner Almesberger, EPFL LRC */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
#ifndef LINUX_ATM_ZATM_H
@@ -12,6 +12,7 @@
* sys/types.h for struct timeval
*/
+#include <linux/atmapi.h>
#include <linux/atmioc.h>
#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
diff --git a/include/linux/atmapi.h b/include/linux/atmapi.h
new file mode 100644
index 000000000..bdf01ef9c
--- /dev/null
+++ b/include/linux/atmapi.h
@@ -0,0 +1,29 @@
+/* atmapi.h - ATM API user space/kernel compatibility */
+
+/* Written 1999,2000 by Werner Almesberger, EPFL ICA */
+
+
+#ifndef _LINUX_ATMAPI_H
+#define _LINUX_ATMAPI_H
+
+#ifdef __sparc__
+/* such alignment is not required on 32 bit sparcs, but we can't
+ figure that we are on a sparc64 while compiling user-space programs. */
+#define __ATM_API_ALIGN __attribute__((aligned(8)))
+#else
+#define __ATM_API_ALIGN
+#endif
+
+
+/*
+ * Opaque type for kernel pointers. Note that _ is never accessed. We need
+ * the struct in order hide the array, so that we can make simple assignments
+ * instead of being forced to use memcpy. It also improves error reporting for
+ * code that still assumes that we're passing unsigned longs.
+ *
+ * Convention: NULL pointers are passed as a field of all zeroes.
+ */
+
+typedef struct { unsigned char _[8]; } atm_kptr_t;
+
+#endif
diff --git a/include/linux/atmarp.h b/include/linux/atmarp.h
index 057300fd5..24f82338f 100644
--- a/include/linux/atmarp.h
+++ b/include/linux/atmarp.h
@@ -1,6 +1,6 @@
/* atmarp.h - ATM ARP protocol and kernel-demon interface definitions */
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
#ifndef _LINUX_ATMARP_H
@@ -9,6 +9,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
#endif
+#include <linux/atmapi.h>
#include <linux/atmioc.h>
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0287d1661..95e97de35 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -8,6 +8,8 @@
#include <linux/config.h>
+#include <linux/atmapi.h>
+#include <linux/atm.h>
#include <linux/atmioc.h>
@@ -26,9 +28,9 @@
struct atm_aal_stats {
- long tx,tx_err; /* TX okay and errors */
- long rx,rx_err; /* RX okay and errors */
- long rx_drop; /* RX out of memory */
+ int tx,tx_err; /* TX okay and errors */
+ int rx,rx_err; /* RX okay and errors */
+ int rx_drop; /* RX out of memory */
};
@@ -36,7 +38,7 @@ struct atm_dev_stats {
struct atm_aal_stats aal0;
struct atm_aal_stats aal34;
struct atm_aal_stats aal5;
-};
+} __ATM_API_ALIGN;
#define ATM_GETLINKRATE _IOW('a',ATMIOC_ITF+1,struct atmif_sioc)
@@ -123,6 +125,12 @@ struct atm_cirange {
#define ATM_VS2TXT_MAP \
"IDLE", "CONNECTED", "CLOSING", "LISTEN", "INUSE", "BOUND"
+#define ATM_VF2TXT_MAP \
+ "ADDR", "READY", "PARTIAL", "REGIS", \
+ "RELEASED", "HASQOS", "LISTEN", "META", \
+ "256", "512", "1024", "2048", \
+ "SESSION", "HASSAP", "BOUND", "CLOSE"
+
#ifdef __KERNEL__
@@ -130,8 +138,8 @@ struct atm_cirange {
#include <linux/time.h> /* struct timeval */
#include <linux/net.h>
#include <linux/skbuff.h> /* struct sk_buff */
-#include <linux/atm.h>
#include <linux/uio.h>
+#include <net/sock.h>
#include <asm/atomic.h>
#ifdef CONFIG_PROC_FS
@@ -156,10 +164,10 @@ struct atm_cirange {
#define ATM_VF_META 128 /* SVC socket isn't used for normal data
traffic and doesn't depend on signaling
to be available */
-#define ATM_VF_AQREL 256 /* Arequipa VC is being released */
-#define ATM_VF_AQDANG 512 /* VC is in Arequipa's dangling list */
-#define ATM_VF_SCRX ATM_SC_RX /* 1024; allow single-copy in the RX dir. */
-#define ATM_VF_SCTX ATM_SC_TX /* 2048; allow single-copy in the TX dir. */
+ /* 256; unused */
+ /* 512; unused */
+ /* 1024; unused */
+ /* 2048; unused */
#define ATM_VF_SESSION 4096 /* VCC is p2mp session control descriptor */
#define ATM_VF_HASSAP 8192 /* SAP has been set */
#define ATM_VF_CLOSE 32768 /* asynchronous close - treat like VF_RELEASED*/
@@ -191,7 +199,6 @@ struct atm_vcc {
struct atm_dev *dev; /* device back pointer */
struct atm_qos qos; /* QOS */
struct atm_sap sap; /* SAP */
- unsigned long tx_quota,rx_quota; /* buffer quotas */
atomic_t tx_inuse,rx_inuse; /* buffer space in use */
void (*push)(struct atm_vcc *vcc,struct sk_buff *skb);
void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */
@@ -207,6 +214,7 @@ struct atm_vcc {
struct atm_aal_stats *stats; /* pointer to AAL stats group */
wait_queue_head_t sleep; /* if socket is busy */
wait_queue_head_t wsleep; /* if waiting for write buffer space */
+ struct sock *sk; /* socket backpointer */
struct atm_vcc *prev,*next;
/* SVC part --- may move later ------------------------------------- */
short itf; /* interface number */
@@ -220,7 +228,9 @@ struct atm_vcc {
/* Multipoint part ------------------------------------------------- */
struct atm_vcc *session; /* session VCC descriptor */
/* Other stuff ----------------------------------------------------- */
- void *user_back; /* user backlink - not touched */
+ void *user_back; /* user backlink - not touched by */
+ /* native ATM stack. Currently used */
+ /* by CLIP and sch_atm. */
};
@@ -338,6 +348,12 @@ static __inline__ void atm_return(struct atm_vcc *vcc,int truesize)
}
+static __inline__ int atm_may_send(struct atm_vcc *vcc,unsigned int size)
+{
+ return size+atomic_read(&vcc->tx_inuse)+ATM_PDU_OVHD < vcc->sk->sndbuf;
+}
+
+
int atm_charge(struct atm_vcc *vcc,int truesize);
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
int gfp_flags);
diff --git a/include/linux/atmioc.h b/include/linux/atmioc.h
index 17a5f4ca3..920ac56c5 100644
--- a/include/linux/atmioc.h
+++ b/include/linux/atmioc.h
@@ -1,10 +1,10 @@
/* atmioc.h - ranges for ATM-related ioctl numbers */
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/*
- * See http://lrcwww.epfl.ch/linux-atm/magic.html for the complete list of
+ * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
* "magic" ioctl numbers.
*/
diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h
index 6b1be5adf..dd6b349a0 100644
--- a/include/linux/atmlec.h
+++ b/include/linux/atmlec.h
@@ -9,6 +9,7 @@
#ifndef _ATMLEC_H_
#define _ATMLEC_H_
+#include <linux/atmapi.h>
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/if_ether.h>
@@ -43,15 +44,16 @@ typedef enum {
struct atmlec_config_msg {
unsigned int maximum_unknown_frame_count;
- unsigned long max_unknown_frame_time;
+ unsigned int max_unknown_frame_time;
unsigned short max_retry_count;
- unsigned long aging_time;
- unsigned long forward_delay_time;
- unsigned long arp_response_time;
- unsigned long flush_timeout;
- unsigned long path_switching_delay;
+ unsigned int aging_time;
+ unsigned int forward_delay_time;
+ unsigned int arp_response_time;
+ unsigned int flush_timeout;
+ unsigned int path_switching_delay;
unsigned int lane_version; /* LANE2: 1 for LANEv1, 2 for LANEv2 */
int mtu;
+ int is_proxy;
};
struct atmlec_msg {
@@ -61,7 +63,7 @@ struct atmlec_msg {
struct {
unsigned char mac_addr[ETH_ALEN];
unsigned char atm_addr[ATM_ESA_LEN];
- unsigned long flag;/* Topology_change flag,
+ unsigned int flag;/* Topology_change flag,
remoteflag, permanent flag,
lecid, transaction id */
unsigned int targetless_le_arp; /* LANE2 */
@@ -73,9 +75,10 @@ struct atmlec_msg {
uint32_t tran_id; /* transaction id */
unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */
unsigned char atm_addr[ATM_ESA_LEN]; /* reqestor ATM addr */
- } proxy; /* For mapping LE_ARP requests to responses. Filled by */
+ } proxy;
+ /* For mapping LE_ARP requests to responses. Filled by */
} content; /* zeppelin, returned by kernel. Used only when proxying */
-};
+} __ATM_API_ALIGN;
struct atmlec_ioc {
int dev_num;
diff --git a/include/linux/atmmpc.h b/include/linux/atmmpc.h
index e9c2ace8c..5fbfa6813 100644
--- a/include/linux/atmmpc.h
+++ b/include/linux/atmmpc.h
@@ -1,6 +1,7 @@
#ifndef _ATMMPC_H_
#define _ATMMPC_H_
+#include <linux/atmapi.h>
#include <linux/atmioc.h>
#include <linux/atm.h>
@@ -37,16 +38,16 @@ typedef struct eg_ctrl_info {
uint16_t holding_time;
} eg_ctrl_info;
-struct mpc_parameters{
+struct mpc_parameters {
uint16_t mpc_p1; /* Shortcut-Setup Frame Count */
uint16_t mpc_p2; /* Shortcut-Setup Frame Time */
uint8_t mpc_p3[8]; /* Flow-detection Protocols */
uint16_t mpc_p4; /* MPC Initial Retry Time */
uint16_t mpc_p5; /* MPC Retry Time Maximum */
uint16_t mpc_p6; /* Hold Down Time */
-};
+} ;
-struct k_message{
+struct k_message {
uint16_t type;
uint32_t ip_mask;
uint8_t MPS_ctrl[ATM_ESA_LEN];
@@ -56,9 +57,10 @@ struct k_message{
struct mpc_parameters params;
} content;
struct atm_qos qos;
-};
+} __ATM_API_ALIGN;
-struct llc_snap_hdr { /* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
+struct llc_snap_hdr {
+ /* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
uint8_t dsap; /* Destination Service Access Point (0xAA) */
uint8_t ssap; /* Source Service Access Point (0xAA) */
uint8_t ui; /* Unnumbered Information (0x03) */
@@ -121,4 +123,3 @@ struct llc_snap_hdr { /* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
#define RELOAD 301 /* kill -HUP the daemon for reload */
#endif /* _ATMMPC_H_ */
-
diff --git a/include/linux/atmsap.h b/include/linux/atmsap.h
index f49418862..799b10451 100644
--- a/include/linux/atmsap.h
+++ b/include/linux/atmsap.h
@@ -1,11 +1,13 @@
/* atmsap.h - ATM Service Access Point addressing definitions */
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
#ifndef _LINUX_ATMSAP_H
#define _LINUX_ATMSAP_H
+#include <linux/atmapi.h>
+
/*
* BEGIN_xx and END_xx markers are used for automatic generation of
* documentation. Do not change them.
@@ -116,24 +118,22 @@ struct atm_blli {
unsigned char def_size; /* default packet size (log2), 4-12 (0 to */
/* omit) */
unsigned char window;/* packet window size, 1-127 (0 to omit) */
- } itu; /* ITU-T ecoding */
+ } itu; /* ITU-T encoding */
unsigned char user; /* user specified l3 information */
- struct { /* if l3_proto = ATM_L3_H310 */
- unsigned char term_type; /* terminal type */
+ struct { /* if l3_proto = ATM_L3_H310 */
+ unsigned char term_type; /* terminal type */
unsigned char fw_mpx_cap; /* forward multiplexing capability */
/* only if term_type != ATM_TT_NONE */
unsigned char bw_mpx_cap; /* backward multiplexing capability */
/* only if term_type != ATM_TT_NONE */
} h310;
- struct { /* if l3_proto = ATM_L3_TR9577 */
- unsigned char ipi; /* initial protocol id */
+ struct { /* if l3_proto = ATM_L3_TR9577 */
+ unsigned char ipi; /* initial protocol id */
unsigned char snap[5];/* IEEE 802.1 SNAP identifier */
/* (only if ipi == NLPID_IEEE802_1_SNAP) */
} tr9577;
} l3;
- struct atm_blli *next; /* next BLLI or NULL (undefined when used in */
- /* atmsvc_msg) ONLY USED IN OLD-STYLE API */
-};
+} __ATM_API_ALIGN;
struct atm_bhli {
@@ -149,7 +149,8 @@ struct atm_bhli {
struct atm_sap {
struct atm_bhli bhli; /* local SAP, high-layer information */
- struct atm_blli blli[ATM_MAX_BLLI]; /* local SAP, low-layer info */
+ struct atm_blli blli[ATM_MAX_BLLI] __ATM_API_ALIGN;
+ /* local SAP, low-layer info */
};
diff --git a/include/linux/atmsvc.h b/include/linux/atmsvc.h
index 8b93f5644..4df1c5481 100644
--- a/include/linux/atmsvc.h
+++ b/include/linux/atmsvc.h
@@ -1,11 +1,12 @@
/* atmsvc.h - ATM signaling kernel-demon interface definitions */
-/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef _LINUX_ATMSVC_H
#define _LINUX_ATMSVC_H
+#include <linux/atmapi.h>
#include <linux/atm.h>
#include <linux/atmioc.h>
@@ -19,8 +20,8 @@ enum atmsvc_msg_type { as_catch_null,as_bind,as_connect,as_accept,as_reject,
struct atmsvc_msg {
enum atmsvc_msg_type type;
- unsigned long vcc;
- unsigned long listen_vcc; /* indicate */
+ atm_kptr_t vcc;
+ atm_kptr_t listen_vcc; /* indicate */
int reply; /* for okay and close: */
/* < 0: error before active */
/* (sigd has discarded ctx) */
@@ -31,12 +32,12 @@ struct atmsvc_msg {
struct sockaddr_atmsvc local; /* local SVC address */
struct atm_qos qos; /* QOS parameters */
struct atm_sap sap; /* SAP */
- unsigned long session; /* for p2pm */
+ unsigned int session; /* for p2pm */
struct sockaddr_atmsvc svc; /* SVC address */
-};
+} __ATM_API_ALIGN;
/*
- * Message contents: see ftp://lrcftp.epfl.ch/pub/linux/atm/docs/isp-*.tar.gz
+ * Message contents: see ftp://icaftp.epfl.ch/pub/linux/atm/docs/isp-*.tar.gz
*/
/*
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index 99f9784d3..962593cc8 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -14,20 +14,21 @@
#ifndef _LINUX_AUTO_FS_H
#define _LINUX_AUTO_FS_H
+#ifdef __KERNEL__
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/limits.h>
-#include <linux/ioctl.h>
#include <asm/types.h>
+#endif /* __KERNEL__ */
+
+#include <linux/ioctl.h>
-/* This header file describes a range of autofs interface versions;
- the new implementation ("autofs4") supports them all, but the old
- implementation only supports v3. */
-#define AUTOFS_MIN_PROTO_VERSION 3 /* Min version we support */
-#define AUTOFS_MAX_PROTO_VERSION 4 /* Max (current) version */
+/* This file describes autofs v3 */
+#define AUTOFS_PROTO_VERSION 3
-/* Backwards compat for autofs v3; it just implements a version */
-#define AUTOFS_PROTO_VERSION 3 /* v3 version */
+/* Range of protocol versions defined */
+#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION
+#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
/*
* Architectures where both 32- and 64-bit binaries can be executed
@@ -50,15 +51,13 @@ typedef unsigned int autofs_wqt_t;
typedef unsigned long autofs_wqt_t;
#endif
-enum autofs_packet_type {
- autofs_ptype_missing, /* Missing entry (mount request) */
- autofs_ptype_expire, /* Expire entry (umount request) */
- autofs_ptype_expire_multi, /* Expire entry (umount request) */
-};
+/* Packet types */
+#define autofs_ptype_missing 0 /* Missing entry (mount request) */
+#define autofs_ptype_expire 1 /* Expire entry (umount request) */
struct autofs_packet_hdr {
- int proto_version; /* Protocol version */
- enum autofs_packet_type type; /* Type of packet */
+ int proto_version; /* Protocol version */
+ int type; /* Type of packet */
};
struct autofs_packet_missing {
@@ -75,28 +74,12 @@ struct autofs_packet_expire {
char name[NAME_MAX+1];
};
-/* v4 multi expire (via pipe) */
-struct autofs_packet_expire_multi {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- int len;
- char name[NAME_MAX+1];
-};
-
-union autofs_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_packet_missing missing;
- struct autofs_packet_expire expire;
- struct autofs_packet_expire_multi expire_multi;
-};
-
#define AUTOFS_IOC_READY _IO(0x93,0x60)
#define AUTOFS_IOC_FAIL _IO(0x93,0x61)
#define AUTOFS_IOC_CATATONIC _IO(0x93,0x62)
#define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int)
#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long)
#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire)
-#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
#ifdef __KERNEL__
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
new file mode 100644
index 000000000..52ff6d8a3
--- /dev/null
+++ b/include/linux/auto_fs4.h
@@ -0,0 +1,47 @@
+/* -*- c-mode -*-
+ * linux/include/linux/auto_fs4.h
+ *
+ * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_AUTO_FS4_H
+#define _LINUX_AUTO_FS4_H
+
+/* Include common v3 definitions */
+#include <linux/auto_fs.h>
+
+/* autofs v4 definitions */
+#undef AUTOFS_PROTO_VERSION
+#undef AUTOFS_MIN_PROTO_VERSION
+#undef AUTOFS_MAX_PROTO_VERSION
+
+#define AUTOFS_PROTO_VERSION 4
+#define AUTOFS_MIN_PROTO_VERSION 3
+#define AUTOFS_MAX_PROTO_VERSION 4
+
+/* New message type */
+#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */
+
+/* v4 multi expire (via pipe) */
+struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[NAME_MAX+1];
+};
+
+union autofs_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_packet_missing missing;
+ struct autofs_packet_expire expire;
+ struct autofs_packet_expire_multi expire_multi;
+};
+
+#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
+
+
+#endif /* _LINUX_AUTO_FS4_H */
diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h
index 880ac7f0b..7e7de106a 100644
--- a/include/linux/bfs_fs.h
+++ b/include/linux/bfs_fs.h
@@ -83,6 +83,7 @@ extern int init_bfs_fs(void);
/* file.c */
extern struct inode_operations bfs_file_inops;
+extern struct address_space_operations bfs_aops;
/* dir.c */
extern struct inode_operations bfs_dir_inops;
diff --git a/include/linux/bfs_fs_sb.h b/include/linux/bfs_fs_sb.h
index f0f54d606..5f927f35b 100644
--- a/include/linux/bfs_fs_sb.h
+++ b/include/linux/bfs_fs_sb.h
@@ -7,13 +7,6 @@
#define _LINUX_BFS_FS_SB
/*
- * BFS block map entry, an array of these is kept in bfs_sb_info.
- */
- struct bfs_bmap {
- unsigned long start, end;
- };
-
-/*
* BFS file system in-core superblock info
*/
struct bfs_sb_info {
@@ -24,7 +17,6 @@ struct bfs_sb_info {
unsigned long si_lf_sblk;
unsigned long si_lf_eblk;
unsigned long si_lasti;
- struct bfs_bmap * si_bmap;
char * si_imap;
struct buffer_head * si_sbh; /* buffer header w/superblock */
struct bfs_super_block * si_bfs_sb; /* superblock in si_sbh->b_data */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c86eecc9b..6eed225b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,14 +37,17 @@ struct request {
};
typedef struct request_queue request_queue_t;
-typedef int (merge_request_fn) (request_queue_t *,
- struct request * req,
- struct buffer_head *);
-typedef int (merge_requests_fn) (request_queue_t *,
- struct request * req,
- struct request * req2);
-typedef void (request_fn_proc) (request_queue_t *);
+typedef int (merge_request_fn) (request_queue_t *q,
+ struct request *req,
+ struct buffer_head *bh);
+typedef int (merge_requests_fn) (request_queue_t *q,
+ struct request *req,
+ struct request *req2);
+typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef void (make_request_fn) (int rw, struct buffer_head *bh);
+typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
+typedef void (unplug_device_fn) (void *q);
struct request_queue
{
@@ -52,6 +55,8 @@ struct request_queue
request_fn_proc * request_fn;
merge_request_fn * merge_fn;
merge_requests_fn * merge_requests_fn;
+ make_request_fn * make_request_fn;
+ plug_device_fn * plug_device_fn;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
@@ -72,12 +77,6 @@ struct request_queue
* not.
*/
char head_active;
-
- /*
- * Boolean that indicates whether we should use plugging on
- * this queue or not.
- */
- char use_plug;
};
struct blk_dev_struct {
@@ -107,8 +106,10 @@ extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern wait_queue_head_t wait_for_request;
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
-extern void unplug_device(void * data);
-extern void make_request(int major,int rw, struct buffer_head * bh);
+extern void generic_unplug_device(void * data);
+extern void generic_plug_device (request_queue_t *q, kdev_t dev);
+extern void generic_make_request(int rw, struct buffer_head * bh);
+extern request_queue_t * blk_get_queue(kdev_t dev);
/*
* Access functions for manipulating queue properties
@@ -116,12 +117,8 @@ extern void make_request(int major,int rw, struct buffer_head * bh);
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
-extern void blk_queue_pluggable(request_queue_t *, int);
-
-/* md needs this function to remap requests */
-extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
-extern int md_make_request (int minor, int rw, struct buffer_head * bh);
-extern int md_error (kdev_t mddev, kdev_t rdev);
+extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern int * blk_size[MAX_BLKDEV];
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7b51fcb54..75d9fb540 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -55,6 +55,3 @@ extern void * __init __alloc_bootmem_node (int nid, unsigned long size, unsigned
__alloc_bootmem_node((nid), (x), PAGE_SIZE, 0)
#endif /* _LINUX_BOOTMEM_H */
-
-
-
diff --git a/include/linux/coda.h b/include/linux/coda.h
index 8cb3ff46b..6e1a939be 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -305,33 +305,35 @@ struct coda_statfs {
#define CODA_INACTIVE 21
#define CODA_VGET 22
#define CODA_SIGNAL 23
-#define CODA_REPLACE 24
-#define CODA_FLUSH 25
-#define CODA_PURGEUSER 26
-#define CODA_ZAPFILE 27
-#define CODA_ZAPDIR 28
-#define CODA_PURGEFID 30
+#define CODA_REPLACE 24 /* DOWNCALL */
+#define CODA_FLUSH 25 /* DOWNCALL */
+#define CODA_PURGEUSER 26 /* DOWNCALL */
+#define CODA_ZAPFILE 27 /* DOWNCALL */
+#define CODA_ZAPDIR 28 /* DOWNCALL */
+#define CODA_PURGEFID 30 /* DOWNCALL */
#define CODA_OPEN_BY_PATH 31
#define CODA_RESOLVE 32
#define CODA_REINTEGRATE 33
#define CODA_STATFS 34
-#define CODA_NCALLS 35
+#define CODA_MAKE_CINODE 35 /* DOWNCALL */
+#define CODA_NCALLS 36
-#define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID)
+#define DOWNCALL(opcode) \
+ ((opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID) || \
+ opcode == CODA_MAKE_CINODE)
#define VC_MAXDATASIZE 8192
#define VC_MAXMSGSIZE sizeof(union inputArgs)+sizeof(union outputArgs) +\
VC_MAXDATASIZE
#define CIOC_KERNEL_VERSION _IOWR('c', 10, sizeof (int))
+
#if 0
- /* don't care about kernel version number */
-#define CODA_KERNEL_VERSION 0
- /* The old venus 4.6 compatible interface */
-#define CODA_KERNEL_VERSION 1
+#define CODA_KERNEL_VERSION 0 /* don't care about kernel version number */
+#define CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */
+#define CODA_KERNEL_VERSION 2 /* venus_lookup gets an extra parameter */
#endif
- /* venus_lookup gets an extra parameter to aid windows.*/
-#define CODA_KERNEL_VERSION 2
+#define CODA_KERNEL_VERSION 3 /* added CODA_MAKE_CINODE downcall */
/*
* Venus <-> Coda RPC arguments
@@ -650,6 +652,13 @@ struct coda_purgefid_out {
ViceFid CodaFid;
};
+struct coda_make_cinode_out {
+ struct coda_out_hdr oh;
+ ViceFid CodaFid;
+ struct coda_vattr attr;
+ int fd;
+};
+
/* coda_rdwr: */
struct coda_rdwr_in {
struct coda_in_hdr ih;
@@ -751,6 +760,7 @@ union outputArgs {
struct coda_purgefid_out coda_purgefid;
struct coda_rdwr_out coda_rdwr;
struct coda_replace_out coda_replace;
+ struct coda_make_cinode_out coda_make_cinode;
struct coda_open_by_path_out coda_open_by_path;
struct coda_statfs_out coda_statfs;
};
diff --git a/include/linux/coda_fs_i.h b/include/linux/coda_fs_i.h
index baa136b97..302a715b9 100644
--- a/include/linux/coda_fs_i.h
+++ b/include/linux/coda_fs_i.h
@@ -40,6 +40,8 @@ struct coda_inode_info {
#define C_INITED 0x20
#define C_FLUSH 0x2 /* used after a flush */
+struct inode *coda_iget(struct super_block * sb, ViceFid * fid,
+ struct coda_vattr * attr);
int coda_cnode_make(struct inode **, struct ViceFid *, struct super_block *);
int coda_cnode_makectl(struct inode **inode, struct super_block *sb);
struct inode *coda_fid_to_inode(ViceFid *fid, struct super_block *sb);
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index cd0e46cea..59cd5da3c 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -26,7 +26,9 @@
extern struct inode_operations coda_dir_inode_operations;
extern struct inode_operations coda_file_inode_operations;
extern struct inode_operations coda_ioctl_inode_operations;
-extern struct inode_operations coda_symlink_inode_operations;
+
+extern struct address_space_operations coda_file_aops;
+extern struct address_space_operations coda_symlink_aops;
extern struct file_operations coda_dir_operations;
extern struct file_operations coda_file_operations;
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index f23a8e9b9..2b6bbadd7 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -101,6 +101,7 @@ struct upc_req {
#define REQ_ASYNC 0x1
#define REQ_READ 0x2
#define REQ_WRITE 0x4
+#define REQ_ABORT 0x8
/*
diff --git a/include/linux/console.h b/include/linux/console.h
index efb0003ea..1f4188a78 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -14,6 +14,9 @@
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
struct vc_data;
struct console_font_op;
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index a36f9d6fd..f422f4e51 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -47,7 +47,7 @@ static const char cprt[] = "EFS: "EFS_VERSION" - (c) 1999 Al Smith <Al.Smith@aes
extern struct inode_operations efs_dir_inode_operations;
extern struct inode_operations efs_file_inode_operations;
-extern struct inode_operations efs_symlink_inode_operations;
+extern struct address_space_operations efs_symlink_aops;
extern int init_module(void);
extern void cleanup_module(void);
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index d2300c599..5bb7d8154 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -560,14 +560,10 @@ extern unsigned long ext2_count_free_inodes (struct super_block *);
extern void ext2_check_inodes_bitmap (struct super_block *);
/* inode.c */
-extern long ext2_bmap (struct inode *, long);
-extern int ext2_get_block (struct inode *, long, struct buffer_head *, int);
extern struct buffer_head * ext2_getblk (struct inode *, long, int, int *);
-extern int ext2_getblk_block (struct inode *, long, int, int *, int *);
extern struct buffer_head * ext2_bread (struct inode *, int, int, int *);
-extern int ext2_getcluster (struct inode * inode, long block);
extern void ext2_read_inode (struct inode *);
extern void ext2_write_inode (struct inode *);
extern void ext2_put_inode (struct inode *);
@@ -620,9 +616,10 @@ extern struct inode_operations ext2_dir_inode_operations;
extern struct inode_operations ext2_file_inode_operations;
/* symlink.c */
-extern struct inode_operations ext2_symlink_inode_operations;
extern struct inode_operations ext2_fast_symlink_inode_operations;
+extern struct address_space_operations ext2_aops;
+
#endif /* __KERNEL__ */
#endif /* _LINUX_EXT2_FS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35530b777..c10d1793b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -251,6 +251,7 @@ extern void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long
#define touch_buffer(bh) set_bit(PG_referenced, &bh->b_page->flags)
+
#include <linux/pipe_fs_i.h>
#include <linux/minix_fs_i.h>
#include <linux/ext2_fs_i.h>
@@ -332,10 +333,23 @@ struct iattr {
* oh the beauties of C type declarations.
*/
struct page;
+struct address_space;
+
+struct address_space_operations {
+ int (*writepage) (struct dentry *, struct page *);
+ int (*readpage)(struct dentry *, struct page *);
+ int (*prepare_write)(struct page *, unsigned, unsigned);
+ int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+ /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
+ int (*bmap)(struct address_space *, long);
+};
struct address_space {
- struct list_head pages;
- unsigned long nrpages;
+ struct list_head pages; /* list of pages */
+ unsigned long nrpages; /* number of pages */
+ struct address_space_operations *a_ops; /* methods */
+ void *host; /* owner: inode, block_device */
+ void *private; /* private data */
};
struct block_device {
@@ -374,6 +388,7 @@ struct inode {
wait_queue_head_t i_wait;
struct file_lock *i_flock;
struct vm_area_struct *i_mmap;
+ struct address_space *i_mapping;
struct address_space i_data;
spinlock_t i_shared_lock;
struct dquot *i_dquot[MAXQUOTAS];
@@ -655,24 +670,6 @@ struct inode_operations {
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char *,int);
struct dentry * (*follow_link) (struct dentry *, struct dentry *, unsigned int);
- /*
- * the order of these functions within the VFS template has been
- * changed because SMP locking has changed: from now on all get_block,
- * readpage and writepage functions are supposed to do
- * whatever locking they need to get proper SMP operation - for
- * now in most cases this means a lock/unlock_kernel at entry/exit.
- * [The new order is also slightly more logical :)]
- */
- /*
- * Generic block allocator exported by the lowlevel fs. All metadata
- * details are handled by the lowlevel fs, all 'logical data content'
- * details are handled by the highlevel block layer.
- */
- int (*get_block) (struct inode *, long, struct buffer_head *, int);
-
- int (*readpage) (struct dentry *, struct page *);
- int (*writepage) (struct dentry *, struct page *);
-
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
int (*revalidate) (struct dentry *);
@@ -779,7 +776,6 @@ extern int blkdev_put(struct block_device *, int);
extern int register_chrdev(unsigned int, const char *, struct file_operations *);
extern int unregister_chrdev(unsigned int, const char *);
extern int chrdev_open(struct inode *, struct file *);
-extern struct file_operations def_chr_fops;
extern const char * bdevname(kdev_t);
extern const char * cdevname(kdev_t);
extern const char * kdevname(kdev_t);
@@ -852,20 +848,17 @@ extern inline void mark_buffer_protected(struct buffer_head * bh)
}
extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh, int flag));
+extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh, int flag));
#define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
-extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
-{
- if (!atomic_set_buffer_dirty(bh))
- __mark_buffer_dirty(bh, flag);
-}
-
extern void balance_dirty(kdev_t);
extern int check_disk_change(kdev_t);
extern int invalidate_inodes(struct super_block *);
extern void invalidate_inode_pages(struct inode *);
-extern void invalidate_buffers(kdev_t);
+#define invalidate_buffers(dev) __invalidate_buffers((dev), 0)
+#define destroy_buffers(dev) __invalidate_buffers((dev), 1)
+extern void __invalidate_buffers(kdev_t dev, int);
extern int floppy_is_wp(int);
extern void sync_inodes(kdev_t);
extern void write_inode_now(struct inode *);
@@ -978,30 +971,29 @@ extern void wakeup_bdflush(int wait);
extern int brw_page(int, struct page *, kdev_t, int [], int);
typedef int (*writepage_t)(struct file *, struct page *, unsigned long, unsigned long, const char *);
+typedef int (get_block_t)(struct inode*,long,struct buffer_head*,int);
/* Generic buffer handling for block filesystems.. */
-extern int block_read_full_page(struct dentry *, struct page *);
-extern int block_write_full_page (struct dentry *, struct page *);
-extern int block_write_partial_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
-extern int block_write_cont_page (struct file *, struct page *, unsigned long, unsigned long, const char *);
-extern int block_write_zero_range(struct inode *, struct page *, unsigned, unsigned, unsigned, const char *);
-extern inline int block_write_range(struct inode *inode, struct page *page,
- unsigned from, unsigned len,const char *buf)
-{
- return block_write_zero_range(inode, page, from, from, from+len, buf);
-}
extern int block_flushpage(struct page *, unsigned long);
extern int block_symlink(struct inode *, const char *, int);
+extern int block_write_full_page(struct page*, get_block_t*);
+extern int block_read_full_page(struct page*, get_block_t*);
+extern int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
+extern int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
+ unsigned long *);
+int generic_block_bmap(struct address_space *, long, get_block_t *);
+int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
-extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *, writepage_t);
+extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *);
extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
extern int vfs_readlink(struct dentry *, char *, int, const char *);
extern struct dentry *vfs_follow_link(struct dentry *, struct dentry *, unsigned, const char *);
extern int page_readlink(struct dentry *, char *, int);
extern struct dentry *page_follow_link(struct dentry *, struct dentry *, unsigned);
+struct inode_operations page_symlink_inode_operations;
extern struct super_block *get_super(kdev_t);
struct super_block *get_empty_super(void);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8b743b450..9e228d0d7 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -19,6 +19,7 @@
#define WIN98_EXTENDED_PARTITION 0x0f
#define LINUX_SWAP_PARTITION 0x82
+#define LINUX_RAID_PARTITION 0xfd /* autodetect RAID partition */
#ifdef CONFIG_SOLARIS_X86_PARTITION
#define SOLARIS_X86_PARTITION LINUX_SWAP_PARTITION
@@ -45,6 +46,7 @@ struct partition {
struct hd_struct {
long start_sect;
long nr_sects;
+ int type; /* currently RAID or normal */
};
struct gendisk {
diff --git a/include/linux/hfs_fs_i.h b/include/linux/hfs_fs_i.h
index 03585a086..4416d7837 100644
--- a/include/linux/hfs_fs_i.h
+++ b/include/linux/hfs_fs_i.h
@@ -19,6 +19,7 @@
struct hfs_inode_info {
int magic; /* A magic number */
+ unsigned long mmu_private;
struct hfs_cat_entry *entry;
/* For a regular or header file */
diff --git a/include/linux/hpfs_fs_i.h b/include/linux/hpfs_fs_i.h
index 8263bf2d7..56a758b16 100644
--- a/include/linux/hpfs_fs_i.h
+++ b/include/linux/hpfs_fs_i.h
@@ -2,6 +2,7 @@
#define _HPFS_FS_I
struct hpfs_inode_info {
+ unsigned long mmu_private;
ino_t i_parent_dir; /* (directories) gives fnode of parent dir */
unsigned i_dno; /* (directories) root dnode */
unsigned i_dpos; /* (directories) temp for readdir */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index d0d2ce2f9..55e103dd5 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -20,7 +20,7 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
/* ------------------------------------------------------------------------- */
-/* $Id: i2c-id.h,v 1.6 1999/12/21 23:45:58 frodo Exp $ */
+/* $Id: i2c-id.h,v 1.10 2000/02/04 02:47:41 mds Exp $ */
#ifndef I2C_ID_H
#define I2C_ID_H
@@ -44,35 +44,36 @@
* never be used in official drivers
*/
-#define I2C_DRIVERID_MSP3400 1
-#define I2C_DRIVERID_TUNER 2
-#define I2C_DRIVERID_VIDEOTEXT 3 /* please rename */
+#define I2C_DRIVERID_MSP3400 1
+#define I2C_DRIVERID_TUNER 2
+#define I2C_DRIVERID_VIDEOTEX 3 /* please rename */
#define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */
#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
#define I2C_DRIVERID_SAA5281 9 /* videotext decoder */
-#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */
-#define I2C_DRIVERID_SAA7120 11 /* video encoder */
-#define I2C_DRIVERID_SAA7121 12 /* video encoder */
-#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
-#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */
-#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */
-#define I2C_DRIVERID_PCF8582C 16 /* eeprom */
-#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */
+#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */
+#define I2C_DRIVERID_SAA7120 11 /* video encoder */
+#define I2C_DRIVERID_SAA7121 12 /* video encoder */
+#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
+#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */
+#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */
+#define I2C_DRIVERID_PCF8582C 16 /* eeprom */
+#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */
#define I2C_DRIVERID_TEA6300 18 /* audio mixer */
-#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */
+#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */
#define I2C_DRIVERID_TDA9850 20 /* audio mixer */
#define I2C_DRIVERID_TDA9855 21 /* audio mixer */
+#define I2C_DRIVERID_SAA7110 22 /* */
+#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
+#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
#define I2C_DRIVERID_EXP1 0xF1
#define I2C_DRIVERID_EXP2 0xF2
#define I2C_DRIVERID_EXP3 0xF3
-#define I2C_DRIVERID_MGATVO 0x0101 /* Matrox TVOut */
-
#define I2C_DRIVERID_I2CDEV 900
#define I2C_DRIVERID_I2CPROC 901
@@ -88,8 +89,8 @@
#define I2C_ALGO_PCF 0x020000 /* PCF 8584 style adapters */
#define I2C_ALGO_ATI 0x030000 /* ATI video card */
#define I2C_ALGO_SMBUS 0x040000
-#define I2C_ALGO_ISA 0x050000 /* lm_sensors ISA pseudo-adapter */
-#define I2C_ALGO_SAA7146 0x060000 /* SAA 7146 video decoder bus */
+#define I2C_ALGO_ISA 0x050000 /* lm_sensors ISA pseudo-adapter */
+#define I2C_ALGO_SAA714 0x060000 /* SAA 7146 video decoder bus */
#define I2C_ALGO_SAA7146A 0x060001 /* SAA 7146A - enhanced version */
@@ -113,9 +114,11 @@
#define I2C_HW_B_VELLE 0x04 /* Vellemann K8000 */
#define I2C_HW_B_BT848 0x05 /* BT848 video boards */
#define I2C_HW_B_WNV 0x06 /* Winnov Videums */
-#define I2C_HW_B_VIA 0x07 /* Via vt82c586b */
-#define I2C_HW_B_HYDRA 0x08 /* Apple Hydra Mac I/O */
+#define I2C_HW_B_VIA 0x07 /* Via vt82c586b */
+#define I2C_HW_B_HYDRA 0x08 /* Apple Hydra Mac I/O */
#define I2C_HW_B_G400 0x09 /* Matrox G400 */
+#define I2C_HW_B_I810 0x0a /* Intel I810 */
+#define I2C_HW_B_RIVA 0x10 /* Riva based graphics cards */
/* --- PCF 8584 based algorithms */
#define I2C_HW_P_LP 0x00 /* Parallel port interface */
@@ -127,11 +130,11 @@
#define I2C_HW_SMBUS_ALI15X3 0x01
#define I2C_HW_SMBUS_VIA2 0x02
#define I2C_HW_SMBUS_VOODOO3 0x03
-#define I2C_HW_SMBUS_I801 0x04
+#define I2C_HW_SMBUS_I801 0x04
#define I2C_HW_SMBUS_AMD756 0x05
#define I2C_HW_SMBUS_SIS5595 0x06
-/* --- ISA pseudo-adapter */
+/* --- ISA pseudo-adapter */
#define I2C_HW_ISA 0x00
#endif /* I2C_ID_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 9dcc57d49..3aa308bc2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -3,7 +3,7 @@
/* i2c.h - definitions for the i2c-bus interface */
/* */
/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-1999 Simon G. Vogl
+/* Copyright (C) 1995-2000 Simon G. Vogl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
Frodo Looijaard <frodol@dds.nl> */
-/* $Id: i2c.h,v 1.32 1999/12/21 23:45:58 frodo Exp $ */
+/* $Id: i2c.h,v 1.36 2000/01/18 23:54:07 frodo Exp $ */
#ifndef I2C_H
#define I2C_H
@@ -94,6 +94,8 @@ struct i2c_msg {
unsigned short flags;
#define I2C_M_TEN 0x10 /* we have a ten bit chip address */
#define I2C_M_RD 0x01
+#define I2C_M_NOSTART 0x4000
+#define I2C_M_REV_DIR_ADDR 0x2000
#if 0
#define I2C_M_PROBE 0x20
#endif
@@ -171,8 +173,8 @@ struct i2c_driver {
* dec_use is the inverse operation.
* NB: Make sure you have no circular dependencies, or else you get a
* deadlock when trying to unload the modules.
- * You should use the i2c_{inc,dec}_use_client functions instead of
- * calling this function directly.
+ * You should use the i2c_{inc,dec}_use_client functions instead of
+ * calling this function directly.
*/
void (*inc_use)(struct i2c_client *client);
void (*dec_use)(struct i2c_client *client);
@@ -190,9 +192,9 @@ struct i2c_client {
unsigned int addr; /* chip address - NOTE: 7bit */
/* addresses are stored in the */
/* _LOWER_ 7 bits of this char */
- /* addr: unsigned int to make lm_sensors i2c-isa adapter work
- more cleanly. It does not take any more memory space, due to
- alignment considerations */
+ /* addr: unsigned int to make lm_sensors i2c-isa adapter work
+ more cleanly. It does not take any more memory space, due to
+ alignment considerations */
struct i2c_adapter *adapter; /* the adapter we sit on */
struct i2c_driver *driver; /* and our access routines */
void *data; /* for the clients */
@@ -207,17 +209,17 @@ struct i2c_client {
*/
struct i2c_algorithm {
char name[32]; /* textual description */
- unsigned int id;
+ unsigned int id;
/* If a adapter algorithm can't to I2C-level access, set master_xfer
- to NULL. If an adapter algorithm can do SMBus access, set
- smbus_xfer. If set to NULL, the SMBus protocol is simulated
- using common I2C messages */
+ to NULL. If an adapter algorithm can do SMBus access, set
+ smbus_xfer. If set to NULL, the SMBus protocol is simulated
+ using common I2C messages */
int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg msgs[],
- int num);
+ int num);
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data * data);
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data * data);
/* --- these optional/future use for some adapter types.*/
int (*slave_send)(struct i2c_adapter *,char*,int);
@@ -245,7 +247,7 @@ struct i2c_adapter {
struct i2c_algorithm *algo;/* the algorithm to access the bus */
void *algo_data;
- /* --- These may be NULL, but should increase the module use count */
+ /* --- These may be NULL, but should increase the module use count */
void (*inc_use)(struct i2c_adapter *);
void (*dec_use)(struct i2c_adapter *);
@@ -279,20 +281,20 @@ struct i2c_adapter {
/*flags for the driver struct: */
#define I2C_DF_NOTIFY 0x01 /* notify on bus (de/a)ttaches */
-#define I2C_DF_DUMMY 0x02 /* do not connect any clients */
+#define I2C_DF_DUMMY 0x02 /* do not connect any clients */
/* i2c_client_address_data is the struct for holding default client
* addresses for a driver and for the parameters supplied on the
* command line
*/
struct i2c_client_address_data {
- unsigned short *normal_i2c;
- unsigned short *normal_i2c_range;
- unsigned short *probe;
- unsigned short *probe_range;
- unsigned short *ignore;
- unsigned short *ignore_range;
- unsigned short *force;
+ unsigned short *normal_i2c;
+ unsigned short *normal_i2c_range;
+ unsigned short *probe;
+ unsigned short *probe_range;
+ unsigned short *ignore;
+ unsigned short *ignore_range;
+ unsigned short *force;
};
/* Internal numbers to terminate lists */
@@ -361,20 +363,21 @@ extern int i2c_check_functionality (struct i2c_adapter *adap, u32 func);
/* To determine what functionality is present */
-#define I2C_FUNC_I2C 0x00000001
-#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_SMBUS_QUICK 0x00010000
-#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
-#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000
-#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000
-#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000
-#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000
-#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000
-#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000
-#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
+#define I2C_FUNC_I2C 0x00000001
+#define I2C_FUNC_10BIT_ADDR 0x00000002
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART} */
+#define I2C_FUNC_SMBUS_QUICK 0x00010000
+#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
+#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000
+#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000
+#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000
+#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000
+#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000
+#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000
+#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
-#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* New I2C-like block */
-#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* transfers */
+#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* New I2C-like block */
+#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* transfer */
#define I2C_FUNC_SMBUS_BYTE I2C_FUNC_SMBUS_READ_BYTE | \
I2C_FUNC_SMBUS_WRITE_BYTE
@@ -398,23 +401,23 @@ extern int i2c_check_functionality (struct i2c_adapter *adap, u32 func);
* Data for SMBus Messages
*/
union i2c_smbus_data {
- __u8 byte;
- __u16 word;
- __u8 block[33]; /* block[0] is used for length */
+ __u8 byte;
+ __u16 word;
+ __u8 block[33]; /* block[0] is used for length */
};
/* smbus_access read or write markers */
-#define I2C_SMBUS_READ 1
-#define I2C_SMBUS_WRITE 0
+#define I2C_SMBUS_READ 1
+#define I2C_SMBUS_WRITE 0
/* SMBus transaction types (size parameter in the above functions)
Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */
-#define I2C_SMBUS_QUICK 0
-#define I2C_SMBUS_BYTE 1
-#define I2C_SMBUS_BYTE_DATA 2
-#define I2C_SMBUS_WORD_DATA 3
-#define I2C_SMBUS_PROC_CALL 4
-#define I2C_SMBUS_BLOCK_DATA 5
+#define I2C_SMBUS_QUICK 0
+#define I2C_SMBUS_BYTE 1
+#define I2C_SMBUS_BYTE_DATA 2
+#define I2C_SMBUS_WORD_DATA 3
+#define I2C_SMBUS_PROC_CALL 4
+#define I2C_SMBUS_BLOCK_DATA 5
/* ----- commands for the ioctl like i2c_command call:
@@ -423,7 +426,7 @@ union i2c_smbus_data {
* corresponding header files.
*/
/* -> bit-adapter specific ioctls */
-#define I2C_RETRIES 0x0701 /* number times a device adress should */
+#define I2C_RETRIES 0x0701 /* number times a device address should */
/* be polled when not acknowledging */
#define I2C_TIMEOUT 0x0702 /* set timeout - call with int */
@@ -433,11 +436,11 @@ union i2c_smbus_data {
/* Attn.: Slave address is 7 or 10 bits */
#define I2C_SLAVE_FORCE 0x0706 /* Change slave address */
/* Attn.: Slave address is 7 or 10 bits */
- /* This changes the address, even if it */
- /* is already taken! */
-#define I2C_TENBIT 0x0704 /* 0 for 7 bit addrs, != 0 for 10 bit */
+ /* This changes the address, even if it */
+ /* is already taken! */
+#define I2C_TENBIT 0x0704 /* 0 for 7 bit addrs, != 0 for 10 bit */
-#define I2C_FUNCS 0x0705 /* Get the adapter functionality */
+#define I2C_FUNCS 0x0705 /* Get the adapter functionality */
#if 0
#define I2C_ACK_TEST 0x0710 /* See if a slave is at a specific adress */
#endif
@@ -445,7 +448,7 @@ union i2c_smbus_data {
#define I2C_SMBUS 0x0720 /* SMBus-level access */
/* ... algo-bit.c recognizes */
-#define I2C_UDELAY 0x0705 /* set delay in microsecs between each */
+#define I2C_UDELAY 0x0705 /* set delay in microsecs between each */
/* written byte (except address) */
#define I2C_MDELAY 0x0706 /* millisec delay between written bytes */
diff --git a/include/linux/input.h b/include/linux/input.h
index fefc77b51..3fdfa7e92 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -234,7 +234,7 @@ struct input_event {
#define KEY_RECORD 167
#define KEY_REWIND 168
-#define KEY_UNKNOWN 192
+#define KEY_UNKNOWN 180
#define BTN_MISC 0x100
#define BTN_0 0x100
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8bc1f9ee6..29a42aef8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -3,6 +3,7 @@
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
+#include <linux/smp.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
@@ -15,17 +16,9 @@ struct irqaction {
struct irqaction *next;
};
-extern volatile unsigned char bh_running;
-
-extern atomic_t bh_mask_count[32];
-extern unsigned long bh_active;
-extern unsigned long bh_mask;
-extern void (*bh_base[32])(void);
-
-asmlinkage void do_bottom_half(void);
/* Who gets which entry in bh_base. Things which will occur most often
- should come first - in which case NET should be up the top with SERIAL/TQUEUE! */
+ should come first */
enum {
TIMER_BH = 0,
@@ -37,10 +30,8 @@ enum {
SPECIALIX_BH,
AURORA_BH,
ESP_BH,
- NET_BH,
SCSI_BH,
IMMEDIATE_BH,
- KEYBOARD_BH,
CYCLADES_BH,
CM206_BH,
JS_BH,
@@ -51,6 +42,228 @@ enum {
#include <asm/hardirq.h>
#include <asm/softirq.h>
+
+
+/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
+ frequency threaded job scheduling. For almost all the purposes
+ tasklets are more than enough. F.e. KEYBOARD_BH, CONSOLE_BH, all serial
+ device BHs et al. are converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+ HI_SOFTIRQ=0,
+ NET_TX_SOFTIRQ,
+ NET_RX_SOFTIRQ,
+ TASKLET_SOFTIRQ
+};
+
+#if SMP_CACHE_BYTES <= 32
+/* It is trick to make assembly easier. */
+#define SOFTIRQ_STATE_PAD 32
+#else
+#define SOFTIRQ_STATE_PAD SMP_CACHE_BYTES
+#endif
+
+struct softirq_state
+{
+ __u32 active;
+ __u32 mask;
+} __attribute__ ((__aligned__(SOFTIRQ_STATE_PAD)));
+
+extern struct softirq_state softirq_state[NR_CPUS];
+
+struct softirq_action
+{
+ void (*action)(struct softirq_action *);
+ void *data;
+};
+
+asmlinkage void do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+
+extern __inline__ void __cpu_raise_softirq(int cpu, int nr)
+{
+ softirq_state[cpu].active |= (1<<nr);
+}
+
+
+/* I do not want to use atomic variables now, so that cli/sti */
+extern __inline__ void raise_softirq(int nr)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __cpu_raise_softirq(smp_processor_id(), nr);
+ local_irq_restore(flags);
+}
+
+extern void softirq_init(void);
+
+
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+ Main feature differing them of generic softirqs: tasklet
+ is running only on one CPU simultaneously.
+
+ Main feature differing them of BHs: different tasklets
+ may be run simultaneously on different CPUs.
+
+ Properties:
+ * If tasklet_schedule() is called, then tasklet is guaranteed
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its excecution is still not
+ started, it will be executed only once.
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
+ * Tasklet is strictly serialized wrt itself, but not
+ wrt another tasklets. If client needs some intertask synchronization,
+ he makes it with spinlocks.
+ */
+
+struct tasklet_struct
+{
+ struct tasklet_struct *next;
+ unsigned long state;
+ atomic_t count;
+ void (*func)(unsigned long);
+ unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+
+
+enum
+{
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+};
+
+struct tasklet_head
+{
+ struct tasklet_struct *list;
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+extern struct tasklet_head tasklet_vec[NR_CPUS];
+extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
+
+#ifdef __SMP__
+#define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
+#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* NOTHING */ }
+#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern __inline__ void tasklet_schedule(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+
+extern __inline__ void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+
+
+extern __inline__ void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+ atomic_inc(&t->count);
+}
+
+extern __inline__ void tasklet_disable(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_wait(t);
+}
+
+extern __inline__ void tasklet_enable(struct tasklet_struct *t)
+{
+ atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data);
+
+#ifdef __SMP__
+
+#define SMP_TIMER_NAME(name) name##__thr
+
+#define SMP_TIMER_DEFINE(name, task) \
+DECLARE_TASKLET(task, name##__thr, 0); \
+static void name (unsigned long dummy) \
+{ \
+ tasklet_schedule(&(task)); \
+}
+
+#else /* __SMP__ */
+
+#define SMP_TIMER_NAME(name) name
+#define SMP_TIMER_DEFINE(name, task)
+
+#endif /* __SMP__ */
+
+
+/* Old BH definitions */
+
+extern struct tasklet_struct bh_task_vec[];
+
+/* It is exported _ONLY_ for wait_on_irq(). */
+extern spinlock_t global_bh_lock;
+
+extern __inline__ void mark_bh(int nr)
+{
+ tasklet_hi_schedule(bh_task_vec+nr);
+}
+
+extern __inline__ void disable_bh_nosync(int nr)
+{
+ tasklet_disable_nosync(bh_task_vec+nr);
+}
+
+extern __inline__ void disable_bh(int nr)
+{
+ tasklet_disable_nosync(bh_task_vec+nr);
+ if (!in_interrupt())
+ tasklet_unlock_wait(bh_task_vec+nr);
+}
+
+extern __inline__ void enable_bh(int nr)
+{
+ tasklet_enable(bh_task_vec+nr);
+}
+
+
+extern void init_bh(int nr, void (*routine)(void));
+extern void remove_bh(int nr);
+
+
/*
* Autoprobing for irqs:
*
diff --git a/include/linux/iso_fs.h b/include/linux/iso_fs.h
index 80a8b27f4..59da5b123 100644
--- a/include/linux/iso_fs.h
+++ b/include/linux/iso_fs.h
@@ -178,7 +178,6 @@ extern int iso_date(char *, int);
extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
-extern int rock_ridge_symlink_readpage(struct dentry *, struct page *);
extern int find_rock_ridge_relocation(struct iso_directory_record *, struct inode *);
int get_joliet_filename(struct iso_directory_record *, struct inode *, unsigned char *);
@@ -192,10 +191,7 @@ extern int isofs_lookup_grandparent(struct inode *, int);
extern struct inode_operations isofs_file_inode_operations;
extern struct inode_operations isofs_dir_inode_operations;
-extern struct inode_operations isofs_symlink_inode_operations;
-extern struct inode_operations isofs_chrdev_inode_operations;
-extern struct inode_operations isofs_blkdev_inode_operations;
-extern struct inode_operations isofs_fifo_inode_operations;
+extern struct address_space_operations isofs_symlink_aops;
/* The following macros are used to check for memory leaks. */
#ifdef LEAK_CHECK
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 9b215fb14..56dd41fac 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -4,6 +4,8 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
+extern struct tasklet_struct keyboard_tasklet;
+
extern int shift_state;
extern char *func_table[MAX_NR_FUNC];
@@ -85,7 +87,7 @@ extern inline void set_console(int nr)
extern inline void set_leds(void)
{
- mark_bh(KEYBOARD_BH);
+ tasklet_schedule(&keyboard_tasklet);
}
extern inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index b7657dd8a..c5c834d97 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -9,6 +9,8 @@
#if defined __i386__ && (__GNUC__ > 2 || __GNUC_MINOR__ > 7)
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
+#elif defined __ia64__
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
#else
#define asmlinkage CPP_ASMLINKAGE
#endif
diff --git a/include/linux/lp.h b/include/linux/lp.h
index a02c3fff1..4bc121a30 100644
--- a/include/linux/lp.h
+++ b/include/linux/lp.h
@@ -89,6 +89,7 @@
#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */
#endif
#define LPGETFLAGS 0x060e /* get status flags */
+#define LPSETTIMEOUT 0x060f /* set parport timeout */
/* timeout for printk'ing a timeout, in jiffies (100ths of a second).
This is also used for re-checking error conditions if LP_ABORT is
@@ -145,6 +146,7 @@ struct lp_struct {
unsigned int last_error;
struct semaphore port_mutex;
wait_queue_head_t dataq;
+ long timeout;
};
/*
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index f0f86bca1..cab50dd9e 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -1,6 +1,4 @@
-/* $Id$
- *
- * mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
* Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993
* derived from Data Sheet, Copyright Motorola 1984 (!).
* It was written to be part of the Linux operating system.
@@ -9,10 +7,13 @@
* in terms of the GNU Library General Public License, Version 2 or later,
* at your option.
*/
-#ifndef _LINUX_MC146818RTC_H
-#define _LINUX_MC146818RTC_H
-#include <asm/mc146818rtc.h>
+#ifndef _MC146818RTC_H
+#define _MC146818RTC_H
+
+#include <asm/io.h>
+#include <linux/rtc.h> /* get the user-level API */
+#include <asm/mc146818rtc.h> /* register access macros */
/**********************************************************************
* register summary
@@ -94,4 +95,4 @@
#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
#endif
-#endif /* _LINUX_MC146818RTC_H */
+#endif /* _MC146818RTC_H */
diff --git a/include/linux/md.h b/include/linux/md.h
deleted file mode 100644
index 654b67717..000000000
--- a/include/linux/md.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- md.h : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _MD_H
-#define _MD_H
-
-#include <linux/major.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define MD_MAJOR_VERSION 0
-#define MD_MINOR_VERSION 36
-#define MD_PATCHLEVEL_VERSION 6
-
-#define MD_DEFAULT_DISK_READAHEAD (256 * 1024)
-
-/* ioctls */
-#define REGISTER_DEV _IO (MD_MAJOR, 1)
-#define START_MD _IO (MD_MAJOR, 2)
-#define STOP_MD _IO (MD_MAJOR, 3)
-#define REGISTER_DEV_NEW _IO (MD_MAJOR, 4)
-
-/*
- personalities :
- Byte 0 : Chunk size factor
- Byte 1 : Fault tolerance count for each physical device
- ( 0 means no fault tolerance,
- 0xFF means always tolerate faults), not used by now.
- Byte 2 : Personality
- Byte 3 : Reserved.
- */
-
-#define FAULT_SHIFT 8
-#define PERSONALITY_SHIFT 16
-
-#define FACTOR_MASK 0x000000FFUL
-#define FAULT_MASK 0x0000FF00UL
-#define PERSONALITY_MASK 0x00FF0000UL
-
-#define MD_RESERVED 0 /* Not used by now */
-#define LINEAR (1UL << PERSONALITY_SHIFT)
-#define STRIPED (2UL << PERSONALITY_SHIFT)
-#define RAID0 STRIPED
-#define RAID1 (3UL << PERSONALITY_SHIFT)
-#define RAID5 (4UL << PERSONALITY_SHIFT)
-#define MAX_PERSONALITY 5
-
-/*
- * MD superblock.
- *
- * The MD superblock maintains some statistics on each MD configuration.
- * Each real device in the MD set contains it near the end of the device.
- * Some of the ideas are copied from the ext2fs implementation.
- *
- * We currently use 4096 bytes as follows:
- *
- * word offset function
- *
- * 0 - 31 Constant generic MD device information.
- * 32 - 63 Generic state information.
- * 64 - 127 Personality specific information.
- * 128 - 511 12 32-words descriptors of the disks in the raid set.
- * 512 - 911 Reserved.
- * 912 - 1023 Disk specific descriptor.
- */
-
-/*
- * If x is the real device size in bytes, we return an apparent size of:
- *
- * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
- *
- * and place the 4kB superblock at offset y.
- */
-#define MD_RESERVED_BYTES (64 * 1024)
-#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
-#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
-
-#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
-#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
-
-#define MD_SB_BYTES 4096
-#define MD_SB_WORDS (MD_SB_BYTES / 4)
-#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
-#define MD_SB_SECTORS (MD_SB_BYTES / 512)
-
-/*
- * The following are counted in 32-bit words
- */
-#define MD_SB_GENERIC_OFFSET 0
-#define MD_SB_PERSONALITY_OFFSET 64
-#define MD_SB_DISKS_OFFSET 128
-#define MD_SB_DESCRIPTOR_OFFSET 992
-
-#define MD_SB_GENERIC_CONSTANT_WORDS 32
-#define MD_SB_GENERIC_STATE_WORDS 32
-#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
-#define MD_SB_PERSONALITY_WORDS 64
-#define MD_SB_DISKS_WORDS 384
-#define MD_SB_DESCRIPTOR_WORDS 32
-#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
-#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
-#define MD_SB_DISKS (MD_SB_DISKS_WORDS / MD_SB_DESCRIPTOR_WORDS)
-
-/*
- * Device "operational" state bits
- */
-#define MD_FAULTY_DEVICE 0 /* Device is faulty / operational */
-#define MD_ACTIVE_DEVICE 1 /* Device is a part or the raid set / spare disk */
-#define MD_SYNC_DEVICE 2 /* Device is in sync with the raid set */
-
-typedef struct md_device_descriptor_s {
- __u32 number; /* 0 Device number in the entire set */
- __u32 major; /* 1 Device major number */
- __u32 minor; /* 2 Device minor number */
- __u32 raid_disk; /* 3 The role of the device in the raid set */
- __u32 state; /* 4 Operational state */
- __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
-} md_descriptor_t;
-
-#define MD_SB_MAGIC 0xa92b4efc
-
-/*
- * Superblock state bits
- */
-#define MD_SB_CLEAN 0
-#define MD_SB_ERRORS 1
-
-typedef struct md_superblock_s {
-
- /*
- * Constant generic information
- */
- __u32 md_magic; /* 0 MD identifier */
- __u32 major_version; /* 1 major version to which the set conforms */
- __u32 minor_version; /* 2 minor version to which the set conforms */
- __u32 patch_version; /* 3 patchlevel version to which the set conforms */
- __u32 gvalid_words; /* 4 Number of non-reserved words in this section */
- __u32 set_magic; /* 5 Raid set identifier */
- __u32 ctime; /* 6 Creation time */
- __u32 level; /* 7 Raid personality (mirroring, raid5, ...) */
- __u32 size; /* 8 Apparent size of each individual disk, in kB */
- __u32 nr_disks; /* 9 Number of total disks in the raid set */
- __u32 raid_disks; /* 10 Number of disks in a fully functional raid set */
- __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 11];
-
- /*
- * Generic state information
- */
- __u32 utime; /* 0 Superblock update time */
- __u32 state; /* 1 State bits (clean, ...) */
- __u32 active_disks; /* 2 Number of currently active disks (some non-faulty disks might not be in sync) */
- __u32 working_disks; /* 3 Number of working disks */
- __u32 failed_disks; /* 4 Number of failed disks */
- __u32 spare_disks; /* 5 Number of spare disks */
- __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 6];
-
- /*
- * Personality information
- */
- __u32 parity_algorithm;
- __u32 chunk_size;
- __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 2];
-
- /*
- * Disks information
- */
- md_descriptor_t disks[MD_SB_DISKS];
-
- /*
- * Reserved
- */
- __u32 reserved[MD_SB_RESERVED_WORDS];
-
- /*
- * Active descriptor
- */
- md_descriptor_t descriptor;
-} md_superblock_t;
-
-#ifdef __KERNEL__
-
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <asm/semaphore.h>
-
-/*
- * Kernel-based reconstruction is mostly working, but still requires
- * some additional work.
- */
-#define SUPPORT_RECONSTRUCTION 0
-
-#define MAX_REAL 8 /* Max number of physical dev per md dev */
-#define MAX_MD_DEV 4 /* Max number of md dev */
-
-#define FACTOR(a) ((a)->repartition & FACTOR_MASK)
-#define MAX_FAULT(a) (((a)->repartition & FAULT_MASK)>>8)
-#define PERSONALITY(a) ((a)->repartition & PERSONALITY_MASK)
-
-#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
-
-struct real_dev
-{
- kdev_t dev; /* Device number */
- int size; /* Device size (in blocks) */
- int offset; /* Real device offset (in blocks) in md dev
- (only used in linear mode) */
- struct inode *inode; /* Lock inode */
- md_superblock_t *sb;
- u32 sb_offset;
-};
-
-struct md_dev;
-
-#define SPARE_INACTIVE 0
-#define SPARE_WRITE 1
-#define SPARE_ACTIVE 2
-
-struct md_personality
-{
- char *name;
- int (*map)(struct md_dev *mddev, kdev_t *rdev,
- unsigned long *rsector, unsigned long size);
- int (*make_request)(struct md_dev *mddev, int rw, struct buffer_head * bh);
- void (*end_request)(struct buffer_head * bh, int uptodate);
- int (*run)(int minor, struct md_dev *mddev);
- int (*stop)(int minor, struct md_dev *mddev);
- int (*status)(char *page, int minor, struct md_dev *mddev);
- int (*ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
- int max_invalid_dev;
- int (*error_handler)(struct md_dev *mddev, kdev_t dev);
-
-/*
- * Some personalities (RAID-1, RAID-5) can get disks hot-added and
- * hot-removed. Hot removal is different from failure. (failure marks
- * a disk inactive, but the disk is still part of the array)
- */
- int (*hot_add_disk) (struct md_dev *mddev, kdev_t dev);
- int (*hot_remove_disk) (struct md_dev *mddev, kdev_t dev);
- int (*mark_spare) (struct md_dev *mddev, md_descriptor_t *descriptor, int state);
-};
-
-struct md_dev
-{
- struct real_dev devices[MAX_REAL];
- struct md_personality *pers;
- md_superblock_t *sb;
- int sb_dirty;
- int repartition;
- int busy;
- int nb_dev;
- void *private;
-};
-
-struct md_thread {
- void (*run) (void *data);
- void *data;
- wait_queue_head_t wqueue;
- unsigned long flags;
- struct semaphore *sem;
- struct task_struct *tsk;
-};
-
-#define THREAD_WAKEUP 0
-
-extern struct md_dev md_dev[MAX_MD_DEV];
-extern int md_size[MAX_MD_DEV];
-extern int md_maxreadahead[MAX_MD_DEV];
-
-extern char *partition_name (kdev_t dev);
-
-extern int register_md_personality (int p_num, struct md_personality *p);
-extern int unregister_md_personality (int p_num);
-extern struct md_thread *md_register_thread (void (*run) (void *data), void *data);
-extern void md_unregister_thread (struct md_thread *thread);
-extern void md_wakeup_thread(struct md_thread *thread);
-extern int md_update_sb (int minor);
-extern int md_do_sync(struct md_dev *mddev);
-
-#endif __KERNEL__
-#endif _MD_H
diff --git a/include/linux/minix_fs.h b/include/linux/minix_fs.h
index dcfbca271..c1d661460 100644
--- a/include/linux/minix_fs.h
+++ b/include/linux/minix_fs.h
@@ -107,10 +107,7 @@ extern int minix_new_block(struct inode * inode);
extern void minix_free_block(struct inode * inode, int block);
extern unsigned long minix_count_free_blocks(struct super_block *sb);
-extern int minix_bmap(struct inode *,int);
-
extern struct buffer_head * minix_getblk(struct inode *, int, int);
-extern int minix_get_block(struct inode *, long, struct buffer_head *, int);
extern struct buffer_head * minix_bread(struct inode *, int, int);
extern void minix_truncate(struct inode *);
@@ -118,9 +115,9 @@ extern int init_minix_fs(void);
extern int minix_sync_inode(struct inode *);
extern int minix_sync_file(struct file *, struct dentry *);
+extern struct address_space_operations minix_aops;
extern struct inode_operations minix_file_inode_operations;
extern struct inode_operations minix_dir_inode_operations;
-extern struct inode_operations minix_symlink_inode_operations;
extern struct dentry_operations minix_dentry_operations;
#endif /* __KERNEL__ */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index edf0f0768..9e2a1e4ff 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -14,6 +14,7 @@
#define WATCHDOG_MINOR 130 /* Watchdog timer */
#define TEMP_MINOR 131 /* Temperature Sensor */
#define RTC_MINOR 135
+#define EFI_RTC_MINOR 136 /* EFI Time services */
#define SUN_OPENPROM_MINOR 139
#define NVRAM_MINOR 144
#define I2O_MINOR 166
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0d7609741..1b4cee348 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -394,7 +394,7 @@ extern int pgt_cache_water[2];
extern int check_pgt_cache(void);
extern void paging_init(void);
-extern void free_area_init(unsigned int * zones_size);
+extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat,
unsigned int * zones_size, unsigned long zone_start_paddr);
extern void mem_init(void);
@@ -463,7 +463,7 @@ extern struct page *filemap_nopage(struct vm_area_struct * area,
#define __GFP_DMA 0x20
#define __GFP_UNCACHED 0x40
-#define GFP_BUFFER (__GFP_WAIT)
+#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_USER (__GFP_WAIT | __GFP_IO)
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
@@ -510,6 +510,8 @@ static inline int expand_stack(struct vm_area_struct * vma, unsigned long addres
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e3d688acf..d2b1d5a6a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -29,7 +29,7 @@ typedef struct zone_struct {
unsigned long offset;
unsigned long free_pages;
int low_on_memory;
- unsigned long pages_low, pages_high;
+ unsigned long pages_min, pages_low, pages_high;
struct pglist_data *zone_pgdat;
/*
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 3a7f28622..54c1cbd55 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -271,11 +271,6 @@ extern ssize_t fat_file_read(struct file *, char *, size_t, loff_t *);
extern ssize_t fat_file_write(struct file *, const char *, size_t, loff_t *);
extern void fat_truncate(struct inode *inode);
-/* mmap.c */
-extern int fat_mmap(struct file *, struct vm_area_struct *);
-extern int fat_readpage(struct file *, struct page *);
-
-
/* vfat.c */
extern int init_vfat_fs(void);
diff --git a/include/linux/msdos_fs_i.h b/include/linux/msdos_fs_i.h
index bcb37bb80..8d66550d2 100644
--- a/include/linux/msdos_fs_i.h
+++ b/include/linux/msdos_fs_i.h
@@ -6,6 +6,7 @@
*/
struct msdos_inode_info {
+ unsigned long mmu_private;
int i_start; /* first cluster or 0 */
int i_logstart; /* logical first cluster */
int i_attrs; /* unused attribute bits */
@@ -13,7 +14,6 @@ struct msdos_inode_info {
int i_location; /* on-disk position of directory entry or 0 */
struct inode *i_fat_inode; /* struct inode of this one */
struct list_head i_fat_hash; /* hash by i_location */
- int i_realsize;
};
#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 323458ca4..3905728e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -29,6 +29,8 @@
#include <linux/if_packet.h>
#include <asm/atomic.h>
+#include <asm/cache.h>
+#include <asm/byteorder.h>
#ifdef __KERNEL__
#include <linux/config.h>
@@ -107,15 +109,6 @@ struct net_device_stats
unsigned long tx_compressed;
};
-#ifdef CONFIG_NET_FASTROUTE
-struct net_fastroute_stats
-{
- int hits;
- int succeed;
- int deferred;
- int latency_reduction;
-};
-#endif
/* Media selection options. */
enum {
@@ -138,6 +131,23 @@ struct neighbour;
struct neigh_parms;
struct sk_buff;
+struct netif_rx_stats
+{
+ unsigned total;
+ unsigned dropped;
+ unsigned time_squeeze;
+ unsigned throttled;
+ unsigned fastroute_hit;
+ unsigned fastroute_success;
+ unsigned fastroute_defer;
+ unsigned fastroute_deferred_out;
+ unsigned fastroute_latency_reduction;
+ unsigned cpu_collision;
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+extern struct netif_rx_stats netdev_rx_stat[];
+
+
/*
* We tag multicasts with these structures.
*/
@@ -163,6 +173,16 @@ struct hh_cache
unsigned long hh_data[16/sizeof(unsigned long)];
};
+enum netdev_state_t
+{
+ LINK_STATE_XOFF=0,
+ LINK_STATE_DOWN,
+ LINK_STATE_START,
+ LINK_STATE_RXSEM,
+ LINK_STATE_TXSEM,
+ LINK_STATE_SCHED
+};
+
/*
* The DEVICE structure.
@@ -194,34 +214,30 @@ struct net_device
unsigned long mem_start; /* shared mem start */
unsigned long base_addr; /* device I/O address */
unsigned int irq; /* device IRQ number */
-
- /* Low-level status flags. */
- volatile unsigned char start; /* start an operation */
+
/*
- * These two are just single-bit flags, but due to atomicity
- * reasons they have to be inside a "unsigned long". However,
- * they should be inside the SAME unsigned long instead of
- * this wasteful use of memory..
+ * Some hardware also needs these fields, but they are not
+ * part of the usual set specified in Space.c.
*/
- unsigned long interrupt; /* bitops.. */
- unsigned long tbusy; /* transmitter busy */
-
- struct net_device *next;
+
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ unsigned long state;
+
+ struct net_device *next;
/* The device initialization function. Called only once. */
int (*init)(struct net_device *dev);
+ /* ------- Fields preinitialized in Space.c finish here ------- */
+
+ struct net_device *next_sched;
+
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
- /*
- * Some hardware also needs these fields, but they are not
- * part of the usual set specified in Space.c.
- */
-
- unsigned char if_port; /* Selectable AUI, TP,..*/
- unsigned char dma; /* DMA channel */
struct net_device_stats* (*get_stats)(struct net_device *dev);
struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
@@ -235,14 +251,18 @@ struct net_device
/* These may be needed for future network-power-down code. */
unsigned long trans_start; /* Time (in jiffies) of last Tx */
unsigned long last_rx; /* Time of last Rx */
-
+
unsigned short flags; /* interface flags (a la BSD) */
unsigned short gflags;
unsigned mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */
void *priv; /* pointer to private data */
-
+
+ struct net_device *master; /* Pointer to master device of a group,
+ * which this device is member of.
+ */
+
/* Interface address info. */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
unsigned char pad; /* make dev_addr aligned to 8 bytes */
@@ -253,11 +273,9 @@ struct net_device
int mc_count; /* Number of installed mcasts */
int promiscuity;
int allmulti;
-
- /* For load balancing driver pair support */
-
- unsigned long pkt_queue; /* Packets queued */
- struct net_device *slave; /* Slave device */
+
+ int watchdog_timeo;
+ struct timer_list watchdog_timer;
/* Protocol specific pointers */
@@ -329,13 +347,15 @@ struct net_device
#define HAVE_CHANGE_MTU
int (*change_mtu)(struct net_device *dev, int new_mtu);
+#define HAVE_TX_TIMOUT
+ void (*tx_timeout) (struct net_device *dev);
+
int (*hard_header_parse)(struct sk_buff *skb,
unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
int (*accept_fastpath)(struct net_device *, struct dst_entry*);
#ifdef CONFIG_NET_FASTROUTE
- unsigned long tx_semaphore;
#define NETDEV_FASTROUTE_HMASK 0xF
/* Semi-private data. Keep it at the end of device struct. */
rwlock_t fastpath_lock;
@@ -361,8 +381,6 @@ struct packet_type
extern struct net_device loopback_dev; /* The loopback */
extern struct net_device *dev_base; /* All devices */
extern rwlock_t dev_base_lock; /* Device list lock */
-extern int netdev_dropping;
-extern int net_cpu_congestion;
extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern void dev_add_pack(struct packet_type *pt);
@@ -392,9 +410,77 @@ extern __inline__ int unregister_gifconf(unsigned int family)
return register_gifconf(family, 0);
}
+/*
+ * Incoming packets are placed on per-cpu queues so that
+ * no locking is needed.
+ */
+
+struct softnet_data
+{
+ int throttle;
+ struct sk_buff_head input_pkt_queue;
+ struct net_device *output_queue;
+ struct sk_buff *completion_queue;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+
+extern struct softnet_data softnet_data[NR_CPUS];
+
+#define HAS_NETIF_QUEUE
+
+extern __inline__ void __netif_schedule(struct net_device *dev)
+{
+ if (!test_and_set_bit(LINK_STATE_SCHED, &dev->state)) {
+ unsigned long flags;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+ dev->next_sched = softnet_data[cpu].output_queue;
+ softnet_data[cpu].output_queue = dev;
+ __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+
+extern __inline__ void netif_schedule(struct net_device *dev)
+{
+ if (!test_bit(LINK_STATE_XOFF, &dev->state))
+ __netif_schedule(dev);
+}
+
+extern __inline__ void netif_start_queue(struct net_device *dev)
+{
+ clear_bit(LINK_STATE_XOFF, &dev->state);
+}
+
+extern __inline__ void netif_wake_queue(struct net_device *dev)
+{
+ if (test_and_clear_bit(LINK_STATE_XOFF, &dev->state))
+ __netif_schedule(dev);
+}
+
+extern __inline__ void netif_stop_queue(struct net_device *dev)
+{
+ set_bit(LINK_STATE_XOFF, &dev->state);
+}
+
+extern __inline__ void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ if (atomic_dec_and_test(&skb->users)) {
+ int cpu =smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ skb->next = softnet_data[cpu].completion_queue;
+ softnet_data[cpu].completion_queue = skb;
+ __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+
+
#define HAVE_NETIF_RX 1
extern void netif_rx(struct sk_buff *skb);
-extern void net_bh(void);
extern int dev_ioctl(unsigned int cmd, void *);
extern int dev_change_flags(struct net_device *, unsigned);
extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
@@ -448,15 +534,13 @@ extern void dev_load(const char *name);
extern void dev_mcast_init(void);
extern int netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev));
extern void netdev_unregister_fc(int bit);
-extern int netdev_dropping;
extern int netdev_max_backlog;
-extern atomic_t netdev_rx_dropped;
extern unsigned long netdev_fc_xoff;
+extern int netdev_set_master(struct net_device *dev, struct net_device *master);
#ifdef CONFIG_NET_FASTROUTE
extern int netdev_fastroute;
extern int netdev_fastroute_obstacles;
extern void dev_clear_fastroute(struct net_device *dev);
-extern struct net_fastroute_stats dev_fastroute_stat;
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f2a114a58..26fb2de7f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -190,6 +190,7 @@ extern int __nfs_revalidate_inode(struct nfs_server *, struct dentry *);
* linux/fs/nfs/file.c
*/
extern struct inode_operations nfs_file_inode_operations;
+extern struct address_space_operations nfs_file_aops;
/*
* linux/fs/nfs/dir.c
diff --git a/include/linux/ntfs_fs_i.h b/include/linux/ntfs_fs_i.h
index 0bc8d7806..3ac18ea47 100644
--- a/include/linux/ntfs_fs_i.h
+++ b/include/linux/ntfs_fs_i.h
@@ -62,6 +62,7 @@ typedef unsigned int ntfs_cluster_t;
/* Definition of NTFS in-memory inode structure */
struct ntfs_inode_info{
+ unsigned long mmu_private;
struct ntfs_sb_info *vol;
int i_number; /* should be really 48 bits */
unsigned sequence_number;
diff --git a/include/linux/openpic.h b/include/linux/openpic.h
index ce7ffdf89..b4a9ecab1 100644
--- a/include/linux/openpic.h
+++ b/include/linux/openpic.h
@@ -263,8 +263,12 @@ extern u_char *OpenPIC_InitSenses;
* Interrupt Source Registers
*/
-#define OPENPIC_SENSE_POLARITY 0x00800000 /* Undoc'd */
+#define OPENPIC_POLARITY_POSITIVE 0x00800000
+#define OPENPIC_POLARITY_NEGATIVE 0x00000000
+#define OPENPIC_POLARITY_MASK 0x00800000
#define OPENPIC_SENSE_LEVEL 0x00400000
+#define OPENPIC_SENSE_EDGE 0x00000000
+#define OPENPIC_SENSE_MASK 0x00400000
/*
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 53bc365d2..813dd78ea 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -83,7 +83,7 @@ extern void add_to_page_cache(struct page * page, struct address_space *mapping,
extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
{
- __add_page_to_hash_queue(page, page_hash(&inode->i_data,index));
+ __add_page_to_hash_queue(page, page_hash(inode->i_mapping,index));
}
extern inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 6472c3b59..8fae97e9b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -149,7 +149,8 @@
#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
/* Header type 2 (CardBus bridges) */
-/* 0x14-0x15 reserved */
+#define PCI_CB_CAPABILITY_LIST 0x14
+/* 0x15 reserved */
#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
@@ -571,6 +572,19 @@ extern inline int pci_enable_device(struct pci_dev *dev) { return 0; }
#endif /* !CONFIG_PCI */
+/* these helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info */
+#define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start)
+#define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end)
+#define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags)
+#define pci_resource_len(dev,bar) \
+ ((pci_resource_start((dev),(bar)) == 0 && \
+ pci_resource_end((dev),(bar)) == \
+ pci_resource_start((dev),(bar))) ? 0 : \
+ \
+ (pci_resource_end((dev),(bar)) - \
+ pci_resource_start((dev),(bar)) + 1))
+
/*
* The world is not perfect and supplies us with broken PCI devices.
* For at least a part of these bugs we need a work-around, so both
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8108f3764..aaafe127f 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -21,6 +21,8 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#ifdef __KERNEL__
+
#include <linux/config.h>
#include <linux/list.h>
@@ -69,8 +71,10 @@ enum
PM_SYS_UNKNOWN = 0x00000000, /* generic */
PM_SYS_KBC = 0x41d00303, /* keyboard controller */
PM_SYS_COM = 0x41d00500, /* serial port */
+ PM_SYS_IRDA = 0x41d00510, /* IRDA controller */
PM_SYS_FDC = 0x41d00700, /* floppy controller */
PM_SYS_VGA = 0x41d00900, /* VGA controller */
+ PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */
};
/*
@@ -96,13 +100,17 @@ struct pm_dev
void *data;
unsigned long flags;
- unsigned long status;
+ int state;
struct list_head entry;
};
#if defined(CONFIG_ACPI) || defined(CONFIG_APM)
+extern int pm_active;
+
+#define PM_IS_ACTIVE() (pm_active != 0)
+
/*
* Register a device with power management
*/
@@ -116,6 +124,11 @@ struct pm_dev *pm_register(pm_dev_t type,
void pm_unregister(struct pm_dev *dev);
/*
+ * Unregister all devices with matching callback
+ */
+void pm_unregister_all(pm_callback callback);
+
+/*
* Send a request to all devices
*/
int pm_send_request(pm_request_t rqst, void *data);
@@ -130,6 +143,8 @@ extern inline void pm_dev_idle(struct pm_dev *dev) {}
#else // CONFIG_ACPI || CONFIG_APM
+#define PM_IS_ACTIVE() 0
+
extern inline struct pm_dev *pm_register(pm_dev_t type,
unsigned long id,
pm_callback callback)
@@ -139,6 +154,8 @@ extern inline struct pm_dev *pm_register(pm_dev_t type,
extern inline void pm_unregister(struct pm_dev *dev) {}
+extern inline void pm_unregister_all(pm_callback callback) {}
+
extern inline int pm_send_request(pm_request_t rqst, void *data)
{
return 0;
@@ -154,4 +171,9 @@ extern inline void pm_dev_idle(struct pm_dev *dev) {}
#endif // CONFIG_ACPI || CONFIG_APM
+extern void (*pm_idle)(void);
+extern void (*pm_power_off)(void);
+
+#endif // __KERNEL__
+
#endif /* _LINUX_PM_H */
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 907b58c2d..ad4d12b67 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -51,6 +51,7 @@ enum {
PMU_OHARE_BASED, /* 2400, 3400, 3500 (old G3 powerbook) */
PMU_HEATHROW_BASED, /* PowerBook G3 series */
PMU_PADDINGTON_BASED, /* 1999 PowerBook G3 */
+ PMU_KEYLARGO_BASED, /* Core99 motherboard (PMU99) */
};
/*
@@ -66,6 +67,8 @@ enum {
#define PMU_IOC_SET_BACKLIGHT _IOW('B', 2, sizeof(__u32))
/* out param: u32* backlight value: 0 to 31 */
#define PMU_IOC_GET_MODEL _IOR('B', 3, sizeof(__u32*))
+/* out param: u32* has_adb: 0 or 1 */
+#define PMU_IOC_HAS_ADB _IOR('B', 4, sizeof(__u32*))
#ifdef __KERNEL__
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index c68290358..d1f9dd66c 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -1,3 +1,5 @@
+#ifndef _PPP_CHANNEL_H_
+#define _PPP_CHANNEL_H_
/*
* Definitions for the interface between the generic PPP code
* and a PPP channel.
@@ -17,7 +19,7 @@
* ==FILEVERSION 990909==
*/
-/* $Id: ppp_channel.h,v 1.2 1999/09/15 11:21:53 paulus Exp $ */
+/* $Id: ppp_channel.h,v 1.3 2000/01/31 01:42:48 davem Exp $ */
#include <linux/list.h>
#include <linux/skbuff.h>
@@ -61,3 +63,4 @@ extern int ppp_register_channel(struct ppp_channel *, int unit);
extern void ppp_unregister_channel(struct ppp_channel *);
#endif /* __KERNEL__ */
+#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 30f86f49f..21349eb40 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -115,7 +115,6 @@ extern struct dentry *proc_lookup(struct inode *, struct dentry *);
extern struct inode_operations proc_sys_inode_operations;
extern struct inode_operations proc_kcore_inode_operations;
extern struct inode_operations proc_kmsg_inode_operations;
-extern struct inode_operations proc_omirr_inode_operations;
extern struct inode_operations proc_ppc_htab_inode_operations;
/*
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index e340e3dfe..d831875bb 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -108,7 +108,6 @@ extern int init_qnx4_fs(void);
extern int qnx4_create(struct inode *dir, struct dentry *dentry, int mode);
extern struct inode_operations qnx4_file_inode_operations;
extern struct inode_operations qnx4_dir_inode_operations;
-extern struct inode_operations qnx4_symlink_inode_operations;
extern int qnx4_is_free(struct super_block *sb, long block);
extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode);
diff --git a/include/linux/qnx4_fs_i.h b/include/linux/qnx4_fs_i.h
index 57439fe57..b0fe8463e 100644
--- a/include/linux/qnx4_fs_i.h
+++ b/include/linux/qnx4_fs_i.h
@@ -33,6 +33,7 @@ struct qnx4_inode_info {
__u8 i_zero[4]; /* 4 */
qnx4_ftype_t i_type; /* 1 */
__u8 i_status; /* 1 */
+ unsigned long mmu_private;
};
#endif
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
new file mode 100644
index 000000000..55cfab78f
--- /dev/null
+++ b/include/linux/raid/linear.h
@@ -0,0 +1,32 @@
+#ifndef _LINEAR_H
+#define _LINEAR_H
+
+#include <linux/raid/md.h>
+
+struct dev_info {
+ kdev_t dev;
+ int size;
+ unsigned int offset;
+};
+
+typedef struct dev_info dev_info_t;
+
+struct linear_hash
+{
+ dev_info_t *dev0, *dev1;
+};
+
+struct linear_private_data
+{
+ struct linear_hash *hash_table;
+ dev_info_t disks[MD_SB_DISKS];
+ dev_info_t *smallest;
+ int nr_zones;
+};
+
+
+typedef struct linear_private_data linear_conf_t;
+
+#define mddev_to_conf(mddev) ((linear_conf_t *) (mddev)->private)
+
+#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
new file mode 100644
index 000000000..cabc0a8be
--- /dev/null
+++ b/include/linux/raid/md.h
@@ -0,0 +1,94 @@
+/*
+ md.h : Multiple Devices driver for Linux
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+ Copyright (C) 1994-96 Marc ZYNGIER
+ <zyngier@ufr-info-p7.ibp.fr> or
+ <maz@gloups.fdn.fr>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <linux/mm.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <asm/semaphore.h>
+#include <linux/major.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <asm/bitops.h>
+#include <linux/module.h>
+#include <linux/hdreg.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+#include <net/checksum.h>
+#include <linux/random.h>
+#include <linux/locks.h>
+#include <asm/io.h>
+
+#include <linux/raid/md_compatible.h>
+/*
+ * 'md_p.h' holds the 'physical' layout of RAID devices
+ * 'md_u.h' holds the user <=> kernel API
+ *
+ * 'md_k.h' holds kernel internal definitions
+ */
+
+#include <linux/raid/md_p.h>
+#include <linux/raid/md_u.h>
+#include <linux/raid/md_k.h>
+
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 90
+#define MD_PATCHLEVEL_VERSION 0
+
+extern int md_size[MAX_MD_DEVS];
+extern struct hd_struct md_hd_struct[MAX_MD_DEVS];
+
+extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data);
+extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev);
+extern char * partition_name (kdev_t dev);
+extern int register_md_personality (int p_num, mdk_personality_t *p);
+extern int unregister_md_personality (int p_num);
+extern mdk_thread_t * md_register_thread (void (*run) (void *data),
+ void *data, const char *name);
+extern void md_unregister_thread (mdk_thread_t *thread);
+extern void md_wakeup_thread(mdk_thread_t *thread);
+extern void md_interrupt_thread (mdk_thread_t *thread);
+extern int md_update_sb (mddev_t *mddev);
+extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare);
+extern void md_recover_arrays (void);
+extern int md_check_ordering (mddev_t *mddev);
+extern void autodetect_raid(void);
+extern struct gendisk * find_gendisk (kdev_t dev);
+extern int md_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+#ifdef CONFIG_BLK_DEV_MD
+extern void raid_setup(char *str,int *ints) md__init;
+#endif
+
+extern void md_print_devices (void);
+
+#define MD_BUG(x...) do { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } while (0)
+
+#endif /* _MD_H */
+
diff --git a/include/linux/raid/md_compatible.h b/include/linux/raid/md_compatible.h
new file mode 100644
index 000000000..fbeac31a1
--- /dev/null
+++ b/include/linux/raid/md_compatible.h
@@ -0,0 +1,160 @@
+
+/*
+ md.h : Multiple Devices driver compatibility layer for Linux 2.0/2.2
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/version.h>
+
+#ifndef _MD_COMPATIBLE_H
+#define _MD_COMPATIBLE_H
+
+/** 2.3/2.4 stuff: **/
+
+#include <linux/reboot.h>
+#include <linux/vmalloc.h>
+#include <linux/blkpg.h>
+
+/* 000 */
+#define md__get_free_pages(x,y) __get_free_pages(x,y)
+
+#ifdef __i386__
+/* 001 */
+extern __inline__ int md_cpu_has_mmx(void)
+{
+ return boot_cpu_data.x86_capability & X86_FEATURE_MMX;
+}
+#endif
+
+/* 002 */
+#define md_clear_page(page) clear_page(page)
+
+/* 003 */
+#define MD_EXPORT_SYMBOL(x) EXPORT_SYMBOL(x)
+
+/* 004 */
+#define md_copy_to_user(x,y,z) copy_to_user(x,y,z)
+
+/* 005 */
+#define md_copy_from_user(x,y,z) copy_from_user(x,y,z)
+
+/* 006 */
+#define md_put_user put_user
+
+/* 007 */
+extern inline int md_capable_admin(void)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+/* 008 */
+#define MD_FILE_TO_INODE(file) ((file)->f_dentry->d_inode)
+
+/* 009 */
+extern inline void md_flush_signals (void)
+{
+ spin_lock(&current->sigmask_lock);
+ flush_signals(current);
+ spin_unlock(&current->sigmask_lock);
+}
+
+/* 010 */
+extern inline void md_init_signals (void)
+{
+ current->exit_signal = SIGCHLD;
+ siginitsetinv(&current->blocked, sigmask(SIGKILL));
+}
+
+/* 011 */
+#define md_signal_pending signal_pending
+
+/* 012 */
+extern inline void md_set_global_readahead(int * table)
+{
+ max_readahead[MD_MAJOR] = table;
+}
+
+/* 013 */
+#define md_mdelay(x) mdelay(x)
+
+/* 014 */
+#define MD_SYS_DOWN SYS_DOWN
+#define MD_SYS_HALT SYS_HALT
+#define MD_SYS_POWER_OFF SYS_POWER_OFF
+
+/* 015 */
+#define md_register_reboot_notifier register_reboot_notifier
+
+/* 016 */
+#define md_test_and_set_bit test_and_set_bit
+
+/* 017 */
+#define md_test_and_clear_bit test_and_clear_bit
+
+/* 018 */
+#define md_atomic_read atomic_read
+#define md_atomic_set atomic_set
+
+/* 019 */
+#define md_lock_kernel lock_kernel
+#define md_unlock_kernel unlock_kernel
+
+/* 020 */
+
+#include <linux/init.h>
+
+#define md__init __init
+#define md__initdata __initdata
+#define md__initfunc(__arginit) __initfunc(__arginit)
+
+/* 021 */
+
+
+/* 022 */
+
+#define md_list_head list_head
+#define MD_LIST_HEAD(name) LIST_HEAD(name)
+#define MD_INIT_LIST_HEAD(ptr) INIT_LIST_HEAD(ptr)
+#define md_list_add list_add
+#define md_list_del list_del
+#define md_list_empty list_empty
+
+#define md_list_entry(ptr, type, member) list_entry(ptr, type, member)
+
+/* 023 */
+
+#define md_schedule_timeout schedule_timeout
+
+/* 024 */
+#define md_need_resched(tsk) ((tsk)->need_resched)
+
+/* 025 */
+#define md_spinlock_t spinlock_t
+#define MD_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+
+#define md_spin_lock spin_lock
+#define md_spin_unlock spin_unlock
+#define md_spin_lock_irq spin_lock_irq
+#define md_spin_unlock_irq spin_unlock_irq
+#define md_spin_unlock_irqrestore spin_unlock_irqrestore
+#define md_spin_lock_irqsave spin_lock_irqsave
+
+/* 026 */
+typedef wait_queue_head_t md_wait_queue_head_t;
+#define MD_DECLARE_WAITQUEUE(w,t) DECLARE_WAITQUEUE((w),(t))
+#define MD_DECLARE_WAIT_QUEUE_HEAD(x) DECLARE_WAIT_QUEUE_HEAD(x)
+#define md_init_waitqueue_head init_waitqueue_head
+
+/* END */
+
+#endif /* _MD_COMPATIBLE_H */
+
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
new file mode 100644
index 000000000..d00563357
--- /dev/null
+++ b/include/linux/raid/md_k.h
@@ -0,0 +1,341 @@
+/*
+ md_k.h : kernel internal structure of the Linux MD driver
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_K_H
+#define _MD_K_H
+
+#define MD_RESERVED 0UL
+#define LINEAR 1UL
+#define STRIPED 2UL
+#define RAID0 STRIPED
+#define RAID1 3UL
+#define RAID5 4UL
+#define TRANSLUCENT 5UL
+#define HSM 6UL
+#define MAX_PERSONALITY 7UL
+
+extern inline int pers_to_level (int pers)
+{
+ switch (pers) {
+ case HSM: return -3;
+ case TRANSLUCENT: return -2;
+ case LINEAR: return -1;
+ case RAID0: return 0;
+ case RAID1: return 1;
+ case RAID5: return 5;
+ }
+ panic("pers_to_level()");
+}
+
+extern inline int level_to_pers (int level)
+{
+ switch (level) {
+ case -3: return HSM;
+ case -2: return TRANSLUCENT;
+ case -1: return LINEAR;
+ case 0: return RAID0;
+ case 1: return RAID1;
+ case 4:
+ case 5: return RAID5;
+ }
+ return MD_RESERVED;
+}
+
+typedef struct mddev_s mddev_t;
+typedef struct mdk_rdev_s mdk_rdev_t;
+
+#if (MINORBITS != 8)
+#error MD does not handle bigger kdev yet
+#endif
+
+#define MAX_REAL 12 /* Max number of disks per md dev */
+#define MAX_MD_DEVS (1<<MINORBITS) /* Max number of md dev */
+
+/*
+ * Maps a kdev to an mddev/subdev. How 'data' is handled is up to
+ * the personality. (eg. HSM uses this to identify individual LVs)
+ */
+typedef struct dev_mapping_s {
+ mddev_t *mddev;
+ void *data;
+} dev_mapping_t;
+
+extern dev_mapping_t mddev_map [MAX_MD_DEVS];
+
+extern inline mddev_t * kdev_to_mddev (kdev_t dev)
+{
+ return mddev_map[MINOR(dev)].mddev;
+}
+
+/*
+ * options passed in raidrun:
+ */
+
+#define MAX_CHUNK_SIZE (4096*1024)
+
+/*
+ * default readahead
+ */
+#define MD_READAHEAD (256 * 512)
+
+extern inline int disk_faulty(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_FAULTY);
+}
+
+extern inline int disk_active(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_ACTIVE);
+}
+
+extern inline int disk_sync(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_SYNC);
+}
+
+extern inline int disk_spare(mdp_disk_t * d)
+{
+ return !disk_sync(d) && !disk_active(d) && !disk_faulty(d);
+}
+
+extern inline int disk_removed(mdp_disk_t * d)
+{
+ return d->state & (1 << MD_DISK_REMOVED);
+}
+
+extern inline void mark_disk_faulty(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_FAULTY);
+}
+
+extern inline void mark_disk_active(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_ACTIVE);
+}
+
+extern inline void mark_disk_sync(mdp_disk_t * d)
+{
+ d->state |= (1 << MD_DISK_SYNC);
+}
+
+extern inline void mark_disk_spare(mdp_disk_t * d)
+{
+ d->state = 0;
+}
+
+extern inline void mark_disk_removed(mdp_disk_t * d)
+{
+ d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED);
+}
+
+extern inline void mark_disk_inactive(mdp_disk_t * d)
+{
+ d->state &= ~(1 << MD_DISK_ACTIVE);
+}
+
+extern inline void mark_disk_nonsync(mdp_disk_t * d)
+{
+ d->state &= ~(1 << MD_DISK_SYNC);
+}
+
+/*
+ * MD's 'extended' device
+ */
+struct mdk_rdev_s
+{
+ struct md_list_head same_set; /* RAID devices within the same set */
+ struct md_list_head all; /* all RAID devices */
+ struct md_list_head pending; /* undetected RAID devices */
+
+ kdev_t dev; /* Device number */
+ kdev_t old_dev; /* "" when it was last imported */
+ int size; /* Device size (in blocks) */
+ mddev_t *mddev; /* RAID array if running */
+ unsigned long last_events; /* IO event timestamp */
+
+ struct inode *inode; /* Lock inode */
+ struct file filp; /* Lock file */
+
+ mdp_super_t *sb;
+ int sb_offset;
+
+ int faulty; /* if faulty do not issue IO requests */
+ int desc_nr; /* descriptor index in the superblock */
+};
+
+
+/*
+ * disk operations in a working array:
+ */
+#define DISKOP_SPARE_INACTIVE 0
+#define DISKOP_SPARE_WRITE 1
+#define DISKOP_SPARE_ACTIVE 2
+#define DISKOP_HOT_REMOVE_DISK 3
+#define DISKOP_HOT_ADD_DISK 4
+
+typedef struct mdk_personality_s mdk_personality_t;
+
+struct mddev_s
+{
+ void *private;
+ mdk_personality_t *pers;
+ int __minor;
+ mdp_super_t *sb;
+ int nb_dev;
+ struct md_list_head disks;
+ int sb_dirty;
+ mdu_param_t param;
+ int ro;
+ unsigned int curr_resync;
+ unsigned long resync_start;
+ char *name;
+ int recovery_running;
+ struct semaphore reconfig_sem;
+ struct semaphore recovery_sem;
+ struct semaphore resync_sem;
+ struct md_list_head all_mddevs;
+ request_queue_t queue;
+};
+
+struct mdk_personality_s
+{
+ char *name;
+ int (*map)(mddev_t *mddev, kdev_t dev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size);
+ int (*make_request)(mddev_t *mddev, int rw, struct buffer_head * bh);
+ void (*end_request)(struct buffer_head * bh, int uptodate);
+ int (*run)(mddev_t *mddev);
+ int (*stop)(mddev_t *mddev);
+ int (*status)(char *page, mddev_t *mddev);
+ int (*ioctl)(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ int max_invalid_dev;
+ int (*error_handler)(mddev_t *mddev, kdev_t dev);
+
+/*
+ * Some personalities (RAID-1, RAID-5) can have disks hot-added and
+ * hot-removed. Hot removal is different from failure. (failure marks
+ * a disk inactive, but the disk is still part of the array) The interface
+ * to such operations is the 'pers->diskop()' function, can be NULL.
+ *
+ * the diskop function can change the pointer pointing to the incoming
+ * descriptor, but must do so very carefully. (currently only
+ * SPARE_ACTIVE expects such a change)
+ */
+ int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state);
+
+ int (*stop_resync)(mddev_t *mddev);
+ int (*restart_resync)(mddev_t *mddev);
+};
+
+
+/*
+ * Currently we index md_array directly, based on the minor
+ * number. This will have to change to dynamic allocation
+ * once we start supporting partitioning of md devices.
+ */
+extern inline int mdidx (mddev_t * mddev)
+{
+ return mddev->__minor;
+}
+
+extern inline kdev_t mddev_to_kdev(mddev_t * mddev)
+{
+ return MKDEV(MD_MAJOR, mdidx(mddev));
+}
+
+extern mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev);
+extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
+
+/*
+ * iterates through some rdev ringlist. It's safe to remove the
+ * current 'rdev'. Dont touch 'tmp' though.
+ */
+#define ITERATE_RDEV_GENERIC(head,field,rdev,tmp) \
+ \
+ for (tmp = head.next; \
+ rdev = md_list_entry(tmp, mdk_rdev_t, field), \
+ tmp = tmp->next, tmp->prev != &head \
+ ; )
+/*
+ * iterates through the 'same array disks' ringlist
+ */
+#define ITERATE_RDEV(mddev,rdev,tmp) \
+ ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp)
+
+/*
+ * Same as above, but assumes that the device has rdev->desc_nr numbered
+ * from 0 to mddev->nb_dev, and iterates through rdevs in ascending order.
+ */
+#define ITERATE_RDEV_ORDERED(mddev,rdev,i) \
+ for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++)
+
+
+/*
+ * Iterates through all 'RAID managed disks'
+ */
+#define ITERATE_RDEV_ALL(rdev,tmp) \
+ ITERATE_RDEV_GENERIC(all_raid_disks,all,rdev,tmp)
+
+/*
+ * Iterates through 'pending RAID disks'
+ */
+#define ITERATE_RDEV_PENDING(rdev,tmp) \
+ ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp)
+
+/*
+ * iterates through all used mddevs in the system.
+ */
+#define ITERATE_MDDEV(mddev,tmp) \
+ \
+ for (tmp = all_mddevs.next; \
+ mddev = md_list_entry(tmp, mddev_t, all_mddevs), \
+ tmp = tmp->next, tmp->prev != &all_mddevs \
+ ; )
+
+extern inline int lock_mddev (mddev_t * mddev)
+{
+ return down_interruptible(&mddev->reconfig_sem);
+}
+
+extern inline void unlock_mddev (mddev_t * mddev)
+{
+ up(&mddev->reconfig_sem);
+}
+
+#define xchg_values(x,y) do { __typeof__(x) __tmp = x; \
+ x = y; y = __tmp; } while (0)
+
+typedef struct mdk_thread_s {
+ void (*run) (void *data);
+ void *data;
+ md_wait_queue_head_t wqueue;
+ unsigned long flags;
+ struct semaphore *sem;
+ struct task_struct *tsk;
+ const char *name;
+} mdk_thread_t;
+
+#define THREAD_WAKEUP 0
+
+#define MAX_DISKNAME_LEN 32
+
+typedef struct dev_name_s {
+ struct md_list_head list;
+ kdev_t dev;
+ char name [MAX_DISKNAME_LEN];
+} dev_name_t;
+
+#endif /* _MD_K_H */
+
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
new file mode 100644
index 000000000..1b9632c14
--- /dev/null
+++ b/include/linux/raid/md_p.h
@@ -0,0 +1,161 @@
+/*
+ md_p.h : physical layout of Linux RAID devices
+ Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_P_H
+#define _MD_P_H
+
+/*
+ * RAID superblock.
+ *
+ * The RAID superblock maintains some statistics on each RAID configuration.
+ * Each real device in the RAID set contains it near the end of the device.
+ * Some of the ideas are copied from the ext2fs implementation.
+ *
+ * We currently use 4096 bytes as follows:
+ *
+ * word offset function
+ *
+ * 0 - 31 Constant generic RAID device information.
+ * 32 - 63 Generic state information.
+ * 64 - 127 Personality specific information.
+ * 128 - 511 12 32-words descriptors of the disks in the raid set.
+ * 512 - 911 Reserved.
+ * 912 - 1023 Disk specific descriptor.
+ */
+
+/*
+ * If x is the real device size in bytes, we return an apparent size of:
+ *
+ * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
+ *
+ * and place the 4kB superblock at offset y.
+ */
+#define MD_RESERVED_BYTES (64 * 1024)
+#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
+#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
+
+#define MD_NEW_SIZE_SECTORS(x) (((x) & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
+#define MD_NEW_SIZE_BLOCKS(x) (((x) & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
+
+#define MD_SB_BYTES 4096
+#define MD_SB_WORDS (MD_SB_BYTES / 4)
+#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
+#define MD_SB_SECTORS (MD_SB_BYTES / 512)
+
+/*
+ * The following are counted in 32-bit words
+ */
+#define MD_SB_GENERIC_OFFSET 0
+#define MD_SB_PERSONALITY_OFFSET 64
+#define MD_SB_DISKS_OFFSET 128
+#define MD_SB_DESCRIPTOR_OFFSET 992
+
+#define MD_SB_GENERIC_CONSTANT_WORDS 32
+#define MD_SB_GENERIC_STATE_WORDS 32
+#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
+#define MD_SB_PERSONALITY_WORDS 64
+#define MD_SB_DESCRIPTOR_WORDS 32
+#define MD_SB_DISKS 27
+#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
+
+/*
+ * Device "operational" state bits
+ */
+#define MD_DISK_FAULTY 0 /* disk is faulty / operational */
+#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
+#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
+#define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */
+
+typedef struct mdp_device_descriptor_s {
+ __u32 number; /* 0 Device number in the entire set */
+ __u32 major; /* 1 Device major number */
+ __u32 minor; /* 2 Device minor number */
+ __u32 raid_disk; /* 3 The role of the device in the raid set */
+ __u32 state; /* 4 Operational state */
+ __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
+} mdp_disk_t;
+
+#define MD_SB_MAGIC 0xa92b4efc
+
+/*
+ * Superblock state bits
+ */
+#define MD_SB_CLEAN 0
+#define MD_SB_ERRORS 1
+
+typedef struct mdp_superblock_s {
+ /*
+ * Constant generic information
+ */
+ __u32 md_magic; /* 0 MD identifier */
+ __u32 major_version; /* 1 major version to which the set conforms */
+ __u32 minor_version; /* 2 minor version ... */
+ __u32 patch_version; /* 3 patchlevel version ... */
+ __u32 gvalid_words; /* 4 Number of used words in this section */
+ __u32 set_uuid0; /* 5 Raid set identifier */
+ __u32 ctime; /* 6 Creation time */
+ __u32 level; /* 7 Raid personality */
+ __u32 size; /* 8 Apparent size of each individual disk */
+ __u32 nr_disks; /* 9 total disks in the raid set */
+ __u32 raid_disks; /* 10 disks in a fully functional raid set */
+ __u32 md_minor; /* 11 preferred MD minor device number */
+ __u32 not_persistent; /* 12 does it have a persistent superblock */
+ __u32 set_uuid1; /* 13 Raid set identifier #2 */
+ __u32 set_uuid2; /* 14 Raid set identifier #3 */
+ __u32 set_uuid3; /* 15 Raid set identifier #4 */
+ __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];
+
+ /*
+ * Generic state information
+ */
+ __u32 utime; /* 0 Superblock update time */
+ __u32 state; /* 1 State bits (clean, ...) */
+ __u32 active_disks; /* 2 Number of currently active disks */
+ __u32 working_disks; /* 3 Number of working disks */
+ __u32 failed_disks; /* 4 Number of failed disks */
+ __u32 spare_disks; /* 5 Number of spare disks */
+ __u32 sb_csum; /* 6 checksum of the whole superblock */
+ __u64 events; /* 7 number of superblock updates (64-bit!) */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 9];
+
+ /*
+ * Personality information
+ */
+ __u32 layout; /* 0 the array's physical layout */
+ __u32 chunk_size; /* 1 chunk size in bytes */
+ __u32 root_pv; /* 2 LV root PV */
+ __u32 root_block; /* 3 LV root block */
+ __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];
+
+ /*
+ * Disks information
+ */
+ mdp_disk_t disks[MD_SB_DISKS];
+
+ /*
+ * Reserved
+ */
+ __u32 reserved[MD_SB_RESERVED_WORDS];
+
+ /*
+ * Active descriptor
+ */
+ mdp_disk_t this_disk;
+
+} mdp_super_t;
+
+#endif /* _MD_P_H */
+
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
new file mode 100644
index 000000000..9478513f9
--- /dev/null
+++ b/include/linux/raid/md_u.h
@@ -0,0 +1,115 @@
+/*
+ md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_U_H
+#define _MD_U_H
+
+/* ioctls */
+
+/* status */
+#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
+#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
+#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
+#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
+
+/* configuration */
+#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
+#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t)
+#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22)
+#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t)
+#define SET_DISK_INFO _IO (MD_MAJOR, 0x24)
+#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25)
+#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26)
+#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27)
+#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28)
+#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29)
+
+/* usage */
+#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t)
+#define START_ARRAY _IO (MD_MAJOR, 0x31)
+#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
+#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
+#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+
+typedef struct mdu_version_s {
+ int major;
+ int minor;
+ int patchlevel;
+} mdu_version_t;
+
+typedef struct mdu_array_info_s {
+ /*
+ * Generic constant information
+ */
+ int major_version;
+ int minor_version;
+ int patch_version;
+ int ctime;
+ int level;
+ int size;
+ int nr_disks;
+ int raid_disks;
+ int md_minor;
+ int not_persistent;
+
+ /*
+ * Generic state information
+ */
+ int utime; /* 0 Superblock update time */
+ int state; /* 1 State bits (clean, ...) */
+ int active_disks; /* 2 Number of currently active disks */
+ int working_disks; /* 3 Number of working disks */
+ int failed_disks; /* 4 Number of failed disks */
+ int spare_disks; /* 5 Number of spare disks */
+
+ /*
+ * Personality information
+ */
+ int layout; /* 0 the array's physical layout */
+ int chunk_size; /* 1 chunk size in bytes */
+
+} mdu_array_info_t;
+
+typedef struct mdu_disk_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int number;
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_disk_info_t;
+
+typedef struct mdu_start_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_start_info_t;
+
+typedef struct mdu_param_s
+{
+ int personality; /* 1,2,3,4 */
+ int chunk_size; /* in bytes */
+ int max_fault; /* unused for now */
+} mdu_param_t;
+
+#endif /* _MD_U_H */
+
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
new file mode 100644
index 000000000..3ea74db60
--- /dev/null
+++ b/include/linux/raid/raid0.h
@@ -0,0 +1,33 @@
+#ifndef _RAID0_H
+#define _RAID0_H
+
+#include <linux/raid/md.h>
+
+struct strip_zone
+{
+ int zone_offset; /* Zone offset in md_dev */
+ int dev_offset; /* Zone offset in real dev */
+ int size; /* Zone size */
+ int nb_dev; /* # of devices attached to the zone */
+ mdk_rdev_t *dev[MAX_REAL]; /* Devices attached to the zone */
+};
+
+struct raid0_hash
+{
+ struct strip_zone *zone0, *zone1;
+};
+
+struct raid0_private_data
+{
+ struct raid0_hash *hash_table; /* Dynamically allocated */
+ struct strip_zone *strip_zone; /* This one too */
+ int nr_strip_zones;
+ struct strip_zone *smallest;
+ int nr_zones;
+};
+
+typedef struct raid0_private_data raid0_conf_t;
+
+#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
+
+#endif
diff --git a/include/linux/raid0.h b/include/linux/raid0.h
deleted file mode 100644
index e1ae51c02..000000000
--- a/include/linux/raid0.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _RAID0_H
-#define _RAID0_H
-
-struct strip_zone
-{
- int zone_offset; /* Zone offset in md_dev */
- int dev_offset; /* Zone offset in real dev */
- int size; /* Zone size */
- int nb_dev; /* Number of devices attached to the zone */
- struct real_dev *dev[MAX_REAL]; /* Devices attached to the zone */
-};
-
-struct raid0_hash
-{
- struct strip_zone *zone0, *zone1;
-};
-
-struct raid0_data
-{
- struct raid0_hash *hash_table; /* Dynamically allocated */
- struct strip_zone *strip_zone; /* This one too */
- int nr_strip_zones;
- struct strip_zone *smallest;
- int nr_zones;
-};
-
-#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 21c5a0198..55510558c 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -1,13 +1,13 @@
-/* $Id: rtc.h,v 1.1 1998/07/09 20:01:31 ralf Exp $
- *
- * Interface definitions for the /dev/rtc realtime clock interface.
- *
- * permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
+/*
+ * Generic RTC interface.
+ * This version contains the part of the user interface to the Real Time Clock
+ * service. It is used with both the legacy mc146818 and also EFI
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
#ifndef _LINUX_RTC_H
-#define _LINUX_RTC_H
+#define _LINUX_RTC_H
/*
* The struct used to pass data via the following ioctl. Similar to the
@@ -29,18 +29,17 @@ struct rtc_time {
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
- * CONFIG_RTC or CONFIG_SGI_DS1286 are enabled. The interface definitions
- * in this file are a superset from the features provided by actual
- * RTC driver and chip implementations.
+ * CONFIG_RTC/CONFIG_EFI_RTC was enabled.
*/
+
#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */
#define RTC_AIE_OFF _IO('p', 0x02) /* ... off */
#define RTC_UIE_ON _IO('p', 0x03) /* Update int. enable on */
#define RTC_UIE_OFF _IO('p', 0x04) /* ... off */
#define RTC_PIE_ON _IO('p', 0x05) /* Periodic int. enable on */
#define RTC_PIE_OFF _IO('p', 0x06) /* ... off */
-#define RTC_WIE_ON _IO('p', 0x0f) /* Watchdog int. enable on */
-#define RTC_WIE_OFF _IO('p', 0x10) /* ... off */
+#define RTC_WIE_ON _IO('p', 0x0f) /* Watchdog int. enable on */
+#define RTC_WIE_OFF _IO('p', 0x10) /* ... off */
#define RTC_ALM_SET _IOW('p', 0x07, struct rtc_time) /* Set alarm time */
#define RTC_ALM_READ _IOR('p', 0x08, struct rtc_time) /* Read alarm time */
@@ -51,4 +50,4 @@ struct rtc_time {
#define RTC_EPOCH_READ _IOR('p', 0x0d, unsigned long) /* Read epoch */
#define RTC_EPOCH_SET _IOW('p', 0x0e, unsigned long) /* Set epoch */
-#endif /* _LINUX_RTC_H */
+#endif /* _LINUX_RTC_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index e8f6c7328..f27ad591f 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -434,11 +434,15 @@ enum
IFLA_QDISC,
IFLA_STATS,
IFLA_COST,
- IFLA_PRIORITY
+#define IFLA_COST IFLA_COST
+ IFLA_PRIORITY,
+#define IFLA_PRIORITY IFLA_PRIORITY
+ IFLA_MASTER
+#define IFLA_MASTER IFLA_MASTER
};
-#define IFLA_MAX IFLA_STATS
+#define IFLA_MAX IFLA_MASTER
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
@@ -464,7 +468,7 @@ enum
IFF_BROADCAST devices are able to use multicasts too.
*/
-/* ifi_link.
+/* IFLA_LINK.
For usual devices it is equal ifi_index.
If it is a "virtual interface" (f.e. tunnel), ifi_link
can point to real physical interface (f.e. for bandwidth calculations),
@@ -558,6 +562,13 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
#define RTA_PUT(skb, attrtype, attrlen, data) \
({ if (skb_tailroom(skb) < (int)RTA_SPACE(attrlen)) goto rtattr_failure; \
__rta_fill(skb, attrtype, attrlen, data); })
+
+extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
+
+#else
+
+#define rtmsg_ifinfo(a,b,c) do { } while (0)
+
#endif
extern struct semaphore rtnl_sem;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 615eba6c5..6ec45eeff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -80,7 +80,6 @@ extern int last_pid;
#define TASK_UNINTERRUPTIBLE 2
#define TASK_ZOMBIE 4
#define TASK_STOPPED 8
-#define TASK_SWAPPING 16
#define TASK_EXCLUSIVE 32
#define __set_task_state(tsk, state_value) \
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 2f24729f6..e8b7d1f7c 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -2,6 +2,7 @@
#define _LINUX_SHM_H_
#include <linux/ipc.h>
+#include <linux/mm.h>
/*
* SHMMAX, SHMMNI and SHMALL are upper limits are defaults which can
diff --git a/include/linux/smb_fs.h b/include/linux/smb_fs.h
index 451ff761a..52c3d7f33 100644
--- a/include/linux/smb_fs.h
+++ b/include/linux/smb_fs.h
@@ -101,6 +101,7 @@ int smb_mmap(struct file *, struct vm_area_struct *);
/* linux/fs/smbfs/file.c */
extern struct inode_operations smb_file_inode_operations;
+extern struct address_space_operations smb_file_aops;
/* linux/fs/smbfs/dir.c */
extern struct inode_operations smb_dir_inode_operations;
diff --git a/include/linux/sonet.h b/include/linux/sonet.h
index d4e12b8f1..c2307389c 100644
--- a/include/linux/sonet.h
+++ b/include/linux/sonet.h
@@ -1,22 +1,22 @@
/* sonet.h - SONET/SHD physical layer control */
-/* Written 1995 by Werner Almesberger, EPFL LRC */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
#ifndef LINUX_SONET_H
#define LINUX_SONET_H
struct sonet_stats {
- long section_bip; /* section parity errors (B1) */
- long line_bip; /* line parity errors (B2) */
- long path_bip; /* path parity errors (B3) */
- long line_febe; /* line parity errors at remote */
- long path_febe; /* path parity errors at remote */
- long corr_hcs; /* correctable header errors */
- long uncorr_hcs; /* uncorrectable header errors */
- long tx_cells; /* cells sent */
- long rx_cells; /* cells received */
-};
+ int section_bip; /* section parity errors (B1) */
+ int line_bip; /* line parity errors (B2) */
+ int path_bip; /* path parity errors (B3) */
+ int line_febe; /* line parity errors at remote */
+ int path_febe; /* path parity errors at remote */
+ int corr_hcs; /* correctable header errors */
+ int uncorr_hcs; /* uncorrectable header errors */
+ int tx_cells; /* cells sent */
+ int rx_cells; /* cells received */
+} __attribute__ ((packed));
#define SONET_GETSTAT _IOR('a',ATMIOC_PHYTYP,struct sonet_stats)
/* get statistics */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 6c1d060d0..a84ae422c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -53,6 +53,7 @@
#define spin_lock_init(lock) do { } while(0)
#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
+#define spin_is_locked(lock) (0)
#define spin_trylock(lock) ({1; })
#define spin_unlock_wait(lock) do { } while(0)
#define spin_unlock(lock) do { } while(0)
@@ -65,6 +66,7 @@ typedef struct {
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_is_locked(lock) (test_bit(0,(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define spin_lock(x) do { (x)->lock = 1; } while (0)
@@ -83,6 +85,7 @@ typedef struct {
#include <linux/kernel.h>
#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_is_locked(lock) (test_bit(0,(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index db1cb7c41..1e17e52bb 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -29,6 +29,7 @@ struct svc_serv {
struct svc_sock * sv_sockets; /* pending sockets */
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
+ spinlock_t sv_lock;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_bufsz; /* datagram buffer size */
unsigned int sv_xdrsize; /* XDR buffer size */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 5f8dc8768..82d9678d4 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -21,6 +21,7 @@ struct svc_sock {
struct svc_sock * sk_list; /* list of all sockets */
struct socket * sk_sock; /* berkeley socket layer */
struct sock * sk_sk; /* INET layer */
+ spinlock_t sk_lock;
struct svc_serv * sk_server; /* service for this socket */
unsigned char sk_inuse; /* use count */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 03148253d..e7c710646 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -183,7 +183,8 @@ enum
NET_CORE_FASTROUTE=7,
NET_CORE_MSG_COST=8,
NET_CORE_MSG_BURST=9,
- NET_CORE_OPTMEM_MAX=10
+ NET_CORE_OPTMEM_MAX=10,
+ NET_CORE_HOT_LIST_LENGTH=11
};
/* /proc/sys/net/ethernet */
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
index 5168218e0..4f65ba7dd 100644
--- a/include/linux/sysv_fs.h
+++ b/include/linux/sysv_fs.h
@@ -384,7 +384,6 @@ extern int sysv_new_block(struct super_block * sb);
extern void sysv_free_block(struct super_block * sb, unsigned int block);
extern unsigned long sysv_count_free_blocks(struct super_block *sb);
-extern int sysv_get_block(struct inode *, long, struct buffer_head *, int);
extern struct buffer_head * sysv_file_bread(struct inode *, int, int);
extern void sysv_truncate(struct inode *);
@@ -394,9 +393,8 @@ extern int sysv_sync_inode(struct inode *);
extern int sysv_sync_file(struct file *, struct dentry *);
extern struct inode_operations sysv_file_inode_operations;
-extern struct inode_operations sysv_file_inode_operations_with_bmap;
extern struct inode_operations sysv_dir_inode_operations;
-extern struct inode_operations sysv_symlink_inode_operations;
+extern struct address_space_operations sysv_aops;
#endif /* __KERNEL__ */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 796749fdb..d159222b7 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -51,6 +51,7 @@ struct timer_list {
unsigned long expires;
unsigned long data;
void (*function)(unsigned long);
+ volatile int running;
};
extern void add_timer(struct timer_list * timer);
@@ -61,7 +62,7 @@ extern int del_timer(struct timer_list * timer);
* active timer (if the timer is inactive it will be activated)
* mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a)
*/
-void mod_timer(struct timer_list *timer, unsigned long expires);
+int mod_timer(struct timer_list *timer, unsigned long expires);
extern void it_real_fn(unsigned long);
@@ -69,6 +70,9 @@ extern inline void init_timer(struct timer_list * timer)
{
timer->next = NULL;
timer->prev = NULL;
+#ifdef __SMP__
+ timer->running = 0;
+#endif
}
extern inline int timer_pending(const struct timer_list * timer)
@@ -76,6 +80,20 @@ extern inline int timer_pending(const struct timer_list * timer)
return timer->prev != NULL;
}
+#ifdef __SMP__
+#define timer_exit(t) do { (t)->running = 0; mb(); } while (0)
+#define timer_set_running(t) do { (t)->running = 1; mb(); } while (0)
+#define timer_is_running(t) ((t)->running != 0)
+#define timer_synchronize(t) while (timer_is_running(t)) barrier()
+extern int del_timer_sync(struct timer_list * timer);
+#else
+#define timer_exit(t) do { } while (0)
+#define timer_set_running(t) do { } while (0)
+#define timer_is_running(t) (0)
+#define timer_synchronize(t) barrier()
+#define del_timer_sync(t) del_timer(t)
+#endif
+
/*
* These inlines deal with timer wrapping correctly. You are
* strongly encouraged to use them
diff --git a/include/linux/tqueue.h b/include/linux/tqueue.h
index b02f07665..934850f26 100644
--- a/include/linux/tqueue.h
+++ b/include/linux/tqueue.h
@@ -116,7 +116,8 @@ extern __inline__ void run_task_queue(task_queue *list)
p = p -> next;
mb();
save_p -> sync = 0;
- (*f)(arg);
+ if (f)
+ (*f)(arg);
}
}
}
diff --git a/include/linux/udf_167.h b/include/linux/udf_167.h
index ee09bd9ad..19b3aa8e2 100644
--- a/include/linux/udf_167.h
+++ b/include/linux/udf_167.h
@@ -390,10 +390,13 @@ struct LogicalVolIntegrityDesc {
#define INTEGRITY_TYPE_CLOSE 1
/* Recorded Address (ECMA 167 4/7.1) */
+#ifndef _LINUX_UDF_FS_I_H
+/* Declared in udf_fs_i.h */
typedef struct {
Uint32 logicalBlockNum;
Uint16 partitionReferenceNum;
} lb_addr;
+#endif
/* Extent interpretation (ECMA 167 4/14.14.1.1) */
#define EXTENT_RECORDED_ALLOCATED 0x00
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index b650d7897..83a6a69e7 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -525,6 +525,8 @@ extern int ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_ent
extern struct inode_operations ufs_file_inode_operations;
extern struct file_operations ufs_file_operations;
+extern struct address_space_operations ufs_aops;
+
/* ialloc.c */
extern void ufs_free_inode (struct inode *inode);
extern struct inode * ufs_new_inode (const struct inode *, int, int *);
@@ -538,7 +540,6 @@ extern int ufs_sync_inode (struct inode *);
extern void ufs_write_inode (struct inode *);
extern void ufs_delete_inode (struct inode *);
extern struct buffer_head * ufs_getfrag (struct inode *, unsigned, int, int *);
-extern int ufs_getfrag_block (struct inode *, long, struct buffer_head *, int);
extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
/* namei.c */
@@ -562,7 +563,6 @@ extern int init_ufs_fs(void);
extern void ufs_write_super (struct super_block *);
/* symlink.c */
-extern struct inode_operations ufs_symlink_inode_operations;
extern struct inode_operations ufs_fast_symlink_inode_operations;
/* truncate.c */
diff --git a/include/linux/umsdos_fs.h b/include/linux/umsdos_fs.h
index 56ebe78aa..1e7a0bead 100644
--- a/include/linux/umsdos_fs.h
+++ b/include/linux/umsdos_fs.h
@@ -174,7 +174,6 @@ struct umsdos_ioctl {
#endif
extern struct inode_operations umsdos_dir_inode_operations;
-extern struct inode_operations umsdos_symlink_inode_operations;
extern int init_umsdos_fs (void);
#include <linux/umsdos_fs.p>
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index ca1ec519d..626b2524d 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -15,7 +15,7 @@
#include <linux/config.h>
-#ifdef CONFIG_VGA_CONSOLE
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
#endif
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index f350c3103..edcae7c37 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -1,6 +1,6 @@
/* net/atm/atmarp.h - RFC1577 ATM ARP */
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
#ifndef _ATMCLIP_H
@@ -26,7 +26,9 @@ struct clip_vcc {
unsigned long last_use; /* last send or receive operation */
unsigned long idle_timeout; /* keep open idle for so many jiffies*/
void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
- /* keep old push fn for detaching */
+ /* keep old push fn for chaining */
+ void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb);
+ /* keep old pop fn for chaining */
struct clip_vcc *next; /* next VCC */
};
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index 2201a88d9..778b6baea 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -1,6 +1,6 @@
/* include/net/dsfield.h - Manipulation of the Differentiated Services field */
-/* Written 1998 by Werner Almesberger, EPFL ICA */
+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
#ifndef __NET_DSFIELD_H
@@ -46,7 +46,7 @@ extern __inline__ void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
__u16 tmp;
tmp = ntohs(*(__u16 *) ipv6h);
- tmp = (tmp & (mask << 4)) | (value << 4);
+ tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
*(__u16 *) ipv6h = htons(tmp);
}
diff --git a/include/net/irda/nsc-ircc.h b/include/net/irda/nsc-ircc.h
index 75e5c2bc7..637458dd1 100644
--- a/include/net/irda/nsc-ircc.h
+++ b/include/net/irda/nsc-ircc.h
@@ -31,6 +31,7 @@
#include <linux/time.h>
#include <linux/spinlock.h>
+#include <linux/pm.h>
#include <asm/io.h>
/* DMA modes needed */
@@ -253,6 +254,8 @@ struct nsc_ircc_cb {
__u32 flags; /* Interface flags */
__u32 new_speed;
int index; /* Instance index */
+
+ struct pm_dev *dev;
};
static inline void switch_bank(int iobase, int bank)
diff --git a/include/net/irda/smc-ircc.h b/include/net/irda/smc-ircc.h
index cac7644ca..0bd5e38b4 100644
--- a/include/net/irda/smc-ircc.h
+++ b/include/net/irda/smc-ircc.h
@@ -31,6 +31,7 @@
#define SMC_IRCC_H
#include <linux/spinlock.h>
+#include <linux/pm.h>
#include <net/irda/irport.h>
@@ -181,6 +182,8 @@ struct ircc_cb {
int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
int tx_len; /* Number of frames in tx_buff */
+
+ struct pm_dev *pmdev;
};
#endif /* SMC_IRCC_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 1a6f1dad0..b63398881 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -163,6 +163,7 @@ struct neigh_table
unsigned long last_rand;
struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
+ struct tasklet_struct gc_task;
struct neigh_statistics stats;
struct neighbour *hash_buckets[NEIGH_HASHMASK+1];
struct pneigh_entry *phash_buckets[PNEIGH_HASHMASK+1];
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b866777a6..2c4b4cff9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -63,19 +63,10 @@ struct Qdisc_ops
int (*dump)(struct Qdisc *, struct sk_buff *);
};
-struct Qdisc_head
-{
- struct Qdisc_head *forw;
- struct Qdisc_head *back;
-};
-
-extern struct Qdisc_head qdisc_head;
-extern spinlock_t qdisc_runqueue_lock;
extern rwlock_t qdisc_tree_lock;
struct Qdisc
{
- struct Qdisc_head h;
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
unsigned flags;
@@ -87,11 +78,9 @@ struct Qdisc
u32 handle;
atomic_t refcnt;
struct sk_buff_head q;
- struct net_device *dev;
+ struct net_device *dev;
struct tc_stats stats;
- unsigned long tx_timeo;
- unsigned long tx_last;
int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
/* This field is deprecated, but it is still used by CBQ
@@ -437,60 +426,13 @@ int teql_init(void);
int tc_filter_init(void);
int pktsched_init(void);
-extern void qdisc_run_queues(void);
extern int qdisc_restart(struct net_device *dev);
-extern spinlock_t qdisc_runqueue_lock;
-
-/* Is it on run list? Reliable only under qdisc_runqueue_lock. */
-
-extern __inline__ int qdisc_on_runqueue(struct Qdisc *q)
-{
- return q->h.forw != NULL;
-}
-
-/* Is run list not empty? Reliable only under qdisc_runqueue_lock. */
-
-extern __inline__ int qdisc_pending(void)
-{
- return qdisc_head.forw != &qdisc_head;
-}
-
-/* Add qdisc to tail of run list. Called with BH, disabled on this CPU */
-
-extern __inline__ void qdisc_run(struct Qdisc *q)
-{
- spin_lock(&qdisc_runqueue_lock);
- if (!qdisc_on_runqueue(q) && q->dev) {
- q->h.forw = &qdisc_head;
- q->h.back = qdisc_head.back;
- qdisc_head.back->forw = &q->h;
- qdisc_head.back = &q->h;
- }
- spin_unlock(&qdisc_runqueue_lock);
-}
-
-extern __inline__ int __qdisc_wakeup(struct net_device *dev)
+extern __inline__ void qdisc_run(struct net_device *dev)
{
- int res;
-
- while ((res = qdisc_restart(dev))<0 && !dev->tbusy)
+ while (!test_bit(LINK_STATE_XOFF, &dev->state) &&
+ qdisc_restart(dev)<0)
/* NOTHING */;
-
- return res;
-}
-
-
-/* If the device is not throttled, restart it and add to run list.
- * BH must be disabled on this CPU. Usually, it is called by timers.
- */
-
-extern __inline__ void qdisc_wakeup(struct net_device *dev)
-{
- spin_lock(&dev->queue_lock);
- if (dev->tbusy || __qdisc_wakeup(dev))
- qdisc_run(dev->qdisc);
- spin_unlock(&dev->queue_lock);
}
/* Calculate maximal size of packet seen by hard_start_xmit
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 5105fd220..8bcb17085 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -202,7 +202,7 @@ struct linux_mib
unsigned long __pad[32-26];
};
-#define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_interrupt()].field++)
+#define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_softirq()].field++)
#define SNMP_INC_STATS_BH(mib, field) ((mib)[2*smp_processor_id()].field++)
#define SNMP_INC_STATS_USER(mib, field) ((mib)[2*smp_processor_id()+1].field++)
diff --git a/include/net/sock.h b/include/net/sock.h
index 5dc9f5be3..92519ee88 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -242,6 +242,7 @@ struct tcp_opt {
__u32 lrcvtime; /* timestamp of last received data packet*/
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
+ __u32 rcv_segs; /* Number of received segments since last ack */
} ack;
/* Data for direct copy to user */
@@ -325,7 +326,6 @@ struct tcp_opt {
__u32 rcv_tsecr; /* Time stamp echo reply */
__u32 ts_recent; /* Time stamp to echo next */
long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
- __u32 last_ack_sent; /* last ack we sent (RTTM/PAWS) */
/* SACKs data */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
@@ -934,20 +934,20 @@ extern __inline__ void sock_put(struct sock *sk)
*/
extern __inline__ void sock_orphan(struct sock *sk)
{
- write_lock_irq(&sk->callback_lock);
+ write_lock_bh(&sk->callback_lock);
sk->dead = 1;
sk->socket = NULL;
sk->sleep = NULL;
- write_unlock_irq(&sk->callback_lock);
+ write_unlock_bh(&sk->callback_lock);
}
extern __inline__ void sock_graft(struct sock *sk, struct socket *parent)
{
- write_lock_irq(&sk->callback_lock);
+ write_lock_bh(&sk->callback_lock);
sk->sleep = &parent->wait;
parent->sk = sk;
sk->socket = parent;
- write_unlock_irq(&sk->callback_lock);
+ write_unlock_bh(&sk->callback_lock);
}
@@ -1150,7 +1150,7 @@ extern __inline__ int sock_writeable(struct sock *sk)
extern __inline__ int gfp_any(void)
{
- return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
extern __inline__ long sock_rcvtimeo(struct sock *sk, int noblock)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index db16f7253..d2c937f96 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -20,6 +20,8 @@
#define TCP_DEBUG 1
#undef TCP_FORMAL_WINDOW
+#define TCP_MORE_COARSE_ACKS
+#undef TCP_LESS_COARSE_ACKS
#include <linux/config.h>
#include <linux/tcp.h>
@@ -287,10 +289,10 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
* TIME-WAIT timer.
*/
-#define TCP_DELACK_MAX (HZ/2) /* maximal time to delay before sending an ACK */
+#define TCP_DELACK_MAX (HZ/5) /* maximal time to delay before sending an ACK */
#define TCP_DELACK_MIN (2) /* minimal time to delay before sending an ACK,
- * 2 scheduler ticks, not depending on HZ */
-#define TCP_ATO_MAX ((TCP_DELACK_MAX*4)/5) /* ATO producing TCP_DELACK_MAX */
+ * 2 scheduler ticks, not depending on HZ. */
+#define TCP_ATO_MAX (HZ/2) /* Clamp ATO estimator at his value. */
#define TCP_ATO_MIN 2
#define TCP_RTO_MAX (120*HZ)
#define TCP_RTO_MIN (HZ/5)
@@ -335,12 +337,14 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
so that we select tick to get range about 4 seconds.
*/
-#if HZ == 100 || HZ == 128
-#define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#if HZ == 20
+# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ == 100 || HZ == 128
+# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ == 1024
-#define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
+# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
#else
-#error HZ != 100 && HZ != 1024.
+# error HZ != 20 && HZ != 100 && HZ != 1024.
#endif
/*
@@ -594,11 +598,8 @@ extern int tcp_rcv_established(struct sock *sk,
static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
{
- if (tp->ack.quick && --tp->ack.quick == 0 && !tp->ack.pingpong) {
- /* Leaving quickack mode we deflate ATO to give peer
- * a time to adapt to new worse(!) RTO. It is not required
- * in pingpong mode, when ACKs were delayed in any case.
- */
+ if (tp->ack.quick && --tp->ack.quick == 0) {
+ /* Leaving quickack mode we deflate ATO. */
tp->ack.ato = TCP_ATO_MIN;
}
}
@@ -825,12 +826,13 @@ extern __inline__ u16 tcp_select_window(struct sock *sk)
* Don't update rcv_wup/rcv_wnd here or else
* we will not be able to advertise a zero
* window in time. --DaveM
+ *
+ * Relax Will Robinson.
*/
new_win = cur_win;
- } else {
- tp->rcv_wnd = new_win;
- tp->rcv_wup = tp->rcv_nxt;
}
+ tp->rcv_wnd = new_win;
+ tp->rcv_wup = tp->rcv_nxt;
/* RFC1323 scaling applied */
new_win >>= tp->rcv_wscale;
@@ -1186,7 +1188,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
- tcp_statistics[smp_processor_id()*2+!in_interrupt()].TcpCurrEstab--;
+ tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
}
/* Change state AFTER socket is unhashed to avoid closed
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 6756f8d2f..ddd942d14 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -3,25 +3,25 @@
/*
History:
- Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user
- process control of SCSI devices.
+ Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user
+ process control of SCSI devices.
Development Sponsored by Killy Corp. NY NY
Original driver (sg.h):
* Copyright (C) 1992 Lawrence Foard
-2.x extensions to driver:
+Version 2 and 3 extensions to driver:
* Copyright (C) 1998, 1999 Douglas Gilbert
+ Version: 3.1.10 (20000123)
+ This version is for 2.3/2.4 series kernels.
- Version: 2.3.35 (990708)
- This version for 2.3 series kernels. It only differs from sg version
- 2.1.35 used in the 2.2 series kernels by changes to wait_queue. This
- in an internal kernel interface and should not effect users.
- D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au)
-
- Changes since 2.1.34 (990603)
+ Changes since 2.1.34 (990603) and 2.3.35 (990708)
+ - add new interface structure: sg_io_hdr_t
+ - supports larger sense buffer, DMA residual count + direct IO
+ - add SG_IO ioctl (combines function of write() + read() )
+ - remove SG_SET_MERGE_FD, UNDERRUN_FLAG + _GET_ ioctls + logic
+ - add proc_fs support in /proc/scsi/sg/ directory
- add queuing info into struct sg_scsi_id
- - block negative timeout values
- - add back write() wait on previous read() when no cmd queuing
+ - def_reserved_size can be given at driver or module load time
Changes since 2.1.33 (990521)
- implement SG_SET_RESERVED_SIZE and associated memory re-org.
- add SG_NEXT_CMD_LEN to override SCSI command lengths
@@ -34,117 +34,135 @@ Original driver (sg.h):
- clean up logging of pointers to use %p (for 64 bit architectures)
- rework usage of get_user/copy_to_user family of kernel calls
- "disown" scsi_command blocks before releasing them
- Changes since 2.1.30 (990320)
- - memory tweaks: change flags on kmalloc (GFP_KERNEL to GFP_ATOMIC)
- - increase max allowable mid-level pool usage
-
-
- New features and changes:
- - per file descriptor (fd) write-read sequencing and command queues.
- - command queuing supported (SG_MAX_QUEUE is maximum per fd).
- - scatter-gather supported (allowing potentially megabyte transfers).
- - the SCSI target, host and driver status are returned
- in unused fields of sg_header (maintaining its original size).
- - asynchronous notification support added (SIGPOLL, SIGIO) for
- read()s (write()s should never block).
- - pack_id logic added so read() can wait for a specific pack_id.
- - uses memory > ISA_DMA_THRESHOLD if adapter allows it (e.g. a
- pci scsi adapter).
- - this driver no longer uses a single SG_BIG_BUFF sized buffer
- obtained at driver/module init time. Rather it tries to obtain a
- SG_DEF_RESERVED_SIZE buffer when a fd is open()ed and frees it
- at the corresponding release() (ie per fd). Actually the "buffer"
- may be a collection of buffers if scatter-gather is being used.
- - add SG_SET_RESERVED_SIZE ioctl allowing the user to request a
- large buffer for duration of current file descriptor's lifetime.
- - SG_GET_RESERVED_SIZE ioctl can be used to find out how much
- actually has been reserved.
- - add SG_NEXT_CMD_LEN ioctl to override SCSI command length on
- the next write() to this file descriptor.
- - SG_GET_RESERVED_SIZE's presence as a symbol can be used for
- compile time identification of the version 2 sg driver.
- However, it is recommended that run time identification based on
- calling the ioctl of the same name is a more flexible and
- safer approach.
- - adds several ioctl calls, see ioctl section below.
-
- Good documentation on the original "sg" device interface and usage can be
- found in the Linux HOWTO document: "SCSI Programming HOWTO" (version 0.5)
- by Heiko Eissfeldt; last updated 7 May 1996. Here is a quick summary of
- sg basics:
- An SG device is accessed by writing SCSI commands plus any associated
- outgoing data to it; the resulting status codes and any incoming data
- are then obtained by a read call. The device can be opened O_NONBLOCK
- (non-blocking) and poll() used to monitor its progress. The device may be
- opened O_EXCL which excludes other "sg" users from this device (but not
- "sd", "st" or "sr" users). The buffer given to the write() call is made
- up as follows:
- - struct sg_header image (see below)
- - scsi command (6, 10 or 12 bytes long)
- - data to be written to the device (if any)
-
- The buffer received from the corresponding read() call contains:
- - struct sg_header image (check results + sense_buffer)
- - data read back from device (if any)
-
- The given SCSI command has its LUN field overwritten internally by the
- value associated with the device that has been opened.
-
- This device currently uses "indirect IO" in the sense that data is
- DMAed into kernel buffers from the hardware and afterwards is
- transferred into the user space (or vice versa if you are writing).
- Transfer speeds or up to 20 to 30MBytes/sec have been measured using
- indirect IO. For faster throughputs "direct IO" which cuts out the
- double handling of data is required. This will also need a new interface.
-
- Grabbing memory for those kernel buffers used in this driver for DMA may
- cause the dreaded ENOMEM error. This error seems to be more prevalent
- under early 2.2.x kernels than under the 2.0.x kernel series. For a given
- (large) transfer the memory obtained by this driver must be contiguous or
- scatter-gather must be used (if supported by the adapter). [Furthermore,
- ISA SCSI adapters can only use memory below the 16MB level on a i386.]
-
- When a "sg" device is open()ed O_RDWR then this driver will attempt to
- reserve a buffer of SG_DEF_RESERVED_SIZE that will be used by subsequent
- write()s on this file descriptor as long as:
- - it is not already in use (eg when command queuing is in use)
- - the write() does not call for a buffer size larger than the
- reserved size.
- In these cases the write() will attempt to find the memory it needs for
- DMA buffers dynamically and in the worst case will fail with ENOMEM.
- The amount of memory actually reserved depends on various dynamic factors
- and can be checked with the SG_GET_RESERVED_SIZE ioctl(). [In a very
- tight memory situation it may yield 0!] The size of the reserved buffer
- can be changed with the SG_SET_RESERVED_SIZE ioctl(). It should be
- followed with a call to the SG_GET_RESERVED_SIZE ioctl() to find out how
- much was actually reserved.
-
- More documentation plus test and utility programs can be found at
- http://www.torque.net/sg
+
+Map of SG versions to the Linux kernels in which they appear:
+ ---------- ----------------------------------
+ original all kernels < 2.2.6
+ 2.1.31 2.2.6 and 2.2.7
+ 2.1.32 2.2.8 and 2.2.9
+ 2.1.34 2.2.10 to 2.2.13
+ 2.1.36 2.2.14
+ 2.3.35 2.3.x development series kernels (starting 2.3.20)
+ 3.0.x optional version 3 sg driver for 2.2 series
+ 3.1.x candidate version 3 sg driver for 2.3 series
+
+Major new features in SG 3.x driver (cf SG 2.x drivers)
+ - SG_IO ioctl() combines function of write() and read()
+ - new interface (sg_io_hdr_t) but still supports old interface
+ - scatter/gather in user space and direct IO supported
+
+Major features in SG 2.x driver (cf original SG driver)
+ - per file descriptor (fd) write-read sequencing
+ - command queuing supported
+ - scatter-gather supported at kernel level allowing potentially
+ large transfers
+ - more SCSI status information returned
+ - asynchronous notification support added (SIGPOLL, SIGIO)
+ - read() can fetch by given pack_id
+ - uses kernel memory as appropriate for SCSI adapter being used
+ - single SG_BIG_BUFF replaced by per file descriptor "reserve
+ buffer" whose size can be manipulated by ioctls()
+
+ The term "indirect IO" refers to a method by which data is DMAed into kernel
+ buffers from the hardware and afterwards is transferred into the user
+ space (or vice versa if you are writing). Transfer speeds of up to 20 to
+ 30MBytes/sec have been measured using indirect IO. For faster throughputs
+ "direct IO" which cuts out the double handling of data is required.
+ Direct IO is supported by the SG 3.x drivers on 2.3 series Linux kernels
+ (or later) and requires the use of the new interface.
+
+ Requests for direct IO with the new interface will automatically fall back
+ to indirect IO mode if they cannot be fulfilled. An example of such a case
+ is an ISA SCSI adapter which is only capable of DMAing to the lower 16MB of
+ memory due to the architecture of ISA. The 'info' field in the new
+ interface indicates whether a direct or indirect data transfer took place.
+
+ Obtaining memory for the kernel buffers used in indirect IO is done by
+ first checking if the "reserved buffer" for the current file descriptor
+ is available and large enough. If these conditions are _not_ met then
+ kernel memory is obtained on a per SCSI command basis. This corresponds
+ to a write(), read() sequence or a SG_IO ioctl() call. Further, the
+ kernel memory that is suitable for DMA may be constrained by the
+ architecture of the SCSI adapter (e.g. ISA adapters).
+
+ Documentation
+ =============
+ A web site for SG device drivers can be found at:
+ http://www.torque.net/sg [alternatively check the MAINTAINERS file]
+ The main documents are still based on 2.x versions:
+ http://www.torque.net/sg/p/scsi-generic.txt
+ http://www.torque.net/sg/p/scsi-generic_long.txt
+ The first document can also be found in the kernel source tree, probably at:
+ /usr/src/linux/Documentation/scsi-generic.txt .
+ Documentation on the changes and additions in 3.x version of the sg driver
+ can be found at: http://www.torque.net/sg/p/scsi-generic_v3.txt
+ Utility and test programs are also available at that web site.
*/
-#define SG_MAX_SENSE 16 /* too little, unlikely to change in 2.2.x */
+/* New interface introduced in the 3.x SG drivers follows */
-struct sg_header
-{
- int pack_len; /* [o] reply_len (ie useless), ignored as input */
- int reply_len; /* [i] max length of expected reply (inc. sg_header) */
- int pack_id; /* [io] id number of packet (use ints >= 0) */
- int result; /* [o] 0==ok, else (+ve) Unix errno (best ignored) */
- unsigned int twelve_byte:1;
- /* [i] Force 12 byte command length for group 6 & 7 commands */
- unsigned int target_status:5; /* [o] scsi status from target */
- unsigned int host_status:8; /* [o] host status (see "DID" codes) */
- unsigned int driver_status:8; /* [o] driver status+suggestion */
- unsigned int other_flags:10; /* unused */
- unsigned char sense_buffer[SG_MAX_SENSE]; /* [o] Output in 3 cases:
- when target_status is CHECK_CONDITION or
- when target_status is COMMAND_TERMINATED or
- when (driver_status & DRIVER_SENSE) is true. */
-}; /* This structure is 36 bytes long on i386 */
+typedef struct sg_iovec /* same structure as used by readv() Linux system */
+{ /* call. It defines one scatter-gather element. */
+ void * iov_base; /* Starting address */
+ size_t iov_len; /* Length in bytes */
+} sg_iovec_t;
-typedef struct sg_scsi_id {
+typedef struct sg_io_hdr
+{
+ char interface_id; /* [i] 'S' for SCSI generic (required) */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char iovec_count; /* [i] 0 implies no scatter gather */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ int dxfer_direction; /* [i] data transfer direction */
+ unsigned int dxfer_len; /* [i] byte count of data transfer */
+ void * dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ unsigned char * cmdp; /* [i], [*i] points to command to perform */
+ unsigned char * sbp; /* [i], [*o] points to sense_buffer memory */
+ unsigned int timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ unsigned int flags; /* [i] 0 -> default, see SG_FLAG... */
+ int pack_id; /* [i->o] unused internally (normally) */
+ void * usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status;/* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status;/* [o] errors from software driver */
+ int resid; /* [o] dxfer_len - actual_transferred */
+ unsigned int duration; /* [o] time taken by cmd (unit: millisec) */
+ unsigned int info; /* [o] auxiliary information */
+} sg_io_hdr_t; /* 60 bytes long (on i386) */
+
+/* Use negative values to flag difference from original sg_header structure */
+#define SG_DXFER_NONE -1 /* e.g. a SCSI Test Unit Ready command */
+#define SG_DXFER_TO_DEV -2 /* e.g. a SCSI WRITE command */
+#define SG_DXFER_FROM_DEV -3 /* e.g. a SCSI READ command */
+#define SG_DXFER_TO_FROM_DEV -4 /* treated like SG_DXFER_FROM_DEV with the
+ additional property than during indirect
+ IO the user buffer is copied into the
+ kernel buffers before the transfer */
+
+/* following flag values can be "or"-ed together */
+#define SG_FLAG_DIRECT_IO 1 /* default is indirect IO */
+#define SG_FLAG_LUN_INHIBIT 2 /* default is to put device's lun into */
+ /* the 2nd byte of SCSI command */
+#define SG_FLAG_NO_DXFER 0x10000 /* no transfer of kernel buffers to/from */
+ /* user space (debug indirect IO) */
+
+/* following 'info' values are "or"-ed together */
+#define SG_INFO_OK_MASK 0x1
+#define SG_INFO_OK 0x0 /* no sense, host nor driver "noise" */
+#define SG_INFO_CHECK 0x1 /* something abnormal happened */
+
+#define SG_INFO_DIRECT_IO_MASK 0x6
+#define SG_INFO_INDIRECT_IO 0x0 /* data xfer via kernel buffers (or no xfer) */
+#define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */
+#define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */
+
+
+typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */
int channel;
int scsi_id; /* scsi id of target device */
@@ -152,25 +170,38 @@ typedef struct sg_scsi_id {
int scsi_type; /* TYPE_... defined in scsi/scsi.h */
short h_cmd_per_lun;/* host (adapter) maximum commands per lun */
short d_queue_depth;/* device (or adapter) maximum queue length */
- int unused1; /* probably find a good use, set 0 for now */
- int unused2; /* ditto */
-} Sg_scsi_id;
-
-/* IOCTLs: ( _GET_s yield result via 'int *' 3rd argument unless
- otherwise indicated) */
-#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies (10ms on i386) */
-#define SG_GET_TIMEOUT 0x2202 /* yield timeout as _return_ value */
+ int unused[2]; /* probably find a good use, set 0 for now */
+} sg_scsi_id_t; /* 32 bytes long on i386 */
+
+typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
+ char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */
+ char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */
+ char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */
+ char problem; /* 0 -> no problem detected, 1 -> error to report */
+ int pack_id; /* pack_id associated with request */
+ void * usr_ptr; /* user provided pointer (in new interface) */
+ unsigned int duration; /* millisecs elapsed since written (req_state==1)
+ or request duration (req_state==2) */
+ int unused;
+} sg_req_info_t; /* 20 bytes long on i386 */
+
+
+/* IOCTLs: Those ioctls that are relevant to the SG 3.x drivers follow.
+ [Those that only apply to the SG 2.x drivers are at the end of the file.]
+ (_GET_s yield result via 'int *' 3rd argument unless otherwise indicated) */
#define SG_EMULATED_HOST 0x2203 /* true for emulated host adapter (ATAPI) */
/* Used to configure SCSI command transformation layer for ATAPI devices */
-#define SG_SET_TRANSFORM 0x2204
+/* Only supported by the ide-scsi driver */
+#define SG_SET_TRANSFORM 0x2204 /* N.B. 3rd arg is not pointer but value: */
+ /* 3rd arg = 0 to disable transform, 1 to enable it */
#define SG_GET_TRANSFORM 0x2205
#define SG_SET_RESERVED_SIZE 0x2275 /* request a new reserved buffer size */
#define SG_GET_RESERVED_SIZE 0x2272 /* actual size of reserved buffer */
-/* The following ioctl takes a 'Sg_scsi_id *' object as its 3rd argument. */
+/* The following ioctl has a 'sg_scsi_id_t *' object as its 3rd argument. */
#define SG_GET_SCSI_ID 0x2276 /* Yields fd's bus, chan, dev, lun + type */
/* SCSI id information can also be obtained from SCSI_IOCTL_GET_IDLUN */
@@ -179,66 +210,111 @@ typedef struct sg_scsi_id {
#define SG_GET_LOW_DMA 0x227a /* 0-> use all ram for dma; 1-> low dma ram */
/* When SG_SET_FORCE_PACK_ID set to 1, pack_id is input to read() which
- will attempt to read that pack_id or block (or return EAGAIN). If
- pack_id is -1 then read oldest waiting. When ...FORCE_PACK_ID set to 0
- then pack_id ignored by read() and oldest readable fetched. */
+ tries to fetch a packet with a matching pack_id, waits, or returns EAGAIN.
+ If pack_id is -1 then read oldest waiting. When ...FORCE_PACK_ID set to 0
+ then pack_id ignored by read() and oldest readable fetched. */
#define SG_SET_FORCE_PACK_ID 0x227b
#define SG_GET_PACK_ID 0x227c /* Yields oldest readable pack_id (or -1) */
#define SG_GET_NUM_WAITING 0x227d /* Number of commands awaiting read() */
-/* Turn on error sense trace (1..8), dump this device to log/console (9)
- or dump all sg device states ( >9 ) to log/console */
-#define SG_SET_DEBUG 0x227e /* 0 -> turn off debug */
-
/* Yields max scatter gather tablesize allowed by current host adapter */
#define SG_GET_SG_TABLESIZE 0x227F /* 0 implies can't do scatter gather */
-/* Control whether sequencing per file descriptor or per device */
-#define SG_GET_MERGE_FD 0x2274 /* 0-> per fd, 1-> per device */
-#define SG_SET_MERGE_FD 0x2273 /* Attempt to change sequencing state,
- if more than current fd open on device, will fail with EBUSY */
-
-/* Get/set command queuing state per fd (default is SG_DEF_COMMAND_Q) */
-#define SG_GET_COMMAND_Q 0x2270 /* Yields 0 (queuing off) or 1 (on) */
-#define SG_SET_COMMAND_Q 0x2271 /* Change queuing state with 0 or 1 */
-
-/* Get/set whether DMA underrun will cause an error (DID_ERROR). This only
- currently applies to the [much-used] aic7xxx driver. */
-#define SG_GET_UNDERRUN_FLAG 0x2280 /* Yields 0 (don't flag) or 1 (flag) */
-#define SG_SET_UNDERRUN_FLAG 0x2281 /* Change flag underrun state */
-
#define SG_GET_VERSION_NUM 0x2282 /* Example: version 2.1.34 yields 20134 */
-#define SG_NEXT_CMD_LEN 0x2283 /* override SCSI command length with given
- number on the next write() on this file descriptor */
/* Returns -EBUSY if occupied else takes as input: 0 -> do nothing,
1 -> device reset or 2 -> bus reset (may not be activated yet) */
#define SG_SCSI_RESET 0x2284
+/* synchronous SCSI command ioctl, (only in version 3 interface) */
+#define SG_IO 0x2285 /* similar effect as write() followed by read() */
+
+#define SG_GET_REQUEST_TABLE 0x2286 /* yields table of active requests */
+
+/* How to treat EINTR during SG_IO ioctl(), only in SG 3.x series */
+#define SG_SET_KEEP_ORPHAN 0x2287 /* 1 -> hold for read(), 0 -> drop (def) */
+#define SG_GET_KEEP_ORPHAN 0x2288
+
#define SG_SCATTER_SZ (8 * 4096) /* PAGE_SIZE not available to user */
/* Largest size (in bytes) a single scatter-gather list element can have.
- The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on
+ The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on
i386]. The minimum value is PAGE_SIZE. If scatter-gather not supported
by adapter then this value is the largest data block that can be
read/written by a single scsi command. The user can find the value of
PAGE_SIZE by calling getpagesize() defined in unistd.h . */
-#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */
#define SG_DEFAULT_RETRIES 1
/* Defaults, commented if they differ from original sg driver */
-#define SG_DEF_COMMAND_Q 0
-#define SG_DEF_MERGE_FD 0 /* was 1 -> per device sequencing */
#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
#define SG_DEF_FORCE_PACK_ID 0
-#define SG_DEF_UNDERRUN_FLAG 0
-#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ
+#define SG_DEF_KEEP_ORPHAN 0
+#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
/* maximum outstanding requests, write() yields EDOM if exceeded */
#define SG_MAX_QUEUE 16
#define SG_BIG_BUFF SG_DEF_RESERVED_SIZE /* for backward compatibility */
+/* Alternate style type names, "..._t" variants preferred */
+typedef struct sg_io_hdr Sg_io_hdr;
+typedef struct sg_iovec Sg_iovec;
+typedef struct sg_scsi_id Sg_scsi_id;
+typedef struct sg_req_info Sg_req_info;
+
+
+/* vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
+/* The older SG interface based on the 'sg_header' structure follows. */
+/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
+
+#define SG_MAX_SENSE 16 /* this only applies to the sg_header interface */
+
+struct sg_header
+{
+ int pack_len; /* [o] reply_len (ie useless), ignored as input */
+ int reply_len; /* [i] max length of expected reply (inc. sg_header) */
+ int pack_id; /* [io] id number of packet (use ints >= 0) */
+ int result; /* [o] 0==ok, else (+ve) Unix errno (best ignored) */
+ unsigned int twelve_byte:1;
+ /* [i] Force 12 byte command length for group 6 & 7 commands */
+ unsigned int target_status:5; /* [o] scsi status from target */
+ unsigned int host_status:8; /* [o] host status (see "DID" codes) */
+ unsigned int driver_status:8; /* [o] driver status+suggestion */
+ unsigned int other_flags:10; /* unused */
+ unsigned char sense_buffer[SG_MAX_SENSE]; /* [o] Output in 3 cases:
+ when target_status is CHECK_CONDITION or
+ when target_status is COMMAND_TERMINATED or
+ when (driver_status & DRIVER_SENSE) is true. */
+}; /* This structure is 36 bytes long on i386 */
+
+
+/* IOCTLs: The following are not required (or ignored) when the sg_io_hdr_t
+ interface is used. They are kept for backward compatibility with
+ the original and version 2 drivers. */
+
+#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies (10ms on i386) */
+#define SG_GET_TIMEOUT 0x2202 /* yield timeout as _return_ value */
+
+/* Get/set command queuing state per fd (default is SG_DEF_COMMAND_Q.
+ Each time a sg_io_hdr_t object is seen on this file descriptor, this
+ command queuing flag is set on (overriding the previous setting). */
+#define SG_GET_COMMAND_Q 0x2270 /* Yields 0 (queuing off) or 1 (on) */
+#define SG_SET_COMMAND_Q 0x2271 /* Change queuing state with 0 or 1 */
+
+/* Turn on/off error sense trace (1 and 0 respectively, default is off).
+ Try using: "# cat /proc/scsi/sg/debug" instead in the v3 driver */
+#define SG_SET_DEBUG 0x227e /* 0 -> turn off debug */
+
+#define SG_NEXT_CMD_LEN 0x2283 /* override SCSI command length with given
+ number on the next write() on this file descriptor */
+
+
+/* Defaults, commented if they differ from original sg driver */
+#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */
+#define SG_DEF_COMMAND_Q 0 /* command queuing is always on when
+ the new interface is used */
+#define SG_DEF_UNDERRUN_FLAG 0
+
#endif
diff --git a/include/video/macmodes.h b/include/video/macmodes.h
index 1bdfa815b..054bd9845 100644
--- a/include/video/macmodes.h
+++ b/include/video/macmodes.h
@@ -42,6 +42,7 @@
#define VMODE_CHOOSE 99
#define CMODE_NVRAM -1
+#define CMODE_CHOOSE -2
#define CMODE_8 0 /* 8 bits/pixel */
#define CMODE_16 1 /* 16 (actually 15) bits/pixel */
#define CMODE_32 2 /* 32 (actually 24) bits/pixel */