author    Ralf Baechle <ralf@linux-mips.org>    2000-11-23 02:00:47 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-11-23 02:00:47 +0000
commit    06615f62b17d7de6e12d2f5ec6b88cf30af08413 (patch)
tree      8766f208847d4876a6db619aebbf54d53b76eb44 /include/asm-ia64
parent    fa9bdb574f4febb751848a685d9a9017e04e1d53 (diff)
Merge with Linux 2.4.0-test10.
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/acpi-ext.h        |    2
-rw-r--r--  include/asm-ia64/acpikcfg.h        |   29
-rw-r--r--  include/asm-ia64/atomic.h          |    7
-rw-r--r--  include/asm-ia64/bitops.h          |   25
-rw-r--r--  include/asm-ia64/delay.h           |   14
-rw-r--r--  include/asm-ia64/efi.h             |    2
-rw-r--r--  include/asm-ia64/fcntl.h           |    8
-rw-r--r--  include/asm-ia64/hardirq.h         |    8
-rw-r--r--  include/asm-ia64/hw_irq.h          |   14
-rw-r--r--  include/asm-ia64/ia32.h            |    2
-rw-r--r--  include/asm-ia64/io.h              |   49
-rw-r--r--  include/asm-ia64/mmu_context.h     |   12
-rw-r--r--  include/asm-ia64/module.h          |  108
-rw-r--r--  include/asm-ia64/offsets.h         |    8
-rw-r--r--  include/asm-ia64/page.h            |    7
-rw-r--r--  include/asm-ia64/pal.h             |  116
-rw-r--r--  include/asm-ia64/param.h           |    6
-rw-r--r--  include/asm-ia64/parport.h         |   20
-rw-r--r--  include/asm-ia64/pci.h             |    6
-rw-r--r--  include/asm-ia64/pgalloc.h         |   26
-rw-r--r--  include/asm-ia64/pgtable.h         |   53
-rw-r--r--  include/asm-ia64/processor.h       |  109
-rw-r--r--  include/asm-ia64/ptrace.h          |    1
-rw-r--r--  include/asm-ia64/ptrace_offsets.h  |    4
-rw-r--r--  include/asm-ia64/sal.h             |   44
-rw-r--r--  include/asm-ia64/semaphore.h       |   24
-rw-r--r--  include/asm-ia64/siginfo.h         |    3
-rw-r--r--  include/asm-ia64/smp.h             |   49
-rw-r--r--  include/asm-ia64/spinlock.h        |  146
-rw-r--r--  include/asm-ia64/string.h          |    5
-rw-r--r--  include/asm-ia64/system.h          |   96
-rw-r--r--  include/asm-ia64/uaccess.h         |   18
-rw-r--r--  include/asm-ia64/unaligned.h       |   12
-rw-r--r--  include/asm-ia64/unistd.h          |    2
-rw-r--r--  include/asm-ia64/unwind.h          |   95
35 files changed, 697 insertions, 433 deletions
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
index 24f9822e6..e5acd26a5 100644
--- a/include/asm-ia64/acpi-ext.h
+++ b/include/asm-ia64/acpi-ext.h
@@ -60,7 +60,7 @@ typedef struct {
#define LSAPIC_PERFORMANCE_RESTRICTED (1<<1)
#define LSAPIC_PRESENT (1<<2)
-typedef struct {
+typedef struct acpi_entry_lsapic {
u8 type;
u8 length;
u16 acpi_processor_id;
diff --git a/include/asm-ia64/acpikcfg.h b/include/asm-ia64/acpikcfg.h
new file mode 100644
index 000000000..1bf49cf01
--- /dev/null
+++ b/include/asm-ia64/acpikcfg.h
@@ -0,0 +1,29 @@
+/*
+ * acpikcfg.h - ACPI based Kernel Configuration Manager External Interfaces
+ *
+ * Copyright (C) 2000 Intel Corp.
+ * Copyright (C) 2000 J.I. Lee <jung-ik.lee@intel.com>
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_ACPI_KERNEL_CONFIG
+
+u32 __init acpi_cf_init (void * rsdp);
+u32 __init acpi_cf_terminate (void );
+
+u32 __init
+acpi_cf_get_pci_vectors (
+ struct pci_vector_struct **vectors,
+ int *num_pci_vectors
+ );
+
+
+#ifdef CONFIG_ACPI_KERNEL_CONFIG_DEBUG
+void __init
+acpi_cf_print_pci_vectors (
+ struct pci_vector_struct *vectors,
+ int num_pci_vectors
+ );
+#endif
+#endif /* CONFIG_ACPI_KERNEL_CONFIG */
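For orientation, a minimal sketch of how a platform setup path might chain these new interfaces together, in declaration order; the rsdp argument and the 0-means-success convention are assumptions, not guaranteed by this header:

/* Sketch only: exercising the ACPI kernel-configuration interfaces.
 * Assumes a 0 return indicates success. */
static void __init acpi_cf_example (void *rsdp)
{
	struct pci_vector_struct *vectors;
	int num_pci_vectors;

	if (acpi_cf_init(rsdp) != 0)
		return;
	if (acpi_cf_get_pci_vectors(&vectors, &num_pci_vectors) == 0) {
#ifdef CONFIG_ACPI_KERNEL_CONFIG_DEBUG
		acpi_cf_print_pci_vectors(vectors, num_pci_vectors);
#endif
	}
	acpi_cf_terminate();
}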
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 43a44f4ad..08fc25c57 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -17,13 +17,6 @@
#include <asm/system.h>
/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-
-/*
 * On IA-64, counter must always be volatile to ensure that the
* memory accesses are ordered.
*/
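For context, the removed __atomic_fool_gcc() macro forced gcc to access exactly the object the caller named by casting it to a huge volatile struct; a standalone sketch of the trick and the volatile-pointer equivalent this patch relies on instead (illustrative, not part of the patch):

/* The removed trick: the volatile struct cast makes gcc use the exact
 * address given, never a cached or aliased copy of the value. */
#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)(x))

/* The replacement approach: declare the counter itself volatile, so
 * every access really goes to memory. */
static inline void atomic_set_sketch (volatile int *counter, int val)
{
	*counter = val;
}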
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 054704c47..33da4a962 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -20,7 +20,7 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
-extern __inline__ void
+static __inline__ void
set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
@@ -36,7 +36,12 @@ set_bit (int nr, volatile void *addr)
} while (cmpxchg_acq(m, old, new) != old);
}
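All of the bitops in this file share the same lock-free read-modify-write loop around cmpxchg_acq. A self-contained sketch of the pattern, with GCC's __sync_val_compare_and_swap standing in for the ia64 primitive (a portability assumption, not the kernel's API):

#include <stdint.h>

/* Sketch of set_bit(): retry until the compare-and-swap observes the
 * word unchanged since we read it. */
static inline void set_bit_sketch (int nr, volatile uint32_t *addr)
{
	volatile uint32_t *m = addr + (nr >> 5);	/* word holding bit nr */
	uint32_t bit = 1U << (nr & 31), old, new_;

	do {
		old = *m;
		new_ = old | bit;
	} while (__sync_val_compare_and_swap(m, old, new_) != old);
}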
-extern __inline__ void
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+static __inline__ void
clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
@@ -52,7 +57,7 @@ clear_bit (int nr, volatile void *addr)
} while (cmpxchg_acq(m, old, new) != old);
}
-extern __inline__ void
+static __inline__ void
change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
@@ -68,7 +73,7 @@ change_bit (int nr, volatile void *addr)
} while (cmpxchg_acq(m, old, new) != old);
}
-extern __inline__ int
+static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
@@ -85,7 +90,7 @@ test_and_set_bit (int nr, volatile void *addr)
return (old & bit) != 0;
}
-extern __inline__ int
+static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
@@ -102,7 +107,7 @@ test_and_clear_bit (int nr, volatile void *addr)
return (old & ~mask) != 0;
}
-extern __inline__ int
+static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
@@ -119,7 +124,7 @@ test_and_change_bit (int nr, volatile void *addr)
return (old & bit) != 0;
}
-extern __inline__ int
+static __inline__ int
test_bit (int nr, volatile void *addr)
{
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
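As a worked example: test_bit(35, addr) indexes word nr >> 5 = 1 (the second 32-bit word) and tests bit nr & 31 = 3 within it.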
@@ -129,7 +134,7 @@ test_bit (int nr, volatile void *addr)
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-extern inline unsigned long
+static inline unsigned long
ffz (unsigned long x)
{
unsigned long result;
@@ -164,7 +169,7 @@ ia64_fls (unsigned long x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-extern __inline__ unsigned long
+static __inline__ unsigned long
hweight64 (unsigned long x)
{
unsigned long result;
@@ -181,7 +186,7 @@ hweight64 (unsigned long x)
/*
* Find next zero bit in a bitmap reasonably efficiently..
*/
-extern inline int
+static inline int
find_next_zero_bit (void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
index cca4ecdf6..dda714e20 100644
--- a/include/asm-ia64/delay.h
+++ b/include/asm-ia64/delay.h
@@ -18,13 +18,13 @@
#include <asm/processor.h>
-extern __inline__ void
+static __inline__ void
ia64_set_itm (unsigned long val)
{
__asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
}
-extern __inline__ unsigned long
+static __inline__ unsigned long
ia64_get_itm (void)
{
unsigned long result;
@@ -33,7 +33,7 @@ ia64_get_itm (void)
return result;
}
-extern __inline__ void
+static __inline__ void
ia64_set_itv (unsigned char vector, unsigned char masked)
{
if (masked > 1)
@@ -43,13 +43,13 @@ ia64_set_itv (unsigned char vector, unsigned char masked)
:: "r"((masked << 16) | vector) : "memory");
}
-extern __inline__ void
+static __inline__ void
ia64_set_itc (unsigned long val)
{
__asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
}
-extern __inline__ unsigned long
+static __inline__ unsigned long
ia64_get_itc (void)
{
unsigned long result;
@@ -58,7 +58,7 @@ ia64_get_itc (void)
return result;
}
-extern __inline__ void
+static __inline__ void
__delay (unsigned long loops)
{
unsigned long saved_ar_lc;
@@ -72,7 +72,7 @@ __delay (unsigned long loops)
__asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
}
-extern __inline__ void
+static __inline__ void
udelay (unsigned long usecs)
{
#ifdef CONFIG_IA64_SOFTSDV_HACKS
diff --git a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h
index 5d311d32e..cfdfd4efd 100644
--- a/include/asm-ia64/efi.h
+++ b/include/asm-ia64/efi.h
@@ -219,7 +219,7 @@ extern struct efi {
efi_reset_system_t *reset_system;
} efi;
-extern inline int
+static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
return memcmp(&left, &right, sizeof (efi_guid_t));
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
index c20ab10ec..eeb42f793 100644
--- a/include/asm-ia64/fcntl.h
+++ b/include/asm-ia64/fcntl.h
@@ -3,8 +3,8 @@
/*
* This is mostly compatible with Linux/x86.
*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
@@ -78,5 +78,9 @@ struct flock {
pid_t l_pid;
};
+#ifdef __KERNEL__
+# define flock64 flock
+#endif
+
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _ASM_IA64_FCNTL_H */
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
index 38a12be6e..7c1a4d109 100644
--- a/include/asm-ia64/hardirq.h
+++ b/include/asm-ia64/hardirq.h
@@ -39,8 +39,8 @@ typedef struct {
# define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
# define hardirq_endlock(cpu) do { } while (0)
-# define irq_enter(cpu, irq) (++local_irq_count(cpu))
-# define irq_exit(cpu, irq) (--local_irq_count(cpu))
+# define irq_enter(cpu, irq) (local_irq_count(cpu)++)
+# define irq_exit(cpu, irq) (local_irq_count(cpu)--)
# define synchronize_irq() barrier()
#else
@@ -72,7 +72,7 @@ static inline void release_irqlock(int cpu)
static inline void irq_enter(int cpu, int irq)
{
- ++local_irq_count(cpu);
+ local_irq_count(cpu)++;
while (test_bit(0,&global_irq_lock)) {
/* nothing */;
@@ -81,7 +81,7 @@ static inline void irq_enter(int cpu, int irq)
static inline void irq_exit(int cpu, int irq)
{
- --local_irq_count(cpu);
+ local_irq_count(cpu)--;
}
static inline int hardirq_trylock(int cpu)
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index e4dd5c1ee..06528f8d2 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -6,8 +6,6 @@
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
-
#include <linux/types.h>
#include <asm/ptrace.h>
@@ -67,17 +65,7 @@ extern void ipi_send (int cpu, int vector, int delivery_mode, int redirect);
static inline void
hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
{
- int my_cpu_id;
-
-#ifdef CONFIG_SMP
- my_cpu_id = smp_processor_id();
-#else
- __u64 lid;
-
- __asm__ ("mov %0=cr.lid" : "=r"(lid));
- my_cpu_id = (lid >> 24) & 0xff; /* extract id (ignore eid) */
-#endif
- ipi_send(my_cpu_id, vector, IA64_IPI_DM_INT, 0);
+ ipi_send(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
}
#endif /* _ASM_IA64_HW_IRQ_H */
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index c6732837c..48a6d0bd4 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -351,6 +351,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
(granularity << IA32_SEG_G) | \
(((base >> 24) & 0xFF) << IA32_SEG_HIGH_BASE))
+#define IA32_IOBASE 0x2000000000000000 /* Virtual address for I/O space */
+
#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
#define IA32_CR4 0 /* No architectural extensions */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 0740af45f..3dcc496fa 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -63,16 +63,15 @@ phys_to_virt(unsigned long address)
*/
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
-extern inline const unsigned long
+static inline const unsigned long
__ia64_get_io_port_base (void)
{
- unsigned long addr;
+ extern unsigned long ia64_iobase;
- __asm__ ("mov %0=ar.k0;;" : "=r"(addr));
- return __IA64_UNCACHED_OFFSET | addr;
+ return ia64_iobase;
}
-extern inline void*
+static inline void*
__ia64_mk_io_addr (unsigned long port)
{
const unsigned long io_base = __ia64_get_io_port_base();
@@ -100,7 +99,7 @@ __ia64_mk_io_addr (unsigned long port)
* order. --davidm 99/12/07
*/
-extern inline unsigned int
+static inline unsigned int
__inb (unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
@@ -111,7 +110,7 @@ __inb (unsigned long port)
return ret;
}
-extern inline unsigned int
+static inline unsigned int
__inw (unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
@@ -122,7 +121,7 @@ __inw (unsigned long port)
return ret;
}
-extern inline unsigned int
+static inline unsigned int
__inl (unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
@@ -133,7 +132,7 @@ __inl (unsigned long port)
return ret;
}
-extern inline void
+static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
@@ -147,7 +146,7 @@ __insb (unsigned long port, void *dst, unsigned long count)
return;
}
-extern inline void
+static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
@@ -161,7 +160,7 @@ __insw (unsigned long port, void *dst, unsigned long count)
return;
}
-extern inline void
+static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
@@ -175,7 +174,7 @@ __insl (unsigned long port, void *dst, unsigned long count)
return;
}
-extern inline void
+static inline void
__outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
@@ -184,7 +183,7 @@ __outb (unsigned char val, unsigned long port)
__ia64_mf_a();
}
-extern inline void
+static inline void
__outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
@@ -193,7 +192,7 @@ __outw (unsigned short val, unsigned long port)
__ia64_mf_a();
}
-extern inline void
+static inline void
__outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
@@ -202,7 +201,7 @@ __outl (unsigned int val, unsigned long port)
__ia64_mf_a();
}
-extern inline void
+static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
@@ -215,7 +214,7 @@ __outsb (unsigned long port, const void *src, unsigned long count)
return;
}
-extern inline void
+static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
@@ -228,7 +227,7 @@ __outsw (unsigned long port, const void *src, unsigned long count)
return;
}
-extern inline void
+static inline void
__outsl (unsigned long port, void *src, unsigned long count)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
@@ -257,49 +256,49 @@ __outsl (unsigned long port, void *src, unsigned long count)
/*
 * The addresses passed to these functions are ioremap()ped already.
*/
-extern inline unsigned char
+static inline unsigned char
__readb (void *addr)
{
return *(volatile unsigned char *)addr;
}
-extern inline unsigned short
+static inline unsigned short
__readw (void *addr)
{
return *(volatile unsigned short *)addr;
}
-extern inline unsigned int
+static inline unsigned int
__readl (void *addr)
{
return *(volatile unsigned int *) addr;
}
-extern inline unsigned long
+static inline unsigned long
__readq (void *addr)
{
return *(volatile unsigned long *) addr;
}
-extern inline void
+static inline void
__writeb (unsigned char val, void *addr)
{
*(volatile unsigned char *) addr = val;
}
-extern inline void
+static inline void
__writew (unsigned short val, void *addr)
{
*(volatile unsigned short *) addr = val;
}
-extern inline void
+static inline void
__writel (unsigned int val, void *addr)
{
*(volatile unsigned int *) addr = val;
}
-extern inline void
+static inline void
__writeq (unsigned long val, void *addr)
{
*(volatile unsigned long *) addr = val;
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index c50eacaf0..f385b15cb 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -57,7 +57,7 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
-extern inline unsigned long
+static inline unsigned long
ia64_rid (unsigned long context, unsigned long region_addr)
{
# ifdef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
@@ -67,7 +67,7 @@ ia64_rid (unsigned long context, unsigned long region_addr)
# endif
}
-extern inline void
+static inline void
get_new_mmu_context (struct mm_struct *mm)
{
spin_lock(&ia64_ctx.lock);
@@ -80,7 +80,7 @@ get_new_mmu_context (struct mm_struct *mm)
}
-extern inline void
+static inline void
get_mmu_context (struct mm_struct *mm)
{
/* check if our ASN is of an older generation and thus invalid: */
@@ -88,20 +88,20 @@ get_mmu_context (struct mm_struct *mm)
get_new_mmu_context(mm);
}
-extern inline int
+static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
mm->context = 0;
return 0;
}
-extern inline void
+static inline void
destroy_context (struct mm_struct *mm)
{
/* Nothing to do. */
}
-extern inline void
+static inline void
reload_context (struct mm_struct *mm)
{
unsigned long rid;
diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h
new file mode 100644
index 000000000..d8d19cb2f
--- /dev/null
+++ b/include/asm-ia64/module.h
@@ -0,0 +1,108 @@
+#ifndef _ASM_IA64_MODULE_H
+#define _ASM_IA64_MODULE_H
+/*
+ * This file contains the ia64 architecture specific module code.
+ *
+ * Copyright (C) 2000 Intel Corporation.
+ * Copyright (C) 2000 Mike Stephens <mike.stephens@intel.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/unwind.h>
+
+#define module_map(x) vmalloc(x)
+#define module_unmap(x) ia64_module_unmap(x)
+#define module_arch_init(x) ia64_module_init(x)
+
+/*
+ * This must match in size and layout the data created by
+ * modutils/obj/obj-ia64.c
+ */
+struct archdata {
+ const char *unw_table;
+ const char *segment_base;
+ const char *unw_start;
+ const char *unw_end;
+ const char *gp;
+};
+
+/*
+ * functions to add/remove a modules unwind info when
+ * it is loaded or unloaded.
+ */
+static inline int
+ia64_module_init(struct module *mod)
+{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ struct archdata *archdata;
+
+ if (!mod_member_present(mod, archdata_start) || !mod->archdata_start)
+ return 0;
+ archdata = (struct archdata *)(mod->archdata_start);
+
+ /*
+ * Make sure the unwind pointers are sane.
+ */
+
+ if (archdata->unw_table)
+ {
+ printk(KERN_ERR "arch_init_module: archdata->unw_table must be zero.\n");
+ return 1;
+ }
+ if (!mod_bound(archdata->gp, 0, mod))
+ {
+ printk(KERN_ERR "arch_init_module: archdata->gp out of bounds.\n");
+ return 1;
+ }
+ if (!mod_bound(archdata->unw_start, 0, mod))
+ {
+ printk(KERN_ERR "arch_init_module: archdata->unw_start out of bounds.\n");
+ return 1;
+ }
+ if (!mod_bound(archdata->unw_end, 0, mod))
+ {
+ printk(KERN_ERR "arch_init_module: archdata->unw_end out of bounds.\n");
+ return 1;
+ }
+ if (!mod_bound(archdata->segment_base, 0, mod))
+ {
+ printk(KERN_ERR "arch_init_module: archdata->unw_table out of bounds.\n");
+ return 1;
+ }
+
+ /*
+ * Pointers are reasonable, add the module unwind table
+ */
+ archdata->unw_table = unw_add_unwind_table(mod->name, archdata->segment_base,
+ (unsigned long) archdata->gp,
+ (unsigned long) archdata->unw_start,
+ (unsigned long) archdata->unw_end);
+#endif /* CONFIG_IA64_NEW_UNWIND */
+ return 0;
+}
+
+static inline void
+ia64_module_unmap(void * addr)
+{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ struct module *mod = (struct module *) addr;
+ struct archdata *archdata;
+
+ /*
+ * Before freeing the module memory remove the unwind table entry
+ */
+ if (mod_member_present(mod, archdata_start) && mod->archdata_start)
+ {
+ archdata = (struct archdata *)(mod->archdata_start);
+
+ if (archdata->unw_table != NULL)
+ unw_remove_unwind_table(archdata->unw_table);
+ }
+#endif /* CONFIG_IA64_NEW_UNWIND */
+
+ vfree(addr);
+}
+
+#endif /* _ASM_IA64_MODULE_H */
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
index 88cad88d5..64e9f2fef 100644
--- a/include/asm-ia64/offsets.h
+++ b/include/asm-ia64/offsets.h
@@ -11,7 +11,7 @@
#define PT_PTRACED_BIT 0
#define PT_TRACESYS_BIT 1
-#define IA64_TASK_SIZE 2864 /* 0xb30 */
+#define IA64_TASK_SIZE 3328 /* 0xd00 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
#define IA64_SIGINFO_SIZE 128 /* 0x80 */
@@ -21,9 +21,9 @@
#define IA64_TASK_SIGPENDING_OFFSET 16 /* 0x10 */
#define IA64_TASK_NEED_RESCHED_OFFSET 40 /* 0x28 */
#define IA64_TASK_PROCESSOR_OFFSET 100 /* 0x64 */
-#define IA64_TASK_THREAD_OFFSET 896 /* 0x380 */
-#define IA64_TASK_THREAD_KSP_OFFSET 896 /* 0x380 */
-#define IA64_TASK_THREAD_SIGMASK_OFFSET 2744 /* 0xab8 */
+#define IA64_TASK_THREAD_OFFSET 1424 /* 0x590 */
+#define IA64_TASK_THREAD_KSP_OFFSET 1424 /* 0x590 */
+#define IA64_TASK_THREAD_SIGMASK_OFFSET 3184 /* 0xc70 */
#define IA64_TASK_PID_OFFSET 188 /* 0xbc */
#define IA64_TASK_MM_OFFSET 88 /* 0x58 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 62881b538..c81337e07 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -102,15 +102,13 @@ typedef unsigned long pgprot_t;
#ifdef CONFIG_IA64_GENERIC
# include <asm/machvec.h>
# define virt_to_page(kaddr) (mem_map + platform_map_nr(kaddr))
-#elif defined (CONFIG_IA64_SN_SN1_SIM)
+#elif defined (CONFIG_IA64_SN_SN1)
# define virt_to_page(kaddr) (mem_map + MAP_NR_SN1(kaddr))
#else
# define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr))
#endif
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-# endif /* __KERNEL__ */
-
typedef union ia64_va {
struct {
unsigned long off : 61; /* intra-region offset */
@@ -138,7 +136,7 @@ typedef union ia64_va {
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
#define PAGE_BUG(page) do { BUG(); } while (0)
-extern __inline__ int
+static __inline__ int
get_order (unsigned long size)
{
double d = size - 1;
@@ -151,6 +149,7 @@ get_order (unsigned long size)
return order;
}
+# endif /* __KERNEL__ */
#endif /* !ASSEMBLY */
#define PAGE_OFFSET 0xe000000000000000
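For reference, get_order() above extracts the binary exponent from the double representation of size - 1; a portable sketch of the equivalent computation (assuming PAGE_SHIFT as defined in <asm/page.h>):

/* Smallest order such that (PAGE_SIZE << order) >= size; equivalent
 * to the floating-point exponent trick used by get_order() above. */
static inline int get_order_sketch (unsigned long size)
{
	unsigned long n = (size - 1) >> PAGE_SHIFT;
	int order = 0;

	while (n) {
		n >>= 1;
		order++;
	}
	return order;
}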
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index d55b16253..ad3efe1ec 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -66,6 +66,7 @@
#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */
#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY 41
#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -644,15 +645,16 @@ struct ia64_pal_retval {
* (generally 0) MUST be passed. Reserved parameters are not optional
* parameters.
*/
-extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
-#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3)
-#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3)
-#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3)
-#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3)
+#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3, 0)
+#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3, 1)
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3)
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3)
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3)
typedef int (*ia64_pal_handler) (u64, ...);
extern ia64_pal_handler ia64_pal;
@@ -706,7 +708,7 @@ typedef union pal_bus_features_u {
extern void pal_bus_features_print (u64);
/* Provide information about configurable processor bus features */
-extern inline s64
+static inline s64
ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
pal_bus_features_u_t *features_status,
pal_bus_features_u_t *features_control)
@@ -723,7 +725,7 @@ ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
}
/* Enables/disables specific processor bus features */
-extern inline s64
+static inline s64
ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
{
struct ia64_pal_retval iprv;
@@ -732,7 +734,7 @@ ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
}
/* Get detailed cache information */
-extern inline s64
+static inline s64
ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
{
struct ia64_pal_retval iprv;
@@ -750,7 +752,7 @@ ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_in
}
/* Get detailed cache protection information */
-extern inline s64
+static inline s64
ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
{
struct ia64_pal_retval iprv;
@@ -773,18 +775,18 @@ ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_
* Flush the processor instruction or data caches. *PROGRESS must be
* initialized to zero before calling this for the first time..
*/
-extern inline s64
+static inline s64
ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+ PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
*progress = iprv.v1;
return iprv.status;
}
/* Initialize the processor controlled caches */
-extern inline s64
+static inline s64
ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
{
struct ia64_pal_retval iprv;
@@ -796,7 +798,7 @@ ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
* processor controlled cache to known values without the availability
* of backing memory.
*/
-extern inline s64
+static inline s64
ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
{
struct ia64_pal_retval iprv;
@@ -806,7 +808,7 @@ ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
/* Read the data and tag of a processor controlled cache line for diags */
-extern inline s64
+static inline s64
ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
{
struct ia64_pal_retval iprv;
@@ -815,7 +817,7 @@ ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
}
/* Return summary information about the hierarchy of caches controlled by the processor */
-extern inline s64
+static inline s64
ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
{
struct ia64_pal_retval iprv;
@@ -828,7 +830,7 @@ ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
}
/* Write the data and tag of a processor-controlled cache line for diags */
-extern inline s64
+static inline s64
ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
{
struct ia64_pal_retval iprv;
@@ -838,7 +840,7 @@ ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data
/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
-extern inline s64
+static inline s64
ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
u64 *buffer_size, u64 *buffer_align)
{
@@ -852,7 +854,7 @@ ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
}
/* Copy relocatable PAL procedures from ROM to memory */
-extern inline s64
+static inline s64
ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
{
struct ia64_pal_retval iprv;
@@ -863,7 +865,7 @@ ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc
}
/* Return the number of instruction and data debug register pairs */
-extern inline s64
+static inline s64
ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
{
struct ia64_pal_retval iprv;
@@ -878,7 +880,7 @@ ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
#ifdef TBD
/* Switch from IA64-system environment to IA-32 system environment */
-extern inline s64
+static inline s64
ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
{
struct ia64_pal_retval iprv;
@@ -888,7 +890,7 @@ ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
#endif
/* Get unique geographical address of this processor on its bus */
-extern inline s64
+static inline s64
ia64_pal_fixed_addr (u64 *global_unique_addr)
{
struct ia64_pal_retval iprv;
@@ -899,7 +901,7 @@ ia64_pal_fixed_addr (u64 *global_unique_addr)
}
/* Get base frequency of the platform if generated by the processor */
-extern inline s64
+static inline s64
ia64_pal_freq_base (u64 *platform_base_freq)
{
struct ia64_pal_retval iprv;
@@ -913,7 +915,7 @@ ia64_pal_freq_base (u64 *platform_base_freq)
* Get the ratios for processor frequency, bus frequency and interval timer to
 * the base frequency of the platform
*/
-extern inline s64
+static inline s64
ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
struct pal_freq_ratio *itc_ratio)
{
@@ -932,7 +934,7 @@ ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *
* power states where prefetching and execution are suspended and cache and
* TLB coherency is not maintained.
*/
-extern inline s64
+static inline s64
ia64_pal_halt (u64 halt_state)
{
struct ia64_pal_retval iprv;
@@ -952,7 +954,7 @@ typedef union pal_power_mgmt_info_u {
} pal_power_mgmt_info_u_t;
/* Return information about processor's optional power management capabilities. */
-extern inline s64
+static inline s64
ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
{
struct ia64_pal_retval iprv;
@@ -963,7 +965,7 @@ ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
* suspended, but cache and TLB coherency is maintained.
*/
-extern inline s64
+static inline s64
ia64_pal_halt_light (void)
{
struct ia64_pal_retval iprv;
@@ -975,7 +977,7 @@ ia64_pal_halt_light (void)
* the error logging registers to be written. This procedure also checks the pending
* machine check bit and pending INIT bit and reports their states.
*/
-extern inline s64
+static inline s64
ia64_pal_mc_clear_log (u64 *pending_vector)
{
struct ia64_pal_retval iprv;
@@ -988,7 +990,7 @@ ia64_pal_mc_clear_log (u64 *pending_vector)
/* Ensure that all outstanding transactions in a processor are completed or that any
 * MCA due to these outstanding transactions is taken.
*/
-extern inline s64
+static inline s64
ia64_pal_mc_drain (void)
{
struct ia64_pal_retval iprv;
@@ -997,7 +999,7 @@ ia64_pal_mc_drain (void)
}
/* Return the machine check dynamic processor state */
-extern inline s64
+static inline s64
ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
{
struct ia64_pal_retval iprv;
@@ -1010,7 +1012,7 @@ ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
}
/* Return processor machine check information */
-extern inline s64
+static inline s64
ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
{
struct ia64_pal_retval iprv;
@@ -1025,7 +1027,7 @@ ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_in
/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
* attempt to correct any expected machine checks.
*/
-extern inline s64
+static inline s64
ia64_pal_mc_expected (u64 expected, u64 *previous)
{
struct ia64_pal_retval iprv;
@@ -1039,7 +1041,7 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
* minimal processor state in the event of a machine check or initialization
* event.
*/
-extern inline s64
+static inline s64
ia64_pal_mc_register_mem (u64 physical_addr)
{
struct ia64_pal_retval iprv;
@@ -1050,7 +1052,7 @@ ia64_pal_mc_register_mem (u64 physical_addr)
/* Restore minimal architectural processor state, set CMC interrupt if necessary
* and resume execution
*/
-extern inline s64
+static inline s64
ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
{
struct ia64_pal_retval iprv;
@@ -1059,7 +1061,7 @@ ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
}
/* Return the memory attributes implemented by the processor */
-extern inline s64
+static inline s64
ia64_pal_mem_attrib (u64 *mem_attrib)
{
struct ia64_pal_retval iprv;
@@ -1072,7 +1074,7 @@ ia64_pal_mem_attrib (u64 *mem_attrib)
/* Return the amount of memory needed for the second phase of processor
* self-test and the required alignment of memory.
*/
-extern inline s64
+static inline s64
ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
{
struct ia64_pal_retval iprv;
@@ -1098,7 +1100,7 @@ typedef union pal_perf_mon_info_u {
/* Return the performance monitor information about what can be counted
* and how to configure the monitors to count the desired events.
*/
-extern inline s64
+static inline s64
ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
{
struct ia64_pal_retval iprv;
@@ -1111,7 +1113,7 @@ ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
/* Specifies the physical address of the processor interrupt block
* and I/O port space.
*/
-extern inline s64
+static inline s64
ia64_pal_platform_addr (u64 type, u64 physical_addr)
{
struct ia64_pal_retval iprv;
@@ -1120,7 +1122,7 @@ ia64_pal_platform_addr (u64 type, u64 physical_addr)
}
/* Set the SAL PMI entrypoint in memory */
-extern inline s64
+static inline s64
ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
{
struct ia64_pal_retval iprv;
@@ -1130,7 +1132,7 @@ ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
struct pal_features_s;
/* Provide information about configurable processor features */
-extern inline s64
+static inline s64
ia64_pal_proc_get_features (u64 *features_avail,
u64 *features_status,
u64 *features_control)
@@ -1146,7 +1148,7 @@ ia64_pal_proc_get_features (u64 *features_avail,
}
/* Enable/disable processor dependent features */
-extern inline s64
+static inline s64
ia64_pal_proc_set_features (u64 feature_select)
{
struct ia64_pal_retval iprv;
@@ -1167,7 +1169,7 @@ typedef struct ia64_ptce_info_s {
/* Return the information required for the architected loop used to purge
* (initialize) the entire TC
*/
-extern inline s64
+static inline s64
ia64_get_ptce (ia64_ptce_info_t *ptce)
{
struct ia64_pal_retval iprv;
@@ -1187,7 +1189,7 @@ ia64_get_ptce (ia64_ptce_info_t *ptce)
}
/* Return info about implemented application and control registers. */
-extern inline s64
+static inline s64
ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
{
struct ia64_pal_retval iprv;
@@ -1211,7 +1213,7 @@ typedef union pal_hints_u {
/* Return information about the register stack and RSE for this processor
* implementation.
*/
-extern inline s64
+static inline s64
ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
{
struct ia64_pal_retval iprv;
@@ -1227,7 +1229,7 @@ ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
* suspended, but cause cache and TLB coherency to be maintained.
* This is usually called in IA-32 mode.
*/
-extern inline s64
+static inline s64
ia64_pal_shutdown (void)
{
struct ia64_pal_retval iprv;
@@ -1236,7 +1238,7 @@ ia64_pal_shutdown (void)
}
/* Perform the second phase of processor self-test. */
-extern inline s64
+static inline s64
ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
{
struct ia64_pal_retval iprv;
@@ -1261,7 +1263,7 @@ typedef union pal_version_u {
/* Return PAL version information */
-extern inline s64
+static inline s64
ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
{
struct ia64_pal_retval iprv;
@@ -1299,7 +1301,7 @@ typedef union pal_tc_info_u {
/* Return information about the virtual memory characteristics of the processor
* implementation.
*/
-extern inline s64
+static inline s64
ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
{
struct ia64_pal_retval iprv;
@@ -1314,7 +1316,7 @@ ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_
/* Get page size information about the virtual memory characteristics of the processor
* implementation.
*/
-extern inline s64
+static inline s64
ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
{
struct ia64_pal_retval iprv;
@@ -1353,7 +1355,7 @@ typedef union pal_vm_info_2_u {
/* Get summary information about the virtual memory characteristics of the processor
* implementation.
*/
-extern inline s64
+static inline s64
ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
{
struct ia64_pal_retval iprv;
@@ -1377,7 +1379,7 @@ typedef union pal_itr_valid_u {
} pal_tr_valid_u_t;
/* Read a translation register */
-extern inline s64
+static inline s64
ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
{
struct ia64_pal_retval iprv;
@@ -1387,6 +1389,14 @@ ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr
return iprv.status;
}
+static inline s64
+ia64_pal_prefetch_visibility (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, 0, 0, 0);
+ return iprv.status;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
index a410b8892..e8920ea76 100644
--- a/include/asm-ia64/param.h
+++ b/include/asm-ia64/param.h
@@ -15,7 +15,7 @@
* Yeah, simulating stuff is slow, so let us catch some breath between
* timer interrupts...
*/
-# define HZ 20
+# define HZ 32
#else
# define HZ 1024
#endif
@@ -36,4 +36,8 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+#endif
+
#endif /* _ASM_IA64_PARAM_H */
diff --git a/include/asm-ia64/parport.h b/include/asm-ia64/parport.h
new file mode 100644
index 000000000..67e16adfc
--- /dev/null
+++ b/include/asm-ia64/parport.h
@@ -0,0 +1,20 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_IA64_PARPORT_H
+#define _ASM_IA64_PARPORT_H 1
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+
+static int __devinit
+parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports(autoirq, autodma);
+}
+
+#endif /* _ASM_IA64_PARPORT_H */
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index 01c038774..652aaffca 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -21,12 +21,12 @@
struct pci_dev;
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
@@ -127,7 +127,7 @@ extern void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int n
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
-extern inline int
+static inline int
pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
return 1;
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 7c35de4bc..5256a4fff 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -32,7 +32,7 @@
#define pte_quicklist (my_cpu_data.pte_quick)
#define pgtable_cache_size (my_cpu_data.pgtable_cache_sz)
-extern __inline__ pgd_t*
+static __inline__ pgd_t*
get_pgd_slow (void)
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
@@ -41,7 +41,7 @@ get_pgd_slow (void)
return ret;
}
-extern __inline__ pgd_t*
+static __inline__ pgd_t*
get_pgd_fast (void)
{
unsigned long *ret = pgd_quicklist;
@@ -54,7 +54,7 @@ get_pgd_fast (void)
return (pgd_t *)ret;
}
-extern __inline__ pgd_t*
+static __inline__ pgd_t*
pgd_alloc (void)
{
pgd_t *pgd;
@@ -65,7 +65,7 @@ pgd_alloc (void)
return pgd;
}
-extern __inline__ void
+static __inline__ void
free_pgd_fast (pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -73,7 +73,7 @@ free_pgd_fast (pgd_t *pgd)
++pgtable_cache_size;
}
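The quicklists used throughout this file are intrusive free lists: the first word of each free page stores a pointer to the next free page, exactly as free_pgd_fast() above shows. A standalone sketch of the push/pop pair (illustrative only, outside the kernel allocator):

/* Intrusive singly linked free list of pages. */
static unsigned long *quicklist;

static void quicklist_push (void *page)
{
	*(unsigned long *)page = (unsigned long) quicklist;	/* link in */
	quicklist = page;
}

static void *quicklist_pop (void)
{
	unsigned long *ret = quicklist;

	if (ret)
		quicklist = (unsigned long *) *ret;	/* unlink head */
	return ret;
}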
-extern __inline__ pmd_t *
+static __inline__ pmd_t *
get_pmd_slow (void)
{
pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
@@ -83,7 +83,7 @@ get_pmd_slow (void)
return pmd;
}
-extern __inline__ pmd_t *
+static __inline__ pmd_t *
get_pmd_fast (void)
{
unsigned long *ret = (unsigned long *)pmd_quicklist;
@@ -96,7 +96,7 @@ get_pmd_fast (void)
return (pmd_t *)ret;
}
-extern __inline__ void
+static __inline__ void
free_pmd_fast (pmd_t *pmd)
{
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
@@ -104,7 +104,7 @@ free_pmd_fast (pmd_t *pmd)
++pgtable_cache_size;
}
-extern __inline__ void
+static __inline__ void
free_pmd_slow (pmd_t *pmd)
{
free_page((unsigned long)pmd);
@@ -112,7 +112,7 @@ free_pmd_slow (pmd_t *pmd)
extern pte_t *get_pte_slow (pmd_t *pmd, unsigned long address_preadjusted);
-extern __inline__ pte_t *
+static __inline__ pte_t *
get_pte_fast (void)
{
unsigned long *ret = (unsigned long *)pte_quicklist;
@@ -125,7 +125,7 @@ get_pte_fast (void)
return (pte_t *)ret;
}
-extern __inline__ void
+static __inline__ void
free_pte_fast (pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
@@ -142,7 +142,7 @@ free_pte_fast (pte_t *pte)
extern void __handle_bad_pgd (pgd_t *pgd);
extern void __handle_bad_pmd (pmd_t *pmd);
-extern __inline__ pte_t*
+static __inline__ pte_t*
pte_alloc (pmd_t *pmd, unsigned long vmaddr)
{
unsigned long offset;
@@ -163,7 +163,7 @@ pte_alloc (pmd_t *pmd, unsigned long vmaddr)
return (pte_t *) pmd_page(*pmd) + offset;
}
-extern __inline__ pmd_t*
+static __inline__ pmd_t*
pmd_alloc (pgd_t *pgd, unsigned long vmaddr)
{
unsigned long offset;
@@ -228,7 +228,7 @@ extern spinlock_t ptcg_lock;
/*
* Flush a specified user mapping
*/
-extern __inline__ void
+static __inline__ void
flush_tlb_mm (struct mm_struct *mm)
{
if (mm) {
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index fcf340ee0..6fbed9cc1 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -3,9 +3,9 @@
/*
* This file contains the functions and defines necessary to modify and use
- * the ia-64 page table tree.
+ * the IA-64 page table tree.
*
- * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h> (currently 8192).
*
* Copyright (C) 1998-2000 Hewlett-Packard Co
@@ -19,12 +19,6 @@
#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
-/* Is ADDR a valid kernel address? */
-#define kern_addr_valid(addr) ((addr) >= TASK_SIZE)
-
-/* Is ADDR a valid physical address? */
-#define phys_addr_valid(addr) (((addr) & my_cpu_data.unimpl_pa_mask) == 0)
-
/*
* First, define the various bits in a PTE. Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
@@ -166,7 +160,7 @@
 * Given a pointer to a mem_map[] entry, return the kernel virtual
* address corresponding to that page.
*/
-#define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))
+#define page_address(page) ((page)->virtual)
/*
* Now for some cache flushing routines. This is the kind of stuff
@@ -190,6 +184,28 @@ do { \
ia64_flush_icache_page((unsigned long) page_address(pg)); \
} while (0)
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static __inline__ long
+ia64_phys_addr_valid (unsigned long addr)
+{
+ return (addr & (my_cpu_data.unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error. Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions).
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr) (1)
+
/*
* Now come the defines and routines to manage and access the three-level
* page table.
@@ -248,14 +264,14 @@ extern pmd_t *ia64_bad_pagetable (void);
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!phys_addr_valid(pmd_val(pmd)))
+#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = __pa(pmdp))
#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (!phys_addr_valid(pgd_val(pgd)))
+#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
@@ -301,7 +317,7 @@ extern pmd_t *ia64_bad_pagetable (void);
/*
* Return the region index for virtual address ADDRESS.
*/
-extern __inline__ unsigned long
+static __inline__ unsigned long
rgn_index (unsigned long address)
{
ia64_va a;
@@ -313,7 +329,7 @@ rgn_index (unsigned long address)
/*
* Return the region offset for virtual address ADDRESS.
*/
-extern __inline__ unsigned long
+static __inline__ unsigned long
rgn_offset (unsigned long address)
{
ia64_va a;
@@ -325,7 +341,7 @@ rgn_offset (unsigned long address)
#define RGN_SIZE (1UL << 61)
#define RGN_KERNEL 7
-extern __inline__ unsigned long
+static __inline__ unsigned long
pgd_index (unsigned long address)
{
unsigned long region = address >> 61;
@@ -336,7 +352,7 @@ pgd_index (unsigned long address)
/* The offset in the 1-level directory is given by the 3 region bits
(61..63) and the seven level-1 bits (33-39). */
-extern __inline__ pgd_t*
+static __inline__ pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
@@ -409,9 +425,6 @@ do { \
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define module_map vmalloc
-#define module_unmap vfree
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
@@ -421,9 +434,11 @@ do { \
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#include <asm-generic/pgtable.h>
+
# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index c37fc76b1..37a8c2ba4 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -253,9 +253,9 @@ struct cpuinfo_ia64 {
#define my_cpu_data cpu_data[smp_processor_id()]
#ifdef CONFIG_SMP
-# define loops_per_sec() my_cpu_data.loops_per_sec
+# define ia64_loops_per_sec() my_cpu_data.loops_per_sec
#else
-# define loops_per_sec() loops_per_sec
+# define ia64_loops_per_sec() loops_per_sec
#endif
extern struct cpuinfo_ia64 cpu_data[NR_CPUS];
@@ -305,10 +305,11 @@ struct thread_struct {
__u64 csd; /* IA32 code selector descriptor */
__u64 ssd; /* IA32 stack selector descriptor */
__u64 tssd; /* IA32 TSS descriptor */
+ __u64 old_iob; /* old IOBase value */
union {
__u64 sigmask; /* aligned mask for sigsuspend scall */
} un;
-# define INIT_THREAD_IA32 , 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, {0}
+# define INIT_THREAD_IA32 , 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, {0}
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
@@ -334,6 +335,8 @@ struct thread_struct {
#define start_thread(regs,new_ip,new_sp) do { \
set_fs(USER_DS); \
+ ia64_psr(regs)->dfh = 1; /* disable fph */ \
+ ia64_psr(regs)->mfh = 0; /* clear mfh */ \
ia64_psr(regs)->cpl = 3; /* set user mode */ \
ia64_psr(regs)->ri = 0; /* clear return slot number */ \
ia64_psr(regs)->is = 0; /* IA-64 instruction set */ \
@@ -390,6 +393,8 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
+#ifndef CONFIG_SMP
+
static inline struct task_struct *
ia64_get_fpu_owner (void)
{
@@ -404,6 +409,8 @@ ia64_set_fpu_owner (struct task_struct *t)
__asm__ __volatile__ ("mov ar.k5=%0" :: "r"(t));
}
+#endif /* !CONFIG_SMP */
+
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
@@ -447,31 +454,31 @@ ia64_load_fpu (struct ia64_fpreg *fph) {
ia64_fph_disable();
}
-extern inline void
+static inline void
ia64_fc (void *addr)
{
__asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory");
}
-extern inline void
+static inline void
ia64_sync_i (void)
{
__asm__ __volatile__ (";; sync.i" ::: "memory");
}
-extern inline void
+static inline void
ia64_srlz_i (void)
{
__asm__ __volatile__ (";; srlz.i ;;" ::: "memory");
}
-extern inline void
+static inline void
ia64_srlz_d (void)
{
__asm__ __volatile__ (";; srlz.d" ::: "memory");
}
-extern inline __u64
+static inline __u64
ia64_get_rr (__u64 reg_bits)
{
__u64 r;
@@ -479,13 +486,13 @@ ia64_get_rr (__u64 reg_bits)
return r;
}
-extern inline void
+static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
__asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}
-extern inline __u64
+static inline __u64
ia64_get_dcr (void)
{
__u64 r;
@@ -493,14 +500,14 @@ ia64_get_dcr (void)
return r;
}
-extern inline void
+static inline void
ia64_set_dcr (__u64 val)
{
__asm__ __volatile__ ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
-extern inline __u64
+static inline __u64
ia64_get_lid (void)
{
__u64 r;
@@ -508,7 +515,7 @@ ia64_get_lid (void)
return r;
}
-extern inline void
+static inline void
ia64_invala (void)
{
__asm__ __volatile__ ("invala" ::: "memory");
@@ -526,7 +533,7 @@ ia64_invala (void)
* Insert a translation into an instruction and/or data translation
* register.
*/
-extern inline void
+static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
__u64 vmaddr, __u64 pte,
__u64 log_page_size)
@@ -545,7 +552,7 @@ ia64_itr (__u64 target_mask, __u64 tr_num,
* Insert a translation into the instruction and/or data translation
* cache.
*/
-extern inline void
+static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
@@ -562,7 +569,7 @@ ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
* Purge a range of addresses from instruction and/or data translation
* register(s).
*/
-extern inline void
+static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
if (target_mask & 0x1)
@@ -572,21 +579,21 @@ ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
}
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
-extern inline void
+static inline void
ia64_set_iva (void *ivt_addr)
{
__asm__ __volatile__ ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}
/* Set the page table address and control bits. */
-extern inline void
+static inline void
ia64_set_pta (__u64 pta)
{
/* Note: srlz.i implies srlz.d */
__asm__ __volatile__ ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}
-extern inline __u64
+static inline __u64
ia64_get_cpuid (__u64 regnum)
{
__u64 r;
@@ -595,13 +602,13 @@ ia64_get_cpuid (__u64 regnum)
return r;
}
-extern inline void
+static inline void
ia64_eoi (void)
{
__asm__ ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}
-extern __inline__ void
+static inline void
ia64_set_lrr0 (__u8 vector, __u8 masked)
{
if (masked > 1)
@@ -612,7 +619,7 @@ ia64_set_lrr0 (__u8 vector, __u8 masked)
}
-extern __inline__ void
+static inline void
ia64_set_lrr1 (__u8 vector, __u8 masked)
{
if (masked > 1)
@@ -622,13 +629,13 @@ ia64_set_lrr1 (__u8 vector, __u8 masked)
:: "r"((masked << 16) | vector) : "memory");
}
-extern __inline__ void
+static inline void
ia64_set_pmv (__u64 val)
{
__asm__ __volatile__ ("mov cr.pmv=%0" :: "r"(val) : "memory");
}
-extern __inline__ __u64
+static inline __u64
ia64_get_pmc (__u64 regnum)
{
__u64 retval;
@@ -637,13 +644,13 @@ ia64_get_pmc (__u64 regnum)
return retval;
}
-extern __inline__ void
+static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
__asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}
-extern __inline__ __u64
+static inline __u64
ia64_get_pmd (__u64 regnum)
{
__u64 retval;
@@ -652,7 +659,7 @@ ia64_get_pmd (__u64 regnum)
return retval;
}
-extern __inline__ void
+static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
__asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
@@ -662,7 +669,7 @@ ia64_set_pmd (__u64 regnum, __u64 value)
* Given the address to which a spill occurred, return the unat bit
* number that corresponds to this address.
*/
-extern inline __u64
+static inline __u64
ia64_unat_pos (void *spill_addr)
{
return ((__u64) spill_addr >> 3) & 0x3f;
@@ -672,7 +679,7 @@ ia64_unat_pos (void *spill_addr)
* Set the NaT bit of an integer register which was spilled at address
* SPILL_ADDR. UNAT is the mask to be updated.
*/
-extern inline void
+static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
__u64 bit = ia64_unat_pos(spill_addr);
@@ -685,7 +692,7 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
* Return saved PC of a blocked thread.
* Note that the only way T can block is through a call to schedule() -> switch_to().
*/
-extern inline unsigned long
+static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
struct unw_frame_info info;
@@ -720,7 +727,7 @@ thread_saved_pc (struct thread_struct *t)
/*
* Set the correctable machine check vector register
*/
-extern __inline__ void
+static inline void
ia64_set_cmcv (__u64 val)
{
__asm__ __volatile__ ("mov cr.cmcv=%0" :: "r"(val) : "memory");
@@ -729,7 +736,7 @@ ia64_set_cmcv (__u64 val)
/*
* Read the correctable machine check vector register
*/
-extern __inline__ __u64
+static inline __u64
ia64_get_cmcv (void)
{
__u64 val;
@@ -738,7 +745,7 @@ ia64_get_cmcv (void)
return val;
}
-extern inline __u64
+static inline __u64
ia64_get_ivr (void)
{
__u64 r;
@@ -746,13 +753,13 @@ ia64_get_ivr (void)
return r;
}
-extern inline void
+static inline void
ia64_set_tpr (__u64 val)
{
__asm__ __volatile__ ("mov cr.tpr=%0" :: "r"(val));
}
-extern inline __u64
+static inline __u64
ia64_get_tpr (void)
{
__u64 r;
@@ -760,71 +767,75 @@ ia64_get_tpr (void)
return r;
}
-extern __inline__ void
+static inline void
ia64_set_irr0 (__u64 val)
{
__asm__ __volatile__("mov cr.irr0=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
-extern __inline__ __u64
+static inline __u64
ia64_get_irr0 (void)
{
__u64 val;
- __asm__ ("mov %0=cr.irr0" : "=r"(val));
+ /* this is volatile because irr may change unbeknownst to gcc... */
+ __asm__ __volatile__("mov %0=cr.irr0" : "=r"(val));
return val;
}
-extern __inline__ void
+static inline void
ia64_set_irr1 (__u64 val)
{
__asm__ __volatile__("mov cr.irr1=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
-extern __inline__ __u64
+static inline __u64
ia64_get_irr1 (void)
{
__u64 val;
- __asm__ ("mov %0=cr.irr1" : "=r"(val));
+ /* this is volatile because irr may change unbeknownst to gcc... */
+ __asm__ __volatile__("mov %0=cr.irr1" : "=r"(val));
return val;
}
-extern __inline__ void
+static inline void
ia64_set_irr2 (__u64 val)
{
__asm__ __volatile__("mov cr.irr2=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
-extern __inline__ __u64
+static inline __u64
ia64_get_irr2 (void)
{
__u64 val;
- __asm__ ("mov %0=cr.irr2" : "=r"(val));
+ /* this is volatile because irr may change unbeknownst to gcc... */
+ __asm__ __volatile__("mov %0=cr.irr2" : "=r"(val));
return val;
}
-extern __inline__ void
+static inline void
ia64_set_irr3 (__u64 val)
{
__asm__ __volatile__("mov cr.irr3=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
-extern __inline__ __u64
+static inline __u64
ia64_get_irr3 (void)
{
__u64 val;
- __asm__ ("mov %0=cr.irr3" : "=r"(val));
+ /* this is volatile because irr may change unbeknownst to gcc... */
+ __asm__ __volatile__("mov %0=cr.irr3" : "=r"(val));
return val;
}
-extern __inline__ __u64
+static inline __u64
ia64_get_gp(void)
{
__u64 val;
@@ -852,7 +863,7 @@ ia64_get_gp(void)
#define ia64_rotl(w,n) ia64_rotr((w),(64)-(n))
-extern __inline__ __u64
+static inline __u64
ia64_thash (__u64 addr)
{
__u64 result;
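
A note on the UNAT helpers converted above: the RSE spills registers to 8-byte slots, so bits 3..8 of the spill address pick which of the 64 bits in a UNAT word belongs to that slot. A minimal stand-alone sketch of the same arithmetic (plain C; the mask update mirrors what ia64_set_unat is expected to do, since its body is truncated in the hunk above):

#include <stdio.h>

/* Bit position in the UNAT mask for a register spilled at addr: the
 * low 3 bits address bytes within the 8-byte slot, the next 6 bits
 * pick one of the 64 slots covered by a single 64-bit UNAT word. */
static unsigned long unat_pos (void *spill_addr)
{
	return ((unsigned long) spill_addr >> 3) & 0x3f;
}

/* Clear the slot's old NaT bit and or in the new value (0 or 1). */
static void set_unat (unsigned long *unat, void *spill_addr, unsigned long nat)
{
	unsigned long bit = unat_pos(spill_addr);

	*unat = (*unat & ~(1UL << bit)) | (nat << bit);
}

int main (void)
{
	unsigned long unat = 0, slot[2];

	set_unat(&unat, &slot[1], 1);
	printf("bit %lu, mask %#lx\n", unat_pos(&slot[1]), unat);
	return 0;
}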
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index b71acee5f..68ebe286e 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -219,6 +219,7 @@ struct switch_stack {
extern void show_regs (struct pt_regs *);
extern long ia64_peek (struct pt_regs *, struct task_struct *, unsigned long addr, long *val);
extern long ia64_poke (struct pt_regs *, struct task_struct *, unsigned long addr, long val);
+ extern void ia64_flush_fph (struct task_struct *t);
extern void ia64_sync_fph (struct task_struct *t);
#ifdef CONFIG_IA64_NEW_UNWIND
diff --git a/include/asm-ia64/ptrace_offsets.h b/include/asm-ia64/ptrace_offsets.h
index b32b6c89d..6fa6fb446 100644
--- a/include/asm-ia64/ptrace_offsets.h
+++ b/include/asm-ia64/ptrace_offsets.h
@@ -17,6 +17,8 @@
* unsigned long dbr[8];
* unsigned long rsvd2[504];
* unsigned long ibr[8];
+ * unsigned long rsvd3[504];
+ * unsigned long pmd[4];
* }
*/
@@ -157,6 +159,7 @@
#define PT_B4 0x07f0
#define PT_B5 0x07f8
+#define PT_AR_EC 0x0800
#define PT_AR_LC 0x0808
/* pt_regs */
@@ -209,5 +212,6 @@
#define PT_DBR 0x2000 /* data breakpoint registers */
#define PT_IBR 0x3000 /* instruction breakpoint registers */
+#define PT_PMD 0x4000 /* performance monitoring counters */
#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
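
The two new offsets extend the pseudo-struct layout documented at the top of this header. For reference, user space reaches these slots through PTRACE_PEEKUSER; a hypothetical debugger fragment reading the new ar.ec slot of a stopped tracee might look like this (error handling trimmed, header path assumed):

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace_offsets.h>

/* 'pid' is assumed to be a child already stopped under PTRACE_TRACEME. */
static long read_ar_ec (pid_t pid)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid, (void *) PT_AR_EC, 0);
	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");
	return val;
}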
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 06096644b..9f02ac571 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -17,6 +17,7 @@
*/
#include <linux/config.h>
+#include <linux/spinlock.h>
#include <asm/pal.h>
#include <asm/system.h>
@@ -158,12 +159,22 @@ struct ia64_sal_desc_tr {
char reserved2[8];
};
-struct ia64_sal_desc_ptc {
+typedef struct ia64_sal_desc_ptc {
char type;
char reserved1[3];
unsigned int num_domains; /* # of coherence domains */
- long domain_info; /* physical address of domain info table */
-};
+ s64 domain_info; /* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+ unsigned long proc_count; /* number of processors in domain */
+ long proc_list; /* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+ unsigned char id; /* id of processor */
+ unsigned char eid; /* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
#define IA64_SAL_AP_EXTERNAL_INT 0
@@ -175,6 +186,7 @@ struct ia64_sal_desc_ap_wakeup {
};
extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
extern const char *ia64_sal_strerror (long status);
extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
@@ -387,7 +399,7 @@ typedef struct psilog
* Now define a couple of inline functions for improved type checking
* and convenience.
*/
-extern inline long
+static inline long
ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
unsigned long *drift_info)
{
@@ -400,7 +412,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
}
/* Flush all the processor and platform level instruction and/or data caches */
-extern inline s64
+static inline s64
ia64_sal_cache_flush (u64 cache_type)
{
struct ia64_sal_retval isrv;
@@ -411,7 +423,7 @@ ia64_sal_cache_flush (u64 cache_type)
/* Initialize all the processor and platform level instruction and data caches */
-extern inline s64
+static inline s64
ia64_sal_cache_init (void)
{
struct ia64_sal_retval isrv;
@@ -422,7 +434,7 @@ ia64_sal_cache_init (void)
/* Clear the processor and platform information logged by SAL with respect to the
 * machine state at the time of MCAs, INITs or CMCs
*/
-extern inline s64
+static inline s64
ia64_sal_clear_state_info (u64 sal_info_type, u64 sal_info_sub_type)
{
struct ia64_sal_retval isrv;
@@ -434,7 +446,7 @@ ia64_sal_clear_state_info (u64 sal_info_type, u64 sal_info_sub_type)
/* Get the processor and platform information logged by SAL with respect to the machine
 * state at the time of MCAs, INITs or CMCs.
*/
-extern inline u64
+static inline u64
ia64_sal_get_state_info (u64 sal_info_type, u64 sal_info_sub_type, u64 *sal_info)
{
struct ia64_sal_retval isrv;
@@ -446,7 +458,7 @@ ia64_sal_get_state_info (u64 sal_info_type, u64 sal_info_sub_type, u64 *sal_info
/* Get the maximum size of the information logged by SAL with respect to the machine
* state at the time of MCAs, INITs or CMCs
*/
-extern inline u64
+static inline u64
ia64_sal_get_state_info_size (u64 sal_info_type, u64 sal_info_sub_type)
{
struct ia64_sal_retval isrv;
@@ -459,7 +471,7 @@ ia64_sal_get_state_info_size (u64 sal_info_type, u64 sal_info_sub_type)
/* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup
* from the monarch processor.
*/
-extern inline s64
+static inline s64
ia64_sal_mc_rendez (void)
{
struct ia64_sal_retval isrv;
@@ -471,7 +483,7 @@ ia64_sal_mc_rendez (void)
* the machine check rendezvous sequence as well as the mechanism to wake up the
* non-monarch processor at the end of machine check processing.
*/
-extern inline s64
+static inline s64
ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout)
{
struct ia64_sal_retval isrv;
@@ -480,7 +492,7 @@ ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout)
}
/* Read from PCI configuration space */
-extern inline s64
+static inline s64
ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value)
{
struct ia64_sal_retval isrv;
@@ -503,7 +515,7 @@ ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value)
}
/* Write to PCI configuration space */
-extern inline s64
+static inline s64
ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value)
{
struct ia64_sal_retval isrv;
@@ -527,7 +539,7 @@ ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value)
* Register physical addresses of locations needed by SAL when SAL
* procedures are invoked in virtual mode.
*/
-extern inline s64
+static inline s64
ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
{
struct ia64_sal_retval isrv;
@@ -539,7 +551,7 @@ ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
* or entry points where SAL will pass control for the specified event. These event
 * handlers are for the boot rendezvous, MCAs and INIT scenarios.
*/
-extern inline s64
+static inline s64
ia64_sal_set_vectors (u64 vector_type,
u64 handler_addr1, u64 gp1, u64 handler_len1,
u64 handler_addr2, u64 gp2, u64 handler_len2)
@@ -552,7 +564,7 @@ ia64_sal_set_vectors (u64 vector_type,
return isrv.status;
}
/* Update the contents of PAL block in the non-volatile storage device */
-extern inline s64
+static inline s64
ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
u64 *error_code, u64 *scratch_buf_size_needed)
{
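
As a usage sketch for the typed wrappers above: a caller probing the platform clock checks the SAL status word and can decode failures with ia64_sal_strerror(). SAL_FREQ_BASE_PLATFORM is assumed here to be the selector defined elsewhere in this header:

/* Query the platform base frequency through the typed SAL wrapper. */
static long report_platform_freq (void)
{
	unsigned long ticks_per_second, drift;
	long status;

	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &ticks_per_second, &drift);
	if (status != 0)
		printk("SAL_FREQ_BASE failed: %s\n", ia64_sal_strerror(status));
	else
		printk("platform clock: %lu ticks/sec\n", ticks_per_second);
	return status;
}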
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index c42aff9ad..f829713bf 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -39,7 +39,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
-extern inline void
+static inline void
sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
@@ -68,7 +68,7 @@ extern spinlock_t semaphore_wake_lock;
* Atomically decrement the semaphore's count. If it goes negative,
* block the calling thread in the TASK_UNINTERRUPTIBLE state.
*/
-extern inline void
+static inline void
down (struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
@@ -82,7 +82,7 @@ down (struct semaphore *sem)
* Atomically decrement the semaphore's count. If it goes negative,
* block the calling thread in the TASK_INTERRUPTIBLE state.
*/
-extern inline int
+static inline int
down_interruptible (struct semaphore * sem)
{
int ret = 0;
@@ -95,7 +95,7 @@ down_interruptible (struct semaphore * sem)
return ret;
}
-extern inline int
+static inline int
down_trylock (struct semaphore *sem)
{
int ret = 0;
@@ -108,7 +108,7 @@ down_trylock (struct semaphore *sem)
return ret;
}
-extern inline void
+static inline void
up (struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
@@ -181,7 +181,7 @@ extern void __down_read_failed (struct rw_semaphore *sem, long count);
extern void __down_write_failed (struct rw_semaphore *sem, long count);
extern void __rwsem_wake (struct rw_semaphore *sem, long count);
-extern inline void
+static inline void
init_rwsem (struct rw_semaphore *sem)
{
sem->count = RW_LOCK_BIAS;
@@ -196,7 +196,7 @@ init_rwsem (struct rw_semaphore *sem)
#endif
}
-extern inline void
+static inline void
down_read (struct rw_semaphore *sem)
{
long count;
@@ -218,7 +218,7 @@ down_read (struct rw_semaphore *sem)
#endif
}
-extern inline void
+static inline void
down_write (struct rw_semaphore *sem)
{
long old_count, new_count;
@@ -252,7 +252,7 @@ down_write (struct rw_semaphore *sem)
* case is when there was a writer waiting, and we've
* bumped the count to 0: we must wake the writer up.
*/
-extern inline void
+static inline void
__up_read (struct rw_semaphore *sem)
{
long count;
@@ -271,7 +271,7 @@ __up_read (struct rw_semaphore *sem)
* Releasing the writer is easy -- just release it and
* wake up any sleepers.
*/
-extern inline void
+static inline void
__up_write (struct rw_semaphore *sem)
{
long old_count, new_count;
@@ -290,7 +290,7 @@ __up_write (struct rw_semaphore *sem)
__rwsem_wake(sem, new_count);
}
-extern inline void
+static inline void
up_read (struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
@@ -303,7 +303,7 @@ up_read (struct rw_semaphore *sem)
__up_read(sem);
}
-extern inline void
+static inline void
up_write (struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
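
Usage of the semaphore primitives converted above is unchanged; a minimal sketch of a driver serializing writers with the mutex flavor (names hypothetical):

static DECLARE_MUTEX(my_dev_sem);	/* count starts at 1 */

static int my_dev_write (const char *buf, int len)
{
	/* Sleeps in TASK_INTERRUPTIBLE; returns nonzero if a signal
	 * interrupted the wait before we got the semaphore. */
	if (down_interruptible(&my_dev_sem))
		return -ERESTARTSYS;
	/* ... update shared device state ... */
	up(&my_dev_sem);
	return len;
}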
diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h
index a54312e12..dfafe187d 100644
--- a/include/asm-ia64/siginfo.h
+++ b/include/asm-ia64/siginfo.h
@@ -235,7 +235,8 @@ typedef struct sigevent {
#ifdef __KERNEL__
#include <linux/string.h>
-extern inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
+static inline void
+copy_siginfo (siginfo_t *to, siginfo_t *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 0788865fc..156a1fbf2 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -25,54 +25,71 @@
#define smp_processor_id() (current->processor)
-struct smp_boot_data {
+extern struct smp_boot_data {
int cpu_count;
- int cpu_map[NR_CPUS];
-};
+ int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
extern unsigned long cpu_present_map;
extern unsigned long cpu_online_map;
extern unsigned long ipi_base_addr;
extern int bootstrap_processor;
-extern volatile int __cpu_number_map[NR_CPUS];
-extern volatile int __cpu_logical_map[NR_CPUS];
+extern volatile int __cpu_physical_id[NR_CPUS];
extern unsigned char smp_int_redirect;
extern char no_int_routing;
-
-#define cpu_number_map(i) __cpu_number_map[i]
-#define cpu_logical_map(i) __cpu_logical_map[i]
+extern int smp_num_cpus;
+
+#define cpu_physical_id(i) __cpu_physical_id[i]
+#define cpu_number_map(i) (i)
+#define cpu_logical_map(i) (i)
extern unsigned long ap_wakeup_vector;
/*
+ * Function to map hard smp processor id to logical id. Slow, so
+ * don't use this in performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+ int i;
+
+ for (i=0; i<smp_num_cpus; i++) {
+ if (cpu_physical_id(i) == cpuid)
+ break;
+ }
+ return i;
+}
+
+/*
* XTP control functions:
* min_xtp : route all interrupts to this CPU
* normal_xtp: nominal XTP value
* max_xtp : never deliver interrupts to this CPU.
*/
-extern __inline void
+static inline void
min_xtp(void)
{
if (smp_int_redirect & SMP_IRQ_REDIRECTION)
writeb(0x00, ipi_base_addr | XTP_OFFSET); /* XTP to min */
}
-extern __inline void
+static inline void
normal_xtp(void)
{
if (smp_int_redirect & SMP_IRQ_REDIRECTION)
writeb(0x08, ipi_base_addr | XTP_OFFSET); /* XTP normal */
}
-extern __inline void
+static inline void
max_xtp(void)
{
if (smp_int_redirect & SMP_IRQ_REDIRECTION)
writeb(0x0f, ipi_base_addr | XTP_OFFSET); /* Set XTP to max */
}
-extern __inline__ unsigned int
+static inline unsigned int
hard_smp_processor_id(void)
{
struct {
@@ -84,13 +101,7 @@ hard_smp_processor_id(void)
__asm__ ("mov %0=cr.lid" : "=r" (lid));
-#ifdef LARGE_CPU_ID_OK
- return lid.eid << 8 | lid.id;
-#else
- if (((lid.id << 8) | lid.eid) > NR_CPUS)
- printk("WARNING: SMP ID %d > NR_CPUS\n", (lid.id << 8) | lid.eid);
- return lid.id;
-#endif
+ return lid.id << 8 | lid.eid;
}
#define NO_PROC_ID (-1)
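
With the remapping above, logical CPU numbers are dense and cpu_number_map()/cpu_logical_map() become identities; only the physical LID needs a (slow, linear) reverse lookup. A sketch of the intended use, together with the XTP helpers (callers hypothetical):

/* Recover this CPU's logical number from its LID.  The new encoding
 * returned by hard_smp_processor_id() is (id << 8 | eid). */
static int my_logical_id (void)
{
	return cpu_logical_id(hard_smp_processor_id());
}

/* Steer external interrupts away from this CPU around a
 * timing-critical section, then restore the nominal XTP value. */
static void quiet_section (void)
{
	max_xtp();
	/* ... timing-critical work ... */
	normal_xtp();
}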
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 24b85b4d6..eb421385c 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -29,48 +29,42 @@ typedef struct {
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
- *
- * XXX Fix me: instead of preserving ar.pfs, we should just mark it
- * XXX as "clobbered". Unfortunately, the Mar 2000 release of the compiler
- * XXX doesn't let us do that. The August release fixes that.
*/
-#define spin_lock(x) \
-{ \
+#define spin_lock(x) \
+{ \
+ register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
+ \
+ __asm__ __volatile__ ( \
+ "mov r30=1\n" \
+ "mov ar.ccv=r0\n" \
+ ";;\n" \
+ IA64_SEMFIX"cmpxchg1.acq r30=[%0],r30,ar.ccv\n" \
+ ";;\n" \
+ "cmp.ne p15,p0=r30,r0\n" \
+ "(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
+ ";;\n" \
+ "1:\n" /* force a new bundle */ \
+ :: "r"(addr) \
+ : "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
+}
+
+#define spin_trylock(x) \
+({ \
register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
- long saved_pfs; \
+ register long result; \
\
__asm__ __volatile__ ( \
"mov r30=1\n" \
"mov ar.ccv=r0\n" \
";;\n" \
- IA64_SEMFIX"cmpxchg1.acq r30=[%1],r30,ar.ccv\n" \
- ";;\n" \
- "cmp.ne p15,p0=r30,r0\n" \
- "mov %0=ar.pfs\n" \
- "(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
- ";;\n" \
- "1: (p15) mov ar.pfs=%0;;\n" /* force a new bundle */ \
- : "=&r"(saved_pfs) : "r"(addr) \
- : "p15", "r28", "r29", "r30", "memory"); \
-}
-
-#define spin_trylock(x) \
-({ \
- register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
- register long result; \
- \
- __asm__ __volatile__ ( \
- "mov r30=1\n" \
- "mov ar.ccv=r0\n" \
- ";;\n" \
- IA64_SEMFIX"cmpxchg1.acq %0=[%1],r30,ar.ccv\n" \
- : "=r"(result) : "r"(addr) : "r30", "memory"); \
- (result == 0); \
+ IA64_SEMFIX"cmpxchg1.acq %0=[%1],r30,ar.ccv\n" \
+ : "=r"(result) : "r"(addr) : "ar.ccv", "r30", "memory"); \
+ (result == 0); \
})
#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0;})
-#define spin_unlock_wait(x) ({ while ((x)->lock); })
+#define spin_unlock(x) do {((spinlock_t *) x)->lock = 0;} while (0)
+#define spin_unlock_wait(x) do {} while ((x)->lock)
#else /* !NEW_LOCK */
@@ -91,21 +85,21 @@ typedef struct {
"mov r29 = 1\n" \
";;\n" \
"1:\n" \
- "ld4 r2 = %0\n" \
+ "ld4 r2 = [%0]\n" \
";;\n" \
"cmp4.eq p0,p7 = r0,r2\n" \
"(p7) br.cond.spnt.few 1b \n" \
- IA64_SEMFIX"cmpxchg4.acq r2 = %0, r29, ar.ccv\n" \
+ IA64_SEMFIX"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n" \
";;\n" \
"cmp4.eq p0,p7 = r0, r2\n" \
"(p7) br.cond.spnt.few 1b\n" \
";;\n" \
- :: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory")
+ :: "r"(&(x)->lock) : "r2", "r29", "memory")
#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0; barrier();})
+#define spin_unlock(x) do {((spinlock_t *) x)->lock = 0; barrier(); } while (0)
#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
+#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#endif /* !NEW_LOCK */
@@ -115,44 +109,54 @@ typedef struct {
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
-#define read_lock(rw) \
-do { \
- int tmp = 0; \
- __asm__ __volatile__ ("1:\t"IA64_SEMFIX"fetchadd4.acq %0 = %1, 1\n" \
- ";;\n" \
- "tbit.nz p6,p0 = %0, 31\n" \
- "(p6) br.cond.sptk.few 2f\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\t"IA64_SEMFIX"fetchadd4.rel %0 = %1, -1\n" \
- ";;\n" \
- "3:\tld4.acq %0 = %1\n" \
- ";;\n" \
- "tbit.nz p6,p0 = %0, 31\n" \
- "(p6) br.cond.sptk.few 3b\n" \
- "br.cond.sptk.few 1b\n" \
- ";;\n" \
- ".previous\n" \
- : "=r" (tmp), "=m" (__atomic_fool_gcc(rw)) \
- :: "memory"); \
+#define read_lock(rw) \
+do { \
+ int tmp = 0; \
+ __asm__ __volatile__ ("1:\t"IA64_SEMFIX"fetchadd4.acq %0 = [%1], 1\n" \
+ ";;\n" \
+ "tbit.nz p6,p0 = %0, 31\n" \
+ "(p6) br.cond.sptk.few 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\t"IA64_SEMFIX"fetchadd4.rel %0 = [%1], -1\n" \
+ ";;\n" \
+ "3:\tld4.acq %0 = [%1]\n" \
+ ";;\n" \
+ "tbit.nz p6,p0 = %0, 31\n" \
+ "(p6) br.cond.sptk.few 3b\n" \
+ "br.cond.sptk.few 1b\n" \
+ ";;\n" \
+ ".previous\n" \
+ : "=&r" (tmp) \
+ : "r" (rw): "memory"); \
} while(0)
-#define read_unlock(rw) \
-do { \
- int tmp = 0; \
- __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0 = %1, -1\n" \
- : "=r" (tmp) \
- : "m" (__atomic_fool_gcc(rw)) \
- : "memory"); \
+#define read_unlock(rw) \
+do { \
+ int tmp = 0; \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0 = [%1], -1\n" \
+ : "=r" (tmp) \
+ : "r" (rw) \
+ : "memory"); \
} while(0)
-#define write_lock(rw) \
-do { \
- do { \
- while ((rw)->write_lock); \
- } while (test_and_set_bit(31, (rw))); \
- while ((rw)->read_counter); \
- barrier(); \
-} while (0)
+#define write_lock(rw) \
+do { \
+ __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "movl r29 = 0x80000000\n" \
+ ";;\n" \
+ "1:\n" \
+ "ld4 r2 = [%0]\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0,r2\n" \
+ "(p7) br.cond.spnt.few 1b \n" \
+ IA64_SEMFIX"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0, r2\n" \
+ "(p7) br.cond.spnt.few 1b\n" \
+ ";;\n" \
+ :: "r"(rw) : "r2", "r29", "memory"); \
+} while(0)
/*
 * clear_bit() has "acq" semantics; we really need "rel" semantics,
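
The spin_lock() comment near the top of this file describes test-and-test-and-set: spin on plain loads while the lock is held and attempt the atomic cmpxchg only when it looks free, so a contended lock does not keep pulling the cache line in exclusive state. The same shape in portable C, using the spin_is_locked() and cmpxchg_acq() helpers from this header (a sketch, not the kernel's implementation):

static void my_spin_lock (spinlock_t *lock)
{
	for (;;) {
		while (spin_is_locked(lock))
			;	/* read-only spin: no write traffic */
		if (cmpxchg_acq(&lock->lock, 0, 1) == 0)
			return;	/* we saw 0 and installed 1 atomically */
	}
}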
diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h
index 56bd64327..c17fc8de4 100644
--- a/include/asm-ia64/string.h
+++ b/include/asm-ia64/string.h
@@ -9,6 +9,9 @@
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/config.h> /* remove this once we remove the A-step workaround... */
+#ifndef CONFIG_ITANIUM_ASTEP_SPECIFIC
+
#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
@@ -18,4 +21,6 @@ extern __kernel_size_t strlen (const char *);
extern void *memset (void *, int, __kernel_size_t);
extern void *memcpy (void *, const void *, __kernel_size_t);
+#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+
#endif /* _ASM_IA64_STRING_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 25438c18e..4bff3ff94 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -38,6 +38,7 @@
#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
#include <linux/types.h>
struct pci_vector_struct {
@@ -67,7 +68,7 @@ extern struct ia64_boot_param {
__u64 initrd_size;
} ia64_boot_param;
-extern inline void
+static inline void
ia64_insn_group_barrier (void)
{
__asm__ __volatile__ (";;" ::: "memory");
@@ -99,6 +100,16 @@ ia64_insn_group_barrier (void)
#define rmb() mb()
#define wmb() mb()
+#ifdef CONFIG_SMP
+# define smp_mb() mb()
+# define smp_rmb() rmb()
+# define smp_wmb() wmb()
+#else
+# define smp_mb() barrier()
+# define smp_rmb() barrier()
+# define smp_wmb() barrier()
+#endif
+
/*
* XXX check on these---I suspect what Linus really wants here is
* acquire vs release semantics but we can't discuss this stuff with
@@ -240,15 +251,13 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \
switch (sz) { \
case 4: \
- __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0=%1,%3" \
- : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
- : "m" (__atomic_fool_gcc(v)), "i"(n)); \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0=[%1],%2" \
+ : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \
\
case 8: \
- __asm__ __volatile__ (IA64_SEMFIX"fetchadd8.rel %0=%1,%3" \
- : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
- : "m" (__atomic_fool_gcc(v)), "i"(n)); \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd8.rel %0=[%1],%2" \
+ : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \
\
default: \
@@ -289,23 +298,23 @@ __xchg (unsigned long x, volatile void *ptr, int size)
switch (size) {
case 1:
- __asm__ __volatile (IA64_SEMFIX"xchg1 %0=%1,%2" : "=r" (result)
- : "m" (*(char *) ptr), "r" (x) : "memory");
+ __asm__ __volatile (IA64_SEMFIX"xchg1 %0=[%1],%2" : "=r" (result)
+ : "r" (ptr), "r" (x) : "memory");
return result;
case 2:
- __asm__ __volatile (IA64_SEMFIX"xchg2 %0=%1,%2" : "=r" (result)
- : "m" (*(short *) ptr), "r" (x) : "memory");
+ __asm__ __volatile (IA64_SEMFIX"xchg2 %0=[%1],%2" : "=r" (result)
+ : "r" (ptr), "r" (x) : "memory");
return result;
case 4:
- __asm__ __volatile (IA64_SEMFIX"xchg4 %0=%1,%2" : "=r" (result)
- : "m" (*(int *) ptr), "r" (x) : "memory");
+ __asm__ __volatile (IA64_SEMFIX"xchg4 %0=[%1],%2" : "=r" (result)
+ : "r" (ptr), "r" (x) : "memory");
return result;
case 8:
- __asm__ __volatile (IA64_SEMFIX"xchg8 %0=%1,%2" : "=r" (result)
- : "m" (*(long *) ptr), "r" (x) : "memory");
+ __asm__ __volatile (IA64_SEMFIX"xchg8 %0=[%1],%2" : "=r" (result)
+ : "r" (ptr), "r" (x) : "memory");
return result;
}
__xchg_called_with_bad_pointer();
@@ -329,9 +338,6 @@ __xchg (unsigned long x, volatile void *ptr, int size)
*/
extern long __cmpxchg_called_with_bad_pointer(void);
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) (*(struct __xchg_dummy *)(x))
-
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
__typeof__(ptr) _p_ = (ptr); \
@@ -348,27 +354,23 @@ struct __xchg_dummy { unsigned long a[100]; };
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \
case 1: \
- __asm__ __volatile__ (IA64_SEMFIX"cmpxchg1."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg1."sem" %0=[%1],%2,ar.ccv" \
+ : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \
\
case 2: \
- __asm__ __volatile__ (IA64_SEMFIX"cmpxchg2."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg2."sem" %0=[%1],%2,ar.ccv" \
+ : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \
\
case 4: \
- __asm__ __volatile__ (IA64_SEMFIX"cmpxchg4."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
+ : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \
\
case 8: \
- __asm__ __volatile__ (IA64_SEMFIX"cmpxchg8."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg8."sem" %0=[%1],%2,ar.ccv" \
+ : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \
\
default: \
@@ -433,33 +435,31 @@ extern void ia64_load_extra (struct task_struct *task);
if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \
|| IS_IA32_PROCESS(ia64_task_regs(next))) \
ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
(last) = ia64_switch_to((next)); \
} while (0)
#ifdef CONFIG_SMP
/*
* In the SMP case, we save the fph state when context-switching
- * away from a thread that owned and modified fph. This way, when
- * the thread gets scheduled on another CPU, the CPU can pick up the
- * state frm task->thread.fph, avoiding the complication of having
- * to fetch the latest fph state from another CPU. If the thread
- * happens to be rescheduled on the same CPU later on and nobody
- * else has touched the FPU in the meantime, the thread will fault
- * upon the first access to fph but since the state in fph is still
- * valid, no other overheads are incurred. In other words, CPU
- * affinity is a Good Thing.
+ * away from a thread that modified fph. This way, when the thread
+ * gets scheduled on another CPU, the CPU can pick up the state from
+ * task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.
*/
-# define switch_to(prev,next,last) do { \
- if (ia64_get_fpu_owner() == (prev) && ia64_psr(ia64_task_regs(prev))->mfh) { \
- ia64_psr(ia64_task_regs(prev))->mfh = 0; \
- (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
- __ia64_save_fpu((prev)->thread.fph); \
- } \
- __switch_to(prev,next,last); \
+# define switch_to(prev,next,last) do { \
+ if (ia64_psr(ia64_task_regs(prev))->mfh) { \
+ ia64_psr(ia64_task_regs(prev))->mfh = 0; \
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
+ __ia64_save_fpu((prev)->thread.fph); \
+ } \
+ ia64_psr(ia64_task_regs(prev))->dfh = 1; \
+ __switch_to(prev,next,last); \
} while (0)
#else
-# define switch_to(prev,next,last) __switch_to(prev,next,last)
+# define switch_to(prev,next,last) do { \
+ ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
+ __switch_to(prev,next,last); \
+} while (0)
#endif
#endif /* __KERNEL__ */
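
The operand rewrite above (register-indirect [%1] instead of "m" operands) does not change how the cmpxchg machinery is used: read the old value, compute a new one, and retry when another CPU wins the race. A sketch with cmpxchg_acq(), assumed to be the acquire-flavored wrapper this header builds on ia64_cmpxchg():

/* Lock-free add: retry until the word is unchanged between our
 * read and our compare-and-exchange. */
static void lockfree_add (volatile unsigned long *p, unsigned long delta)
{
	unsigned long old;

	do {
		old = *p;
	} while (cmpxchg_acq(p, old, old + delta) != old);
}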
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
index 319784a5b..6e8aef3df 100644
--- a/include/asm-ia64/uaccess.h
+++ b/include/asm-ia64/uaccess.h
@@ -61,7 +61,7 @@
#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg)
#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
-extern inline int
+static inline int
verify_area (int type, const void *addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
@@ -126,7 +126,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_64(addr) \
- __asm__ ("\n1:\tld8 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ __asm__ ("\n1:\tld8 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -136,7 +136,7 @@ struct __large_struct { unsigned long buf[100]; };
: "m"(__m(addr)), "1"(__gu_err));
#define __get_user_32(addr) \
- __asm__ ("\n1:\tld4 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ __asm__ ("\n1:\tld4 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -146,7 +146,7 @@ struct __large_struct { unsigned long buf[100]; };
: "m"(__m(addr)), "1"(__gu_err));
#define __get_user_16(addr) \
- __asm__ ("\n1:\tld2 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ __asm__ ("\n1:\tld2 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -156,7 +156,7 @@ struct __large_struct { unsigned long buf[100]; };
: "m"(__m(addr)), "1"(__gu_err));
#define __get_user_8(addr) \
- __asm__ ("\n1:\tld1 %0=%2\t// %0 and %1 get overwritten by exception handler\n" \
+ __asm__ ("\n1:\tld1 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -205,7 +205,7 @@ extern void __put_user_unknown (void);
*/
#define __put_user_64(x,addr) \
__asm__ __volatile__ ( \
- "\n1:\tst8 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "\n1:\tst8 %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -216,7 +216,7 @@ extern void __put_user_unknown (void);
#define __put_user_32(x,addr) \
__asm__ __volatile__ ( \
- "\n1:\tst4 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "\n1:\tst4 %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -227,7 +227,7 @@ extern void __put_user_unknown (void);
#define __put_user_16(x,addr) \
__asm__ __volatile__ ( \
- "\n1:\tst2 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "\n1:\tst2 %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
@@ -238,7 +238,7 @@ extern void __put_user_unknown (void);
#define __put_user_8(x,addr) \
__asm__ __volatile__ ( \
- "\n1:\tst1 %1=%r2\t// %0 gets overwritten by exception handler\n" \
+ "\n1:\tst1 %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t\tdata4 @gprel(1b)\n" \
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
index 6dc82c87e..c9d6dca0e 100644
--- a/include/asm-ia64/unaligned.h
+++ b/include/asm-ia64/unaligned.h
@@ -22,42 +22,42 @@ struct __una_u64 { __u64 x __attribute__((packed)); };
struct __una_u32 { __u32 x __attribute__((packed)); };
struct __una_u16 { __u16 x __attribute__((packed)); };
-extern inline unsigned long
+static inline unsigned long
__uldq (const unsigned long * r11)
{
const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
return ptr->x;
}
-extern inline unsigned long
+static inline unsigned long
__uldl (const unsigned int * r11)
{
const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
return ptr->x;
}
-extern inline unsigned long
+static inline unsigned long
__uldw (const unsigned short * r11)
{
const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
return ptr->x;
}
-extern inline void
+static inline void
__ustq (unsigned long r5, unsigned long * r11)
{
struct __una_u64 *ptr = (struct __una_u64 *) r11;
ptr->x = r5;
}
-extern inline void
+static inline void
__ustl (unsigned long r5, unsigned int * r11)
{
struct __una_u32 *ptr = (struct __una_u32 *) r11;
ptr->x = r5;
}
-extern inline void
+static inline void
__ustw (unsigned long r5, unsigned short * r11)
{
struct __una_u16 *ptr = (struct __una_u16 *) r11;
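
The helpers above rely on a single GCC trick: a member marked __attribute__((packed)) gives the enclosing struct alignment 1, so a dereference through it makes the compiler emit alignment-safe code rather than a potentially faulting wide load. A stand-alone illustration:

#include <stdio.h>
#include <string.h>

struct una_u32 { unsigned int x __attribute__((packed)); };

/* Load 32 bits from an address with no alignment guarantee. */
static unsigned int load_u32_unaligned (const void *p)
{
	return ((const struct una_u32 *) p)->x;
}

int main (void)
{
	unsigned char buf[8] = { 0 };
	unsigned int v = 0x12345678;

	memcpy(buf + 1, &v, sizeof v);	/* deliberately misaligned */
	printf("%#x\n", load_u32_unaligned(buf + 1));
	return 0;
}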
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 456376e1b..8b13b8f95 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -93,7 +93,7 @@
#define __NR_setpriority 1102
#define __NR_statfs 1103
#define __NR_fstatfs 1104
-#define __NR_ioperm 1105
+/* unused; used to be __NR_ioperm */
#define __NR_semget 1106
#define __NR_semop 1107
#define __NR_semctl 1108
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
index 60bb46cf9..95285ad99 100644
--- a/include/asm-ia64/unwind.h
+++ b/include/asm-ia64/unwind.h
@@ -52,36 +52,38 @@ struct unw_frame_info {
unsigned int flags;
short hint;
short prev_script;
- unsigned long bsp;
- unsigned long sp; /* stack pointer */
- unsigned long psp; /* previous sp */
- unsigned long ip; /* instruction pointer */
- unsigned long pr_val; /* current predicates */
- unsigned long *cfm;
+
+ /* current frame info: */
+ unsigned long bsp; /* backing store pointer value */
+ unsigned long sp; /* stack pointer value */
+ unsigned long psp; /* previous sp value */
+ unsigned long ip; /* instruction pointer value */
+ unsigned long pr; /* current predicate values */
+ unsigned long *cfm_loc; /* cfm save location (or NULL) */
struct task_struct *task;
struct switch_stack *sw;
/* preserved state: */
- unsigned long *pbsp; /* previous bsp */
- unsigned long *bspstore;
- unsigned long *pfs;
- unsigned long *rnat;
- unsigned long *rp;
- unsigned long *pri_unat;
- unsigned long *unat;
- unsigned long *pr;
- unsigned long *lc;
- unsigned long *fpsr;
+ unsigned long *bsp_loc; /* previous bsp save location */
+ unsigned long *bspstore_loc;
+ unsigned long *pfs_loc;
+ unsigned long *rnat_loc;
+ unsigned long *rp_loc;
+ unsigned long *pri_unat_loc;
+ unsigned long *unat_loc;
+ unsigned long *pr_loc;
+ unsigned long *lc_loc;
+ unsigned long *fpsr_loc;
struct unw_ireg {
unsigned long *loc;
struct unw_ireg_nat {
- int type : 3; /* enum unw_nat_type */
- signed int off; /* NaT word is at loc+nat.off */
+ long type : 3; /* enum unw_nat_type */
+ signed long off : 61; /* NaT word is at loc+nat.off */
} nat;
} r4, r5, r6, r7;
- unsigned long *b1, *b2, *b3, *b4, *b5;
- struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16];
+ unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
+ struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
};
/*
@@ -140,19 +142,56 @@ extern int unw_unwind (struct unw_frame_info *info);
*/
extern int unw_unwind_to_user (struct unw_frame_info *info);
-#define unw_get_ip(info,vp) ({*(vp) = (info)->ip; 0;})
-#define unw_get_sp(info,vp) ({*(vp) = (unsigned long) (info)->sp; 0;})
-#define unw_get_psp(info,vp) ({*(vp) = (unsigned long) (info)->psp; 0;})
-#define unw_get_bsp(info,vp) ({*(vp) = (unsigned long) (info)->bsp; 0;})
-#define unw_get_cfm(info,vp) ({*(vp) = *(info)->cfm; 0;})
-#define unw_set_cfm(info,val) ({*(info)->cfm = (val); 0;})
+#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
+
+static inline unsigned long
+unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->ip;
+ return 0;
+}
+
+static inline unsigned long
+unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->sp;
+ return 0;
+}
+
+static inline unsigned long
+unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->psp;
+ return 0;
+}
+
+static inline unsigned long
+unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->bsp;
+ return 0;
+}
+
+static inline unsigned long
+unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = *(info)->cfm_loc;
+ return 0;
+}
+
+static inline unsigned long
+unw_set_cfm (struct unw_frame_info *info, unsigned long val)
+{
+ *(info)->cfm_loc = val;
+ return 0;
+}
static inline int
unw_get_rp (struct unw_frame_info *info, unsigned long *val)
{
- if (!info->rp)
+ if (!info->rp_loc)
return -1;
- *val = *info->rp;
+ *val = *info->rp_loc;
return 0;
}
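
Taken together, the *_loc renaming makes the save-location semantics explicit while the accessors keep their old calling convention. A sketch of a frame walk over a blocked task; unw_init_from_blocked_task() is assumed to be the initializer declared elsewhere in this header:

/* Print a crude backtrace of a blocked task. */
static void show_trace_of (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (!ip)
			break;
		printk(" [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}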