summaryrefslogtreecommitdiffstats
path: root/include/asm-ia64
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-08-28 22:00:09 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-08-28 22:00:09 +0000
commit1a1d77dd589de5a567fa95e36aa6999c704ceca4 (patch)
tree141e31f89f18b9fe0831f31852e0435ceaccafc5 /include/asm-ia64
parentfb9c690a18b3d66925a65b17441c37fa14d4370b (diff)
Merge with 2.4.0-test7.
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--include/asm-ia64/acpi-ext.h2
-rw-r--r--include/asm-ia64/asmmacro.h2
-rw-r--r--include/asm-ia64/efi.h1
-rw-r--r--include/asm-ia64/ia32.h19
-rw-r--r--include/asm-ia64/io.h5
-rw-r--r--include/asm-ia64/machvec.h48
-rw-r--r--include/asm-ia64/machvec_dig.h2
-rw-r--r--include/asm-ia64/machvec_init.h6
-rw-r--r--include/asm-ia64/mmu_context.h51
-rw-r--r--include/asm-ia64/offsets.h6
-rw-r--r--include/asm-ia64/page.h9
-rw-r--r--include/asm-ia64/pal.h21
-rw-r--r--include/asm-ia64/param.h14
-rw-r--r--include/asm-ia64/pci.h71
-rw-r--r--include/asm-ia64/pgtable.h12
-rw-r--r--include/asm-ia64/processor.h39
-rw-r--r--include/asm-ia64/scatterlist.h1
-rw-r--r--include/asm-ia64/siginfo.h5
-rw-r--r--include/asm-ia64/smp.h4
-rw-r--r--include/asm-ia64/spinlock.h111
-rw-r--r--include/asm-ia64/system.h129
-rw-r--r--include/asm-ia64/unistd.h1
22 files changed, 329 insertions, 230 deletions
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
index c3999f148..24f9822e6 100644
--- a/include/asm-ia64/acpi-ext.h
+++ b/include/asm-ia64/acpi-ext.h
@@ -69,7 +69,7 @@ typedef struct {
u8 eid;
} acpi_entry_lsapic_t;
-typedef struct {
+typedef struct acpi_entry_iosapic {
u8 type;
u8 length;
u16 reserved;
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h
index 4991bb26e..614ca7e8e 100644
--- a/include/asm-ia64/asmmacro.h
+++ b/include/asm-ia64/asmmacro.h
@@ -23,7 +23,7 @@
#endif
#define ENTRY(name) \
- .align 16; \
+ .align 32; \
.proc name; \
name:
diff --git a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h
index 543132486..5d311d32e 100644
--- a/include/asm-ia64/efi.h
+++ b/include/asm-ia64/efi.h
@@ -226,6 +226,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
}
extern void efi_init (void);
+extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timeval *tv);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index 80cc76755..c6732837c 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -40,7 +40,6 @@ struct flock32 {
__kernel_off_t32 l_start;
__kernel_off_t32 l_len;
__kernel_pid_t32 l_pid;
- short __unused;
};
@@ -105,11 +104,21 @@ typedef struct {
} sigset32_t;
struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- sigset32_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ sigset32_t sa_mask; /* A 32 bit mask */
+};
+
+typedef unsigned int old_sigset32_t; /* at least 32 bits */
+
+struct old_sigaction32 {
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ old_sigset32_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
};
typedef struct sigaltstack_ia32 {
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index a371f1361..0740af45f 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -47,6 +47,10 @@ phys_to_virt(unsigned long address)
return (void *) (address + PAGE_OFFSET);
}
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
#define bus_to_virt phys_to_virt
#define virt_to_bus virt_to_phys
@@ -315,6 +319,7 @@ __writeq (unsigned long val, void *addr)
#define writeq(v,a) __writeq((v), (void *) (a))
#define __raw_writeb writeb
#define __raw_writew writew
+#define __raw_writel writel
#define __raw_writeq writeq
#ifndef inb_p
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 890224329..3ac473f14 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -4,8 +4,8 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) Vijay Chander <vijay@engr.sgi.com>
- * Copyright (C) 1999 Hewlett-Packard Co.
- * Copyright (C) David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co.
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef _ASM_IA64_MACHVEC_H
#define _ASM_IA64_MACHVEC_H
@@ -21,6 +21,7 @@ struct pt_regs;
struct task_struct;
struct timeval;
struct vm_area_struct;
+struct acpi_entry_iosapic;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_irq_init_t (void);
@@ -30,15 +31,33 @@ typedef void ia64_mv_mca_init_t (void);
typedef void ia64_mv_mca_handler_t (void);
typedef void ia64_mv_cmci_handler_t (int, void *, struct pt_regs *);
typedef void ia64_mv_log_print_t (void);
+typedef void ia64_mv_register_iosapic_t (struct acpi_entry_iosapic *);
+
+extern void machvec_noop (void);
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_SGI_SN1_SIM)
-# include <asm/machvec_sgi_sn1_SIM.h>
+# include <asm/machvec_sn1.h>
# elif defined (CONFIG_IA64_GENERIC)
+# ifdef MACHVEC_PLATFORM_HEADER
+# include MACHVEC_PLATFORM_HEADER
+# else
+# define platform_name ia64_mv.name
+# define platform_setup ia64_mv.setup
+# define platform_irq_init ia64_mv.irq_init
+# define platform_map_nr ia64_mv.map_nr
+# define platform_mca_init ia64_mv.mca_init
+# define platform_mca_handler ia64_mv.mca_handler
+# define platform_cmci_handler ia64_mv.cmci_handler
+# define platform_log_print ia64_mv.log_print
+# define platform_pci_fixup ia64_mv.pci_fixup
+# define platform_register_iosapic ia64_mv.register_iosapic
+# endif
+
struct ia64_machine_vector {
const char *name;
ia64_mv_setup_t *setup;
@@ -49,6 +68,7 @@ struct ia64_machine_vector {
ia64_mv_mca_handler_t *mca_handler;
ia64_mv_cmci_handler_t *cmci_handler;
ia64_mv_log_print_t *log_print;
+ ia64_mv_register_iosapic_t *register_iosapic;
};
#define MACHVEC_INIT(name) \
@@ -61,22 +81,12 @@ struct ia64_machine_vector {
platform_mca_init, \
platform_mca_handler, \
platform_cmci_handler, \
- platform_log_print \
+ platform_log_print, \
+ platform_register_iosapic \
}
-# ifndef MACHVEC_INHIBIT_RENAMING
-# define platform_name ia64_mv.name
-# define platform_setup ia64_mv.setup
-# define platform_irq_init ia64_mv.irq_init
-# define platform_map_nr ia64_mv.map_nr
-# define platform_mca_init ia64_mv.mca_init
-# define platform_mca_handler ia64_mv.mca_handler
-# define platform_cmci_handler ia64_mv.cmci_handler
-# define platform_log_print ia64_mv.log_print
-# endif
-
extern struct ia64_machine_vector ia64_mv;
-extern void machvec_noop (void);
+extern void machvec_init (const char *name);
# else
# error Unknown configuration. Update asm-ia64/machvec.h.
@@ -104,5 +114,11 @@ extern void machvec_noop (void);
#ifndef platform_log_print
# define platform_log_print ((ia64_mv_log_print_t *) machvec_noop)
#endif
+#ifndef platform_pci_fixup
+# define platform_pci_fixup ((ia64_mv_pci_fixup_t *) machvec_noop)
+#endif
+#ifndef platform_register_iosapic
+# define platform_register_iosapic ((ia64_mv_register_iosapic_t *) machvec_noop)
+#endif
#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h
index a63e586c8..dedf37cdd 100644
--- a/include/asm-ia64/machvec_dig.h
+++ b/include/asm-ia64/machvec_dig.h
@@ -5,6 +5,7 @@ extern ia64_mv_setup_t dig_setup;
extern ia64_mv_irq_init_t dig_irq_init;
extern ia64_mv_pci_fixup_t dig_pci_fixup;
extern ia64_mv_map_nr_t map_nr_dense;
+extern ia64_mv_register_iosapic_t dig_register_iosapic;
/*
* This stuff has dual use!
@@ -18,5 +19,6 @@ extern ia64_mv_map_nr_t map_nr_dense;
#define platform_irq_init dig_irq_init
#define platform_pci_fixup dig_pci_fixup
#define platform_map_nr map_nr_dense
+#define platform_register_iosapic dig_register_iosapic
#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h
index 60859418a..2cae5accf 100644
--- a/include/asm-ia64/machvec_init.h
+++ b/include/asm-ia64/machvec_init.h
@@ -1,4 +1,6 @@
-#define MACHVEC_INHIBIT_RENAMING
+#define __MACHVEC_HDR(n) <asm/machvec_##n##.h>
+#define __MACHVEC_EXPAND(n) __MACHVEC_HDR(n)
+#define MACHVEC_PLATFORM_HEADER __MACHVEC_EXPAND(MACHVEC_PLATFORM_NAME)
#include <asm/machvec.h>
@@ -7,3 +9,5 @@
= MACHVEC_INIT(name);
#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
+
+MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 22c2b2297..c50eacaf0 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -2,12 +2,13 @@
#define _ASM_IA64_MMU_CONTEXT_H
/*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <asm/processor.h>
@@ -26,21 +27,6 @@
* architecture manual guarantees this number to be in the range
* 18-24.
*
- * A context number has the following format:
- *
- * +--------------------+---------------------+
- * | generation number | region id |
- * +--------------------+---------------------+
- *
- * A context number of 0 is considered "invalid".
- *
- * The generation number is incremented whenever we end up having used
- * up all available region ids. At that point with flush the entire
- * TLB and reuse the first region id. The new generation number
- * ensures that when we context switch back to an old process, we do
- * not inadvertently end up using its possibly reused region id.
- * Instead, we simply allocate a new region id for that process.
- *
* Copyright (C) 1998 David Mosberger-Tang <davidm@hpl.hp.com>
*/
@@ -56,9 +42,15 @@
#define IA64_HW_CONTEXT_MASK ((1UL << IA64_HW_CONTEXT_BITS) - 1)
-extern unsigned long ia64_next_context;
+struct ia64_ctx {
+ spinlock_t lock;
+ unsigned int next; /* next context number to use */
+ unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
+};
-extern void get_new_mmu_context (struct mm_struct *mm);
+extern struct ia64_ctx ia64_ctx;
+
+extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
@@ -76,18 +68,31 @@ ia64_rid (unsigned long context, unsigned long region_addr)
}
extern inline void
+get_new_mmu_context (struct mm_struct *mm)
+{
+ spin_lock(&ia64_ctx.lock);
+ {
+ if (ia64_ctx.next >= ia64_ctx.limit)
+ wrap_mmu_context(mm);
+ mm->context = ia64_ctx.next++;
+ }
+ spin_unlock(&ia64_ctx.lock);
+
+}
+
+extern inline void
get_mmu_context (struct mm_struct *mm)
{
/* check if our ASN is of an older generation and thus invalid: */
- if (((mm->context ^ ia64_next_context) & ~IA64_HW_CONTEXT_MASK) != 0) {
+ if (mm->context == 0)
get_new_mmu_context(mm);
- }
}
-extern inline void
+extern inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
mm->context = 0;
+ return 0;
}
extern inline void
@@ -103,7 +108,7 @@ reload_context (struct mm_struct *mm)
unsigned long rid_incr = 0;
unsigned long rr0, rr1, rr2, rr3, rr4;
- rid = (mm->context & IA64_HW_CONTEXT_MASK);
+ rid = mm->context;
#ifndef CONFIG_IA64_TLB_CHECKS_REGION_NUMBER
rid <<= 3; /* make space for encoding the region number */
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
index c5c02dedf..88cad88d5 100644
--- a/include/asm-ia64/offsets.h
+++ b/include/asm-ia64/offsets.h
@@ -11,10 +11,10 @@
#define PT_PTRACED_BIT 0
#define PT_TRACESYS_BIT 1
-#define IA64_TASK_SIZE 2768 /* 0xad0 */
+#define IA64_TASK_SIZE 2864 /* 0xb30 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
-#define IA64_SIGINFO_SIZE 136 /* 0x88 */
+#define IA64_SIGINFO_SIZE 128 /* 0x80 */
#define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */
#define IA64_TASK_PTRACE_OFFSET 48 /* 0x30 */
@@ -23,7 +23,7 @@
#define IA64_TASK_PROCESSOR_OFFSET 100 /* 0x64 */
#define IA64_TASK_THREAD_OFFSET 896 /* 0x380 */
#define IA64_TASK_THREAD_KSP_OFFSET 896 /* 0x380 */
-#define IA64_TASK_THREAD_SIGMASK_OFFSET 2648 /* 0xa58 */
+#define IA64_TASK_THREAD_SIGMASK_OFFSET 2744 /* 0xab8 */
#define IA64_TASK_PID_OFFSET 188 /* 0xbc */
#define IA64_TASK_MM_OFFSET 88 /* 0x58 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index f046dad86..62881b538 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -100,13 +100,14 @@ typedef unsigned long pgprot_t;
#define MAP_NR_SN1(addr) (((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#ifdef CONFIG_IA64_GENERIC
-# define virt_to_page(kaddr) (mem_map + platform_map_nr(kaddr))
+# include <asm/machvec.h>
+# define virt_to_page(kaddr) (mem_map + platform_map_nr(kaddr))
#elif defined (CONFIG_IA64_SN_SN1_SIM)
-# define virt_to_page(kaddr) (mem_map + MAP_NR_SN1(kaddr))
+# define virt_to_page(kaddr) (mem_map + MAP_NR_SN1(kaddr))
#else
-# define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr))
+# define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr))
#endif
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
# endif /* __KERNEL__ */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 5169b3f82..d55b16253 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -18,7 +18,8 @@
* 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
* 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
* 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
- * 00/05/25 eranian Support for stack calls, and statis physical calls
+ * 00/05/25 eranian Support for stack calls, and static physical calls
+ * 00/06/18 eranian Support for stacked physical calls
*/
/*
@@ -646,10 +647,12 @@ struct ia64_pal_retval {
extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3)
#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3)
#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3)
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3)
typedef int (*ia64_pal_handler) (u64, ...);
extern ia64_pal_handler ia64_pal;
@@ -951,7 +954,7 @@ typedef union pal_power_mgmt_info_u {
/* Return information about processor's optional power management capabilities. */
extern inline s64
ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
-{
+{
struct ia64_pal_retval iprv;
PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
return iprv.status;
@@ -1370,17 +1373,17 @@ typedef union pal_itr_valid_u {
dirty_bit_valid : 1,
mem_attr_valid : 1,
reserved : 60;
- } pal_itr_valid_s;
-} pal_itr_valid_u_t;
+ } pal_tr_valid_s;
+} pal_tr_valid_u_t;
/* Read a translation register */
extern inline s64
-ia64_pal_vm_tr_read (u64 reg_num, u64 tr_type, u64 tr_buffer, pal_itr_valid_u_t *itr_valid)
-{
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_VM_TR_READ, reg_num, tr_type, tr_buffer);
- if (itr_valid)
- itr_valid->piv_val = iprv.v0;
+ PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)__pa(tr_buffer));
+ if (tr_valid)
+ tr_valid->piv_val = iprv.v0;
return iprv.status;
}
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
index bc4c19ae6..a410b8892 100644
--- a/include/asm-ia64/param.h
+++ b/include/asm-ia64/param.h
@@ -10,23 +10,13 @@
#include <linux/config.h>
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_SOFTSDV_HACKS)
/*
* Yeah, simulating stuff is slow, so let us catch some breath between
* timer interrupts...
*/
# define HZ 20
-#endif
-
-#ifdef CONFIG_IA64_DIG
-# ifdef CONFIG_IA64_SOFTSDV_HACKS
-# define HZ 20
-# else
-# define HZ 100
-# endif
-#endif
-
-#ifndef HZ
+#else
# define HZ 1024
#endif
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index 0c40b0e6b..01c038774 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -1,6 +1,14 @@
#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
/*
* Can be used to override the logic in pci_scan_bus for skipping
* already-configured bus numbers - to be used for buggy BIOSes or
@@ -11,6 +19,8 @@
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
+struct pci_dev;
+
extern inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
@@ -23,18 +33,8 @@ extern inline void pcibios_penalize_isa_irq(int irq)
/*
* Dynamic DMA mapping API.
- * IA-64 has everything mapped statically.
*/
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-struct pci_dev;
-
/*
* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices,
@@ -64,13 +64,7 @@ extern void pci_free_consistent (struct pci_dev *hwdev, size_t size,
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
-extern inline dma_addr_t
-pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- return virt_to_bus(ptr);
-}
+extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size
@@ -80,13 +74,7 @@ pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
* After this call, reads by the cpu to the buffer are guarenteed to see
* whatever the device wrote there.
*/
-extern inline void
-pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- /* Nothing to do */
-}
+extern void pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
/*
* Map a set of buffers described by scatterlist in streaming
@@ -104,26 +92,14 @@ pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int d
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-extern inline int
-pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- return nents;
-}
+extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
/*
* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-extern inline void
-pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- /* Nothing to do */
-}
+extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
/*
* Make physical memory consistent for a single
@@ -135,13 +111,7 @@ pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* next point you give the PCI dma address back to the card, the
* device again owns the buffer.
*/
-extern inline void
-pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- /* Nothing to do */
-}
+extern void pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
/*
* Make physical memory consistent for a set of streaming mode DMA
@@ -150,20 +120,15 @@ pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size,
* The same as pci_dma_sync_single but for a scatter-gather list,
* same rules and usage.
*/
-extern inline void
-pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- /* Nothing to do */
-}
+extern void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
-extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+extern inline int
+pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
return 1;
}
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 9963ebb73..fcf340ee0 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -286,7 +286,17 @@ extern pmd_t *ia64_bad_pagetable (void);
* contains the memory attribute bits, dirty bits, and various other
* bits as well.
*/
-#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+
+/*
+ * Macro to mark a page protection value as "write-combining".
+ * Note that "protection" is really a misnomer here as the protection
+ * value contains the memory attribute bits, dirty bits, and various
+ * other bits as well. Accesses through a write-combining translation
+ * bypass the caches, but do allow for consecutive writes to
+ * be combined into single (but larger) write transactions.
+ */
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
/*
* Return the region index for virtual address ADDRESS.
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 5024801ae..c37fc76b1 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -19,6 +19,7 @@
#include <asm/types.h>
#define IA64_NUM_DBG_REGS 8
+#define IA64_NUM_PM_REGS 4
/*
* TASK_SIZE really is a mis-named. It really is the maximum user
@@ -152,12 +153,13 @@
#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
-#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 2) /* don't log unaligned accesses */
-#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 3) /* generate SIGBUS on unaligned acc. */
-#define IA64_THREAD_KRBS_SYNCED (__IA64_UL(1) << 4) /* krbs synced with process vm? */
+#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
+#define IA64_THREAD_KRBS_SYNCED (__IA64_UL(1) << 5) /* krbs synced with process vm? */
#define IA64_KERNEL_DEATH (__IA64_UL(1) << 63) /* see die_if_kernel()... */
-#define IA64_THREAD_UAC_SHIFT 2
+#define IA64_THREAD_UAC_SHIFT 3
#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#ifndef __ASSEMBLY__
@@ -285,6 +287,14 @@ struct thread_struct {
struct ia64_fpreg fph[96]; /* saved/loaded on demand */
__u64 dbr[IA64_NUM_DBG_REGS];
__u64 ibr[IA64_NUM_DBG_REGS];
+#ifdef CONFIG_PERFMON
+ __u64 pmc[IA64_NUM_PM_REGS];
+ __u64 pmd[IA64_NUM_PM_REGS];
+ __u64 pmod[IA64_NUM_PM_REGS];
+# define INIT_THREAD_PM {0, }, {0, }, {0, },
+#else
+# define INIT_THREAD_PM
+#endif
__u64 map_base; /* base address for mmap() */
#ifdef CONFIG_IA32_SUPPORT
__u64 eflag; /* IA32 EFLAGS reg */
@@ -316,6 +326,7 @@ struct thread_struct {
{{{{0}}}, }, /* fph */ \
{0, }, /* dbr */ \
{0, }, /* ibr */ \
+ INIT_THREAD_PM \
0x2000000000000000 /* map_base */ \
INIT_THREAD_IA32, \
0 /* siginfo */ \
@@ -338,8 +349,12 @@ struct thread_struct {
struct mm_struct;
struct task_struct;
-/* Free all resources held by a thread. */
-extern void release_thread (struct task_struct *);
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait(). This is a no-op on IA-64.
+ */
+#define release_thread(dead_task)
/*
* This is the mechanism for creating a new kernel thread.
@@ -392,6 +407,18 @@ ia64_set_fpu_owner (struct task_struct *t)
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#ifdef CONFIG_IA32_SUPPORT
+extern void ia32_save_state (struct thread_struct *thread);
+extern void ia32_load_state (struct thread_struct *thread);
+#endif
+
+#ifdef CONFIG_PERFMON
+extern void ia64_save_pm_regs (struct thread_struct *thread);
+extern void ia64_load_pm_regs (struct thread_struct *thread);
+#endif
#define ia64_fph_enable() __asm__ __volatile__ (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable() __asm__ __volatile__ (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index 5a119b6c8..192eef92e 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -13,6 +13,7 @@ struct scatterlist {
* indirection buffer, NULL otherwise:
*/
char *alt_address;
+ char *orig_address; /* Save away the original buffer address (used by pci-dma.c) */
unsigned int length; /* buffer length */
};
diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h
index 7222fb285..a54312e12 100644
--- a/include/asm-ia64/siginfo.h
+++ b/include/asm-ia64/siginfo.h
@@ -14,12 +14,13 @@ typedef union sigval {
} sigval_t;
#define SI_MAX_SIZE 128
-#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 3)
+#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4)
typedef struct siginfo {
int si_signo;
int si_errno;
int si_code;
+ int __pad0;
union {
int _pad[SI_PAD_SIZE];
@@ -212,7 +213,7 @@ typedef struct siginfo {
#define SIGEV_THREAD 2 /* deliver via thread creation */
#define SIGEV_MAX_SIZE 64
-#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4)
typedef struct sigevent {
sigval_t sigev_value;
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 6175de538..0788865fc 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -99,5 +99,9 @@ hard_smp_processor_id(void)
extern void __init init_smp_config (void);
extern void smp_do_timer (struct pt_regs *regs);
+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
+ int retry, int wait);
+
+
#endif /* CONFIG_SMP */
#endif /* _ASM_IA64_SMP_H */
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index fedd8f8c6..24b85b4d6 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -15,8 +15,11 @@
#include <asm/bitops.h>
#include <asm/atomic.h>
+#undef NEW_LOCK
+
+#ifdef NEW_LOCK
typedef struct {
- volatile unsigned int lock;
+ volatile unsigned char lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
@@ -26,44 +29,86 @@ typedef struct {
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
+ *
+ * XXX Fix me: instead of preserving ar.pfs, we should just mark it
+ * XXX as "clobbered". Unfortunately, the Mar 2000 release of the compiler
+ * XXX doesn't let us do that. The August release fixes that.
*/
-#if 1 /* Bad code generation? */
-#define spin_lock(x) __asm__ __volatile__ ( \
- "mov ar.ccv = r0\n" \
- "mov r29 = 1\n" \
- ";;\n" \
- "1:\n" \
- "ld4 r2 = %0\n" \
- ";;\n" \
- "cmp4.eq p0,p7 = r0,r2\n" \
- "(p7) br.cond.spnt.few 1b \n" \
- "cmpxchg4.acq r2 = %0, r29, ar.ccv\n" \
- ";;\n" \
- "cmp4.eq p0,p7 = r0, r2\n" \
- "(p7) br.cond.spnt.few 1b\n" \
- ";;\n" \
- :: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory")
-
-#else
-#define spin_lock(x) \
-{ \
- spinlock_t *__x = (x); \
- \
- do { \
- while (__x->lock); \
- } while (cmpxchg_acq(&__x->lock, 0, 1)); \
+#define spin_lock(x) \
+{ \
+ register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
+ long saved_pfs; \
+ \
+ __asm__ __volatile__ ( \
+ "mov r30=1\n" \
+ "mov ar.ccv=r0\n" \
+ ";;\n" \
+ IA64_SEMFIX"cmpxchg1.acq r30=[%1],r30,ar.ccv\n" \
+ ";;\n" \
+ "cmp.ne p15,p0=r30,r0\n" \
+ "mov %0=ar.pfs\n" \
+ "(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
+ ";;\n" \
+ "1: (p15) mov ar.pfs=%0;;\n" /* force a new bundle */ \
+ : "=&r"(saved_pfs) : "r"(addr) \
+ : "p15", "r28", "r29", "r30", "memory"); \
}
-#endif
+
+#define spin_trylock(x) \
+({ \
+ register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
+ register long result; \
+ \
+ __asm__ __volatile__ ( \
+ "mov r30=1\n" \
+ "mov ar.ccv=r0\n" \
+ ";;\n" \
+ IA64_SEMFIX"cmpxchg1.acq %0=[%1],r30,ar.ccv\n" \
+ : "=r"(result) : "r"(addr) : "r30", "memory"); \
+ (result == 0); \
+})
#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0;})
+#define spin_unlock_wait(x) ({ while ((x)->lock); })
-#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0; barrier();})
+#else /* !NEW_LOCK */
-/* Streamlined !test_and_set_bit(0, (x)) */
-#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define spin_lock_init(x) ((x)->lock = 0)
+/*
+ * Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
+ * rather than a simple xchg to avoid writing the cache-line when
+ * there is contention.
+ */
+#define spin_lock(x) __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "mov r29 = 1\n" \
+ ";;\n" \
+ "1:\n" \
+ "ld4 r2 = %0\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0,r2\n" \
+ "(p7) br.cond.spnt.few 1b \n" \
+ IA64_SEMFIX"cmpxchg4.acq r2 = %0, r29, ar.ccv\n" \
+ ";;\n" \
+ "cmp4.eq p0,p7 = r0, r2\n" \
+ "(p7) br.cond.spnt.few 1b\n" \
+ ";;\n" \
+ :: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory")
+
+#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0; barrier();})
+#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
+#endif /* !NEW_LOCK */
+
typedef struct {
volatile int read_counter:31;
volatile int write_lock:1;
@@ -73,12 +118,12 @@ typedef struct {
#define read_lock(rw) \
do { \
int tmp = 0; \
- __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \
+ __asm__ __volatile__ ("1:\t"IA64_SEMFIX"fetchadd4.acq %0 = %1, 1\n" \
";;\n" \
"tbit.nz p6,p0 = %0, 31\n" \
"(p6) br.cond.sptk.few 2f\n" \
".section .text.lock,\"ax\"\n" \
- "2:\tfetchadd4.rel %0 = %1, -1\n" \
+ "2:\t"IA64_SEMFIX"fetchadd4.rel %0 = %1, -1\n" \
";;\n" \
"3:\tld4.acq %0 = %1\n" \
";;\n" \
@@ -94,7 +139,7 @@ do { \
#define read_unlock(rw) \
do { \
int tmp = 0; \
- __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0 = %1, -1\n" \
: "=r" (tmp) \
: "m" (__atomic_fool_gcc(rw)) \
: "memory"); \
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index abffefa01..25438c18e 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -27,6 +27,15 @@
#define GATE_ADDR (0xa000000000000000 + PAGE_SIZE)
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC)
+ /* Workaround for Errata 97. */
+# define IA64_SEMFIX_INSN mf;
+# define IA64_SEMFIX "mf;"
+#else
+# define IA64_SEMFIX_INSN
+# define IA64_SEMFIX ""
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -231,13 +240,13 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \
switch (sz) { \
case 4: \
- __asm__ __volatile__ ("fetchadd4.rel %0=%1,%3" \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd4.rel %0=%1,%3" \
: "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
: "m" (__atomic_fool_gcc(v)), "i"(n)); \
break; \
\
case 8: \
- __asm__ __volatile__ ("fetchadd8.rel %0=%1,%3" \
+ __asm__ __volatile__ (IA64_SEMFIX"fetchadd8.rel %0=%1,%3" \
: "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
: "m" (__atomic_fool_gcc(v)), "i"(n)); \
break; \
@@ -280,22 +289,22 @@ __xchg (unsigned long x, volatile void *ptr, int size)
switch (size) {
case 1:
- __asm__ __volatile ("xchg1 %0=%1,%2" : "=r" (result)
+ __asm__ __volatile (IA64_SEMFIX"xchg1 %0=%1,%2" : "=r" (result)
: "m" (*(char *) ptr), "r" (x) : "memory");
return result;
case 2:
- __asm__ __volatile ("xchg2 %0=%1,%2" : "=r" (result)
+ __asm__ __volatile (IA64_SEMFIX"xchg2 %0=%1,%2" : "=r" (result)
: "m" (*(short *) ptr), "r" (x) : "memory");
return result;
case 4:
- __asm__ __volatile ("xchg4 %0=%1,%2" : "=r" (result)
+ __asm__ __volatile (IA64_SEMFIX"xchg4 %0=%1,%2" : "=r" (result)
: "m" (*(int *) ptr), "r" (x) : "memory");
return result;
case 8:
- __asm__ __volatile ("xchg8 %0=%1,%2" : "=r" (result)
+ __asm__ __volatile (IA64_SEMFIX"xchg8 %0=%1,%2" : "=r" (result)
: "m" (*(long *) ptr), "r" (x) : "memory");
return result;
}
@@ -305,7 +314,6 @@ __xchg (unsigned long x, volatile void *ptr, int size)
#define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-#define tas(ptr) (xchg ((ptr), 1))
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -324,50 +332,50 @@ extern long __cmpxchg_called_with_bad_pointer(void);
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) (*(struct __xchg_dummy *)(x))
-#define ia64_cmpxchg(sem,ptr,old,new,size) \
-({ \
- __typeof__(ptr) _p_ = (ptr); \
- __typeof__(new) _n_ = (new); \
- __u64 _o_, _r_; \
- \
- switch (size) { \
- case 1: _o_ = (__u8 ) (old); break; \
- case 2: _o_ = (__u16) (old); break; \
- case 4: _o_ = (__u32) (old); break; \
- case 8: _o_ = (__u64) (old); break; \
- default: \
- } \
- __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
- switch (size) { \
- case 1: \
- __asm__ __volatile__ ("cmpxchg1."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
- break; \
- \
- case 2: \
- __asm__ __volatile__ ("cmpxchg2."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
- break; \
- \
- case 4: \
- __asm__ __volatile__ ("cmpxchg4."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
- break; \
- \
- case 8: \
- __asm__ __volatile__ ("cmpxchg8."sem" %0=%2,%3,ar.ccv" \
- : "=r"(_r_), "=m"(__xg(_p_)) \
- : "m"(__xg(_p_)), "r"(_n_)); \
- break; \
- \
- default: \
- _r_ = __cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- (__typeof__(old)) _r_; \
+#define ia64_cmpxchg(sem,ptr,old,new,size) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(new) _n_ = (new); \
+ __u64 _o_, _r_; \
+ \
+ switch (size) { \
+ case 1: _o_ = (__u8 ) (long) (old); break; \
+ case 2: _o_ = (__u16) (long) (old); break; \
+ case 4: _o_ = (__u32) (long) (old); break; \
+ case 8: _o_ = (__u64) (long) (old); break; \
+ default: \
+ } \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
+ switch (size) { \
+ case 1: \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg1."sem" %0=%2,%3,ar.ccv" \
+ : "=r"(_r_), "=m"(__xg(_p_)) \
+ : "m"(__xg(_p_)), "r"(_n_)); \
+ break; \
+ \
+ case 2: \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg2."sem" %0=%2,%3,ar.ccv" \
+ : "=r"(_r_), "=m"(__xg(_p_)) \
+ : "m"(__xg(_p_)), "r"(_n_)); \
+ break; \
+ \
+ case 4: \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg4."sem" %0=%2,%3,ar.ccv" \
+ : "=r"(_r_), "=m"(__xg(_p_)) \
+ : "m"(__xg(_p_)), "r"(_n_)); \
+ break; \
+ \
+ case 8: \
+ __asm__ __volatile__ (IA64_SEMFIX"cmpxchg8."sem" %0=%2,%3,ar.ccv" \
+ : "=r"(_r_), "=m"(__xg(_p_)) \
+ : "m"(__xg(_p_)), "r"(_n_)); \
+ break; \
+ \
+ default: \
+ _r_ = __cmpxchg_called_with_bad_pointer(); \
+ break; \
+ } \
+ (__typeof__(old)) _r_; \
})
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
@@ -418,15 +426,15 @@ extern struct task_struct *ia64_switch_to (void *next_task);
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
-#define __switch_to(prev,next,last) do { \
- if (((prev)->thread.flags & IA64_THREAD_DBG_VALID) \
- || IS_IA32_PROCESS(ia64_task_regs(prev))) \
- ia64_save_extra(prev); \
- if (((next)->thread.flags & IA64_THREAD_DBG_VALID) \
- || IS_IA32_PROCESS(ia64_task_regs(next))) \
- ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
- (last) = ia64_switch_to((next)); \
+#define __switch_to(prev,next,last) do { \
+ if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \
+ || IS_IA32_PROCESS(ia64_task_regs(prev))) \
+ ia64_save_extra(prev); \
+ if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \
+ || IS_IA32_PROCESS(ia64_task_regs(next))) \
+ ia64_load_extra(next); \
+ ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
+ (last) = ia64_switch_to((next)); \
} while (0)
#ifdef CONFIG_SMP
@@ -444,6 +452,7 @@ extern void ia64_load_extra (struct task_struct *task);
*/
# define switch_to(prev,next,last) do { \
if (ia64_get_fpu_owner() == (prev) && ia64_psr(ia64_task_regs(prev))->mfh) { \
+ ia64_psr(ia64_task_regs(prev))->mfh = 0; \
(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
__ia64_save_fpu((prev)->thread.fph); \
} \
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 82de77440..456376e1b 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -203,6 +203,7 @@
#define __NR_lstat 1211
#define __NR_fstat 1212
#define __NR_clone2 1213
+#define __NR_getdents64 1214
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)