Diffstat (limited to 'include/asm-ia64')
-rw-r--r--   include/asm-ia64/dma.h           |    2
-rw-r--r--   include/asm-ia64/fpswa.h         |    2
-rw-r--r--   include/asm-ia64/hardirq.h       |    4
-rw-r--r--   include/asm-ia64/mmu_context.h   |    5
-rw-r--r--   include/asm-ia64/offsets.h       |    7
-rw-r--r--   include/asm-ia64/page.h          |   12
-rw-r--r--   include/asm-ia64/pci.h           |   34
-rw-r--r--   include/asm-ia64/pgtable.h       |   21
-rw-r--r--   include/asm-ia64/ptrace.h        |    8
-rw-r--r--   include/asm-ia64/semaphore.h     |    7
-rw-r--r--   include/asm-ia64/smp.h           |    4
-rw-r--r--   include/asm-ia64/softirq.h       |  138
-rw-r--r--   include/asm-ia64/types.h         |   44
13 files changed, 91 insertions, 197 deletions
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index 3e6185064..4e9b30a91 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -6,11 +6,11 @@
  * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
  */

-#include <asm/io.h>		/* need byte IO */
 #include <linux/config.h>
 #include <linux/spinlock.h>	/* And spinlocks */
 #include <linux/delay.h>
+#include <asm/io.h>		/* need byte IO */

 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
 #define dma_outb	outb_p
diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h
index a6facbd8c..bbf8afcfd 100644
--- a/include/asm-ia64/fpswa.h
+++ b/include/asm-ia64/fpswa.h
@@ -9,7 +9,7 @@
  * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
  */

-#if 0
+#if 1
 #define FPSWA_BUG
 #endif
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
index 95523854e..567243650 100644
--- a/include/asm-ia64/hardirq.h
+++ b/include/asm-ia64/hardirq.h
@@ -17,12 +17,16 @@ extern unsigned long hardirq_no[NR_CPUS];
  * or hardware interrupt processing?
  */

+#define in_irq()	(local_irq_count[smp_processor_id()] != 0)
+
 #define in_interrupt()						\
 ({								\
 	int __cpu = smp_processor_id();				\
 	(local_irq_count[__cpu] + local_bh_count[__cpu]) != 0;	\
 })
+
+

 #ifndef CONFIG_SMP
 # define hardirq_trylock(cpu)	(local_irq_count[cpu] == 0)
 # define hardirq_endlock(cpu)	((void) 0)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index b775d0a9a..22c2b2297 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -60,6 +60,11 @@ extern unsigned long ia64_next_context;

 extern void get_new_mmu_context (struct mm_struct *mm);

+static inline void
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
 extern inline unsigned long
 ia64_rid (unsigned long context, unsigned long region_addr)
 {
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
index 9639a9e40..d989cb911 100644
--- a/include/asm-ia64/offsets.h
+++ b/include/asm-ia64/offsets.h
@@ -10,7 +10,7 @@
 #define PF_PTRACED_BIT			4

-#define IA64_TASK_SIZE			2752	/* 0xac0 */
+#define IA64_TASK_SIZE			3280	/* 0xcd0 */
 #define IA64_PT_REGS_SIZE		400	/* 0x190 */
 #define IA64_SWITCH_STACK_SIZE		560	/* 0x230 */
 #define IA64_SIGINFO_SIZE		136	/* 0x88 */
@@ -18,8 +18,9 @@
 #define IA64_TASK_FLAGS_OFFSET		8	/* 0x8 */
 #define IA64_TASK_SIGPENDING_OFFSET	16	/* 0x10 */
 #define IA64_TASK_NEED_RESCHED_OFFSET	40	/* 0x28 */
-#define IA64_TASK_THREAD_OFFSET		912	/* 0x390 */
-#define IA64_TASK_THREAD_KSP_OFFSET	912	/* 0x390 */
+#define IA64_TASK_PROCESSOR_OFFSET	108	/* 0x6c */
+#define IA64_TASK_THREAD_OFFSET		1424	/* 0x590 */
+#define IA64_TASK_THREAD_KSP_OFFSET	1424	/* 0x590 */
 #define IA64_TASK_PID_OFFSET		188	/* 0xbc */
 #define IA64_TASK_MM_OFFSET		88	/* 0x58 */
 #define IA64_PT_REGS_CR_IPSR_OFFSET	0	/* 0x0 */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 64d044599..53eb9f963 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -127,6 +127,18 @@ typedef union ia64_va {
 #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
 #define PAGE_BUG(page) do { BUG(); } while (0)

+extern __inline__ int get_order(unsigned long size)
+{
+	double d = size - 1;
+	long order;
+
+	__asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d));
+	order = order - PAGE_SHIFT - 0xffff + 1;
+	if (order < 0)
+		order = 0;
+	return order;
+}
+
 #endif /* !ASSEMBLY */

 #define PAGE_OFFSET	0xe000000000000000
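A note on the get_order() helper added to page.h above: it returns the smallest order n such that PAGE_SIZE << n covers size, reading the 0xffff-biased exponent of the floating-point value size - 1 with getf.exp instead of looping over bits. A rough portable equivalent, shown purely as an illustration (get_order_ref and page_shift are made-up names, not part of this patch):

    /* Illustration only: computes the same result as the getf.exp version.
     * Returns the smallest n such that (1UL << (page_shift + n)) >= size.
     */
    static int get_order_ref(unsigned long size, int page_shift)
    {
            int order = 0;

            size = (size - 1) >> page_shift;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }

With 8KB pages (page_shift = 13), for example, a 24KB request yields order 2, i.e. a four-page (32KB) allocation.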
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index bb3e4fb21..de50f98f0 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -55,8 +55,10 @@ extern void pci_free_consistent (struct pci_dev *hwdev, size_t size,
  * until either pci_unmap_single or pci_dma_sync_single is performed.
  */
 extern inline dma_addr_t
-pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size)
+pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	return virt_to_bus(ptr);
 }

@@ -69,8 +71,10 @@ pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size)
  * whatever the device wrote there.
  */
 extern inline void
-pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size)
+pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	/* Nothing to do */
 }

@@ -91,8 +95,10 @@ pci_unmap_single (struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size)
  * the same here.
  */
 extern inline int
-pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
+pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	return nents;
 }

@@ -102,8 +108,10 @@ pci_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
  * pci_unmap_single() above.
  */
 extern inline void
-pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
+pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	/* Nothing to do */
 }

@@ -118,8 +126,10 @@ pci_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nents)
  * device again owns the buffer.
  */
 extern inline void
-pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size)
+pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	/* Nothing to do */
 }

@@ -131,11 +141,23 @@ pci_dma_sync_single (struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size)
  * same rules and usage.
  */
 extern inline void
-pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems)
+pci_dma_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
 {
+	if (direction == PCI_DMA_NONE)
+		BUG();
 	/* Nothing to do */
 }

+/* Return whether the given PCI device DMA address mask can
+ * be supported properly.  For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+	return 1;
+}
+
 /* These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries pci_map_sg
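The pci.h DMA helpers above now take the direction argument used by the common PCI DMA API; on IA-64 the mappings are direct, so the bodies only sanity-check that the caller did not pass PCI_DMA_NONE, and pci_dma_supported() can simply claim every mask. A hypothetical caller, assuming the usual PCI_DMA_* constants from <linux/pci.h> (the function and variable names below are invented for illustration):

    #include <linux/pci.h>

    static void send_to_device(struct pci_dev *pdev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* CPU fills the buffer, the device only reads it. */
            handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

            /* ... hand `handle' to the device and wait for completion ... */

            /* Same direction on unmap; PCI_DMA_NONE would trip the new BUG(). */
            pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
    }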
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 2defe7cdf..203005b5c 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -187,9 +187,14 @@
 #define flush_cache_page(vma, vmaddr)		do { } while (0)
 #define flush_page_to_ram(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
+
 extern void ia64_flush_icache_page (unsigned long addr);
-#define flush_icache_page(pg)	ia64_flush_icache_page(page_address(pg))
+#define flush_icache_page(vma,pg)				\
+do {								\
+	if ((vma)->vm_flags & PROT_EXEC)			\
+		ia64_flush_icache_page(page_address(pg));	\
+} while (0)

 /*
  * Now come the defines and routines to manage and access the three-level
@@ -289,15 +294,21 @@ extern pmd_t *ia64_bad_pagetable (void);
  */
 #define pgprot_noncached(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

+extern __inline__ unsigned long
+pgd_index (unsigned long address)
+{
+	unsigned long region = address >> 61;
+	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+	return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+
 /* The offset in the 1-level directory is given by the 3 region bits
    (61..63) and the seven level-1 bits (33-39). */
 extern __inline__ pgd_t*
 pgd_offset (struct mm_struct *mm, unsigned long address)
 {
-	unsigned long region = address >> 61;
-	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
-
-	return mm->pgd + ((region << (PAGE_SHIFT - 6)) | l1index);
+	return mm->pgd + pgd_index(address);
 }

 /* In the kernel's mapped region we have a full 43 bit space available and completely
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index b2d0cc906..d50ce82e7 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -145,10 +145,10 @@ struct pt_regs {
 	 * Floating point registers that the kernel considers
 	 * scratch:
 	 */
-	struct ia64_fpreg f6;		/* scratch*/
-	struct ia64_fpreg f7;		/* scratch*/
-	struct ia64_fpreg f8;		/* scratch*/
-	struct ia64_fpreg f9;		/* scratch*/
+	struct ia64_fpreg f6;		/* scratch */
+	struct ia64_fpreg f7;		/* scratch */
+	struct ia64_fpreg f8;		/* scratch */
+	struct ia64_fpreg f9;		/* scratch */
 };

 /*
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index 2c75056c5..a50ee01e3 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -137,13 +137,6 @@ up (struct semaphore * sem)
  * In terms of fairness, when there is heavy use of the lock, we want
  * to see the lock being passed back and forth between readers and
  * writers (like in a producer/consumer style of communication).
- *
-
- For
- * liveness, it would be necessary to process the blocked readers and
- * writers in FIFO order. However, we don't do this (yet). I suppose
- * if you have a lock that is _that_ heavily contested, you're in big
- * trouble anyhow.
  *
  * -ben (with clarifications & IA-64 comments by davidm)
  */
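The pgd_offset() change in pgtable.h above only factors the index math out into pgd_index(): the three region bits (61..63) pick one eighth of the page directory and the level-1 bits below them pick the slot within that eighth. A small worked example, using the 8KB-page numbers implied by the comment in that hunk (PAGE_SHIFT = 13, PGDIR_SHIFT = 33, PTRS_PER_PGD = 1024; the EX_* names are illustrative, not from this patch):

    #include <stdio.h>

    #define EX_PAGE_SHIFT   13
    #define EX_PGDIR_SHIFT  33
    #define EX_PTRS_PER_PGD (1UL << (EX_PAGE_SHIFT - 3))    /* 1024 eight-byte entries */

    static unsigned long ex_pgd_index(unsigned long address)
    {
            unsigned long region = address >> 61;
            unsigned long l1index = (address >> EX_PGDIR_SHIFT) & ((EX_PTRS_PER_PGD >> 3) - 1);

            return (region << (EX_PAGE_SHIFT - 6)) | l1index;
    }

    int main(void)
    {
            printf("%lu\n", ex_pgd_index(0x2000000000000000UL));  /* region 1, slot 0 -> 128 */
            printf("%lu\n", ex_pgd_index(0x2000000200000000UL));  /* next 8GB chunk  -> 129 */
            return 0;
    }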
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 1ad1a8149..a6ea5c17d 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -8,6 +8,7 @@
 #define _ASM_IA64_SMP_H

 #include <linux/config.h>
+
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/kernel.h>
@@ -25,9 +26,10 @@ extern unsigned long cpu_present_map;
 extern unsigned long cpu_online_map;
 extern unsigned long ipi_base_addr;
 extern int bootstrap_processor;
-extern volatile int cpu_number_map[NR_CPUS];
+extern volatile int __cpu_number_map[NR_CPUS];
 extern volatile int __cpu_logical_map[NR_CPUS];

+#define cpu_number_map(i)	__cpu_number_map[i]
 #define cpu_logical_map(i)	__cpu_logical_map[i]

 #if defined(CONFIG_KDB)
diff --git a/include/asm-ia64/softirq.h b/include/asm-ia64/softirq.h
index 8b92f4442..fb40999e2 100644
--- a/include/asm-ia64/softirq.h
+++ b/include/asm-ia64/softirq.h
@@ -2,13 +2,9 @@
 #define _ASM_IA64_SOFTIRQ_H

 /*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
  */
-#include <linux/config.h>
-#include <linux/stddef.h>
-
-#include <asm/system.h>
 #include <asm/hardirq.h>

 extern unsigned int local_bh_count[NR_CPUS];
@@ -16,137 +12,9 @@ extern unsigned int local_bh_count[NR_CPUS];
 #define cpu_bh_disable(cpu)	do { local_bh_count[(cpu)]++; barrier(); } while (0)
 #define cpu_bh_enable(cpu)	do { barrier(); local_bh_count[(cpu)]--; } while (0)

-#define cpu_bh_trylock(cpu)	(local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu)	(local_bh_count[(cpu)] = 0)
-
 #define local_bh_disable()	cpu_bh_disable(smp_processor_id())
 #define local_bh_enable()	cpu_bh_enable(smp_processor_id())

-#define get_active_bhs()	(bh_mask & bh_active)
-
-static inline void
-clear_active_bhs (unsigned long x)
-{
-	unsigned long old, new;
-	volatile unsigned long *bh_activep = (void *) &bh_active;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(bh_activep);
-		old = *bh_activep;
-		new = old & ~x;
-	} while (ia64_cmpxchg(bh_activep, old, new, 8) != old);
-}
-
-extern inline void
-init_bh (int nr, void (*routine)(void))
-{
-	bh_base[nr] = routine;
-	atomic_set(&bh_mask_count[nr], 0);
-	bh_mask |= 1 << nr;
-}
-
-extern inline void
-remove_bh (int nr)
-{
-	bh_mask &= ~(1 << nr);
-	mb();
-	bh_base[nr] = NULL;
-}
-
-extern inline void
-mark_bh (int nr)
-{
-	set_bit(nr, &bh_active);
-}
-
-#ifdef CONFIG_SMP
-
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern atomic_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void
-start_bh_atomic (void)
-{
-	atomic_inc(&global_bh_lock);
-	synchronize_bh();
-}
-
-static inline void
-end_bh_atomic (void)
-{
-	atomic_dec(&global_bh_lock);
-}
-
-/* These are for the irq's testing the lock */
-static inline int
-softirq_trylock (int cpu)
-{
-	if (cpu_bh_trylock(cpu)) {
-		if (!test_and_set_bit(0, &global_bh_count)) {
-			if (atomic_read(&global_bh_lock) == 0)
-				return 1;
-			clear_bit(0,&global_bh_count);
-		}
-		cpu_bh_endlock(cpu);
-	}
-	return 0;
-}
-
-static inline void
-softirq_endlock (int cpu)
-{
-	cpu_bh_enable(cpu);
-	clear_bit(0,&global_bh_count);
-}
-
-#else /* !CONFIG_SMP */
-
-extern inline void
-start_bh_atomic (void)
-{
-	local_bh_disable();
-	barrier();
-}
-
-extern inline void
-end_bh_atomic (void)
-{
-	barrier();
-	local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu)	(cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu)	(cpu_bh_endlock(cpu))
-#define synchronize_bh()	barrier()
-
-#endif /* !CONFIG_SMP */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void
-disable_bh (int nr)
-{
-	bh_mask &= ~(1 << nr);
-	atomic_inc(&bh_mask_count[nr]);
-	synchronize_bh();
-}
-
-extern inline void
-enable_bh (int nr)
-{
-	if (atomic_dec_and_test(&bh_mask_count[nr]))
-		bh_mask |= 1 << nr;
-}
+#define in_softirq()	(local_bh_count[smp_processor_id()] != 0)

 #endif /* _ASM_IA64_SOFTIRQ_H */
diff --git a/include/asm-ia64/types.h b/include/asm-ia64/types.h
index a86d0a7f3..d4b1732e2 100644
--- a/include/asm-ia64/types.h
+++ b/include/asm-ia64/types.h
@@ -8,8 +8,8 @@
  * not a major issue.  However, for interoperability, libraries still
  * need to be careful to avoid a name clashes.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
  */

 #ifdef __ASSEMBLY__
@@ -38,52 +38,28 @@ typedef unsigned short __u16;
 typedef __signed__ int __s32;
 typedef unsigned int __u32;

-/*
- * There are 32-bit compilers for the ia-64 out there..
- */
-# if ((~0UL) == 0xffffffff)
-# if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-# endif
-# else
 typedef __signed__ long __s64;
 typedef unsigned long __u64;
-# endif

 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
 # ifdef __KERNEL__

-typedef signed char s8;
-typedef unsigned char u8;
+typedef __s8 s8;
+typedef __u8 u8;

-typedef signed short s16;
-typedef unsigned short u16;
+typedef __s16 s16;
+typedef __u16 u16;

-typedef signed int s32;
-typedef unsigned int u32;
+typedef __s32 s32;
+typedef __u32 u32;

-/*
- * There are 32-bit compilers for the ia-64 out there... (don't rely
- * on cpp because that may cause su problem in a 32->64 bit
- * cross-compilation environment).
- */
-# ifdef __LP64__
+typedef __s64 s64;
+typedef __u64 u64;

-typedef signed long s64;
-typedef unsigned long u64;
 #define BITS_PER_LONG 64
-# else
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-#define BITS_PER_LONG 32
-
-# endif
-
 /* DMA addresses are 64-bits wide, in general. */
 typedef u64 dma_addr_t;
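Together with the in_irq() macro added to hardirq.h and the in_softirq() definition that now closes softirq.h, code can cheaply test what context it runs in on the current CPU: in_irq() is true in hard interrupt handlers, in_softirq() in bottom-half/softirq processing, and in_interrupt() in either. A hypothetical use (not part of this patch), following the usual rule that interrupt context must not sleep:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <asm/hardirq.h>
    #include <asm/softirq.h>

    static void *grab_buffer(size_t size)
    {
            /* in_interrupt() covers both hard-irq and softirq context,
             * so use a non-sleeping allocation there. */
            return kmalloc(size, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
    }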