Diffstat (limited to 'include')
-rw-r--r--   include/asm-alpha/atomic.h       |    2
-rw-r--r--   include/asm-alpha/bitops.h       |    3
-rw-r--r--   include/asm-alpha/init.h         |    2
-rw-r--r--   include/asm-alpha/io.h           |    7
-rw-r--r--   include/asm-alpha/machvec.h      |    1
-rw-r--r--   include/asm-alpha/mmu_context.h  |  110
-rw-r--r--   include/asm-alpha/page.h         |    9
-rw-r--r--   include/asm-alpha/processor.h    |   17
-rw-r--r--   include/asm-alpha/smp.h          |    1
-rw-r--r--   include/asm-alpha/softirq.h      |   69
-rw-r--r--   include/asm-alpha/spinlock.h     |   88
-rw-r--r--   include/asm-alpha/system.h       |  143
-rw-r--r--   include/asm-i386/spinlock.h      |    6
-rw-r--r--   include/asm-mips/spinlock.h      |   10
-rw-r--r--   include/linux/fs.h               |    1
-rw-r--r--   include/linux/pagemap.h          |    1
-rw-r--r--   include/linux/swap.h             |   12
17 files changed, 277 insertions, 205 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 2dccf3521..67b74d356 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -75,6 +75,7 @@ extern __inline__ long atomic_add_return(int i, atomic_t * v)
 	"	mov %0,%2\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
+	"	mb\n"
 	".section .text2,\"ax\"\n"
 	"2:	br 1b\n"
 	".previous"
@@ -92,6 +93,7 @@ extern __inline__ long atomic_sub_return(int i, atomic_t * v)
 	"	mov %0,%2\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
+	"	mb\n"
 	".section .text2,\"ax\"\n"
 	"2:	br 1b\n"
 	".previous"
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index adaf2fac2..c9e7e7aee 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -90,6 +90,7 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
 	"	xor %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,3f\n"
+	"	mb\n"
 	"2:\n"
 	".section .text2,\"ax\"\n"
 	"3:	br 1b\n"
@@ -114,6 +115,7 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
 	"	xor %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,3f\n"
+	"	mb\n"
 	"2:\n"
 	".section .text2,\"ax\"\n"
 	"3:	br 1b\n"
@@ -137,6 +139,7 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
 	"	xor %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,3f\n"
+	"	mb\n"
 	".section .text2,\"ax\"\n"
 	"3:	br 1b\n"
 	".previous"
diff --git a/include/asm-alpha/init.h b/include/asm-alpha/init.h
index a85501cbb..f343aecd3 100644
--- a/include/asm-alpha/init.h
+++ b/include/asm-alpha/init.h
@@ -1,6 +1,7 @@
 #ifndef _ALPHA_INIT_H
 #define _ALPHA_INIT_H
 
+#ifndef MODULE
 #define __init __attribute__ ((__section__ (".text.init")))
 #define __initdata __attribute__ ((__section__ (".data.init")))
 #define __initfunc(__arginit) \
@@ -11,6 +12,7 @@
 #define __INIT .section .text.init,"ax"
 #define __FINIT .previous
 #define __INITDATA .section .data.init,"a"
+#endif
 
 #define __cacheline_aligned __attribute__((__aligned__(32)))
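Note: the `mb` added after each successful `stl_c` in atomic.h and bitops.h makes every atomic read-modify-write double as a memory barrier on SMP, so a successful operation also orders all subsequent loads and stores. A sketch of the reordering this forbids on Alpha's weak memory model (illustrative only; `owner`, `shared_data`, and `use()` are made-up names):

	/* CPU 1, acquire side: */
	while (test_and_set_bit(0, &owner))	/* ll/sc loop; mb on success */
		/* spin */;
	use(shared_data);	/* cannot be satisfied before the acquisition */

Without the barrier, the load of `shared_data` could issue before the `stl_c` became globally visible and observe stale data. (The release side still needs its own barrier before clearing the bit.)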
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index f908f7464..5ba356f61 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -29,15 +29,16 @@
  */
 static inline void __set_hae(unsigned long new_hae)
 {
-	unsigned long ipl = swpipl(7);
+	unsigned long flags;
+	__save_and_cli(flags);
 
 	alpha_mv.hae_cache = new_hae;
 	*alpha_mv.hae_register = new_hae;
 	mb();
-
 	/* Re-read to make sure it was written.  */
 	new_hae = *alpha_mv.hae_register;
-	setipl(ipl);
+
+	__restore_flags(flags);
 }
 
 static inline void set_hae(unsigned long new_hae)
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 035ffa4e2..587fa8a3e 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -32,7 +32,6 @@ struct alpha_machine_vector
 	int rtc_port;
 	int max_asn;
 	unsigned long max_dma_address;
-	unsigned long mmu_context_mask;
 	unsigned long irq_probe_mask;
 	unsigned long iack_sc;
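Note: `__set_hae()` stops poking the IPL directly and adopts the generic flags idiom, and machvec.h drops `mmu_context_mask`, whose only user (the `MASK_CONTEXT` macro in mmu_context.h, removed below) is gone. The flags idiom in general (a sketch; `touch_hw()` is a made-up placeholder for the critical section):

	unsigned long flags;

	__save_and_cli(flags);		/* disable interrupts, remember prior state */
	touch_hw();			/* critical section */
	__restore_flags(flags);		/* restore whatever IPL the caller had */

Unlike a blind `__sti()`, `__restore_flags()` stays correct when the caller itself already runs with interrupts disabled.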
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 03aa3c6d2..7b800d156 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -49,31 +49,24 @@
 # endif
 #endif
 
-#ifdef __SMP__
-#define WIDTH_THIS_PROCESSOR	5
 /*
- * last_asn[processor]:
+ * cpu_last_asn(processor):
  * 63                                            0
  * +-------------+----------------+--------------+
  * | asn version | this processor | hardware asn |
  * +-------------+----------------+--------------+
  */
-extern unsigned long last_asn[];
-#define asn_cache last_asn[p->processor]
 
+#ifdef __SMP__
+#include <asm/smp.h>
+#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
 #else
-#define WIDTH_THIS_PROCESSOR	0
-/*
- * asn_cache:
- * 63                                            0
- * +------------------------------+--------------+
- * | asn version                  | hardware asn |
- * +------------------------------+--------------+
- */
-extern unsigned long asn_cache;
+extern unsigned long last_asn;
+#define cpu_last_asn(cpuid)	last_asn
 #endif /* __SMP__ */
 
 #define WIDTH_HARDWARE_ASN	8
+#define WIDTH_THIS_PROCESSOR	5
 #define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
 #define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
@@ -96,20 +89,46 @@
 extern void get_new_mmu_context(struct task_struct *p, struct mm_struct *mm);
 
-__EXTERN_INLINE void ev4_get_mmu_context(struct task_struct *p)
+static inline unsigned long
+__get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
 {
-	/* As described, ASN's are broken.  */
+	unsigned long asn = cpu_last_asn(smp_processor_id());
+	unsigned long next = asn + 1;
+
+	if ((next ^ asn) & ~MAX_ASN) {
+		tbiap();
+		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+	}
+	cpu_last_asn(smp_processor_id()) = next;
+	mm->context = next;		/* full version + asn */
+	return next;
 }
 
-__EXTERN_INLINE void ev5_get_mmu_context(struct task_struct *p)
+__EXTERN_INLINE void
+ev4_get_mmu_context(struct task_struct *p)
 {
-	struct mm_struct * mm = p->mm;
+	/* As described, ASN's are broken.  But we can optimize for
+	   switching between threads -- if the mm is unchanged from
+	   current we needn't flush.  */
+	if (current->mm != p->mm)
+		tbiap();
+}
 
-	if (mm) {
-		unsigned long asn = asn_cache;
-		/* Check if our ASN is of an older version and thus invalid */
-		if ((mm->context ^ asn) & ~HARDWARE_ASN_MASK)
-			get_new_mmu_context(p, mm);
+__EXTERN_INLINE void
+ev5_get_mmu_context(struct task_struct *p)
+{
+	/* Check if our ASN is of an older version, or on a different CPU,
+	   and thus invalid.  */
+
+	long asn = cpu_last_asn(smp_processor_id());
+	struct mm_struct *mm = p->mm;
+	long mmc = mm->context;
+
+	if ((p->tss.mm_context ^ asn) & ~HARDWARE_ASN_MASK) {
+		if ((mmc ^ asn) & ~HARDWARE_ASN_MASK)
+			mmc = __get_new_mmu_context(p, mm);
+		p->tss.mm_context = mmc;
+		p->tss.asn = mmc & HARDWARE_ASN_MASK;
 	}
 }
@@ -123,40 +142,40 @@
 # endif
 #endif
 
-extern inline void init_new_context(struct mm_struct *mm)
+extern inline void
+init_new_context(struct mm_struct *mm)
 {
 	mm->context = 0;
 }
 
-extern inline void destroy_context(struct mm_struct *mm)
+extern inline void
+destroy_context(struct mm_struct *mm)
 {
 	/* Nothing to do.  */
 }
 
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
 /*
  * Force a context reload. This is needed when we change the page
  * table pointer or when we update the ASN of the current process.
  */
 
-#if defined(CONFIG_ALPHA_GENERIC)
-#define MASK_CONTEXT(tss) \
-	((struct thread_struct *)((unsigned long)(tss) & alpha_mv.mmu_context_mask))
-#elif defined(CONFIG_ALPHA_DP264)
-#define MASK_CONTEXT(tss) \
-	((struct thread_struct *)((unsigned long)(tss) & 0xfffffffffful))
-#else
-#define MASK_CONTEXT(tss) (tss)
+/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
+#ifndef __EXTERN_INLINE
+#include <asm/io.h>
 #endif
 
-__EXTERN_INLINE struct thread_struct *
+extern inline unsigned long
 __reload_tss(struct thread_struct *tss)
 {
-	register struct thread_struct *a0 __asm__("$16");
-	register struct thread_struct *v0 __asm__("$0");
-
-	a0 = MASK_CONTEXT(tss);
+	register unsigned long a0 __asm__("$16");
+	register unsigned long v0 __asm__("$0");
 
+	a0 = virt_to_phys(tss);
 	__asm__ __volatile__(
 		"call_pal %2 #__reload_tss"
 		: "=r"(v0), "=r"(a0)
@@ -166,27 +185,22 @@ __reload_tss(struct thread_struct *tss)
 	return v0;
 }
 
-__EXTERN_INLINE void
+extern inline void
 reload_context(struct task_struct *task)
 {
 	__reload_tss(&task->tss);
 }
 
 /*
- * After we have set current->mm to a new value, this activates the
- * context for the new mm so we see the new mappings.
+ * After setting current->mm to a new value, activate the context for the
+ * new mm so we see the new mappings.
  */
 
-__EXTERN_INLINE void
+extern inline void
 activate_context(struct task_struct *task)
 {
-	get_mmu_context(task);
+	get_new_mmu_context(task, task->mm);
 	reload_context(task);
 }
 
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
-
 #endif /* __ALPHA_MMU_CONTEXT_H */
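Note: the ASN allocator packs a version number, CPU number, and the 8-bit hardware ASN into one 64-bit counter. When the hardware ASN field would wrap, `__get_new_mmu_context()` flushes the TLB (`tbiap()`) and bumps the version, which invalidates every stale context at once. A stand-alone user-space model of the rollover arithmetic (illustrative; `MAX_ASN` really comes from the machine vector at run time, 127 here is a stand-in):

	#include <stdio.h>

	#define WIDTH_HARDWARE_ASN	8
	#define WIDTH_THIS_PROCESSOR	5
	#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
	#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
	#define MAX_ASN 127UL		/* per-platform; alpha_mv.max_asn in the kernel */

	static unsigned long last_asn = ASN_FIRST_VERSION;

	unsigned long get_new_asn(void)
	{
		unsigned long asn = last_asn, next = asn + 1;

		if ((next ^ asn) & ~MAX_ASN) {	/* hardware asn overflowed */
			/* tbiap() would go here: flush the TLB */
			next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
		}
		return last_asn = next;
	}

	int main(void)
	{
		int i;
		for (i = 0; i < 130; i++) {	/* watch the version bump at 127 */
			unsigned long c = get_new_asn();
			printf("version %lu asn %lu\n",
			       c >> (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN),
			       c & HARDWARE_ASN_MASK);
		}
		return 0;
	}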
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index c2d27951e..816219ce9 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -105,6 +105,15 @@ typedef unsigned long pgprot_t;
 #define __pgprot(x)	(x)
 
 #endif /* STRICT_MM_TYPECHECKS */
+
+#define BUG() \
+do { \
+	printk("Kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	__asm__ __volatile__("call_pal 129 # bugchk"); \
+} while (1)
+
+#define PAGE_BUG(page)	BUG()
+
 #endif /* !ASSEMBLY */
 
 /* to align the pointer to the (next) page boundary */
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index f6097cf7c..fa6c47b63 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -8,10 +8,10 @@
 #define __ASM_ALPHA_PROCESSOR_H
 
 /*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
+ * Returns current instruction pointer ("program counter").
  */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+#define current_text_addr() \
+  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
 
 /*
  * We have a 42-bit user address space: 4TB user VM...
@@ -61,6 +61,15 @@ struct thread_struct {
 	 */
 	unsigned long flags;
 
+	/* The full version of the ASN including serial number.
+
+	   Two threads running on two different processors must of necessity
+	   have different serial numbers.  Having this duplicated from
+	   mm->context allows them to be slightly out of sync preventing
+	   the asn from incrementing each and every time the two threads
+	   are scheduled.  */
+	unsigned long mm_context;
+
 	/* Perform syscall argument validation (get/set_fs). */
 	mm_segment_t fs;
 
@@ -77,7 +86,7 @@ struct thread_struct {
 	0, 0, 0, \
 	0, 0, 0, \
 	0, 0, 0, \
-	0, \
+	0, 0, \
 	KERNEL_DS \
 }
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index d53142bb1..785194f81 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -9,6 +9,7 @@
 struct cpuinfo_alpha {
 	unsigned long loops_per_sec;
+	unsigned long last_asn;
 	unsigned long *pgd_cache;
 	unsigned long *pte_cache;
 	unsigned long pgtable_cache_sz;
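Note: the new `current_text_addr()` exploits the Alpha `br` instruction, which writes the address of the *following* instruction into its destination register; branching to `.+4` (simply the next instruction) therefore yields the current PC in one instruction, with no GCC label extensions. A usage sketch (`show_pc` is a made-up name):

	void show_pc(void)
	{
		void *pc = current_text_addr();
		printk("executing near %p\n", pc);	/* an address inside show_pc */
	}

Likewise, page.h's new `BUG()` pairs a file/line printk with `call_pal 129` (bugchk) so a violated invariant traps instead of limping on, and the extra `0,` in the thread_struct initializer zero-fills the new `mm_context` field.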
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index cb89c5328..dad9c4905 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -5,18 +5,33 @@
 #include <asm/atomic.h>
 #include <asm/hardirq.h>
 
-/*
- * This works but is wrong - on SMP it should disable only on the
- * current CPU and shouldn't synchronize like the heavy global
- * disable does. Oh, well.
- *
- * See the x86 version for an example.
- */
-#define local_bh_enable()	start_bh_atomic()
-#define local_bh_disable()	end_bh_atomic()
-
 extern unsigned int local_bh_count[NR_CPUS];
 
+extern inline void cpu_bh_disable(int cpu)
+{
+	local_bh_count[cpu]++;
+	mb();
+}
+
+extern inline void cpu_bh_enable(int cpu)
+{
+	mb();
+	local_bh_count[cpu]--;
+}
+
+extern inline int cpu_bh_trylock(int cpu)
+{
+	return local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1);
+}
+
+extern inline void cpu_bh_endlock(int cpu)
+{
+	local_bh_count[cpu] = 0;
+}
+
+#define local_bh_enable()	cpu_bh_enable(smp_processor_id())
+#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
+
 #define get_active_bhs()	(bh_mask & bh_active)
 
 static inline void clear_active_bhs(unsigned long x)
@@ -43,8 +58,9 @@ extern inline void init_bh(int nr, void (*routine)(void))
 
 extern inline void remove_bh(int nr)
 {
-	bh_base[nr] = NULL;
 	bh_mask &= ~(1 << nr);
+	wmb();
+	bh_base[nr] = NULL;
 }
 
 extern inline void mark_bh(int nr)
@@ -78,44 +94,39 @@ static inline void end_bh_atomic(void)
 /* These are for the irq's testing the lock */
 static inline int softirq_trylock(int cpu)
 {
-	if (!test_and_set_bit(0,&global_bh_count)) {
-		if (atomic_read(&global_bh_lock) == 0) {
-			++local_bh_count[cpu];
-			return 1;
+	if (cpu_bh_trylock(cpu)) {
+		if (!test_and_set_bit(0, &global_bh_count)) {
+			if (atomic_read(&global_bh_lock) == 0)
+				return 1;
+			clear_bit(0, &global_bh_count);
 		}
-		clear_bit(0,&global_bh_count);
+		cpu_bh_endlock(cpu);
 	}
 	return 0;
 }
 
 static inline void softirq_endlock(int cpu)
 {
-	local_bh_count[cpu]--;
-	clear_bit(0,&global_bh_count);
+	cpu_bh_enable(cpu);
+	clear_bit(0, &global_bh_count);
 }
 
 #else
 
 extern inline void start_bh_atomic(void)
 {
-	local_bh_count[smp_processor_id()]++;
-	barrier();
+	local_bh_disable();
 }
 
 extern inline void end_bh_atomic(void)
 {
-	barrier();
-	local_bh_count[smp_processor_id()]--;
+	local_bh_enable();
 }
 
 /* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) \
-  (local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1))
-
-#define softirq_endlock(cpu) \
-  (local_bh_count[cpu] = 0)
-
-#define synchronize_bh()	do { } while (0)
+#define softirq_trylock(cpu)	cpu_bh_trylock(cpu)
+#define softirq_endlock(cpu)	cpu_bh_endlock(cpu)
+#define synchronize_bh()	barrier()
 
 #endif /* SMP */
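Note: two ordering fixes stand out here. `cpu_bh_disable()` raises the count before its `mb()` and `cpu_bh_enable()` lowers it after one, so the protected region cannot leak past the count update; and `remove_bh()` now clears the mask bit and issues `wmb()` before nulling `bh_base[nr]`, so an interrupt that still sees the old base pointer can never dispatch through a NULL one. This is the general publish/retract idiom (a sketch; `handler_tab` and `ready_mask` are made-up names):

	/* Publish: make the data visible before the flag. */
	handler_tab[nr] = fn;
	wmb();			/* order the store above before the one below */
	ready_mask |= 1UL << nr;

	/* Retract: clear the flag, then the data -- mirror order. */
	ready_mask &= ~(1UL << nr);
	wmb();
	handler_tab[nr] = NULL;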
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index bbc8de52b..454a56582 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -8,29 +8,47 @@
  * and read-write locks.. We should actually do a
  * <linux/spinlock.h> with all of this. Oh, well.
  */
-#define spin_lock_irqsave(lock, flags)		do { local_irq_save(flags); spin_lock(lock); } while (0)
-#define spin_lock_irq(lock)			do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock)			do { local_bh_disable(); spin_lock(lock); } while (0)
-
-#define read_lock_irqsave(lock, flags)		do { local_irq_save(flags); read_lock(lock); } while (0)
-#define read_lock_irq(lock)			do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock)			do { local_bh_disable(); read_lock(lock); } while (0)
-
-#define write_lock_irqsave(lock, flags)		do { local_irq_save(flags); write_lock(lock); } while (0)
-#define write_lock_irq(lock)			do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock)			do { local_bh_disable(); write_lock(lock); } while (0)
-
-#define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock)			do { spin_unlock(lock); local_irq_enable(); } while (0)
-#define spin_unlock_bh(lock)			do { spin_unlock(lock); local_bh_enable(); } while (0)
-
-#define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock); local_irq_restore(flags); } while (0)
-#define read_unlock_irq(lock)			do { read_unlock(lock); local_irq_enable(); } while (0)
-#define read_unlock_bh(lock)			do { read_unlock(lock); local_bh_enable(); } while (0)
-
-#define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
-#define write_unlock_irq(lock)			do { write_unlock(lock); local_irq_enable(); } while (0)
-#define write_unlock_bh(lock)			do { write_unlock(lock); local_bh_enable(); } while (0)
+#define spin_lock_irqsave(lock, flags) \
+	do { local_irq_save(flags); spin_lock(lock); } while (0)
+#define spin_lock_irq(lock) \
+	do { local_irq_disable(); spin_lock(lock); } while (0)
+#define spin_lock_bh(lock) \
+	do { local_bh_disable(); spin_lock(lock); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+	do { local_irq_save(flags); read_lock(lock); } while (0)
+#define read_lock_irq(lock) \
+	do { local_irq_disable(); read_lock(lock); } while (0)
+#define read_lock_bh(lock) \
+	do { local_bh_disable(); read_lock(lock); } while (0)
+
+#define write_lock_irqsave(lock, flags) \
+	do { local_irq_save(flags); write_lock(lock); } while (0)
+#define write_lock_irq(lock) \
+	do { local_irq_disable(); write_lock(lock); } while (0)
+#define write_lock_bh(lock) \
+	do { local_bh_disable(); write_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irq(lock) \
+	do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_bh(lock) \
+	do { spin_unlock(lock); local_bh_enable(); } while (0)
+
+#define read_unlock_irqrestore(lock, flags) \
+	do { read_unlock(lock); local_irq_restore(flags); } while (0)
+#define read_unlock_irq(lock) \
+	do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_bh(lock) \
+	do { read_unlock(lock); local_bh_enable(); } while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+	do { write_unlock(lock); local_irq_restore(flags); } while (0)
+#define write_unlock_irq(lock) \
+	do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_bh(lock) \
+	do { write_unlock(lock); local_bh_enable(); } while (0)
 
 #ifndef __SMP__
@@ -49,7 +67,7 @@
 
 #define spin_lock_init(lock)	((void) 0)
 #define spin_lock(lock)		((void) 0)
-#define spin_trylock(lock)	((void) 0)
+#define spin_trylock(lock)	(1)
 #define spin_unlock_wait(lock)	((void) 0)
 #define spin_unlock(lock)	((void) 0)
 
@@ -94,19 +112,20 @@
  */
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int lock /*__attribute__((aligned(32))) */;
 #if DEBUG_SPINLOCK
-	char debug_state, target_ipl, saved_ipl, on_cpu;
+	int on_cpu;
+	int line_no;
 	void *previous;
 	struct task_struct * task;
+	const char *base_file;
 #endif
 } spinlock_t;
 
 #if DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, 1, 0, 0, 0, 0}
+#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, -1, 0, 0, 0, 0}
 #define spin_lock_init(x) \
-	((x)->lock = 0, (x)->target_ipl = 0, (x)->debug_state = 1, \
-	 (x)->previous = 0, (x)->task = 0)
+	((x)->lock = 0, (x)->on_cpu = -1, (x)->previous = 0, (x)->task = 0)
 #else
 #define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
 #define spin_lock_init(x)	((x)->lock = 0)
@@ -120,8 +139,11 @@ typedef struct { unsigned long a[100]; } __dummy_lock_t;
 
 #if DEBUG_SPINLOCK
 extern void spin_unlock(spinlock_t * lock);
-extern void spin_lock(spinlock_t * lock);
-extern int spin_trylock(spinlock_t * lock);
+extern void debug_spin_lock(spinlock_t * lock, const char *, int);
+extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
+
+#define spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
+#define spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
 
 #define spin_lock_own(LOCK, LOCATION) \
 do { \
@@ -167,7 +189,9 @@ static inline void spin_lock(spinlock_t * lock)
 
 /***********************************************************/
 
-typedef struct { volatile int write_lock:1, read_counter:31; } rwlock_t;
+typedef struct {
+	volatile int write_lock:1, read_counter:31;
+} /*__attribute__((aligned(32)))*/ rwlock_t;
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
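Note: turning `spin_lock` into a macro lets the debug build record where a lock was taken: `__BASE_FILE__` and `__LINE__` expand at the call site, not inside the lock routine, and land in the new `base_file`/`line_no` fields. The same trick in miniature (a sketch; `debug_acquire` is a made-up name):

	extern void debug_acquire(void *obj, const char *file, int line);
	#define acquire(obj) debug_acquire(obj, __BASE_FILE__, __LINE__)

	/* acquire(&some_lock) at foo.c line 42 compiles to
	   debug_acquire(&some_lock, "foo.c", 42), so a hung-lock
	   report can print the exact acquisition site. */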
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 2be0ced69..6939d37ab 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -86,16 +86,6 @@ struct el_common_EV5_uncorrectable_mcheck {
 	unsigned long ld_lock;	/* Contents of EV5 LD_LOCK register*/
 };
 
-
-extern void wrent(void *, unsigned long);
-extern void wrkgp(unsigned long);
-extern void wrusp(unsigned long);
-extern unsigned long rdusp(void);
-extern unsigned long rdmces (void);
-extern void wrmces (unsigned long);
-extern unsigned long whami(void);
-extern void wripir(unsigned long);
-
 extern void halt(void) __attribute__((noreturn));
 
 #define switch_to(prev,next,last) \
@@ -159,73 +149,86 @@ enum amask_enum {
 	__asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
 	__amask; })
 
-static inline unsigned long
-wrperfmon(unsigned long perf_fun, unsigned long arg)
-{
-	register unsigned long __r0 __asm__("$0");
-	register unsigned long __r16 __asm__("$16");
-	register unsigned long __r17 __asm__("$17");
-	__r16 = perf_fun;
-	__r17 = arg;
-	__asm__ __volatile__(
-		"call_pal %1"
-		: "=r"(__r0)
-		: "i"(PAL_wrperfmon), "r"(__r16), "r"(__r17)
-		: "$1", "$22", "$23", "$24", "$25", "$26");
-	return __r0;
+#define __CALL_PAL_R0(NAME, TYPE)				\
+static inline TYPE NAME(void)					\
+{								\
+	register TYPE __r0 __asm__("$0");			\
+	__asm__ __volatile__(					\
+		"call_pal %1 # " #NAME				\
+		:"=r" (__r0)					\
+		:"i" (PAL_ ## NAME)				\
+		:"$1", "$16", "$22", "$23", "$24", "$25");	\
+	return __r0;						\
 }
 
+#define __CALL_PAL_W1(NAME, TYPE0)				\
+static inline void NAME(TYPE0 arg0)				\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %1 # "#NAME				\
+		: "=r"(__r16)					\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
 
-#define call_pal1(palno,arg)						\
-({									\
-	register unsigned long __r0 __asm__("$0");			\
-	register unsigned long __r16 __asm__("$16"); __r16 = arg;	\
-	__asm__ __volatile__(						\
-		"call_pal %3 #call_pal1"				\
-		:"=r" (__r0),"=r" (__r16)				\
-		:"1" (__r16),"i" (palno)				\
-		:"$1", "$22", "$23", "$24", "$25", "memory");		\
-	__r0;								\
-})
-
-#define getipl()							\
-({									\
-	register unsigned long r0 __asm__("$0");			\
-	__asm__ __volatile__(						\
-		"call_pal %1 #getipl"					\
-		:"=r" (r0)						\
-		:"i" (PAL_rdps)						\
-		:"$1", "$16", "$22", "$23", "$24", "$25", "memory");	\
-	r0;								\
-})
+#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
+static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17)			\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
 
-#define setipl(ipl)							\
-({									\
-	register unsigned long __r16 __asm__("$16"); __r16 = (ipl);	\
-	__asm__ __volatile__(						\
-		"call_pal %2 #setipl"					\
-		:"=r" (__r16)						\
-		:"0" (__r16),"i" (PAL_swpipl)				\
-		:"$0", "$1", "$22", "$23", "$24", "$25", "memory");	\
-})
+#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
+static inline RTYPE NAME(TYPE0 arg0)				\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r0)			\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
 
-#define swpipl(ipl)						\
-({								\
-	register unsigned long __r0 __asm__("$0");		\
-	register unsigned long __r16 __asm__("$16") = (ipl);	\
+#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
+static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
 	__asm__ __volatile__(					\
-		"call_pal %3 #swpipl"				\
-		:"=r" (__r0),"=r" (__r16)			\
-		:"1" (__r16),"i" (PAL_swpipl)			\
-		:"$1", "$22", "$23", "$24", "$25", "memory");	\
-	__r0;							\
-})
+		"call_pal %3 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
 
-#define __cli()			setipl(7)
-#define __sti()			setipl(0)
-#define __save_flags(flags)	((flags) = getipl())
+__CALL_PAL_R0(rdmces, unsigned long);
+__CALL_PAL_R0(rdps, unsigned long);
+__CALL_PAL_R0(rdusp, unsigned long);
+__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
+__CALL_PAL_R0(whami, unsigned long);
+__CALL_PAL_W2(wrent, void*, unsigned long);
+__CALL_PAL_W1(wripir, unsigned long);
+__CALL_PAL_W1(wrkgp, unsigned long);
+__CALL_PAL_W1(wrmces, unsigned long);
+__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
+__CALL_PAL_W1(wrusp, unsigned long);
+__CALL_PAL_W1(wrvptptr, unsigned long);
+
+#define __cli()			((void) swpipl(7))
+#define __sti()			((void) swpipl(0))
+#define __save_flags(flags)	((flags) = rdps())
 #define __save_and_cli(flags)	((flags) = swpipl(7))
-#define __restore_flags(flags)	setipl(flags)
+#define __restore_flags(flags)	((void) swpipl(flags))
 
 #define local_irq_save(flags)		__save_and_cli(flags)
 #define local_irq_restore(flags)	__restore_flags(flags)
@@ -294,6 +297,7 @@ extern __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val)
 	"	bis $31,%3,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,2f\n"
+	"	mb\n"
 	".section .text2,\"ax\"\n"
 	"2:	br 1b\n"
 	".previous"
@@ -312,6 +316,7 @@ extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
 	"	bis $31,%3,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,2f\n"
+	"	mb\n"
 	".section .text2,\"ax\"\n"
 	"2:	br 1b\n"
 	".previous"
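Note: the ad-hoc PALcode wrappers (`call_pal1`, `getipl`, `setipl`, the open-coded `wrperfmon`) collapse into four macros keyed on call shape (returns a value or not, one or two arguments), with the PAL entry named by token pasting (`PAL_ ## NAME`). For instance, `__CALL_PAL_RW1(swpipl, unsigned long, unsigned long)` generates roughly:

	static inline unsigned long swpipl(unsigned long arg0)
	{
		register unsigned long __r16 __asm__("$16") = arg0;
		register unsigned long __r0 __asm__("$0");
		__asm__ __volatile__(
			"call_pal %2 # swpipl"
			: "=r"(__r16), "=r"(__r0)
			: "i"(PAL_swpipl), "0"(__r16)
			: "$1", "$22", "$23", "$24", "$25");
		return __r0;
	}

One behavioral difference worth noting: the old macros listed a "memory" clobber and the generated inlines do not, so the compiler may now keep memory values cached across a PAL call.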
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index b447a402f..d3e9fc744 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -50,7 +50,7 @@
 #endif
 
 #define spin_lock_init(lock)	do { } while(0)
-#define spin_lock(lock)		do { } while(0)
+#define spin_lock(lock)		(void)(lock) /* Not "unused variable". */
 #define spin_trylock(lock)	(1)
 #define spin_unlock_wait(lock)	do { } while(0)
 #define spin_unlock(lock)	do { } while(0)
@@ -109,9 +109,9 @@ typedef struct {
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 #endif
 
-#define read_lock(lock)		do { } while(0)
+#define read_lock(lock)		(void)(lock) /* Not "unused variable". */
 #define read_unlock(lock)	do { } while(0)
-#define write_lock(lock)	do { } while(0)
+#define write_lock(lock)	(void)(lock) /* Not "unused variable". */
 #define write_unlock(lock)	do { } while(0)
 
 #else	/* __SMP__ */
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 6a7b067b2..63811c615 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -1,4 +1,4 @@
-/* $Id: spinlock.h,v 1.4 1998/08/25 16:45:46 tsbogend Exp $
+/* $Id: spinlock.h,v 1.5 1999/06/17 13:30:39 ralf Exp $
  */
 #ifndef __ASM_MIPS_SPINLOCK_H
 #define __ASM_MIPS_SPINLOCK_H
@@ -46,10 +46,10 @@
 #endif
 
 #define spin_lock_init(lock)	do { } while(0)
-#define spin_lock(lock)		do { } while(0)
+#define spin_lock(lock)		(void)(lock) /* Not "unused variable". */
 #define spin_trylock(lock)	(1)
 #define spin_unlock_wait(lock)	do { } while(0)
-#define spin_unlock(lock)	do { } while(0)
+#define spin_unlock(lock)	(void)(lock) /* Not "unused variable". */
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -64,9 +64,9 @@ typedef struct {
 } rwlock_t;
 #define RW_LOCK_UNLOCKED (rwlock_t) { }
 
-#define read_lock(lock)		do { } while(0)
+#define read_lock(lock)		(void)(lock) /* Not "unused variable". */
 #define read_unlock(lock)	do { } while(0)
-#define write_lock(lock)	do { } while(0)
+#define write_lock(lock)	(void)(lock) /* Not "unused variable". */
 #define write_unlock(lock)	do { } while(0)
 
 #else
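Note: on uniprocessor builds the lock operations compile away, but `do { } while(0)` never *uses* its argument, so `spin_lock(&foo->lock)` can draw an "unused variable" warning for the lock object. Casting the expression to void evaluates it (a simple lvalue generates no code) while marking it deliberately unused. A sketch of the effect:

	/* UP stub: expands to nothing at run time, silences the warning. */
	#define spin_lock(lock)  (void)(lock)

	void f(void)
	{
		spinlock_t mylock;	/* referenced via the macro, no warning */
		spin_lock(&mylock);
	}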
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 792df9495..8afa183f9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -738,7 +738,6 @@ extern int fs_may_mount(kdev_t);
 
 extern struct file *inuse_filps;
 
-extern void set_writetime(struct buffer_head *, int);
 extern int try_to_free_buffers(struct page *);
 extern void refile_buffer(struct buffer_head * buf);
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c5a8af7c7..3b7272caa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -76,6 +76,7 @@ extern void lock_page(struct page *page);
 
 extern void __add_page_to_hash_queue(struct page * page, struct page **p);
 
+extern void add_to_page_cache(struct page * page, struct inode * inode, unsigned long offset);
 extern int add_to_page_cache_unique(struct page * page, struct inode * inode, unsigned long offset, struct page **hash);
 
 static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c06ddba63..e95a3881a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -52,7 +52,6 @@ struct swap_info_struct {
 	kdev_t swap_device;
 	struct dentry * swap_file;
 	unsigned short * swap_map;
-	unsigned char * swap_lockmap;
 	unsigned int lowest_bit;
 	unsigned int highest_bit;
 	unsigned int cluster_next;
@@ -85,7 +84,7 @@ extern void swap_setup (void);
 extern int try_to_free_pages(unsigned int gfp_mask);
 
 /* linux/mm/page_io.c */
-extern void rw_swap_page(int, unsigned long, char *, int);
+extern void rw_swap_page(int, struct page *, int);
 extern void rw_swap_page_nocache(int, unsigned long, char *);
 extern void rw_swap_page_nolock(int, unsigned long, char *, int);
 extern void swap_after_unlock_page (unsigned long entry);
@@ -97,7 +96,7 @@ extern void swap_in(struct task_struct *, struct vm_area_struct *,
 
 /* linux/mm/swap_state.c */
 extern void show_swap_cache_info(void);
-extern int add_to_swap_cache(struct page *, unsigned long);
+extern void add_to_swap_cache(struct page *, unsigned long);
 extern int swap_duplicate(unsigned long);
 extern int swap_check_entry(unsigned long);
 struct page * lookup_swap_cache(unsigned long);
@@ -146,13 +145,6 @@ extern unsigned long swap_cache_find_total;
 extern unsigned long swap_cache_find_success;
 #endif
 
-extern inline unsigned long in_swap_cache(struct page *page)
-{
-	if (PageSwapCache(page))
-		return page->offset;
-	return 0;
-}
-
 /*
  * Work out if there are any other processes sharing this page, ignoring
  * any page reference coming from the swap cache, or from outstanding