diff options
author | Ralf Baechle <ralf@linux-mips.org> | 2000-02-23 00:40:54 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-02-23 00:40:54 +0000 |
commit | 529c593ece216e4aaffd36bd940cb94f1fa63129 (patch) | |
tree | 78f1c0b805f5656aa7b0417a043c5346f700a2cf /include/asm-i386 | |
parent | 0bd079751d25808d1972baee5c4eaa1db2227257 (diff) |
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'include/asm-i386')
-rw-r--r-- | include/asm-i386/hardirq.h | 2 | ||||
-rw-r--r-- | include/asm-i386/io.h | 20 | ||||
-rw-r--r-- | include/asm-i386/mc146818rtc.h | 6 | ||||
-rw-r--r-- | include/asm-i386/md.h | 13 | ||||
-rw-r--r-- | include/asm-i386/mmu_context.h | 27 | ||||
-rw-r--r-- | include/asm-i386/page.h | 2 | ||||
-rw-r--r-- | include/asm-i386/pgalloc.h | 35 | ||||
-rw-r--r-- | include/asm-i386/pgtable.h | 31 | ||||
-rw-r--r-- | include/asm-i386/softirq.h | 126 | ||||
-rw-r--r-- | include/asm-i386/spinlock.h | 1 |
10 files changed, 74 insertions(+), 189 deletions(-)
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h index fc8ff9016..610db5633 100644 --- a/include/asm-i386/hardirq.h +++ b/include/asm-i386/hardirq.h @@ -12,6 +12,8 @@ extern unsigned int local_irq_count[NR_CPUS]; #define in_interrupt() ({ int __cpu = smp_processor_id(); \ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); }) +#define in_irq() (local_irq_count[smp_processor_id()] != 0) + #ifndef __SMP__ #define hardirq_trylock(cpu) (local_irq_count[cpu] == 0) diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h index 7a4e9facc..4ec380c2d 100644 --- a/include/asm-i386/io.h +++ b/include/asm-i386/io.h @@ -195,15 +195,15 @@ extern void iounmap(void *addr); */ #define __ISA_IO_base ((char *)(PAGE_OFFSET)) -#define isa_readb(a) readb(__ISA_IO_base + (a)) -#define isa_readw(a) readw(__ISA_IO_base + (a)) -#define isa_readl(a) readl(__ISA_IO_base + (a)) -#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) -#define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) -#define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) -#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) -#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) -#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) +#define isa_readb(a) readb(__ISA_IO_base + (unsigned long)(a)) +#define isa_readw(a) readw(__ISA_IO_base + (unsigned long)(a)) +#define isa_readl(a) readl(__ISA_IO_base + (unsigned long)(a)) +#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (unsigned long)(a)) +#define isa_writew(w,a) writew(w,__ISA_IO_base + (unsigned long)(a)) +#define isa_writel(l,a) writel(l,__ISA_IO_base + (unsigned long)(a)) +#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (unsigned long)(a),(b),(c)) +#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (unsigned long)(b),(c)) +#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (unsigned long)(a),(b),(c)) /* @@ -211,7 +211,7 @@ extern void iounmap(void *addr); */ 
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d)) -#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d)) +#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (unsigned long)(b)),(c),(d)) static inline int check_signature(unsigned long io_addr, const unsigned char *signature, int length) diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h index 31eddb2da..07f4149a4 100644 --- a/include/asm-i386/mc146818rtc.h +++ b/include/asm-i386/mc146818rtc.h @@ -1,8 +1,8 @@ /* * Machine dependent access functions for RTC registers. */ -#ifndef __ASM_I386_MC146818RTC_H -#define __ASM_I386_MC146818RTC_H +#ifndef _I386_MC146818RTC_H +#define _I386_MC146818RTC_H #include <asm/io.h> @@ -24,4 +24,4 @@ outb_p((addr),RTC_PORT(0)); \ outb_p((val),RTC_PORT(1)); \ }) -#endif /* __ASM_I386_MC146818RTC_H */ +#endif /* _I386_MC146818RTC_H */ diff --git a/include/asm-i386/md.h b/include/asm-i386/md.h deleted file mode 100644 index 0a2c5dd01..000000000 --- a/include/asm-i386/md.h +++ /dev/null @@ -1,13 +0,0 @@ -/* $Id: md.h,v 1.1 1997/12/15 15:11:57 jj Exp $ - * md.h: High speed xor_block operation for RAID4/5 - * - */ - -#ifndef __ASM_MD_H -#define __ASM_MD_H - -/* #define HAVE_ARCH_XORBLOCK */ - -#define MD_XORBLOCK_ALIGNMENT sizeof(long) - -#endif /* __ASM_MD_H */ diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h index fb3af63ae..1d9248632 100644 --- a/include/asm-i386/mmu_context.h +++ b/include/asm-i386/mmu_context.h @@ -1,6 +1,7 @@ #ifndef __I386_MMU_CONTEXT_H #define __I386_MMU_CONTEXT_H +#include <linux/config.h> #include <asm/desc.h> #include <asm/atomic.h> #include <asm/pgalloc.h> @@ -12,30 +13,46 @@ #define init_new_context(tsk,mm) do { } while (0) #ifdef __SMP__ -extern unsigned int cpu_tlbbad[NR_CPUS]; + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) +{ + 
if(cpu_tlbstate[cpu].state == TLBSTATE_OK) + cpu_tlbstate[cpu].state = TLBSTATE_LAZY; +} +#else +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) +{ +} #endif static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu) { + set_bit(cpu, &next->cpu_vm_mask); if (prev != next) { /* * Re-load LDT if necessary */ if (prev->segments != next->segments) load_LDT(next); - +#ifdef CONFIG_SMP + cpu_tlbstate[cpu].state = TLBSTATE_OK; + cpu_tlbstate[cpu].active_mm = next; +#endif /* Re-load page tables */ asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd))); clear_bit(cpu, &prev->cpu_vm_mask); } #ifdef __SMP__ else { - if(cpu_tlbbad[cpu]) + int old_state = cpu_tlbstate[cpu].state; + cpu_tlbstate[cpu].state = TLBSTATE_OK; + if(cpu_tlbstate[cpu].active_mm != next) + BUG(); + if(old_state == TLBSTATE_OLD) local_flush_tlb(); } - cpu_tlbbad[cpu] = 0; + #endif - set_bit(cpu, &next->cpu_vm_mask); } #define activate_mm(prev, next) \ diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index d6f199989..67855d163 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h @@ -37,10 +37,12 @@ typedef struct { unsigned long long pte; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pgd; } pgd_t; +#define PTE_MASK (~(unsigned long long) (PAGE_SIZE-1)) #else typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pgd; } pgd_t; +#define PTE_MASK PAGE_MASK #endif typedef struct { unsigned long pgprot; } pgprot_t; diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h index fd01afad1..5cb20763d 100644 --- a/include/asm-i386/pgalloc.h +++ b/include/asm-i386/pgalloc.h @@ -187,6 +187,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry) * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one 
page * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. @@ -220,11 +221,6 @@ static inline void flush_tlb_range(struct mm_struct *mm, #else -/* - * We aren't very clever about this yet - SMP could certainly - * avoid some global flushes.. - */ - #include <asm/smp.h> #define local_flush_tlb() \ @@ -242,23 +238,24 @@ static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, u flush_tlb_mm(mm); } -extern volatile unsigned long smp_invalidate_needed; -extern unsigned int cpu_tlbbad[NR_CPUS]; +#define TLBSTATE_OK 1 +#define TLBSTATE_LAZY 2 +#define TLBSTATE_OLD 3 -static inline void do_flush_tlb_local(void) +struct tlb_state { - unsigned long cpu = smp_processor_id(); - struct mm_struct *mm = current->mm; - - clear_bit(cpu, &smp_invalidate_needed); - if (mm) { - set_bit(cpu, &mm->cpu_vm_mask); - local_flush_tlb(); - } else { - cpu_tlbbad[cpu] = 1; - } -} + struct mm_struct *active_mm; + int state; +}; +extern struct tlb_state cpu_tlbstate[NR_CPUS]; + #endif +extern inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + /* i386 does not keep any page table caches in TLB */ +} + #endif /* _I386_PGALLOC_H */ diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index e4f8afcf3..ef491587f 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -29,12 +29,13 @@ extern pgd_t swapper_pg_dir[1024]; #define __flush_tlb() \ do { \ - __asm__ __volatile__ \ - ("movl %0, %%cr3;" \ - : \ - : "r" __pa(current->active_mm->pgd) \ - : "memory" \ - ); \ + unsigned int tmpreg; \ + \ + __asm__ __volatile__( \ + "movl %%cr3, %0; # flush TLB \n" \ + "movl %0, %%cr3; \n" \ + : "=r" (tmpreg) \ + :: "memory"); \ } while (0) /* @@ -43,14 +44,16 @@ extern pgd_t swapper_pg_dir[1024]; */ 
#define __flush_tlb_global() \ do { \ + unsigned int tmpreg; \ + \ __asm__ __volatile__( \ - "movl %0, %%cr4; # turn off PGE \n" \ - "mov %2, %%cr3; # flush TLB \n" \ - "mov %1, %%cr4; # turn PGE back on \n" \ - : \ - : "r" (mmu_cr4_features), \ - "r" (mmu_cr4_features & ~X86_CR4_PGE), \ - "r" (__pa(current->active_mm->pgd)) \ + "movl %1, %%cr4; # turn off PGE \n" \ + "movl %%cr3, %0; # flush TLB \n" \ + "movl %0, %%cr3; \n" \ + "movl %2, %%cr4; # turn PGE back on \n" \ + : "=r" (tmpreg) \ + : "r" (mmu_cr4_features & ~X86_CR4_PGE), \ + "r" (mmu_cr4_features) \ : "memory"); \ } while (0) @@ -151,7 +154,7 @@ extern unsigned long empty_zero_page[1024]; #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h index 6eb68524a..9964ba5bc 100644 --- a/include/asm-i386/softirq.h +++ b/include/asm-i386/softirq.h @@ -9,133 +9,9 @@ extern unsigned int local_bh_count[NR_CPUS]; #define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0) #define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0) -#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 
0 : (local_bh_count[(cpu)] = 1)) -#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0) - #define local_bh_disable() cpu_bh_disable(smp_processor_id()) #define local_bh_enable() cpu_bh_enable(smp_processor_id()) -#define get_active_bhs() (bh_mask & bh_active) -#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active) - -extern spinlock_t i386_bh_lock; - -#ifdef __SMP__ - -/* - * The locking mechanism for base handlers, to prevent re-entrancy, - * is entirely private to an implementation, it should not be - * referenced at all outside of this file. - */ -extern atomic_t global_bh_lock; -extern atomic_t global_bh_count; - -extern void synchronize_bh(void); - -static inline void start_bh_atomic(void) -{ - atomic_inc(&global_bh_lock); - synchronize_bh(); -} - -static inline void end_bh_atomic(void) -{ - atomic_dec(&global_bh_lock); -} - -/* These are for the IRQs testing the lock */ -static inline int softirq_trylock(int cpu) -{ - if (cpu_bh_trylock(cpu)) { - if (!test_and_set_bit(0,&global_bh_count)) { - if (atomic_read(&global_bh_lock) == 0) - return 1; - clear_bit(0,&global_bh_count); - } - cpu_bh_endlock(cpu); - } - return 0; -} - -static inline void softirq_endlock(int cpu) -{ - cpu_bh_enable(cpu); - clear_bit(0,&global_bh_count); -} - -#else - -extern inline void start_bh_atomic(void) -{ - local_bh_disable(); - barrier(); -} - -extern inline void end_bh_atomic(void) -{ - barrier(); - local_bh_enable(); -} - -/* These are for the irq's testing the lock */ -#define softirq_trylock(cpu) (cpu_bh_trylock(cpu)) -#define softirq_endlock(cpu) (cpu_bh_endlock(cpu)) -#define synchronize_bh() barrier() - -#endif /* SMP */ - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - unsigned long flags; - - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - - spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&i386_bh_lock, flags); -} - -extern inline void remove_bh(int nr) -{ - unsigned long flags; - - 
spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask &= ~(1 << nr); - spin_unlock_irqrestore(&i386_bh_lock, flags); - - synchronize_bh(); - bh_base[nr] = NULL; -} - -extern inline void mark_bh(int nr) -{ - set_bit(nr, &bh_active); -} - -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - unsigned long flags; - - spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask &= ~(1 << nr); - atomic_inc(&bh_mask_count[nr]); - spin_unlock_irqrestore(&i386_bh_lock, flags); - synchronize_bh(); -} - -extern inline void enable_bh(int nr) -{ - unsigned long flags; - - spin_lock_irqsave(&i386_bh_lock, flags); - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&i386_bh_lock, flags); -} +#define in_softirq() (local_bh_count[smp_processor_id()] != 0) #endif /* __ASM_SOFTIRQ_H */ diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 44a2e59e5..4ce58066c 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -44,6 +44,7 @@ typedef struct { */ #define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock) +#define spin_is_locked(x) ((x)->lock != 0) #define spin_lock_string \ "\n1:\t" \ |