Diffstat (limited to 'include/asm-alpha/pgtable.h')
-rw-r--r-- | include/asm-alpha/pgtable.h | 375
1 file changed, 28 insertions, 347 deletions
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index cc59d36df..a627d50ba 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -9,165 +9,11 @@
  * in <asm/page.h> (currently 8192).
  */
 #include <linux/config.h>
-#include <linux/spinlock.h>	/* For the task lock */
-#include <asm/system.h>
+#include <asm/page.h>
 #include <asm/processor.h>	/* For TASK_SIZE */
-#include <asm/mmu_context.h>
 #include <asm/machvec.h>
-
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_range(mm, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
-#define flush_page_to_ram(page)		do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-
-/*
- * Use a few helper functions to hide the ugly broken ASN
- * numbers on early Alphas (ev4 and ev45)
- */
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __MMU_EXTERN_INLINE
-#endif
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
-	tbiap();
-}
-
-__EXTERN_INLINE void
-ev4_flush_tlb_other(struct mm_struct *mm)
-{
-}
-
-extern void ev5_flush_tlb_current(struct mm_struct *mm);
-
-__EXTERN_INLINE void
-ev5_flush_tlb_other(struct mm_struct *mm)
-{
-	mm->context = 0;
-}
-
-/*
- * Flush just one page in the current TLB set.
- * We need to be very careful about the icache here, there
- * is no way to invalidate a specific icache page..
- */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
-			   struct vm_area_struct *vma,
-			   unsigned long addr)
-{
-	tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
-}
-
-__EXTERN_INLINE void
-ev5_flush_tlb_current_page(struct mm_struct * mm,
-			   struct vm_area_struct *vma,
-			   unsigned long addr)
-{
-	if (vma->vm_flags & VM_EXEC)
-		ev5_flush_tlb_current(mm);
-	else
-		tbi(2, addr);
-}
-
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
-# define flush_tlb_other		alpha_mv.mv_flush_tlb_other
-# define flush_tlb_current_page	alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-#  define flush_tlb_current		ev4_flush_tlb_current
-#  define flush_tlb_other		ev4_flush_tlb_other
-#  define flush_tlb_current_page	ev4_flush_tlb_current_page
-# else
-#  define flush_tlb_current		ev5_flush_tlb_current
-#  define flush_tlb_other		ev5_flush_tlb_other
-#  define flush_tlb_current_page	ev5_flush_tlb_current_page
-# endif
-#endif
-
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
-
-/*
- * Flush current user mapping.
- */
-static inline void flush_tlb(void)
-{
-	flush_tlb_current(current->mm);
-}
-
-#ifndef __SMP__
-/*
- * Flush everything (kernel mapping may also have
- * changed due to vmalloc/vfree)
- */
-static inline void flush_tlb_all(void)
-{
-	tbia();
-}
-
-/*
- * Flush a specified user mapping
- */
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm != current->mm)
-		flush_tlb_other(mm);
-	else
-		flush_tlb_current(mm);
-}
-
-/*
- * Page-granular tlb flush.
- *
- * do a tbisd (type = 2) normally, and a tbis (type = 3)
- * if it is an executable mapping.  We want to avoid the
- * itlb flush, because that potentially also does a
- * icache flush.
- */
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
-{
-	struct mm_struct * mm = vma->vm_mm;
-
-	if (mm != current->mm)
-		flush_tlb_other(mm);
-	else
-		flush_tlb_current_page(mm, vma, addr);
-}
-
-/*
- * Flush a specified range of user mapping: on the
- * Alpha we flush the whole user tlb.
- */
-static inline void flush_tlb_range(struct mm_struct *mm,
-	unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(mm);
-}
-
-#else /* __SMP__ */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
-
-#endif /* __SMP__ */
-
 /* Certain architectures need to do special things when PTEs
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
@@ -293,7 +139,7 @@ extern unsigned long __zero_page(void);
 #define BAD_PAGETABLE	__bad_pagetable()
 #define BAD_PAGE	__bad_page()
-#define ZERO_PAGE(vaddr)	(PAGE_OFFSET+0x30A000)
+#define ZERO_PAGE(vaddr)	(mem_map + MAP_NR(ZERO_PGE))
 
 /* number of bits that fit into a memory pointer */
 #define BITS_PER_PTR	(8*sizeof(unsigned long))
@@ -330,8 +176,14 @@ extern unsigned long __zero_page(void);
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
+#define mk_pte(page, pgprot)						\
+({									\
+	pte_t pte;							\
+									\
+	pte_val(pte) = ((unsigned long)(page - mem_map) << 32) |	\
+		       pgprot_val(pgprot);				\
+	pte;								\
+})
 
 extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 { pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
@@ -345,8 +197,8 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
 
-extern inline unsigned long pte_page(pte_t pte)
-{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+#define pte_pagenr(x)	((unsigned long)((pte_val(x) >> 32)))
+#define pte_page(x)	(mem_map+pte_pagenr(x))
 
 extern inline unsigned long pmd_page(pmd_t pmd)
 { return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
@@ -368,6 +220,8 @@ extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _P
 extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
 extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
 
+#define page_address(page)	((page)->virtual)
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -395,10 +249,8 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /* to find an entry in a page-table-directory. */
-extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
-{
-	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
-}
+#define __pgd_offset(address)	((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, address)	((mm)->pgd+__pgd_offset(address))
 
 /* Find an entry in the second-level page table.. */
 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
@@ -412,186 +264,6 @@ extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
 	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
 }
 
-/*
- * Allocate and free page tables.  The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-#ifndef __SMP__
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned long pgtable_cache_sz;
-} quicklists;
-#else
-#include <asm/smp.h>
-#define quicklists cpu_data[smp_processor_id()]
-#endif
-#define pgd_quicklist (quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (quicklists.pte_cache)
-#define pgtable_cache_size (quicklists.pgtable_cache_sz)
-
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
-	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
-
-	if (ret) {
-		init = pgd_offset(&init_mm, 0);
-		memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
-		pgd_val(ret[PTRS_PER_PGD])
-			= pte_val(mk_pte((unsigned long)ret, PAGE_KERNEL));
-	}
-	return ret;
-}
-
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-	return (pgd_t *)ret;
-}
-
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
-}
-
-extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
-	unsigned long *ret;
-
-	if((ret = (unsigned long *)pte_quicklist) != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	}
-	return (pmd_t *)ret;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
-{
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pmd;
-	pgtable_cache_size++;
-}
-
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
-{
-	free_page((unsigned long)pmd);
-}
-
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-extern __inline__ pte_t *get_pte_fast(void)
-{
-	unsigned long *ret;
-
-	if((ret = (unsigned long *)pte_quicklist) != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	}
-	return (pte_t *)ret;
-}
-
-extern __inline__ void free_pte_fast(pte_t *pte)
-{
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-}
-
-extern __inline__ void free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pmd(pgd_t *pgd);
-
-#define pte_free_kernel(pte)	free_pte_fast(pte)
-#define pte_free(pte)		free_pte_fast(pte)
-#define pmd_free_kernel(pmd)	free_pmd_fast(pmd)
-#define pmd_free(pmd)		free_pmd_fast(pmd)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc()		get_pgd_fast()
-
-extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
-{
-	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-	if (pmd_none(*pmd)) {
-		pte_t *page = get_pte_fast();
-
-		if (!page)
-			return get_pte_slow(pmd, address);
-		pmd_set(pmd, page);
-		return page + address;
-	}
-	if (pmd_bad(*pmd)) {
-		__bad_pte(pmd);
-		return NULL;
-	}
-	return (pte_t *) pmd_page(*pmd) + address;
-}
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
-	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-	if (pgd_none(*pgd)) {
-		pmd_t *page = get_pmd_fast();
-
-		if (!page)
-			return get_pmd_slow(pgd, address);
-		pgd_set(pgd, page);
-		return page + address;
-	}
-	if (pgd_bad(*pgd)) {
-		__bad_pmd(pgd);
-		return NULL;
-	}
-	return (pmd_t *) pgd_page(*pgd) + address;
-}
-
-#define pte_alloc_kernel	pte_alloc
-#define pmd_alloc_kernel	pmd_alloc
-
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
-	struct task_struct * p;
-	pgd_t *pgd;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if (!p->mm)
-			continue;
-		*pgd_offset(p->mm,address) = entry;
-	}
-	read_unlock(&tasklist_lock);
-	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
-		pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
-}
-
 extern pgd_t swapper_pg_dir[1024];
 
 /*
@@ -610,9 +282,11 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
 extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
 
-#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
-#define SWP_OFFSET(entry) ((entry) >> 40)
-#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
+#define SWP_TYPE(x)		(((x).val >> 32) & 0xff)
+#define SWP_OFFSET(x)		((x).val >> 40)
+#define SWP_ENTRY(type, offset)	((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
+#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define swp_entry_to_pte(x)	((pte_t) { (x).val })
 
 #define module_map	vmalloc
 #define module_unmap	vfree
@@ -624,4 +298,11 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define io_remap_page_range(start, busaddr, size, prot) \
 		remap_page_range(start, virt_to_phys(__ioremap(busaddr)), size, prot)
 
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
 #endif /* _ALPHA_PGTABLE_H */
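The core of this change is visible in the mk_pte and pte_page hunks: an Alpha PTE keeps the page frame number in its upper 32 bits, and the header now converts between PTEs and struct page pointers by indexing mem_map, rather than between PTEs and kernel virtual addresses via PAGE_OFFSET. A minimal user-space sketch of that round trip follows; pte_t, struct page, mem_map, and the protection constant are stand-ins rather than the kernel's definitions, and a 64-bit unsigned long is assumed, as on Alpha:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;	/* stand-in for the kernel pte_t */
struct page { void *virtual; };			/* stand-in: only the field used here */

#define pte_val(x)	((x).pte)
#define PAGE_KERNEL	0x1111UL		/* stand-in protection bits */

static struct page mem_map[32];			/* toy mem_map: one entry per page frame */

/* Mirrors the new mk_pte: frame number in bits 63..32, prot bits below. */
static pte_t mk_pte(struct page *page, unsigned long pgprot)
{
	pte_t pte;
	pte_val(pte) = ((unsigned long)(page - mem_map) << 32) | pgprot;
	return pte;
}

/* Mirrors the new pte_pagenr/pte_page: shift the frame number back out. */
#define pte_pagenr(x)	((unsigned long)(pte_val(x) >> 32))
#define pte_page(x)	(mem_map + pte_pagenr(x))

int main(void)
{
	pte_t pte = mk_pte(&mem_map[7], PAGE_KERNEL);

	assert(pte_page(pte) == &mem_map[7]);	/* the round trip recovers the page */
	printf("frame %lu, prot %#lx\n",
	       pte_pagenr(pte), pte_val(pte) & 0xffffffffUL);
	return 0;
}

Since pte_page now yields a struct page * instead of an address, the diff also adds page_address() to recover the mapped virtual address from page->virtual.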
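The swap macros get the matching treatment: the bit layout produced by mk_swap_pte is unchanged (swap type in bits 39..32, offset from bit 40 up), but entries now travel in a distinct swp_entry_t wrapper instead of a bare pte_val, with pte_to_swp_entry and swp_entry_to_pte as the only bridges between the two representations. A sketch of the encoding under the same stand-in types:

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;	/* stand-in wrapper type */

/* Same layout mk_swap_pte produces: type in bits 39..32, offset above. */
#define SWP_ENTRY(type, offset)	((swp_entry_t) { ((type) << 32) | ((offset) << 40) })
#define SWP_TYPE(x)		(((x).val >> 32) & 0xff)
#define SWP_OFFSET(x)		((x).val >> 40)

int main(void)
{
	swp_entry_t e = SWP_ENTRY(3UL, 42UL);	/* UL so the shifts are 64-bit */

	assert(SWP_TYPE(e) == 3);	/* 8-bit type field */
	assert(SWP_OFFSET(e) == 42);	/* offset sits above it */
	return 0;
}

Wrapping the value in a one-member struct means the compiler rejects accidental mixing of swap entries and PTEs, which the previous bare unsigned long encoding would have allowed.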