path: root/include/asm-alpha/pgalloc.h
author    Ralf Baechle <ralf@linux-mips.org>  2000-02-04 07:40:19 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-02-04 07:40:19 +0000
commit    33263fc5f9ac8e8cb2b22d06af3ce5ac1dd815e4 (patch)
tree      2d1b86a40bef0958a68cf1a2eafbeb0667a70543 /include/asm-alpha/pgalloc.h
parent    216f5f51aa02f8b113aa620ebc14a9631a217a00 (diff)
Merge with Linux 2.3.32.
Diffstat (limited to 'include/asm-alpha/pgalloc.h')
-rw-r--r--  include/asm-alpha/pgalloc.h | 337
1 file changed, 337 insertions(+), 0 deletions(-)
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
new file mode 100644
index 000000000..5ea0193be
--- /dev/null
+++ b/include/asm-alpha/pgalloc.h
@@ -0,0 +1,337 @@
+#ifndef _ALPHA_PGALLOC_H
+#define _ALPHA_PGALLOC_H
+
+#include <linux/config.h>
+
+/* Caches aren't brain-dead on the Alpha. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
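+
+/*
+ * (The Alpha's caches are physically addressed, so there is no
+ * virtual-alias flushing to do here; keeping the icache coherent
+ * with newly written instructions is handled separately, by "imb".)
+ */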
+
+/*
+ * Use a few helper functions to hide the ugly broken ASN
+ * numbers on early Alphas (ev4 and ev45)
+ */
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
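+/*
+ * (The usual Alpha header idiom: exactly one translation unit in the
+ * arch code defines __EXTERN_INLINE to an empty expansion before
+ * including this file, forcing real out-of-line copies of these
+ * functions to be emitted for the machine vectors to point at.)
+ */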
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current(struct mm_struct *mm)
+{
+ tbiap();
+}
+
+__EXTERN_INLINE void
+ev4_flush_tlb_other(struct mm_struct *mm)
+{
+}
+
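+/*
+ * With the broken ASN support on EV4-class CPUs, flushing the
+ * current mm (above) just zaps all per-process TLB entries with
+ * tbiap().  EV5 and later track address-space numbers instead:
+ * ev5_flush_tlb_current (defined out of line in the arch code)
+ * allocates a fresh ASN, and flushing another mm merely zeroes its
+ * context so that a new ASN is taken when it next runs.
+ */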
+extern void ev5_flush_tlb_current(struct mm_struct *mm);
+
+__EXTERN_INLINE void
+ev5_flush_tlb_other(struct mm_struct *mm)
+{
+ mm->context = 0;
+}
+
+/*
+ * Flush just one page in the current TLB set.
+ * We need to be very careful about the icache here; there
+ * is no way to invalidate a specific icache page.
+ */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
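+ /* tbi type 2 is "tbisd" (dstream only), type 3 is "tbis" (both
+    streams); executable pages need their itb entry zapped too. */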
+ tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_flags & VM_EXEC)
+ ev5_flush_tlb_current(mm);
+ else
+ tbi(2, addr);
+}
+
+
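+/*
+ * Generic kernels dispatch through the alpha_mv machine vector at
+ * run time; kernels built for a single CPU family bind the helpers
+ * above directly at compile time.
+ */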
+#ifdef CONFIG_ALPHA_GENERIC
+# define flush_tlb_current alpha_mv.mv_flush_tlb_current
+# define flush_tlb_other alpha_mv.mv_flush_tlb_other
+# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
+#else
+# ifdef CONFIG_ALPHA_EV4
+# define flush_tlb_current ev4_flush_tlb_current
+# define flush_tlb_other ev4_flush_tlb_other
+# define flush_tlb_current_page ev4_flush_tlb_current_page
+# else
+# define flush_tlb_current ev5_flush_tlb_current
+# define flush_tlb_other ev5_flush_tlb_other
+# define flush_tlb_current_page ev5_flush_tlb_current_page
+# endif
+#endif
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
+/*
+ * Flush current user mapping.
+ */
+static inline void flush_tlb(void)
+{
+ flush_tlb_current(current->mm);
+}
+
+#ifndef __SMP__
+/*
+ * Flush everything (kernel mapping may also have
+ * changed due to vmalloc/vfree)
+ */
+static inline void flush_tlb_all(void)
+{
+ tbia();
+}
+
+/*
+ * Flush a specified user mapping
+ */
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm != current->mm)
+ flush_tlb_other(mm);
+ else
+ flush_tlb_current(mm);
+}
+
+/*
+ * Page-granular tlb flush.
+ *
+ * Do a tbisd (type = 2) normally, and a tbis (type = 3)
+ * if it is an executable mapping.  We want to avoid the
+ * itlb flush, because that potentially also does an
+ * icache flush.
+ */
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct mm_struct * mm = vma->vm_mm;
+
+ if (mm != current->mm)
+ flush_tlb_other(mm);
+ else
+ flush_tlb_current_page(mm, vma, addr);
+}
+
+/*
+ * Flush a specified range of a user mapping: on the
+ * Alpha we simply flush the whole user TLB.
+ */
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(mm);
+}
+
+#else /* __SMP__ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
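+
+/*
+ * (On SMP the flushes must reach the other CPUs as well, so the real
+ * implementations live out of line in the arch code.)
+ */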
+
+#endif /* __SMP__ */
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+#ifndef __SMP__
+extern struct pgtable_cache_struct {
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
+} quicklists;
+#else
+#include <asm/smp.h>
+#define quicklists cpu_data[smp_processor_id()]
+#endif
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
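+
+/*
+ * Each quicklist is a singly linked list of free page-table pages,
+ * threaded through word 0 of each free page; pgtable_cache_sz counts
+ * the pages cached on all the lists.  pmd and pte pages share the
+ * pte quicklist, while pgds keep a list of their own so that a
+ * recycled pgd still holds its kernel entries and VPTB self-map.
+ */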
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
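+ /* Install the VPTB self-map: the pgd's last slot maps the pgd
+    page itself, giving a virtual view of the page tables. */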
+ pgd_val(ret[PTRS_PER_PGD])
+ = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ }
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
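+ /* Word 0 held the freelist link; make the entry invalid again
+    by copying its (clear) neighbour over it. */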
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pmd_t *)ret;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+extern void __bad_pte(pmd_t *pmd);
+extern void __bad_pmd(pgd_t *pgd);
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
+#define pmd_free(pmd) free_pmd_fast(pmd)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
+{
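+ /* Reduce the address to an index into the pte page. */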
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t *page = get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ pmd_set(pmd, page);
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __bad_pte(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+ if (pgd_none(*pgd)) {
+ pmd_t *page = get_pmd_fast();
+
+ if (!page)
+ return get_pmd_slow(pgd, address);
+ pgd_set(pgd, page);
+ return page + address;
+ }
+ if (pgd_bad(*pgd)) {
+ __bad_pmd(pgd);
+ return NULL;
+ }
+ return (pmd_t *) pgd_page(*pgd) + address;
+}
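+
+/*
+ * Illustrative sketch only (not part of this header): a caller of
+ * this era, such as the fault path in mm/memory.c, builds a mapping
+ * top-down roughly as follows:
+ *
+ *     pgd_t *pgd = pgd_offset(mm, address);
+ *     pmd_t *pmd = pmd_alloc(pgd, address);
+ *     pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
+ *     if (!pte)
+ *             ... fail with out of memory ...
+ */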
+
+#define pte_alloc_kernel pte_alloc
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
+
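+/*
+ * A kernel mapping changed (the kernel page tables grew, e.g. via
+ * vmalloc): write the new top-level entry into every task's page
+ * directory, and into the pgds still parked on the quicklist.
+ */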
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
+}
+
+#endif /* _ALPHA_PGALLOC_H */