Diffstat (limited to 'include/asm-ia64/pgalloc.h')
-rw-r--r--  include/asm-ia64/pgalloc.h  86
1 file changed, 67 insertions(+), 19 deletions(-)
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 5256a4fff..800807df6 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -15,6 +15,7 @@
 #include <linux/config.h>
+#include <linux/mm.h>
 #include <linux/threads.h>

 #include <asm/mmu_context.h>
@@ -175,11 +176,8 @@ pmd_alloc (pgd_t *pgd, unsigned long vmaddr)
 	if (!pmd_page)
 		pmd_page = get_pmd_slow();
 	if (pmd_page) {
-		if (pgd_none(*pgd)) {
-			pgd_set(pgd, pmd_page);
-			return pmd_page + offset;
-		} else
-			free_pmd_fast(pmd_page);
+		pgd_set(pgd, pmd_page);
+		return pmd_page + offset;
 	} else
 		return NULL;
 }
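
The removed branch handled a race in which another CPU could install a pmd between the allocation and pgd_set(), forcing a pgd_none() re-test and a free_pmd_fast() fallback for the loser. Dropping the re-check is only safe if callers now serialize page-table setup, presumably under the per-mm page_table_lock that kernels of this era hold around pmd_alloc(). A minimal user-space sketch of that assumption follows; all names are illustrative, not the kernel's:

/*
 * Minimal model of the locking assumption: once one lock covers the
 * whole check-allocate-install sequence, the removed "did someone
 * else beat us to it?" re-check can never fire.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;			/* stands in for a pgd entry */

static void *slot_alloc(void)
{
	void *ret;

	pthread_mutex_lock(&table_lock);
	if (!slot)			/* tested under the lock, so no */
		slot = malloc(64);	/* re-test before installing    */
	ret = slot;
	pthread_mutex_unlock(&table_lock);
	return ret;
}

static void *racer(void *arg)
{
	(void)arg;
	return slot_alloc();
}

int main(void)
{
	pthread_t a, b;
	void *ra, *rb;

	pthread_create(&a, NULL, racer, NULL);
	pthread_create(&b, NULL, racer, NULL);
	pthread_join(a, &ra);
	pthread_join(b, &rb);
	printf("threads saw %s pmd\n", ra == rb ? "the same" : "DIFFERENT");
	return 0;
}
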
@@ -196,13 +194,6 @@ pmd_alloc (pgd_t *pgd, unsigned long vmaddr)
 extern int do_check_pgt_cache (int, int);

 /*
- * This establishes kernel virtual mappings (e.g., as a result of a
- * vmalloc call).  Since ia-64 uses a separate kernel page table,
- * there is nothing to do here... :)
- */
-#define set_pgdir(vmaddr, entry)	do { } while(0)
-
-/*
  * Now for some TLB flushing routines.  This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
  */
@@ -249,7 +240,12 @@ extern void flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end)
 static __inline__ void
 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 {
-	flush_tlb_range(vma->vm_mm, addr, addr + PAGE_SIZE);
+#ifdef CONFIG_SMP
+	flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+	if (vma->vm_mm == current->active_mm)
+		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+#endif
 }
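
On the UP side the new code skips flush_tlb_range() and purges the translation directly with ptc.l ("purge local TLB"), and only when the mm is actually live on this CPU. The second ptc.l operand carries the purge size as log2(bytes) in bits 7:2 of the register, which is what PAGE_SHIFT << 2 produces. A stand-alone sketch of that encoding follows; the PAGE_SHIFT values are common ia64 configurations, not taken from this patch:

/*
 * Sketch of the ptc.l size operand: log2 of the purge size goes in
 * bits 7:2 of the second source register, i.e. PAGE_SHIFT << 2 for a
 * single page.  The page sizes below are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long shifts[] = { 12, 14, 16 };	/* 4KB, 16KB, 64KB pages */
	int i;

	for (i = 0; i < 3; i++)
		printf("PAGE_SHIFT=%2lu -> size operand 0x%02lx\n",
		       shifts[i], shifts[i] << 2);
	return 0;
}
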
 /*
@@ -259,14 +255,66 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 static inline void
 flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	/*
-	 * XXX fix mmap(), munmap() et al to guarantee that there are no mappings
-	 * across region boundaries. --davidm 00/02/23
-	 */
-	if (rgn_index(start) != rgn_index(end)) {
+	if (rgn_index(start) != rgn_index(end))
 		printk("flush_tlb_pgtables: can't flush across regions!!\n");
-	}
 	flush_tlb_range(mm, ia64_thash(start), ia64_thash(end));
 }
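
Note that flush_tlb_pgtables() does not flush the user pages themselves: ia64_thash() yields the virtual address at which the hardware walker finds the PTE mapping a given address, so flushing thash(start)..thash(end) evicts stale translations for the virtually-mapped page table. The region check stays because a single flush_tlb_range() cannot span ia64's address-space regions. A rough user-space model of what a linearly-mapped page table's thash computes, with invented constants and the region bits ignored:

/*
 * Rough model of ia64_thash() for a linearly-mapped page table: the
 * PTE for addr lives at base + vpn * sizeof(pte).  Base and page size
 * are made up; the real hardware thash also folds in region bits.
 */
#include <stdio.h>

#define MODEL_PAGE_SHIFT	14UL		/* 16KB pages, for example */
#define MODEL_PT_BASE		0x1f00000000UL	/* made-up base address */

static unsigned long thash_model(unsigned long addr)
{
	/* virtual page number times 8-byte PTE size, offset from base */
	return MODEL_PT_BASE + ((addr >> MODEL_PAGE_SHIFT) << 3);
}

int main(void)
{
	unsigned long start = 0x40000000UL, end = 0x40010000UL;

	printf("flush PTE mappings %#lx..%#lx\n",
	       thash_model(start), thash_model(end));
	return 0;
}
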
+/*
+ * Now for some cache flushing routines. This is the kind of stuff
+ * that can be very expensive, so try to avoid them whenever possible.
+ */
+
+/* Caches aren't brain-dead on the IA-64. */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(mm, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr)		do { } while (0)
+#define flush_page_to_ram(page)			do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+
+static inline void
+flush_dcache_page (struct page *page)
+{
+	clear_bit(PG_arch_1, &page->flags);
+}
+
+static inline void
+clear_user_page (void *addr, unsigned long vaddr, struct page *page)
+{
+	clear_page(addr);
+	flush_dcache_page(page);
+}
+
+static inline void
+copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
+{
+	copy_page(to, from);
+	flush_dcache_page(page);
+}
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the
+ * necessary information.  However, we use this function to take care of any
+ * (delayed) i-cache flushing that may be necessary.
+ */
+static inline void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	struct page *page;
+
+	if (!pte_exec(pte))
+		return;				/* not an executable page... */
+
+	page = pte_page(pte);
+	address &= PAGE_MASK;
+
+	if (test_bit(PG_arch_1, &page->flags))
+		return;				/* i-cache is already coherent with d-cache */
+
+	flush_icache_range(address, address + PAGE_SIZE);
+	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
+}
+
#endif /* _ASM_IA64_PGALLOC_H */
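
Taken together, the cache additions implement a lazy i-cache coherence protocol with PG_arch_1 as a per-page "i-cache is clean" bit: flush_dcache_page() (also reached via clear_user_page() and copy_user_page()) clears the bit whenever new data lands in a page, and update_mmu_cache() pays for a real flush_icache_range() only the first time a dirty page is mapped executable. A user-space model of the protocol; every name below is illustrative:

/*
 * Model of the lazy i-cache flush protocol above.  The icache_clean
 * field stands in for PG_arch_1; dirty_page() plays the role of
 * flush_dcache_page() and map_executable() that of update_mmu_cache().
 */
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	bool icache_clean;		/* stands in for PG_arch_1 */
};

static int flushes;			/* counts simulated i-cache flushes */

static void dirty_page(struct page_model *p)
{
	p->icache_clean = false;	/* ~ clear_bit(PG_arch_1, ...) */
}

static void map_executable(struct page_model *p)
{
	if (p->icache_clean)
		return;			/* already coherent: skip the flush */
	flushes++;			/* ~ flush_icache_range() */
	p->icache_clean = true;		/* ~ set_bit(PG_arch_1, ...) */
}

int main(void)
{
	struct page_model p = { false };

	map_executable(&p);	/* first executable mapping: flush */
	map_executable(&p);	/* still clean: no flush */
	dirty_page(&p);		/* new data written into the page */
	map_executable(&p);	/* dirty again: second flush */
	printf("i-cache flushes: %d (expect 2)\n", flushes);
	return 0;
}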