summaryrefslogtreecommitdiffstats
path: root/include/asm-ia64/pgtable.h
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-11-23 02:00:47 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-11-23 02:00:47 +0000
commit06615f62b17d7de6e12d2f5ec6b88cf30af08413 (patch)
tree8766f208847d4876a6db619aebbf54d53b76eb44 /include/asm-ia64/pgtable.h
parentfa9bdb574f4febb751848a685d9a9017e04e1d53 (diff)
Merge with Linux 2.4.0-test10.
Diffstat (limited to 'include/asm-ia64/pgtable.h')
-rw-r--r--include/asm-ia64/pgtable.h53
1 files changed, 34 insertions, 19 deletions
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index fcf340ee0..6fbed9cc1 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -3,9 +3,9 @@
/*
* This file contains the functions and defines necessary to modify and use
- * the ia-64 page table tree.
+ * the IA-64 page table tree.
*
- * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h> (currently 8192).
*
* Copyright (C) 1998-2000 Hewlett-Packard Co
@@ -19,12 +19,6 @@
#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
-/* Is ADDR a valid kernel address? */
-#define kern_addr_valid(addr) ((addr) >= TASK_SIZE)
-
-/* Is ADDR a valid physical address? */
-#define phys_addr_valid(addr) (((addr) & my_cpu_data.unimpl_pa_mask) == 0)
-
/*
* First, define the various bits in a PTE. Note that the PTE format
* matches the VHPT short format, the first doubleword of the VHPD long
@@ -166,7 +160,7 @@
* Given a pointer to an mem_map[] entry, return the kernel virtual
* address corresponding to that page.
*/
-#define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))
+#define page_address(page) ((page)->virtual)
/*
* Now for some cache flushing routines. This is the kind of stuff
@@ -190,6 +184,28 @@ do { \
ia64_flush_icache_page((unsigned long) page_address(pg)); \
} while (0)
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static __inline__ long
+ia64_phys_addr_valid (unsigned long addr)
+{
+ return (addr & (my_cpu_data.unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error. Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions.)
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr) (1)
+
/*
* Now come the defines and routines to manage and access the three-level
* page table.
@@ -248,14 +264,14 @@ extern pmd_t *ia64_bad_pagetable (void);
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!phys_addr_valid(pmd_val(pmd)))
+#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = __pa(pmdp))
#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (!phys_addr_valid(pgd_val(pgd)))
+#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
@@ -301,7 +317,7 @@ extern pmd_t *ia64_bad_pagetable (void);
/*
* Return the region index for virtual address ADDRESS.
*/
-extern __inline__ unsigned long
+static __inline__ unsigned long
rgn_index (unsigned long address)
{
ia64_va a;
@@ -313,7 +329,7 @@ rgn_index (unsigned long address)
/*
* Return the region offset for virtual address ADDRESS.
*/
-extern __inline__ unsigned long
+static __inline__ unsigned long
rgn_offset (unsigned long address)
{
ia64_va a;
@@ -325,7 +341,7 @@ rgn_offset (unsigned long address)
#define RGN_SIZE (1UL << 61)
#define RGN_KERNEL 7
-extern __inline__ unsigned long
+static __inline__ unsigned long
pgd_index (unsigned long address)
{
unsigned long region = address >> 61;
@@ -336,7 +352,7 @@ pgd_index (unsigned long address)
/* The offset in the 1-level directory is given by the 3 region bits
(61..63) and the seven level-1 bits (33-39). */
-extern __inline__ pgd_t*
+static __inline__ pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
@@ -409,9 +425,6 @@ do { \
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define module_map vmalloc
-#define module_unmap vfree
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
@@ -421,9 +434,11 @@ do { \
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#include <asm-generic/pgtable.h>
+
# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PGTABLE_H */