author    Kanoj Sarcar <kanoj@engr.sgi.com>  2000-06-30 00:48:29 +0000
committer Kanoj Sarcar <kanoj@engr.sgi.com>  2000-06-30 00:48:29 +0000
commit    72d92cedc30af23d855fec53b04d9266d88c6671 (patch)
tree      a3d02ad3d35bc5939c34c0527daba0793c799ab9 /include/asm-mips64
parent    706a11128cbe9c614436350e5ff807b6d6ac79ea (diff)
Implement a functional vmalloc(). The vmalloc range address translations
are stashed in an array of page tables, starting from kptbl[]. The fast
tlbmiss handler quickly checks whether the faulting address is in the
vmalloc range, and if so, uses the translations in kptbl[] to update the
TLBs. Still to do: TLB invalid faults in the vmalloc range need to be
handled properly.
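In outline, the refill-time decision the message describes looks like the
sketch below. This is a minimal C rendering only: the real fast handler is
hand-written assembler, the helper name is made up, and the index
derivation is simplified (the patch indexes kptbl by the raw virtual page
number, without the subtraction shown here).

#include <stddef.h>

typedef unsigned long pte_t;                 /* stand-in type */
#define PAGE_SHIFT    12                     /* assumed 4 KB pages */
#define VMALLOC_START 0xc000000000000000UL   /* XKSEG base; illustrative */
#define VMALLOC_END   (VMALLOC_START + (1UL << 34))   /* 16 GB window */

extern pte_t kptbl[];                        /* the patch's flat pte array */

/* Sketch of the vmalloc check described above; the real handler is
 * assembler and this function name is hypothetical. */
static pte_t *vmalloc_refill_lookup(unsigned long badvaddr)
{
        /* Not a vmalloc address: fall back to the normal per-process
         * page-table walk. */
        if (badvaddr < VMALLOC_START || badvaddr >= VMALLOC_END)
                return NULL;

        /* One pte per page in the window; the fast handler loads this
         * entry and writes it into the TLB. */
        return &kptbl[(badvaddr - VMALLOC_START) >> PAGE_SHIFT];
}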
Diffstat (limited to 'include/asm-mips64')
-rw-r--r--  include/asm-mips64/pgalloc.h  60
-rw-r--r--  include/asm-mips64/pgtable.h   4
2 files changed, 18 insertions, 46 deletions
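A quick sizing check against the two hunks below, under assumptions the
patch itself does not pin down: 4 KB pages and an 8-byte pte_t, hence
PTRS_PER_PTE = 512. With KPTBL_PAGE_ORDER = 1, the kptbl[] declaration in
pgalloc.h and the new VMALLOC_END in pgtable.h then agree on a 16 GB
vmalloc window:

/* Worked sizing example for the hunks below.  PAGE_SIZE = 4096 and
 * sizeof(pte_t) = 8 are assumptions; other configurations scale the
 * same way. */
#include <stdio.h>

int main(void)
{
        unsigned long long page_size = 4096;              /* assumed */
        unsigned long long kptbl_page_order = 1;          /* from the patch */
        unsigned long long ptrs_per_pte = page_size / 8;  /* 512 */

        /* VMALLOC_END - VMALLOC_START, per the pgtable.h hunk */
        unsigned long long window =
                (page_size << kptbl_page_order) * ptrs_per_pte * page_size;

        /* kptbl[] entry count, per the pgalloc.h hunk */
        unsigned long long entries =
                (page_size << kptbl_page_order) * page_size / 8;

        printf("window:  %llu GB\n", window >> 30);       /* 16 GB */
        printf("entries: %llu, covering %llu GB\n",
               entries, (entries * page_size) >> 30);     /* 16 GB */
        return 0;
}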
diff --git a/include/asm-mips64/pgalloc.h b/include/asm-mips64/pgalloc.h
index 4db514c48..ed7df254d 100644
--- a/include/asm-mips64/pgalloc.h
+++ b/include/asm-mips64/pgalloc.h
@@ -12,8 +12,6 @@
#include <linux/config.h>
-#include <linux/config.h>
-
/* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLB entries
@@ -158,25 +156,6 @@ extern void __bad_pmd(pgd_t *pgd);
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc() get_pgd_fast()
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
- if (page) {
- pmd_val(*pmd) = (unsigned long) page;
- return page + address;
- }
- return get_pte_kernel_slow(pmd, address);
- }
- if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
@@ -214,35 +193,26 @@ extern inline pmd_t *pmd_alloc(pgd_t * pgd, unsigned long address)
return (pmd_t *) pgd_page(*pgd) + address;
}
-#define pmd_alloc_kernel pmd_alloc
+extern pte_t kptbl[(PAGE_SIZE<<KPTBL_PAGE_ORDER)*PAGE_SIZE/sizeof(pte_t)];
+
+#define MAGIC_PMD_VAL ((pmd_t *)0x1234)
+#define pmd_alloc_kernel(d,a) MAGIC_PMD_VAL
+
+extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+{
+ if (pmd != MAGIC_PMD_VAL) {
+ printk("pte_alloc_kernel problem\n");
+ while(1);
+ }
+ return (kptbl + (address >> PAGE_SHIFT));
+}
extern int do_check_pgt_cache(int, int);
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- struct task_struct * p;
- pgd_t *pgd;
-#ifdef CONFIG_SMP
- int i;
-#endif
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm, address) = entry;
- }
- read_unlock(&tasklist_lock);
-#ifndef CONFIG_SMP
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-#else
- /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
- callee, so we can modify pgd caches of other CPUs as well. -jj */
- for (i = 0; i < NR_CPUS; i++)
- for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-#endif
+ printk("set_pgdir!\n");
+ while(1);
}
#endif /* _ASM_PGALLOC_H */
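Two things are worth noting about the pgalloc.h change above. First,
pmd_alloc_kernel() no longer allocates anything: it returns the sentinel
MAGIC_PMD_VAL, and pte_alloc_kernel() checks for that sentinel before
handing back a slot in the flat kptbl[], so no pmd walk happens for kernel
vmalloc addresses. Second, set_pgdir() becomes a trap: with the flat
table, vmalloc changes never need propagating into per-process pgds, so
reaching it indicates a bug. A condensed standalone sketch of the sentinel
pattern follows; pte_t, the table size, and the helper name are stand-ins,
not the kernel's definitions.

#include <assert.h>
#include <stdio.h>

typedef unsigned long pte_t;                /* stand-in, not the kernel's */
#define PAGE_SHIFT 12
#define MAGIC_PMD  ((void *)0x1234)         /* mirrors MAGIC_PMD_VAL */
#define N_PTES     (1UL << 20)              /* stand-in table size */

static pte_t table[N_PTES];                 /* stand-in for kptbl[] */

/* Mirrors pte_alloc_kernel(): no pmd walk, just a checked index into
 * the flat table.  'off' is the byte offset into the vmalloc window;
 * the patch derives the index from the faulting address instead. */
static pte_t *kernel_pte_slot(void *pmd, unsigned long off)
{
        assert(pmd == MAGIC_PMD);           /* the patch printk()s and spins */
        assert((off >> PAGE_SHIFT) < N_PTES);
        return &table[off >> PAGE_SHIFT];
}

int main(void)
{
        pte_t *pte = kernel_pte_slot(MAGIC_PMD, 5UL << PAGE_SHIFT);
        *pte = 0xabcd;                      /* install a translation */
        printf("slot 5 = %#lx\n", (unsigned long)table[5]);
        return 0;
}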
diff --git a/include/asm-mips64/pgtable.h b/include/asm-mips64/pgtable.h
index 3d25fcf43..0d9996a7c 100644
--- a/include/asm-mips64/pgtable.h
+++ b/include/asm-mips64/pgtable.h
@@ -81,9 +81,11 @@ extern void (*_flush_page_to_ram)(struct page * page);
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0
+#define KPTBL_PAGE_ORDER 1
#define VMALLOC_START XKSEG
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END (KSEG3 + (1UL << 40)) /* 1 TB */
+#define VMALLOC_END \
+ (VMALLOC_START + ((PAGE_SIZE << KPTBL_PAGE_ORDER) * PTRS_PER_PTE * PAGE_SIZE))
/* Note that we shift the lower 32bits of each EntryLo[01] entry
* 6 bits to the left. That way we can convert the PFN into the