summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKanoj Sarcar <kanoj@engr.sgi.com>2000-06-26 23:09:22 +0000
committerKanoj Sarcar <kanoj@engr.sgi.com>2000-06-26 23:09:22 +0000
commit5ba8d2c6b76ceb5b7cf229c4e30f0b880aeefc5d (patch)
tree0f126991af8c416755d89652890decbbe85a4118
parent1e06cd329da230702f8426be546410f0f5cde959 (diff)
Delete pmd_present/pgd_present. Explain pgd/pmd/pte allocation and
initialization. Make sure to allocate only one page for the page table (preventing memory leaks), since only one page is freed.
-rw-r--r--arch/mips64/kernel/head.S5
-rw-r--r--arch/mips64/mm/init.c7
-rw-r--r--arch/mips64/sgi-ip27/ip27-memory.c2
-rw-r--r--include/asm-mips64/pgtable.h18
4 files changed, 18 insertions, 14 deletions
diff --git a/arch/mips64/kernel/head.S b/arch/mips64/kernel/head.S
index 7958ae36f..7292b77ff 100644
--- a/arch/mips64/kernel/head.S
+++ b/arch/mips64/kernel/head.S
@@ -167,7 +167,8 @@ NESTED(bootstrap, 16, sp)
.align 12
page swapper_pg_dir, 1
- page invalid_pte_table, 1
+ page invalid_pte_table, 0
page invalid_pmd_table, 1
- page empty_bad_page_table, 1
+ page empty_bad_page_table, 0
+ page empty_bad_pmd_table, 1
page empty_bad_page
diff --git a/arch/mips64/mm/init.c b/arch/mips64/mm/init.c
index 97c68c6cc..c6b301e14 100644
--- a/arch/mips64/mm/init.c
+++ b/arch/mips64/mm/init.c
@@ -161,7 +161,7 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
pte_t *page;
- page = (pte_t *) __get_free_pages(GFP_KERNEL, 1);
+ page = (pte_t *) __get_free_pages(GFP_KERNEL, 0);
if (pmd_none(*pmd)) {
if (page) {
clear_page(page);
@@ -171,7 +171,7 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
- free_pages((unsigned long)page, 1);
+ free_pages((unsigned long)page, 0);
if (pmd_bad(*pmd)) {
__bad_pte(pmd);
return NULL;
@@ -287,7 +287,6 @@ pmd_t * __bad_pmd_table(void)
pte_t * __bad_pagetable(void)
{
- extern char empty_bad_page_table[PAGE_SIZE];
unsigned long page;
page = (unsigned long) empty_bad_page_table;
@@ -348,7 +347,7 @@ void __init paging_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
pmd_init((unsigned long)invalid_pmd_table);
- memset((void *)invalid_pte_table, 0, sizeof(pte_t) * 2 * PTRS_PER_PTE);
+ memset((void *)invalid_pte_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
low = max_low_pfn;
diff --git a/arch/mips64/sgi-ip27/ip27-memory.c b/arch/mips64/sgi-ip27/ip27-memory.c
index ff218d1d2..ddc01c1f1 100644
--- a/arch/mips64/sgi-ip27/ip27-memory.c
+++ b/arch/mips64/sgi-ip27/ip27-memory.c
@@ -249,7 +249,7 @@ void __init paging_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
pmd_init((unsigned long)invalid_pmd_table);
- memset((void *)invalid_pte_table, 0, sizeof(pte_t) * 2 * PTRS_PER_PTE);
+ memset((void *)invalid_pte_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
for (node = 0; node < numnodes; node++) {
pfn_t start_pfn = slot_getbasepfn(node, 0);
diff --git a/include/asm-mips64/pgtable.h b/include/asm-mips64/pgtable.h
index af8a65d0f..9ba68add9 100644
--- a/include/asm-mips64/pgtable.h
+++ b/include/asm-mips64/pgtable.h
@@ -46,11 +46,14 @@ extern void (*_flush_page_to_ram)(struct page * page);
#define flush_icache_page(vma, page) flush_cache_page(vma, page)
-/* Basically we have the same two-level (which is the logical three level
- * Linux page table layout folded) page tables as the i386. Some day
- * when we have proper page coloring support we can have a 1% quicker
- * tlb refill handling mechanism, but for now it is a bit slower but
- * works even with the cache aliasing problem the R4k and above have.
+/*
+ * Each address space has 2 4K pages as its page directory, giving 1024
+ * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
+ * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to
+ * page tables. Each page table is a single 4K page, giving 512 (==
+ * PTRS_PER_PTE) 8 byte ptes. Each pgde is initialized to point to
+ * invalid_pmd_table, each pmde is initialized to point to
+ * invalid_pte_table, each pte is initialized to 0.
*/
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
@@ -204,8 +207,10 @@ extern unsigned long zero_page_mask;
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-extern pte_t invalid_pte_table[2*PAGE_SIZE/sizeof(pte_t)];
+extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
+extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
+extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -464,7 +469,6 @@ extern inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
/*
* Initialize a new pgd / pmd table with invalid pointers.
*/
-extern void pte_init(unsigned long page);
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page);