author    Ralf Baechle <ralf@linux-mips.org>    2000-08-08 22:17:26 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-08-08 22:17:26 +0000
commit    984760dbf5e48f6224dfe9e26e03536b3d38b6cd (patch)
tree      a99674322e524c4c90009b3bdae5fab0eebf2094 /mm
parent    a7ce7d5e94c98ef5b867f61b2ebecd563f4b6ec9 (diff)
Merge with Linux 2.4.0-test6-pre8.
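The mm/ side of this merge is a mechanical conversion: every open-coded mem_map + MAP_NR(addr) and every nr >= max_mapnr bounds check becomes virt_to_page() and VALID_PAGE(), so ports with discontiguous memory (such as the MIPS systems this tree targets) can supply their own definitions instead of assuming one flat mem_map[]. For orientation, a sketch of the flat-memory expansions, modelled on the 2.4-era <asm-i386/page.h> (exact forms vary per architecture):

    /* Flat-memory expansions (sketch, after 2.4-era <asm-i386/page.h>;
     * details differ per architecture):
     */
    #define __pa(x)             ((unsigned long)(x) - PAGE_OFFSET)
    #define MAP_NR(addr)        (__pa(addr) >> PAGE_SHIFT)        /* old idiom */
    #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
    #define VALID_PAGE(page)    ((page) - mem_map < max_mapnr)

On flat memory the new macros generate the same code; the point is that only the architecture header needs to change for a discontiguous layout, not every caller in mm/.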
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c    |  2
-rw-r--r--  mm/highmem.c    |  4
-rw-r--r--  mm/memory.c     | 34
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/page_io.c    |  2
-rw-r--r--  mm/slab.c       | 30
-rw-r--r--  mm/swap_state.c |  2
-rw-r--r--  mm/swapfile.c   |  2
-rw-r--r--  mm/vmalloc.c    |  7
-rw-r--r--  mm/vmscan.c     |  2
10 files changed, 43 insertions, 52 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0e11fe9ed..fbcb2bb06 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -246,7 +246,7 @@ static unsigned long __init free_all_bootmem_core(int nid, bootmem_data_t *bdata
* Now free the allocator bitmap itself, it's not
* needed anymore:
*/
- page = mem_map + MAP_NR(bdata->node_bootmem_map);
+ page = virt_to_page(bdata->node_bootmem_map);
count = 0;
for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
count++;
diff --git a/mm/highmem.c b/mm/highmem.c
index 411f20c52..6208e347d 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -22,8 +22,6 @@
#include <linux/swap.h>
#include <linux/slab.h>
-unsigned long highmem_mapnr;
-
/*
* Take one locked page, return another low-memory locked page.
*/
@@ -61,7 +59,7 @@ struct page * prepare_highmem_swapout(struct page * page)
* we stored its data into the new regular_page.
*/
page_cache_release(page);
- new_page = mem_map + MAP_NR(regular_page);
+ new_page = virt_to_page(regular_page);
LockPage(new_page);
return new_page;
}
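Several conversions in this patch follow one recurring shape: a buffer held as a kernel virtual address (here from get_free_page(), and likewise in page_io.c, swap_state.c and swapfile.c below) needs its struct page. A minimal usage sketch of the pattern, assuming a GFP_KERNEL allocation context:

    unsigned long addr = __get_free_page(GFP_KERNEL);
    if (addr) {
            /* was: struct page *page = mem_map + MAP_NR(addr); */
            struct page *page = virt_to_page(addr);
            LockPage(page);                 /* operate on the struct page... */
            UnlockPage(page);
            free_page(addr);                /* ...then release it by address */
    }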
diff --git a/mm/memory.c b/mm/memory.c
index 0d93216b5..83fc97cb3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -210,7 +210,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
do {
pte_t pte = *src_pte;
- unsigned long page_nr;
+ struct page *ptepage;
/* copy_one_pte */
@@ -221,9 +221,9 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
- page_nr = pte_pagenr(pte);
- if (page_nr >= max_mapnr ||
- PageReserved(mem_map+page_nr)) {
+ ptepage = pte_page(pte);
+ if ((!VALID_PAGE(ptepage)) ||
+ PageReserved(ptepage)) {
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
@@ -236,7 +236,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
if (vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
set_pte(dst_pte, pte_mkold(pte));
- get_page(mem_map + page_nr);
+ get_page(ptepage);
cont_copy_pte_range: address += PAGE_SIZE;
if (address >= end)
@@ -262,14 +262,14 @@ nomem:
static inline int free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return 0;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return 1;
}
swap_free(pte_to_swp_entry(page));
@@ -409,7 +409,7 @@ static struct page * follow_page(unsigned long address)
static inline struct page * get_page_map(struct page *page)
{
- if (page > (mem_map + max_mapnr))
+ if (!VALID_PAGE(page))
return 0;
return page;
}
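The get_page_map() hunk shows why the conversion is more than cosmetic: the old pointer comparison used > where the boundary case needed >=, and it hard-wired the single flat mem_map. VALID_PAGE() moves that policy into the architecture. The caller-side pattern used throughout this patch, in a minimal sketch:

    /* Translate the PTE straight to a struct page and validate it,
     * instead of computing a map number and indexing mem_map by hand.
     */
    struct page *page = pte_page(pte);      /* was: mem_map + pte_pagenr(pte) */
    if (!VALID_PAGE(page) || PageReserved(page))
            return 0;                       /* not a page this code may touch */
    get_page(page);                         /* safe: valid, non-reserved page */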
@@ -711,12 +711,12 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
- unsigned long mapnr;
+ struct page *page;
pte_t oldpage = *pte;
pte_clear(pte);
- mapnr = MAP_NR(__va(phys_addr));
- if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
+ page = virt_to_page(__va(phys_addr));
+ if ((!VALID_PAGE(page)) || PageReserved(page))
set_pte(pte, mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
@@ -818,13 +818,11 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * old_page
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pte_t pte)
{
- unsigned long map_nr;
struct page *old_page, *new_page;
- map_nr = pte_pagenr(pte);
- if (map_nr >= max_mapnr)
+ old_page = pte_page(pte);
+ if (!VALID_PAGE(old_page))
goto bad_wp_page;
- old_page = mem_map + map_nr;
/*
* We can avoid the copy if:
@@ -883,7 +881,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
bad_wp_page:
spin_unlock(&mm->page_table_lock);
- printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
+ printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
}
@@ -920,7 +918,7 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address)
return;
flush_cache_page(vma, address);
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
return;
offset = address & ~PAGE_MASK;
memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 420f91f92..8b74a73db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -86,7 +86,7 @@ static void __free_pages_ok (struct page *page, unsigned long order)
BUG();
if (page->mapping)
BUG();
- if (page-mem_map >= max_mapnr)
+ if (!VALID_PAGE(page))
BUG();
if (PageSwapCache(page))
BUG();
@@ -350,14 +350,14 @@ void __free_pages(struct page *page, unsigned long order)
void free_pages(unsigned long addr, unsigned long order)
{
- unsigned long map_nr;
+ struct page *fpage;
#ifdef CONFIG_DISCONTIGMEM
if (addr == 0) return;
#endif
- map_nr = MAP_NR(addr);
- if (map_nr < max_mapnr)
- __free_pages(mem_map + map_nr, order);
+ fpage = virt_to_page(addr);
+ if (VALID_PAGE(fpage))
+ __free_pages(fpage, order);
}
/*
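free_pages() keeps its address-taking interface but now validates through virt_to_page() and VALID_PAGE() rather than comparing a map number against max_mapnr. A short usage sketch of the two entry points:

    unsigned long addr = __get_free_pages(GFP_KERNEL, 2);   /* 2^2 = 4 pages */
    if (addr) {
            free_pages(addr, 2);    /* internally: virt_to_page() + VALID_PAGE() */
            /* equivalent, had we kept the struct page instead:
             *   __free_pages(virt_to_page(addr), 2);
             */
    }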
diff --git a/mm/page_io.c b/mm/page_io.c
index b2b6359d0..25ed62221 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -126,7 +126,7 @@ void rw_swap_page(int rw, struct page *page, int wait)
*/
void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
{
- struct page *page = mem_map + MAP_NR(buf);
+ struct page *page = virt_to_page(buf);
if (!PageLocked(page))
PAGE_BUG(page);
diff --git a/mm/slab.c b/mm/slab.c
index 49c1a4879..815430698 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -496,7 +496,7 @@ static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
/* free_pages() does not clear the type bit - we do that.
* The pages have been unlinked from their cache-slab,
@@ -1115,7 +1115,7 @@ static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
/* Nasty!!!!!! I hope this is OK. */
i = 1 << cachep->gfporder;
- page = mem_map + MAP_NR(objp);
+ page = virt_to_page(objp);
do {
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
@@ -1321,9 +1321,9 @@ alloc_new_slab_nolock:
*/
#if DEBUG
-# define CHECK_NR(nr) \
+# define CHECK_NR(pg) \
do { \
- if (nr >= max_mapnr) { \
+ if (!VALID_PAGE(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
@@ -1331,6 +1331,7 @@ alloc_new_slab_nolock:
} while (0)
# define CHECK_PAGE(page) \
do { \
+ CHECK_NR(page); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
@@ -1339,23 +1340,21 @@ alloc_new_slab_nolock:
} while (0)
#else
-# define CHECK_NR(nr) do { } while (0)
-# define CHECK_PAGE(nr) do { } while (0)
+# define CHECK_PAGE(pg) do { } while (0)
#endif
static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
else
*/
- slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp));
+ slabp = GET_PAGE_SLAB(virt_to_page(objp));
#if DEBUG
if (cachep->flags & SLAB_DEBUG_INITIAL)
@@ -1452,8 +1451,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
@@ -1536,9 +1534,8 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp)))
+ CHECK_PAGE(virt_to_page(objp));
+ if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
@@ -1562,9 +1559,8 @@ void kfree (const void *objp)
if (!objp)
return;
local_irq_save(flags);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- c = GET_PAGE_CACHE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
+ c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
}
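The slab DEBUG plumbing is folded together by this patch: CHECK_NR() now takes a struct page and is invoked from inside CHECK_PAGE(), so every call site shrinks from two macros plus a MAP_NR() computation to a single CHECK_PAGE(virt_to_page(objp)). Assembled from the hunks above, the macros now read as follows (both refer to objp from the enclosing function, as before; the BUG() in CHECK_PAGE falls outside the visible hunk context and is inferred from the parallel structure):

    #if DEBUG
    # define CHECK_NR(pg)                                                   \
            do {                                                            \
                    if (!VALID_PAGE(pg)) {                                  \
                            printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
                                   (unsigned long)objp);                    \
                            BUG();                                          \
                    }                                                       \
            } while (0)
    # define CHECK_PAGE(page)                                               \
            do {                                                            \
                    CHECK_NR(page);                                         \
                    if (!PageSlab(page)) {                                  \
                            printk(KERN_ERR "kfree: bad ptr %lxh.\n",       \
                                   (unsigned long)objp);                    \
                            BUG();                                          \
                    }                                                       \
            } while (0)
    #else
    # define CHECK_PAGE(pg) do { } while (0)
    #endif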
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 72f3eaca4..506160354 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -220,7 +220,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
new_page_addr = __get_free_page(GFP_USER);
if (!new_page_addr)
goto out_free_swap; /* Out of memory */
- new_page = mem_map + MAP_NR(new_page_addr);
+ new_page = virt_to_page(new_page_addr);
/*
* Check the swap cache again, in case we stalled above.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a84e73f2f..fa4cb133e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -645,7 +645,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
goto bad_swap;
}
- lock_page(mem_map + MAP_NR(swap_header));
+ lock_page(virt_to_page(swap_header));
rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1);
if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 57f3ca56c..817a3966b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -41,10 +41,9 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
if (pte_none(page))
continue;
if (pte_present(page)) {
- unsigned long map_nr = pte_pagenr(page);
- if ((map_nr < max_mapnr) &&
- (!PageReserved(mem_map + map_nr)))
- __free_page(mem_map + map_nr);
+ struct page *ptpage = pte_page(page);
+ if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+ __free_page(ptpage);
continue;
}
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
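The vmalloc hunk is the mirror image of remap_pte_range() in mm/memory.c above: user mappings of physical ranges are installed only for pages the VM does not manage, while the vfree() path returns only pages the VM does manage. Condensed from the two hunks (sketch):

    /* remap_pte_range: map only memory the VM does NOT manage into user
     * space; a normal managed page could be freed behind the mapping. */
    page = virt_to_page(__va(phys_addr));
    if (!VALID_PAGE(page) || PageReserved(page))
            set_pte(pte, mk_pte_phys(phys_addr, prot));

    /* free_area_pte: return only memory the VM DOES manage. */
    if (VALID_PAGE(ptpage) && !PageReserved(ptpage))
            __free_page(ptpage);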
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4dda15dd1..95098e4d1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -45,7 +45,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
if (!pte_present(pte))
goto out_failed;
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
goto out_failed;
if (mm->swap_cnt)