diff options
105 files changed, 393 insertions, 412 deletions
@@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -test6-pre7 +EXTRAVERSION = -test6-pre8 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 84b608f45..2ab7941b2 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -141,7 +141,7 @@ pte_t __bad_page(void) { memset((void *) EMPTY_PGE, 0, PAGE_SIZE); - return pte_mkdirty(mk_pte(mem_map + MAP_NR(EMPTY_PGE), PAGE_SHARED)); + return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED)); } void @@ -325,8 +325,8 @@ free_initmem (void) addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map+MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; } @@ -339,8 +339,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) { for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 589653111..72eaa2e47 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -534,8 +534,8 @@ void __init paging_init(struct meminfo *mi) memzero(zero_page, PAGE_SIZE); memzero(bad_page, PAGE_SIZE); - empty_zero_page = mem_map + MAP_NR(zero_page); - empty_bad_page = mem_map + MAP_NR(bad_page); + empty_zero_page = virt_to_page(zero_page); + empty_bad_page = virt_to_page(bad_page); empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET; } @@ -598,7 +598,7 @@ void __init mem_init(void) static inline void free_area(unsigned long addr, unsigned long end, char *s) { unsigned int size = (end - addr) >> 10; - struct page *page = mem_map + MAP_NR(addr); + struct page *page = 
virt_to_page(addr); for (; addr < end; addr += PAGE_SIZE, page ++) { ClearPageReserved(page); @@ -632,8 +632,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) if (!keep_initrd) { for (addr = start; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map+MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; } diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c index 1edbc35fd..e656a25d2 100644 --- a/arch/arm/mm/mm-armv.c +++ b/arch/arm/mm/mm-armv.c @@ -417,8 +417,8 @@ static inline void free_memmap(unsigned long start, unsigned long end) start = __phys_to_virt(start); end = __phys_to_virt(end); - pg = PAGE_ALIGN((unsigned long)(mem_map + MAP_NR(start))); - pgend = ((unsigned long)(mem_map + MAP_NR(end))) & PAGE_MASK; + pg = PAGE_ALIGN((unsigned long)(virt_to_page(start))); + pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK; start = __virt_to_phys(pg); end = __virt_to_phys(pgend); diff --git a/arch/arm/mm/small_page.c b/arch/arm/mm/small_page.c index 40c91ba32..27fb0f663 100644 --- a/arch/arm/mm/small_page.c +++ b/arch/arm/mm/small_page.c @@ -142,12 +142,10 @@ no_page: static void __free_small_page(unsigned long spage, struct order *order) { unsigned long flags; - unsigned long nr; struct page *page; - nr = MAP_NR(spage); - if (nr < max_mapnr) { - page = mem_map + nr; + page = virt_to_page(spage); + if (VALID_PAGE(page)) { /* * The container-page must be marked Reserved diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index af72d1581..9ba2baa31 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -566,8 +566,6 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM highmem_start_page = mem_map + highstart_pfn; - /* cache the highmem_mapnr */ - highmem_mapnr = highstart_pfn; max_mapnr = num_physpages = highend_pfn; #else max_mapnr = num_physpages = max_low_pfn; @@ -642,8 +640,8 @@ void 
free_initmem(void) addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map+MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; } @@ -656,8 +654,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) if (start < end) printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c index cb4427672..ba5c2e7ba 100644 --- a/arch/i386/mm/ioremap.c +++ b/arch/i386/mm/ioremap.c @@ -121,15 +121,14 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag */ if (phys_addr < virt_to_phys(high_memory)) { char *t_addr, *t_end; - int i; + struct page *page; t_addr = __va(phys_addr); t_end = t_addr + (size - 1); - for(i = MAP_NR(t_addr); i < MAP_NR(t_end); i++) { - if(!PageReserved(mem_map + i)) + for(page = virt_to_page(t_addr); page < virt_to_page(t_end); page++) + if(!PageReserved(page)) return NULL; - } } /* diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c index 6c63e33a7..770ef6363 100644 --- a/arch/ia64/ia32/binfmt_elf32.c +++ b/arch/ia64/ia32/binfmt_elf32.c @@ -81,9 +81,9 @@ void ia64_elf32_init(struct pt_regs *regs) { int nr; - put_shared_page(current, mem_map + MAP_NR(ia32_gdt_table), IA32_PAGE_OFFSET); + put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_PAGE_OFFSET); if (PAGE_SHIFT <= IA32_PAGE_SHIFT) - put_shared_page(current, mem_map + MAP_NR(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE); + put_shared_page(current, virt_to_page(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE); nr = 
smp_processor_id(); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index d911c91bf..8ddda7e11 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -173,8 +173,8 @@ free_initmem (void) addr = (unsigned long) &__init_begin; for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) { - clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags); - set_page_count(&mem_map[MAP_NR(addr)], 1); + clear_bit(PG_reserved, &virt_to_page(addr)->flags); + set_page_count(virt_to_page(addr), 1); free_page(addr); ++totalram_pages; } @@ -188,8 +188,8 @@ free_initrd_mem(unsigned long start, unsigned long end) if (start < end) printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { - clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags); - set_page_count(&mem_map[MAP_NR(start)], 1); + clear_bit(PG_reserved, &virt_to_page(start)->flags); + set_page_count(virt_to_page(start), 1); free_page(start); ++totalram_pages; } @@ -372,7 +372,7 @@ count_reserved_pages (u64 start, u64 end, void *arg) unsigned long *count = arg; struct page *pg; - for (pg = mem_map + MAP_NR(start); pg < mem_map + MAP_NR(end); ++pg) + for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg) if (PageReserved(pg)) ++num_reserved; *count += num_reserved; @@ -409,7 +409,7 @@ mem_init (void) datasize >> 10, initsize >> 10); /* install the gate page in the global page table: */ - put_gate_page(mem_map + MAP_NR(__start_gate_section), GATE_ADDR); + put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR); #ifndef CONFIG_IA64_SOFTSDV_HACKS /* diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c index 2b804b767..d0acdaaae 100644 --- a/arch/m68k/atari/stram.c +++ b/arch/m68k/atari/stram.c @@ -305,7 +305,7 @@ void __init atari_stram_reserve_pages(unsigned long start_mem) /* always reserve first page of ST-RAM, the first 2 kB are * supervisor-only! 
*/ - set_bit( PG_reserved, &mem_map[MAP_NR(stram_start)].flags ); + set_bit( PG_reserved, &virt_to_page(stram_start)->flags ); #ifdef CONFIG_STRAM_SWAP if (!max_swap_size) { @@ -699,7 +699,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long if (pte_page(pte) != page) return; if (0 /* isswap */) - mem_map[MAP_NR(pte_page(pte))].offset = page; + virt_to_page(pte_page(pte))->offset = page; else /* We will be removing the swap cache in a moment, so... */ set_pte(dir, pte_mkdirty(pte)); @@ -716,7 +716,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long DPRINTK( "unswap_pte: replacing entry %08lx by new page %08lx", entry, page ); set_pte(dir, pte_mkdirty(__mk_pte(page,vma->vm_page_prot))); - atomic_inc(&mem_map[MAP_NR(page)].count); + atomic_inc(&virt_to_page(page)->count); ++vma->vm_mm->rss; } swap_free(entry); @@ -1291,7 +1291,7 @@ static int get_gfp_order( unsigned long size ) /* reserve a range of pages in mem_map[] */ static void reserve_region( unsigned long addr, unsigned long end ) { - mem_map_t *mapp = &mem_map[MAP_NR(addr)]; + mem_map_t *mapp = virt_to_page(addr); for( ; addr < end; addr += PAGE_SIZE, ++mapp ) set_bit( PG_reserved, &mapp->flags ); diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 4e6b28265..612d51eda 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -153,10 +153,10 @@ void __init mem_init(void) #if 0 #ifndef CONFIG_SUN3 if (virt_to_phys ((void *)tmp) >= mach_max_dma_address) - clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags); + clear_bit(PG_DMA, &virt_to_page(tmp)->flags); #endif #endif - if (PageReserved(mem_map+MAP_NR(tmp))) { + if (PageReserved(virt_to_page(tmp))) { if (tmp >= (unsigned long)&_text && tmp < (unsigned long)&_etext) codepages++; @@ -168,7 +168,7 @@ void __init mem_init(void) continue; } #if 0 - set_page_count(mem_map+MAP_NR(tmp), 1); + set_page_count(virt_to_page(tmp), 1); #ifdef CONFIG_BLK_DEV_INITRD if (!initrd_start || (tmp < (initrd_start & 
PAGE_MASK) || tmp >= initrd_end)) @@ -202,8 +202,8 @@ void __init mem_init(void) void free_initrd_mem(unsigned long start, unsigned long end) { for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index a0a5336ed..a3c4f38c5 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c @@ -93,7 +93,7 @@ pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset) typedef struct list_head ptable_desc; static LIST_HEAD(ptable_list); -#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)]) +#define PD_PTABLE(page) ((ptable_desc *)virt_to_page(page)) #define PD_PAGE(ptable) (list_entry(ptable, struct page, list)) #define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index) diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 146dc7279..61cd918e7 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -293,8 +293,8 @@ void free_initmem(void) addr = (unsigned long)&__init_begin; for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) { - mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved); - set_page_count(mem_map+MAP_NR(addr), 1); + virt_to_page(addr)->flags &= ~(1 << PG_reserved); + set_page_count(virt_to_page(addr), 1); free_page(addr); } } diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c index 49de2ffa9..7988cfc39 100644 --- a/arch/mips/arc/memory.c +++ b/arch/mips/arc/memory.c @@ -228,8 +228,8 @@ prom_free_prom_memory (void) addr = PAGE_OFFSET + p->base; while (addr < p->base + p->size) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map + MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); addr += PAGE_SIZE; freed += PAGE_SIZE; diff --git a/arch/mips/dec/prom/memory.c 
b/arch/mips/dec/prom/memory.c index 1025104ce..e54e78a25 100644 --- a/arch/mips/dec/prom/memory.c +++ b/arch/mips/dec/prom/memory.c @@ -149,8 +149,8 @@ void prom_free_prom_memory (void) addr = PAGE_SIZE; while (addr < end) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map + MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); addr += PAGE_SIZE; } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index b1fde5108..a6f448e05 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -120,7 +120,8 @@ unsigned long empty_zero_page, zero_page_mask; static inline unsigned long setup_zero_pages(void) { - unsigned long order, size, pg; + unsigned long order, size; + struct page *page; switch (mips_cputype) { case CPU_R4000SC: @@ -137,11 +138,11 @@ static inline unsigned long setup_zero_pages(void) if (!empty_zero_page) panic("Oh boy, that early out of memory?"); - pg = MAP_NR(empty_zero_page); - while (pg < MAP_NR(empty_zero_page) + (1 << order)) { - set_bit(PG_reserved, &mem_map[pg].flags); - set_page_count(mem_map + pg, 0); - pg++; + page = virt_to_page(empty_zero_page); + while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) { + set_bit(PG_reserved, &page->flags); + set_page_count(page, 0); + page++; } size = PAGE_SIZE << order; @@ -309,8 +310,8 @@ void __init mem_init(void) void free_initrd_mem(unsigned long start, unsigned long end) { for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } @@ -329,8 +330,8 @@ void free_initmem(void) addr = (unsigned long) &__init_begin; while (addr < (unsigned long) &__init_end) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map + MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + 
set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; addr += PAGE_SIZE; diff --git a/arch/mips/mm/umap.c b/arch/mips/mm/umap.c index 003866ac4..465bec37b 100644 --- a/arch/mips/mm/umap.c +++ b/arch/mips/mm/umap.c @@ -115,10 +115,10 @@ void *vmalloc_uncached (unsigned long size) static inline void free_pte(pte_t page) { if (pte_present(page)) { - unsigned long nr = pte_pagenr(page); - if (nr >= max_mapnr || PageReserved(mem_map+nr)) + struct page *ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage)) return; - __free_page(pte_page(page)); + __free_page(ptpage); if (current->mm->rss <= 0) return; current->mm->rss--; diff --git a/arch/mips64/arc/memory.c b/arch/mips64/arc/memory.c index 1e88dd6c7..d2ef95869 100644 --- a/arch/mips64/arc/memory.c +++ b/arch/mips64/arc/memory.c @@ -233,8 +233,8 @@ prom_free_prom_memory (void) addr = PAGE_OFFSET + (unsigned long) (long) p->base; end = addr + (unsigned long) (long) p->size; while (addr < end) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map + MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); addr += PAGE_SIZE; freed += PAGE_SIZE; diff --git a/arch/mips64/mm/init.c b/arch/mips64/mm/init.c index 223ca3e5e..f1d95c702 100644 --- a/arch/mips64/mm/init.c +++ b/arch/mips64/mm/init.c @@ -215,7 +215,8 @@ unsigned long empty_zero_page, zero_page_mask; unsigned long setup_zero_pages(void) { - unsigned long order, size, pg; + unsigned long order, size; + struct page *page; switch (mips_cputype) { case CPU_R4000SC: @@ -232,11 +233,11 @@ unsigned long setup_zero_pages(void) if (!empty_zero_page) panic("Oh boy, that early out of memory?"); - pg = MAP_NR(empty_zero_page); - while (pg < MAP_NR(empty_zero_page) + (1 << order)) { - set_bit(PG_reserved, &mem_map[pg].flags); - set_page_count(mem_map + pg, 0); - pg++; + page = virt_to_page(empty_zero_page); + while (page < virt_to_page(empty_zero_page + 
(PAGE_SIZE << order))) { + set_bit(PG_reserved, &page->flags); + set_page_count(page, 0); + page++; } size = PAGE_SIZE << order; @@ -374,8 +375,8 @@ void __init mem_init(void) void free_initrd_mem(unsigned long start, unsigned long end) { for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } @@ -396,8 +397,8 @@ free_initmem(void) addr = (unsigned long)(&__init_begin); while (addr < (unsigned long)&__init_end) { page = PAGE_OFFSET | CPHYSADDR(addr); - ClearPageReserved(mem_map + MAP_NR(page)); - set_page_count(mem_map + MAP_NR(page), 1); + ClearPageReserved(virt_to_page(page)); + set_page_count(virt_to_page(page), 1); free_page(page); totalram_pages++; addr += PAGE_SIZE; diff --git a/arch/mips64/mm/umap.c b/arch/mips64/mm/umap.c index b8ae542cc..9b0e7db1e 100644 --- a/arch/mips64/mm/umap.c +++ b/arch/mips64/mm/umap.c @@ -109,10 +109,10 @@ void *vmalloc_uncached (unsigned long size) static inline void free_pte(pte_t page) { if (pte_present(page)) { - unsigned long nr = pte_pagenr(page); - if (nr >= max_mapnr || PageReserved(mem_map+nr)) + struct page *ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage)) return; - __free_page(pte_page(page)); + __free_page(ptpage); if (current->mm->rss <= 0) return; current->mm->rss--; diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c index 16c3bc694..67257c50b 100644 --- a/arch/ppc/mm/init.c +++ b/arch/ppc/mm/init.c @@ -797,8 +797,8 @@ void __init free_initmem(void) #define FREESEC(START,END,CNT) do { \ a = (unsigned long)(&START); \ for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \ - clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \ - set_page_count(mem_map+MAP_NR(a), 1); \ + clear_bit(PG_reserved, &virt_to_page(a)->flags); \ + set_page_count(virt_to_page(a), 1); \ free_page(a); \ CNT++; \ } \ @@ 
-865,8 +865,8 @@ void __init free_initmem(void) void free_initrd_mem(unsigned long start, unsigned long end) { for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } @@ -1187,7 +1187,7 @@ void __init mem_init(void) make sure the ramdisk pages aren't reserved. */ if (initrd_start) { for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE) - clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags); + clear_bit(PG_reserved, &virt_to_page(addr)->flags); } #endif /* CONFIG_BLK_DEV_INITRD */ @@ -1196,17 +1196,17 @@ void __init mem_init(void) if ( rtas_data ) for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ; addr += PAGE_SIZE) - SetPageReserved(mem_map + MAP_NR(addr)); + SetPageReserved(virt_to_page(addr)); #endif /* defined(CONFIG_ALL_PPC) */ if ( sysmap_size ) for (addr = (unsigned long)sysmap; addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ; addr += PAGE_SIZE) - SetPageReserved(mem_map + MAP_NR(addr)); + SetPageReserved(virt_to_page(addr)); for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM; addr += PAGE_SIZE) { - if (!PageReserved(mem_map + MAP_NR(addr))) + if (!PageReserved(virt_to_page(addr))) continue; if (addr < (ulong) etext) codepages++; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 4e30c015a..a45d02ed8 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -351,8 +351,8 @@ void free_initmem(void) addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map+MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; } @@ -366,8 +366,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) if (start < end) printk ("Freeing 
initrd memory: %ldk freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(start)); - set_page_count(mem_map+MAP_NR(start), 1); + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); free_page(start); totalram_pages++; } diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index f798ef968..5e0632a86 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -270,8 +270,8 @@ void free_initmem(void) addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(addr)); - set_page_count(mem_map+MAP_NR(addr), 1); + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); free_page(addr); totalram_pages++; } @@ -283,8 +283,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) { unsigned long p; for (p = start; p < end; p += PAGE_SIZE) { - ClearPageReserved(mem_map + MAP_NR(p)); - set_page_count(mem_map+MAP_NR(p), 1); + ClearPageReserved(virt_to_page(p)); + set_page_count(virt_to_page(p), 1); free_page(p); totalram_pages++; } diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 1294ad31a..280392629 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -298,20 +298,20 @@ void __init smp4d_boot_cpus(void) } /* Free unneeded trap tables */ - ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1)); - set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1); + ClearPageReserved(virt_to_page(trapbase_cpu1)); + set_page_count(virt_to_page(trapbase_cpu1), 1); free_page((unsigned long)trapbase_cpu1); totalram_pages++; num_physpages++; - ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2)); - set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1); + ClearPageReserved(virt_to_page(trapbase_cpu2)); + set_page_count(virt_to_page(trapbase_cpu2), 1); free_page((unsigned long)trapbase_cpu2); totalram_pages++; num_physpages++; - ClearPageReserved(mem_map + 
MAP_NR(trapbase_cpu3)); - set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1); + ClearPageReserved(virt_to_page(trapbase_cpu3)); + set_page_count(virt_to_page(trapbase_cpu3), 1); free_page((unsigned long)trapbase_cpu3); totalram_pages++; num_physpages++; diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 60c37373e..d6c126d00 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -279,22 +279,22 @@ void __init smp4m_boot_cpus(void) /* Free unneeded trap tables */ if (!(cpu_present_map & (1 << 1))) { - ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1)); - set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1); + ClearPageReserved(virt_to_page(trapbase_cpu1)); + set_page_count(virt_to_page(trapbase_cpu1), 1); free_page((unsigned long)trapbase_cpu1); totalram_pages++; num_physpages++; } if (!(cpu_present_map & (1 << 2))) { - ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2)); - set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1); + ClearPageReserved(virt_to_page(trapbase_cpu2)); + set_page_count(virt_to_page(trapbase_cpu2), 1); free_page((unsigned long)trapbase_cpu2); totalram_pages++; num_physpages++; } if (!(cpu_present_map & (1 << 3))) { - ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3)); - set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1); + ClearPageReserved(virt_to_page(trapbase_cpu3)); + set_page_count(virt_to_page(trapbase_cpu3), 1); free_page((unsigned long)trapbase_cpu3); totalram_pages++; num_physpages++; diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c index 9e599fd9d..ad74d0ed2 100644 --- a/arch/sparc/mm/generic.c +++ b/arch/sparc/mm/generic.c @@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page) if (pte_none(page)) return; if (pte_present(page)) { - unsigned long nr = pte_pagenr(page); - if (nr >= max_mapnr || PageReserved(mem_map+nr)) + struct page *ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage)) return; /* * free_page() used to be able to clear swap 
cache * entries. We may now have to do it manually. */ - free_page_and_swap_cache(mem_map+nr); + free_page_and_swap_cache(ptpage); return; } swap_free(pte_to_swp_entry(page)); diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c index ced31b91f..3d42e0c63 100644 --- a/arch/sparc/mm/init.c +++ b/arch/sparc/mm/init.c @@ -369,8 +369,8 @@ void __init free_mem_map_range(struct page *first, struct page *last) prom_printf("[%p,%p] ", first, last); #endif while (first < last) { - ClearPageReserved(mem_map + MAP_NR(first)); - set_page_count(mem_map + MAP_NR(first), 1); + ClearPageReserved(virt_to_page(first)); + set_page_count(virt_to_page(first), 1); free_page((unsigned long)first); totalram_pages++; num_physpages++; @@ -542,7 +542,7 @@ void free_initmem (void) struct page *p; page = addr + phys_base; - p = mem_map + MAP_NR(page); + p = virt_to_page(page); ClearPageReserved(p); set_page_count(p, 1); @@ -559,7 +559,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) if (start < end) printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { - struct page *p = mem_map + MAP_NR(start); + struct page *p = virt_to_page(start); ClearPageReserved(p); set_page_count(p, 1); diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index a370ea2d4..f395f06f5 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -190,7 +190,7 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len) pmdp = pmd_offset(pgdp, addr); ptep = pte_offset(pmdp, addr); - set_pte(ptep, pte_val(mk_pte(mem_map + MAP_NR(page), dvma_prot))); + set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot))); i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 5fde9a52a..e48128479 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -247,7 +247,7 @@ static void iommu_map_dma_area(unsigned long va, __u32 addr, int len) pmdp = pmd_offset(pgdp, 
addr); ptep = pte_offset(pmdp, addr); - set_pte(ptep, mk_pte(mem_map + MAP_NR(page), dvma_prot)); + set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); if (ipte_cache != 0) { iopte_val(*iopte++) = MKIOPTE(__pa(page)); } else { diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c7f839f28..7e76841de 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -612,7 +612,7 @@ static void srmmu_free_task_struct(struct task_struct *tsk) static void srmmu_get_task_struct(struct task_struct *tsk) { - atomic_inc(&mem_map[MAP_NR(tsk)].count); + atomic_inc(&virt_to_page(tsk)->count); } /* tsunami.S */ @@ -2153,7 +2153,7 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(sparc_pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM); diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index 55551c561..717269b68 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1320,7 +1320,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus unsigned long page; page = ((unsigned long)bufptr) & PAGE_MASK; - if (MAP_NR(page) > max_mapnr) { + if (!VALID_PAGE(virt_to_page(page))) { sun4c_flush_page(page); return (__u32)bufptr; /* already locked */ } @@ -2095,7 +2095,7 @@ static int sun4c_pmd_none(pmd_t pmd) { return !pmd_val(pmd); } static int sun4c_pmd_bad(pmd_t pmd) { return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) || - (MAP_NR(pmd_val(pmd)) > max_mapnr)); + (!VALID_PAGE(virt_to_page(pmd_val(pmd))))); } static int sun4c_pmd_present(pmd_t pmd) @@ -2650,7 +2650,7 @@ void __init ld_mmu_sun4c(void) BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0); - BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, 
BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(sparc_pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM); #if PAGE_SHIFT <= 12 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1)); #else diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 63da67c8e..cd4b3365c 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -110,7 +110,7 @@ static unsigned int get_user_insn(unsigned long tpc) if(!pte_present(pte)) goto out; - pa = phys_base + (pte_pagenr(pte) << PAGE_SHIFT); + pa = phys_base + (sparc64_pte_pagenr(pte) << PAGE_SHIFT); pa += (tpc & ~PAGE_MASK); /* Use phys bypass so we don't pollute dtlb/dcache. */ diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 9a1ab1de3..c469c6062 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c @@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page) if (pte_none(page)) return; if (pte_present(page)) { - unsigned long nr = pte_pagenr(page); - if (nr >= max_mapnr || PageReserved(mem_map+nr)) + struct page *ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage)) return; /* * free_page() used to be able to clear swap cache * entries. We may now have to do it manually. 
*/ - free_page_and_swap_cache(mem_map+nr); + free_page_and_swap_cache(ptpage); return; } swap_free(pte_to_swp_entry(page)); diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index d3c9e5036..63994c51d 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1242,7 +1242,7 @@ void free_initmem (void) page = (addr + ((unsigned long) __va(phys_base)) - ((unsigned long) &empty_zero_page)); - p = mem_map + MAP_NR(page); + p = virt_to_page(page); ClearPageReserved(p); set_page_count(p, 1); @@ -1257,7 +1257,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) if (start < end) printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { - struct page *p = mem_map + MAP_NR(start); + struct page *p = virt_to_page(start); ClearPageReserved(p); set_page_count(p, 1); diff --git a/drivers/block/lvm-snap.c b/drivers/block/lvm-snap.c index 20e1c78cc..938ffc26e 100644 --- a/drivers/block/lvm-snap.c +++ b/drivers/block/lvm-snap.c @@ -326,7 +326,7 @@ static int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors) if (!addr) goto out; iobuf->pagelist[i] = addr; - page = mem_map + MAP_NR(addr); + page = virt_to_page(addr); } #endif diff --git a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c index 573419a28..a31a6acf7 100644 --- a/drivers/char/agp/agpgart_be.c +++ b/drivers/char/agp/agpgart_be.c @@ -141,8 +141,8 @@ static unsigned long agp_alloc_page(void) if (pt == NULL) { return 0; } - atomic_inc(&mem_map[MAP_NR(pt)].count); - set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); + atomic_inc(&virt_to_page(pt)->count); + set_bit(PG_locked, &virt_to_page(pt)->flags); atomic_inc(&agp_bridge.current_memory_agp); return (unsigned long) pt; } @@ -154,9 +154,9 @@ static void agp_destroy_page(unsigned long page) if (pt == NULL) { return; } - atomic_dec(&mem_map[MAP_NR(pt)].count); - clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); - wake_up(&mem_map[MAP_NR(pt)].wait); + 
atomic_dec(&virt_to_page(pt)->count); + clear_bit(PG_locked, &virt_to_page(pt)->flags); + wake_up(&virt_to_page(pt)->wait); free_page((unsigned long) pt); atomic_dec(&agp_bridge.current_memory_agp); } @@ -541,6 +541,7 @@ static int agp_generic_create_gatt_table(void) int num_entries; int i; void *temp; + struct page *page; /* The generic routines can't handle 2 level gatt's */ if (agp_bridge.size_type == LVL2_APER_SIZE) { @@ -622,9 +623,8 @@ static int agp_generic_create_gatt_table(void) } table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - set_bit(PG_reserved, &mem_map[i].flags); - } + for (page = virt_to_page(table); page < get_mem_map(table_end); page++) + set_bit(PG_reserved, &page->flags); agp_bridge.gatt_table_real = (unsigned long *) table; CACHE_FLUSH(); @@ -633,9 +633,8 @@ static int agp_generic_create_gatt_table(void) CACHE_FLUSH(); if (agp_bridge.gatt_table == NULL) { - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - clear_bit(PG_reserved, &mem_map[i].flags); - } + for (page = virt_to_page(table); page < get_mem_map(table_end); page++) + clear_bit(PG_reserved, &page->flags); free_pages((unsigned long) table, page_order); @@ -653,10 +652,10 @@ static int agp_generic_create_gatt_table(void) static int agp_generic_free_gatt_table(void) { - int i; int page_order; char *table, *table_end; void *temp; + struct page *page; temp = agp_bridge.current_size; @@ -691,9 +690,8 @@ static int agp_generic_free_gatt_table(void) table = (char *) agp_bridge.gatt_table_real; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { - clear_bit(PG_reserved, &mem_map[i].flags); - } + for (page = virt_to_page(table); page < get_mem_map(table_end); page++) + clear_bit(PG_reserved, &page->flags); free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); return 0; @@ -1500,13 +1498,13 @@ static int amd_create_page_map(amd_page_map *page_map) 
if (page_map->real == NULL) { return -ENOMEM; } - set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags); + set_bit(PG_reserved, &virt_to_page(page_map->real)->flags); CACHE_FLUSH(); page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), PAGE_SIZE); if (page_map->remapped == NULL) { clear_bit(PG_reserved, - &mem_map[MAP_NR(page_map->real)].flags); + &virt_to_page(page_map->real)->flags); free_page((unsigned long) page_map->real); page_map->real = NULL; return -ENOMEM; @@ -1524,7 +1522,7 @@ static void amd_free_page_map(amd_page_map *page_map) { iounmap(page_map->remapped); clear_bit(PG_reserved, - &mem_map[MAP_NR(page_map->real)].flags); + &virt_to_page(page_map->real)->flags); free_page((unsigned long) page_map->real); } diff --git a/drivers/char/bttv-driver.c b/drivers/char/bttv-driver.c index 74d4188a2..6c88a757e 100644 --- a/drivers/char/bttv-driver.c +++ b/drivers/char/bttv-driver.c @@ -191,7 +191,7 @@ static void * rvmalloc(signed long size) while (size > 0) { page = kvirt_to_pa(adr); - mem_map_reserve(MAP_NR(__va(page))); + mem_map_reserve(virt_to_page(__va(page))); adr+=PAGE_SIZE; size-=PAGE_SIZE; } @@ -209,7 +209,7 @@ static void rvfree(void * mem, signed long size) while (size > 0) { page = kvirt_to_pa(adr); - mem_map_unreserve(MAP_NR(__va(page))); + mem_map_unreserve(virt_to_page(__va(page))); adr+=PAGE_SIZE; size-=PAGE_SIZE; } diff --git a/drivers/char/buz.c b/drivers/char/buz.c index 299a4f8ca..ca3cb4f47 100644 --- a/drivers/char/buz.c +++ b/drivers/char/buz.c @@ -199,7 +199,7 @@ static int v4l_fbuffer_alloc(struct zoran *zr) zr->v4l_gbuf[i].fbuffer_phys = virt_to_phys(mem); zr->v4l_gbuf[i].fbuffer_bus = virt_to_bus(mem); for (off = 0; off < v4l_bufsize; off += PAGE_SIZE) - mem_map_reserve(MAP_NR(mem + off)); + mem_map_reserve(virt_to_page(mem + off)); DEBUG(printk(BUZ_INFO ": V4L frame %d mem 0x%x (bus: 0x%x=%d)\n", i, mem, virt_to_bus(mem), virt_to_bus(mem))); } else { return -ENOBUFS; @@ -221,7 +221,7 @@ static void 
v4l_fbuffer_free(struct zoran *zr) mem = zr->v4l_gbuf[i].fbuffer; for (off = 0; off < v4l_bufsize; off += PAGE_SIZE) - mem_map_unreserve(MAP_NR(mem + off)); + mem_map_unreserve(virt_to_page(mem + off)); kfree((void *) zr->v4l_gbuf[i].fbuffer); zr->v4l_gbuf[i].fbuffer = NULL; } @@ -286,7 +286,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr) zr->jpg_gbuf[i].frag_tab[0] = virt_to_bus((void *) mem); zr->jpg_gbuf[i].frag_tab[1] = ((zr->jpg_bufsize / 4) << 1) | 1; for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE) - mem_map_reserve(MAP_NR(mem + off)); + mem_map_reserve(virt_to_page(mem + off)); } else { /* jpg_bufsize is alreay page aligned */ for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) { @@ -297,7 +297,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr) } zr->jpg_gbuf[i].frag_tab[2 * j] = virt_to_bus((void *) mem); zr->jpg_gbuf[i].frag_tab[2 * j + 1] = (PAGE_SIZE / 4) << 1; - mem_map_reserve(MAP_NR(mem)); + mem_map_reserve(virt_to_page(mem)); } zr->jpg_gbuf[i].frag_tab[2 * j - 1] |= 1; @@ -329,7 +329,7 @@ static void jpg_fbuffer_free(struct zoran *zr) if (zr->jpg_gbuf[i].frag_tab[0]) { mem = (unsigned char *) bus_to_virt(zr->jpg_gbuf[i].frag_tab[0]); for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE) - mem_map_unreserve(MAP_NR(mem + off)); + mem_map_unreserve(virt_to_page(mem + off)); kfree((void *) mem); zr->jpg_gbuf[i].frag_tab[0] = 0; zr->jpg_gbuf[i].frag_tab[1] = 0; @@ -338,7 +338,7 @@ static void jpg_fbuffer_free(struct zoran *zr) for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) { if (!zr->jpg_gbuf[i].frag_tab[2 * j]) break; - mem_map_unreserve(MAP_NR(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j]))); + mem_map_unreserve(virt_to_page(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j]))); free_page((unsigned long) bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])); zr->jpg_gbuf[i].frag_tab[2 * j] = 0; zr->jpg_gbuf[i].frag_tab[2 * j + 1] = 0; diff --git a/drivers/char/cpia.c b/drivers/char/cpia.c index c9ec988d1..d7d007f01 100644 --- a/drivers/char/cpia.c +++ 
b/drivers/char/cpia.c @@ -239,7 +239,7 @@ static void *rvmalloc(unsigned long size) adr = (unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_reserve(MAP_NR(__va(page))); + mem_map_reserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; @@ -263,7 +263,7 @@ static void rvfree(void *mem, unsigned long size) adr = (unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_unreserve(MAP_NR(__va(page))); + mem_map_unreserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c index f3d9db43e..19b7bd928 100644 --- a/drivers/char/drm/i810_dma.c +++ b/drivers/char/drm/i810_dma.c @@ -282,8 +282,8 @@ static unsigned long i810_alloc_page(drm_device_t *dev) if(address == 0UL) return 0; - atomic_inc(&mem_map[MAP_NR((void *) address)].count); - set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags); + atomic_inc(&virt_to_page(address)->count); + set_bit(PG_locked, &virt_to_page(address)->flags); return address; } @@ -293,9 +293,9 @@ static void i810_free_page(drm_device_t *dev, unsigned long page) if(page == 0UL) return; - atomic_dec(&mem_map[MAP_NR((void *) page)].count); - clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags); - wake_up(&mem_map[MAP_NR((void *) page)].wait); + atomic_dec(&virt_to_page(page)->count); + clear_bit(PG_locked, &virt_to_page(page)->flags); + wake_up(&virt_to_page(page)->wait); free_page(page); return; } diff --git a/drivers/char/drm/memory.c b/drivers/char/drm/memory.c index 5023de808..43e46f1d6 100644 --- a/drivers/char/drm/memory.c +++ b/drivers/char/drm/memory.c @@ -246,7 +246,7 @@ unsigned long drm_alloc_pages(int order, int area) for (addr = address, sz = bytes; sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - mem_map_reserve(MAP_NR(addr)); + mem_map_reserve(virt_to_page(addr)); } return address; @@ -267,7 +267,7 @@ void drm_free_pages(unsigned long 
address, int order, int area) for (addr = address, sz = bytes; sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - mem_map_unreserve(MAP_NR(addr)); + mem_map_unreserve(virt_to_page(addr)); } free_pages(address, order); } diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c index 28e8811c8..d02c3b581 100644 --- a/drivers/char/drm/mga_dma.c +++ b/drivers/char/drm/mga_dma.c @@ -57,8 +57,8 @@ static unsigned long mga_alloc_page(drm_device_t *dev) if(address == 0UL) { return 0; } - atomic_inc(&mem_map[MAP_NR((void *) address)].count); - set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags); + atomic_inc(&virt_to_page(address)->count); + set_bit(PG_locked, &virt_to_page(address)->flags); return address; } @@ -70,9 +70,9 @@ static void mga_free_page(drm_device_t *dev, unsigned long page) if(page == 0UL) { return; } - atomic_dec(&mem_map[MAP_NR((void *) page)].count); - clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags); - wake_up(&mem_map[MAP_NR((void *) page)].wait); + atomic_dec(&virt_to_page(page)->count); + clear_bit(PG_locked, &virt_to_page(page)->flags); + wake_up(&virt_to_page(page)->wait); free_page(page); return; } diff --git a/drivers/char/drm/vm.c b/drivers/char/drm/vm.c index 5ee9e3242..1386bd172 100644 --- a/drivers/char/drm/vm.c +++ b/drivers/char/drm/vm.c @@ -89,13 +89,13 @@ struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, offset = address - vma->vm_start; page = offset >> PAGE_SHIFT; physical = (unsigned long)dev->lock.hw_lock + offset; - atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */ + atomic_inc(&virt_to_page(physical)->count); /* Dec. 
by kernel */ DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical); #if LINUX_VERSION_CODE < 0x020317 return physical; #else - return mem_map + MAP_NR(physical); + return (virt_to_page(physical)); #endif } @@ -124,13 +124,13 @@ struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ page = offset >> PAGE_SHIFT; physical = dma->pagelist[page] + (offset & (~PAGE_MASK)); - atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */ + atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */ DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical); #if LINUX_VERSION_CODE < 0x020317 return physical; #else - return mem_map + MAP_NR(physical); + return (virt_to_page(physical)); #endif } diff --git a/drivers/char/ftape/lowlevel/ftape-buffer.c b/drivers/char/ftape/lowlevel/ftape-buffer.c index d7d31dbbb..bf3addcf4 100644 --- a/drivers/char/ftape/lowlevel/ftape-buffer.c +++ b/drivers/char/ftape/lowlevel/ftape-buffer.c @@ -48,11 +48,10 @@ static inline void *dmaalloc(size_t size) } addr = __get_dma_pages(GFP_KERNEL, get_order(size)); if (addr) { - int i; + struct page *page; - for (i = MAP_NR(addr); i < MAP_NR(addr+size); i++) { - mem_map_reserve(i); - } + for (page = virt_to_page(addr); page < get_mem_map(addr+size); page++) + mem_map_reserve(page); } return (void *)addr; } @@ -60,12 +59,11 @@ static inline void *dmaalloc(size_t size) static inline void dmafree(void *addr, size_t size) { if (size > 0) { - int i; + struct page *page; - for (i = MAP_NR((unsigned long)addr); - i < MAP_NR((unsigned long)addr+size); i++) { - mem_map_unreserve (i); - } + for (page = virt_to_page((unsigned long)addr); + page < virt_to_page((unsigned long)addr+size); page++) + mem_map_unreserve(page); free_pages((unsigned long) addr, get_order(size)); } } diff --git a/drivers/char/planb.c b/drivers/char/planb.c index fe5f905e9..94707619d 100644 --- a/drivers/char/planb.c +++ 
b/drivers/char/planb.c @@ -136,13 +136,12 @@ static int grabbuf_alloc(struct planb *pb) |GFP_DMA, 0); if (!pb->rawbuf[i]) break; - set_bit(PG_reserved, &mem_map[MAP_NR(pb->rawbuf[i])].flags); + mem_map_reserve(virt_to_page(pb->rawbuf[i])); } if (i-- < npage) { printk(KERN_DEBUG "PlanB: init_grab: grab buffer not allocated\n"); for (; i > 0; i--) { - clear_bit(PG_reserved, - &mem_map[MAP_NR(pb->rawbuf[i])].flags); + mem_map_unreserve(virt_to_page(pb->rawbuf[i])); free_pages((unsigned long)pb->rawbuf[i], 0); } kfree(pb->rawbuf); @@ -435,8 +434,7 @@ static void planb_prepare_close(struct planb *pb) } if(pb->rawbuf) { for (i = 0; i < pb->rawbuf_size; i++) { - clear_bit(PG_reserved, - &mem_map[MAP_NR(pb->rawbuf[i])].flags); + mem_map_unreserve(virt_to_page(pb->rawbuf[i])); free_pages((unsigned long)pb->rawbuf[i], 0); } kfree(pb->rawbuf); diff --git a/drivers/char/zr36120_mem.c b/drivers/char/zr36120_mem.c index 082cfee06..b4c6078d3 100644 --- a/drivers/char/zr36120_mem.c +++ b/drivers/char/zr36120_mem.c @@ -50,7 +50,7 @@ void* bmalloc(unsigned long size) if (mem) { unsigned long adr = (unsigned long)mem; while (size > 0) { - mem_map_reserve(MAP_NR(phys_to_virt(adr))); + mem_map_reserve(virt_to_page(phys_to_virt(adr))); adr += PAGE_SIZE; size -= PAGE_SIZE; } @@ -64,7 +64,7 @@ void bfree(void* mem, unsigned long size) unsigned long adr = (unsigned long)mem; unsigned long siz = size; while (siz > 0) { - mem_map_unreserve(MAP_NR(phys_to_virt(adr))); + mem_map_unreserve(virt_to_page(phys_to_virt(adr))); adr += PAGE_SIZE; siz -= PAGE_SIZE; } diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index b42ce0b46..f43d82803 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -221,7 +221,7 @@ static void * rvmalloc(unsigned long size) while (size > 0) { page = kvirt_to_pa(adr); - mem_map_reserve(MAP_NR(__va(page))); + mem_map_reserve(virt_to_page(__va(page))); adr+=PAGE_SIZE; size-=PAGE_SIZE; } @@ -239,7 +239,7 @@ static void 
rvfree(void * mem, unsigned long size) while (size > 0) { page = kvirt_to_pa(adr); - mem_map_unreserve(MAP_NR(__va(page))); + mem_map_unreserve(virt_to_page(__va(page))); adr+=PAGE_SIZE; size-=PAGE_SIZE; } diff --git a/drivers/sound/cmpci.c b/drivers/sound/cmpci.c index e97721044..6fcb896ac 100644 --- a/drivers/sound/cmpci.c +++ b/drivers/sound/cmpci.c @@ -109,6 +109,7 @@ #include <linux/malloc.h> #include <linux/soundcard.h> #include <linux/pci.h> +#include <linux/wrapper.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/init.h> @@ -591,13 +592,13 @@ static void start_adc(struct cm_state *s) static void dealloc_dmabuf(struct dmabuf *db) { - unsigned long map, mapend; + struct page *pstart, *pend; if (db->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++) + mem_map_unreserve(pstart); free_pages((unsigned long)db->rawbuf, db->buforder); } db->rawbuf = NULL; @@ -614,7 +615,7 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec) int order; unsigned bytepersec; unsigned bufs; - unsigned long map, mapend; + struct page *pstart, *pend; unsigned char fmt; unsigned long flags; @@ -646,9 +647,9 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec) printk(KERN_DEBUG "cmpci: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n", virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder); /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (pstart = virt_to_page(db->rawbuf); pstart <= pend; 
pstart++) + mem_map_reserve(pstart); } bytepersec = rate << sample_shift[fmt]; bufs = PAGE_SIZE << db->buforder; diff --git a/drivers/sound/dmabuf.c b/drivers/sound/dmabuf.c index 07d416a27..c11c1fe73 100644 --- a/drivers/sound/dmabuf.c +++ b/drivers/sound/dmabuf.c @@ -56,8 +56,9 @@ static long dmabuf_timeout(struct dma_buffparms *dmap) static int sound_alloc_dmap(struct dma_buffparms *dmap) { char *start_addr, *end_addr; - int i, dma_pagesize; + int dma_pagesize; int sz, size; + struct page *page; dmap->mapping_flags &= ~DMA_MAP_MAPPED; @@ -113,14 +114,15 @@ static int sound_alloc_dmap(struct dma_buffparms *dmap) dmap->raw_buf = start_addr; dmap->raw_buf_phys = virt_to_bus(start_addr); - for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++) - set_bit(PG_reserved, &mem_map[i].flags);; + for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++) + mem_map_reserve(page); return 0; } static void sound_free_dmap(struct dma_buffparms *dmap) { - int sz, size, i; + int sz, size; + struct page *page; unsigned long start_addr, end_addr; if (dmap->raw_buf == NULL) @@ -132,8 +134,8 @@ static void sound_free_dmap(struct dma_buffparms *dmap) start_addr = (unsigned long) dmap->raw_buf; end_addr = start_addr + dmap->buffsize; - for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++) - clear_bit(PG_reserved, &mem_map[i].flags);; + for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++) + mem_map_unreserve(page); free_pages((unsigned long) dmap->raw_buf, sz); dmap->raw_buf = NULL; diff --git a/drivers/sound/emu10k1/audio.c b/drivers/sound/emu10k1/audio.c index 9e2ee24d1..4d81c1a46 100644 --- a/drivers/sound/emu10k1/audio.c +++ b/drivers/sound/emu10k1/audio.c @@ -38,6 +38,7 @@ #include "audio.h" #include <linux/sched.h> #include <linux/smp_lock.h> +#include <linux/wrapper.h> static void calculate_ofrag(struct woinst *); static void calculate_ifrag(struct wiinst *); @@ -918,7 +919,7 @@ static int emu10k1_audio_mmap(struct file *file, struct 
vm_area_struct *vma) /* Now mark the pages as reserved, otherwise remap_page_range doesn't do what we want */ for (i = 0; i < wave_out->wavexferbuf->numpages; i++) - set_bit(PG_reserved, &mem_map[MAP_NR(wave_out->pagetable[i])].flags); + mem_map_reserve(virt_to_page(wave_out->pagetable[i])); } size = vma->vm_end - vma->vm_start; @@ -1137,7 +1138,7 @@ static int emu10k1_audio_release(struct inode *inode, struct file *file) /* Undo marking the pages as reserved */ for (i = 0; i < woinst->wave_out->wavexferbuf->numpages; i++) - set_bit(PG_reserved, &mem_map[MAP_NR(woinst->wave_out->pagetable[i])].flags); + mem_map_reserve(virt_to_page(woinst->wave_out->pagetable[i])); } woinst->mapped = 0; diff --git a/drivers/sound/es1370.c b/drivers/sound/es1370.c index e6c5945cb..a5bb668f2 100644 --- a/drivers/sound/es1370.c +++ b/drivers/sound/es1370.c @@ -150,6 +150,7 @@ #include <linux/soundcard.h> #include <linux/pci.h> #include <linux/smp_lock.h> +#include <linux/wrapper.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/init.h> @@ -540,13 +541,13 @@ static void start_adc(struct es1370_state *s) extern inline void dealloc_dmabuf(struct es1370_state *s, struct dmabuf *db) { - unsigned long map, mapend; + struct page *page, *pend; if (db->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr); } db->rawbuf = NULL; @@ -558,7 +559,7 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate, int order; unsigned bytepersec; unsigned bufs; - unsigned long map, mapend; + struct page *page, *pend; db->hwptr = db->swptr = db->total_bytes = db->count = 
db->error = db->endcleared = 0; if (!db->rawbuf) { @@ -570,9 +571,9 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate, return -ENOMEM; db->buforder = order; /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_reserve(page); } fmt &= ES1370_FMT_MASK; bytepersec = rate << sample_shift[fmt]; diff --git a/drivers/sound/es1371.c b/drivers/sound/es1371.c index 5a2cc9574..df362e8bb 100644 --- a/drivers/sound/es1371.c +++ b/drivers/sound/es1371.c @@ -123,6 +123,7 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/ac97_codec.h> +#include <linux/wrapper.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> @@ -872,13 +873,13 @@ static void start_adc(struct es1371_state *s) extern inline void dealloc_dmabuf(struct es1371_state *s, struct dmabuf *db) { - unsigned long map, mapend; + struct page *page, *pend; if (db->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr); } db->rawbuf = NULL; @@ -890,7 +891,7 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate, int order; unsigned bytepersec; unsigned bufs; - unsigned long map, mapend; + struct page *page, *pend; db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0; 
if (!db->rawbuf) { @@ -902,9 +903,9 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate, return -ENOMEM; db->buforder = order; /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_reserve(page); } fmt &= ES1371_FMT_MASK; bytepersec = rate << sample_shift[fmt]; diff --git a/drivers/sound/esssolo1.c b/drivers/sound/esssolo1.c index 6a1623cf0..4c3bdaff8 100644 --- a/drivers/sound/esssolo1.c +++ b/drivers/sound/esssolo1.c @@ -91,6 +91,7 @@ #include <linux/poll.h> #include <linux/spinlock.h> #include <linux/smp_lock.h> +#include <linux/wrapper.h> #include <asm/uaccess.h> #include <asm/hardirq.h> @@ -403,13 +404,13 @@ static void start_adc(struct solo1_state *s) extern inline void dealloc_dmabuf(struct solo1_state *s, struct dmabuf *db) { - unsigned long map, mapend; + struct page *page, *pend; if (db->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr); } db->rawbuf = NULL; @@ -421,7 +422,7 @@ static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db) int order; unsigned bytespersec; unsigned bufs, sample_shift = 0; - unsigned long map, mapend; + struct page *page, *pend; db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0; if (!db->rawbuf) { @@ -433,9 +434,9 @@ static 
int prog_dmabuf(struct solo1_state *s, struct dmabuf *db) return -ENOMEM; db->buforder = order; /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_reserve(page); } if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE)) sample_shift++; diff --git a/drivers/sound/i810_audio.c b/drivers/sound/i810_audio.c index 792f670af..27a1c243b 100644 --- a/drivers/sound/i810_audio.c +++ b/drivers/sound/i810_audio.c @@ -78,6 +78,7 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/ac97_codec.h> +#include <linux/wrapper.h> #include <asm/uaccess.h> #include <asm/hardirq.h> @@ -628,7 +629,7 @@ static int alloc_dmabuf(struct i810_state *state) struct dmabuf *dmabuf = &state->dmabuf; void *rawbuf; int order; - unsigned long map, mapend; + struct page *page, *pend; /* alloc as big a chunk as we can, FIXME: is this necessary ?? 
*/ for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) @@ -649,9 +650,9 @@ static int alloc_dmabuf(struct i810_state *state) dmabuf->buforder = order; /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1); - for (map = MAP_NR(rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1); + for (page = virt_to_page(rawbuf); page <= pend; page++) + mem_map_reserve(page); return 0; } @@ -660,13 +661,13 @@ static int alloc_dmabuf(struct i810_state *state) static void dealloc_dmabuf(struct i810_state *state) { struct dmabuf *dmabuf = &state->dmabuf; - unsigned long map, mapend; + struct page *page, *pend; if (dmabuf->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1); - for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1); + for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder, dmabuf->rawbuf, dmabuf->dma_handle); } diff --git a/drivers/sound/maestro.c b/drivers/sound/maestro.c index 429f0055f..6c664aea4 100644 --- a/drivers/sound/maestro.c +++ b/drivers/sound/maestro.c @@ -199,6 +199,7 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/smp_lock.h> +#include <linux/wrapper.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) @@ -2819,7 +2820,7 @@ allocate_buffers(struct ess_state *s) { void *rawbuf=NULL; int order,i; - unsigned long mapend,map; + struct page *page, *pend; /* alloc as big a chunk as we can */ for (order = (dsps_order + (16-PAGE_SHIFT) + 1); order >= (dsps_order + 2 + 1); order--) @@ -2865,17 +2866,16 @@ allocate_buffers(struct ess_state *s) } /* now mark the 
pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1); - for (map = MAP_NR(rawbuf); map <= mapend; map++) { - set_bit(PG_reserved, &mem_map[map].flags); - } + pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1); + for (page = virt_to_page(rawbuf); page <= pend; page++) + mem_map_reserve(page); return 0; } static void free_buffers(struct ess_state *s) { - unsigned long map, mapend; + struct page *page, *pend; s->dma_dac.rawbuf = s->dma_adc.rawbuf = NULL; s->dma_dac.mapped = s->dma_adc.mapped = 0; @@ -2884,9 +2884,9 @@ free_buffers(struct ess_state *s) M_printk("maestro: freeing %p\n",s->card->dmapages); /* undo marking the pages as reserved */ - mapend = MAP_NR(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1); - for (map = MAP_NR(s->card->dmapages); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1); + for (page = virt_to_page(s->card->dmapages); page <= pend; page++) + mem_map_unreserve(page); free_pages((unsigned long)s->card->dmapages,s->card->dmaorder); s->card->dmapages = NULL; diff --git a/drivers/sound/msnd_pinnacle.c b/drivers/sound/msnd_pinnacle.c index 91c7d2306..59d805c7b 100644 --- a/drivers/sound/msnd_pinnacle.c +++ b/drivers/sound/msnd_pinnacle.c @@ -33,6 +33,7 @@ * ********************************************************************/ +#include <linux/kernel.h> #include <linux/config.h> #include <linux/version.h> #include <linux/module.h> diff --git a/drivers/sound/sonicvibes.c b/drivers/sound/sonicvibes.c index 6b51468b3..526ba929a 100644 --- a/drivers/sound/sonicvibes.c +++ b/drivers/sound/sonicvibes.c @@ -109,6 +109,7 @@ #include <linux/poll.h> #include <linux/spinlock.h> #include <linux/smp_lock.h> +#include <linux/wrapper.h> #include <asm/uaccess.h> #include <asm/hardirq.h> @@ -692,13 +693,13 @@ static void start_adc(struct sv_state *s) static void 
dealloc_dmabuf(struct sv_state *s, struct dmabuf *db) { - unsigned long map, mapend; + struct page *page, *pend; if (db->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr); } db->rawbuf = NULL; @@ -715,7 +716,7 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec) int order; unsigned bytepersec; unsigned bufs; - unsigned long map, mapend; + struct page *page, *pend; unsigned char fmt; unsigned long flags; @@ -747,9 +748,9 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec) printk(KERN_DEBUG "sv: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n", virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder); /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); - for (map = MAP_NR(db->rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); + for (page = virt_to_page(db->rawbuf); page <= pend; page++) + mem_map_reserve(page); } bytepersec = rate << sample_shift[fmt]; bufs = PAGE_SIZE << db->buforder; diff --git a/drivers/sound/sscape.c b/drivers/sound/sscape.c index b9ac2879e..e953aac6f 100644 --- a/drivers/sound/sscape.c +++ b/drivers/sound/sscape.c @@ -810,8 +810,9 @@ static void sscape_write_host_ctrl2(sscape_info *devc, int a, int b) static int sscape_alloc_dma(sscape_info *devc) { char *start_addr, *end_addr; - int i, dma_pagesize; + int dma_pagesize; int sz, size; + struct page *page; if (devc->raw_buf != NULL) return 0; /* Already done */ dma_pagesize = (devc->dma < 4) ? 
(64 * 1024) : (128 * 1024); @@ -848,23 +849,24 @@ static int sscape_alloc_dma(sscape_info *devc) devc->raw_buf = start_addr; devc->raw_buf_phys = virt_to_bus(start_addr); - for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++) - set_bit(PG_reserved, &mem_map[i].flags);; + for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++) + mem_map_reserve(page); return 1; } static void sscape_free_dma(sscape_info *devc) { - int sz, size, i; + int sz, size; unsigned long start_addr, end_addr; + struct page *page; if (devc->raw_buf == NULL) return; for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1); start_addr = (unsigned long) devc->raw_buf; end_addr = start_addr + devc->buffsize; - for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++) - clear_bit(PG_reserved, &mem_map[i].flags);; + for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++) + mem_map_unreserve(page); free_pages((unsigned long) devc->raw_buf, sz); devc->raw_buf = NULL; diff --git a/drivers/sound/trident.c b/drivers/sound/trident.c index f19cb1740..27a29e42d 100644 --- a/drivers/sound/trident.c +++ b/drivers/sound/trident.c @@ -109,6 +109,7 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/ac97_codec.h> +#include <linux/wrapper.h> #include <asm/uaccess.h> #include <asm/hardirq.h> #include <linux/bitops.h> @@ -925,7 +926,7 @@ static int alloc_dmabuf(struct trident_state *state) struct dmabuf *dmabuf = &state->dmabuf; void *rawbuf; int order; - unsigned long map, mapend; + struct page *page, *pend; /* alloc as big a chunk as we can, FIXME: is this necessary ?? 
*/ for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) @@ -946,9 +947,9 @@ static int alloc_dmabuf(struct trident_state *state) dmabuf->buforder = order; /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ - mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1); - for (map = MAP_NR(rawbuf); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1); + for (page = virt_to_page(rawbuf); page <= pend; page++) + mem_map_reserve(page); return 0; } @@ -957,13 +958,13 @@ static int alloc_dmabuf(struct trident_state *state) static void dealloc_dmabuf(struct trident_state *state) { struct dmabuf *dmabuf = &state->dmabuf; - unsigned long map, mapend; + struct page *page, *pend; if (dmabuf->rawbuf) { /* undo marking the pages as reserved */ - mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1); - for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1); + for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++) + mem_map_unreserve(page); pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder, dmabuf->rawbuf, dmabuf->dma_handle); } diff --git a/drivers/usb/audio.c b/drivers/usb/audio.c index 5d6def562..3efd73183 100644 --- a/drivers/usb/audio.c +++ b/drivers/usb/audio.c @@ -422,7 +422,7 @@ static void dmabuf_release(struct dmabuf *db) for(nr = 0; nr < NRSGBUF; nr++) { if (!(p = db->sgbuf[nr])) continue; - mem_map_unreserve(MAP_NR(p)); + mem_map_unreserve(virt_to_page(p)); free_page((unsigned long)p); db->sgbuf[nr] = NULL; } @@ -464,7 +464,7 @@ static int dmabuf_init(struct dmabuf *db) if (!p) return -ENOMEM; db->sgbuf[nr] = p; - mem_map_reserve(MAP_NR(p)); + mem_map_reserve(virt_to_page(p)); } memset(db->sgbuf[nr], AFMT_ISUNSIGNED(db->format) ? 
0x80 : 0, PAGE_SIZE); if ((nr << PAGE_SHIFT) >= db->dmasize) diff --git a/drivers/usb/ibmcam.c b/drivers/usb/ibmcam.c index 86360f687..806c9834d 100644 --- a/drivers/usb/ibmcam.c +++ b/drivers/usb/ibmcam.c @@ -268,7 +268,7 @@ static void *rvmalloc(unsigned long size) adr = (unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_reserve(MAP_NR(__va(page))); + mem_map_reserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; @@ -292,7 +292,7 @@ static void rvfree(void *mem, unsigned long size) adr=(unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_unreserve(MAP_NR(__va(page))); + mem_map_unreserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; diff --git a/drivers/usb/ov511.c b/drivers/usb/ov511.c index 055099a0c..8f32026f2 100644 --- a/drivers/usb/ov511.c +++ b/drivers/usb/ov511.c @@ -248,7 +248,7 @@ static void *rvmalloc(unsigned long size) adr = (unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_reserve(MAP_NR(__va(page))); + mem_map_reserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; @@ -272,7 +272,7 @@ static void rvfree(void *mem, unsigned long size) adr=(unsigned long) mem; while (size > 0) { page = kvirt_to_pa(adr); - mem_map_unreserve(MAP_NR(__va(page))); + mem_map_unreserve(virt_to_page(__va(page))); adr += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index f504cddaf..e5e0e4aad 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c @@ -22,6 +22,7 @@ #include <linux/malloc.h> #include <linux/init.h> #include <linux/fb.h> +#include <linux/wrapper.h> #include <asm/hardware.h> #include <asm/io.h> @@ -1532,8 +1533,8 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end) * set count to 1, and free * the page. 
*/ - clear_bit(PG_reserved, &mem_map[MAP_NR(virtual_start)].flags); - atomic_set(&mem_map[MAP_NR(virtual_start)].count, 1); + mem_map_unreserve(virt_to_page(virtual_start)); + atomic_set(&virt_to_page(virtual_start)->count, 1); free_page(virtual_start); virtual_start += PAGE_SIZE; @@ -1628,7 +1629,7 @@ acornfb_init(void) for (page = current_par.screen_base; page < PAGE_ALIGN(current_par.screen_base + size); page += PAGE_SIZE) - mem_map[MAP_NR(page)].flags |= (1 << PG_reserved); + mem_map_reserve(virt_to_page(page)); /* Hand back any excess pages that we allocated. */ for (page = current_par.screen_base + size; page < top; page += PAGE_SIZE) free_page(page); diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c index 7a91d0b01..df86a674e 100644 --- a/drivers/video/sa1100fb.c +++ b/drivers/video/sa1100fb.c @@ -36,6 +36,7 @@ #include <linux/init.h> #include <linux/fb.h> #include <linux/delay.h> +#include <linux/wrapper.h> #include <asm/hardware.h> #include <asm/io.h> @@ -730,8 +731,8 @@ __init sa1100fb_map_video_memory(void) u_int required_pages; u_int extra_pages; u_int order; - u_int i; char *allocated_region; + struct page *page; if (VideoMemRegion != NULL) return -EINVAL; @@ -757,9 +758,9 @@ __init sa1100fb_map_video_memory(void) /* Set reserved flag for fb memory to allow it to be remapped into */ /* user space by the common fbmem driver using remap_page_range(). 
*/ - for(i = MAP_NR(VideoMemRegion); - i < MAP_NR(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); i++) - set_bit(PG_reserved, &mem_map[i].flags); + for(page = virt_to_page(VideoMemRegion); + page < virt_to_page(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); page++) + mem_map_reserve(page); /* Remap the fb memory to a non-buffered, non-cached region */ VideoMemRegion = (u_char *)__ioremap((u_long)VideoMemRegion_phys, diff --git a/fs/proc/array.c b/fs/proc/array.c index 22c9af395..9137fc765 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -421,6 +421,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned end = PMD_SIZE; do { pte_t page = *pte; + struct page *ptpage; address += PAGE_SIZE; pte++; @@ -432,8 +433,9 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned ++*pages; if (pte_dirty(page)) ++*dirty; - if ((pte_pagenr(page) >= max_mapnr) || - PageReserved(pte_pagenr(page) + mem_map)) + ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || + PageReserved(ptpage)) continue; if (page_count(pte_page(page)) > 1) ++*shared; diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h index 45e5b3b59..dc23ef74d 100644 --- a/include/asm-alpha/pgalloc.h +++ b/include/asm-alpha/pgalloc.h @@ -245,7 +245,7 @@ extern __inline__ pgd_t *get_pgd_slow(void) (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); pgd_val(ret[PTRS_PER_PGD]) - = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL)); + = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL)); } return ret; } diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h index 2614e8189..d68e18332 100644 --- a/include/asm-alpha/pgtable.h +++ b/include/asm-alpha/pgtable.h @@ -141,7 +141,7 @@ extern unsigned long __zero_page(void); #define BAD_PAGETABLE __bad_pagetable() #define BAD_PAGE __bad_page() -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(ZERO_PGE)) +#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE)) /* number of bits that fit into a memory pointer */ 
#define BITS_PER_PTR (8*sizeof(unsigned long)) @@ -209,8 +209,7 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } -#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> 32))) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> 32))) extern inline unsigned long pmd_page(pmd_t pmd) { return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h index 6992e1445..81427025c 100644 --- a/include/asm-alpha/processor.h +++ b/include/asm-alpha/processor.h @@ -145,7 +145,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long)(p),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index b18e572d0..850ea4139 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -80,7 +80,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd); #define pte_clear(ptep) set_pte((ptep), __pte(0)) #ifndef CONFIG_DISCONTIGMEM -#define pte_pagenr(pte) ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT))) +#define pte_page(x) (mem_map + (unsigned long)(((pte_val(x) - PHYS_OFFSET) >> PAGE_SHIFT))) #else /* * I'm not happy with this - we needlessly convert a physical address @@ -88,7 +88,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd); * which, if __va and __pa are expensive causes twice the expense for * zero gain. 
--rmk */ -#define pte_pagenr(pte) MAP_NR(__va(pte_val(pte))) +#define pte_page(x) (mem_map + MAP_NR(__va(pte_val(x)))) #endif #define pmd_none(pmd) (!pmd_val(pmd)) @@ -99,7 +99,6 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd); */ #define page_address(page) ((page)->virtual) #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) -#define pte_page(x) (mem_map + pte_pagenr(x)) /* * Conversion functions: convert a page and protection to a page entry, diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 33d5a73e5..94025647e 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -89,7 +89,7 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[1024]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index e9faddd62..769269192 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -422,7 +422,7 @@ unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE (2*PAGE_SIZE) #define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long) (p), 1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index 0efc7f155..9963ebb73 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -169,12 +169,6 @@ #define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))) /* - * Given a PTE, return the index of the mem_map[] entry corresponding - * to the page frame the PTE. 
- */ -#define pte_pagenr(x) ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT)) - -/* * Now for some cache flushing routines. This is the kind of stuff * that can be very expensive, so try to avoid them whenever possible. */ @@ -250,7 +244,7 @@ extern pmd_t *ia64_bad_pagetable (void); #define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) #define pte_clear(pte) (pte_val(*(pte)) = 0UL) /* pte_page() returns the "struct page *" corresponding to the PTE: */ -#define pte_page(pte) (mem_map + pte_pagenr(pte)) +#define pte_page(pte) (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT)) #define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep)) #define pmd_none(pmd) (!pmd_val(pmd)) @@ -418,7 +412,7 @@ do { \ * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[1024]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) # endif /* !__ASSEMBLY__ */ diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index fa3721bde..5024801ae 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -685,7 +685,7 @@ thread_saved_pc (struct thread_struct *t) #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES)) #define free_task_struct(p) free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h index 3755cb2f4..bedaecc4e 100644 --- a/include/asm-m68k/pgtable.h +++ b/include/asm-m68k/pgtable.h @@ -172,7 +172,7 @@ extern pte_t * __bad_pagetable(void); #define BAD_PAGETABLE __bad_pagetable() #define BAD_PAGE __bad_page() -#define ZERO_PAGE(vaddr) (mem_map + 
MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* number of bits that fit into a memory pointer */ #define BITS_PER_PTR (8*sizeof(unsigned long)) @@ -228,7 +228,6 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER)) #define pte_clear(ptep) ({ pte_val(*(ptep)) = 0; }) -#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE) @@ -248,7 +247,7 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) /* Permanent address of a page. */ #define page_address(page) ((page)->virtual) #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) -#define pte_page(pte) (mem_map+pte_pagenr(pte)) +#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h index 4444e4f43..79d81adc2 100644 --- a/include/asm-m68k/processor.h +++ b/include/asm-m68k/processor.h @@ -141,7 +141,7 @@ unsigned long get_wchan(struct task_struct *p); ({ \ unsigned long eip = 0; \ if ((tsk)->thread.esp0 > PAGE_SIZE && \ - MAP_NR((tsk)->thread.esp0) < max_mapnr) \ + (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \ eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \ eip; }) #define KSTK_ESP(tsk) ((tsk) == current ? 
rdusp() : (tsk)->thread.usp) @@ -152,7 +152,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long)(p),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 5de15976f..e0c24161a 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -212,7 +212,7 @@ extern unsigned long zero_page_mask; #define BAD_PAGETABLE __bad_pagetable() #define BAD_PAGE __bad_page() #define ZERO_PAGE(vaddr) \ - (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) + (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) /* number of bits that fit into a memory pointer */ #define BITS_PER_PTR (8*sizeof(unsigned long)) @@ -237,11 +237,6 @@ extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)]; * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ -extern inline unsigned long pte_page(pte_t pte) -{ - return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK); -} - extern inline unsigned long pmd_page(pmd_t pmd) { return pmd_val(pmd); @@ -304,12 +299,11 @@ extern inline int pgd_present(pgd_t pgd) { return 1; } extern inline void pgd_clear(pgd_t *pgdp) { } /* - * Permanent address of a page. On MIPS64 we never have highmem, so this + * Permanent address of a page. On MIPS we never have highmem, so this * is simple. 
*/ #define page_address(page) ((page)->virtual) -#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT))) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT))) /* * The following only work if pte_present() is true. diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h index d6fb7526d..022bb1215 100644 --- a/include/asm-mips/processor.h +++ b/include/asm-mips/processor.h @@ -234,7 +234,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long)(p),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-mips64/pgtable.h b/include/asm-mips64/pgtable.h index ccabdb171..42cdcab75 100644 --- a/include/asm-mips64/pgtable.h +++ b/include/asm-mips64/pgtable.h @@ -241,7 +241,7 @@ extern unsigned long zero_page_mask; #define BAD_PMDTABLE __bad_pmd_table() #define BAD_PAGE __bad_page() #define ZERO_PAGE(vaddr) \ - (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) + (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) /* number of bits that fit into a memory pointer */ #define BITS_PER_PTR (8*sizeof(unsigned long)) @@ -354,13 +354,13 @@ extern inline void pgd_clear(pgd_t *pgdp) */ #define page_address(page) ((page)->virtual) #ifndef CONFIG_DISCONTIGMEM -#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT))) +#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT))) #else -#define pte_pagenr(x) \ +#define mips64_pte_pagenr(x) \ (PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \ PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x)))) -#endif #define pte_page(x) 
(mem_map+pte_pagenr(x)) +#endif /* * The following only work if pte_present() is true. diff --git a/include/asm-mips64/processor.h b/include/asm-mips64/processor.h index 3e8cab4b5..618e8ad25 100644 --- a/include/asm-mips64/processor.h +++ b/include/asm-mips64/processor.h @@ -290,7 +290,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL, 2)) #define free_task_struct(p) free_pages((unsigned long)(p), 2) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 703dc409b..348404828 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -274,7 +274,7 @@ extern unsigned long ioremap_bot, ioremap_base; * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[1024]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* * BAD_PAGETABLE is used when we need a bogus page-table, while @@ -303,7 +303,6 @@ extern pte_t * __bad_pagetable(void); #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pte_clear(ptep) do { pte_val(*(ptep)) = 0; } while (0) -#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT))) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) != 0) @@ -315,7 +314,7 @@ extern pte_t * __bad_pagetable(void); */ #define page_address(page) ((page)->virtual) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT))) #ifndef __ASSEMBLY__ /* diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h index a0a96d937..4fd684705 100644 --- 
a/include/asm-ppc/processor.h +++ b/include/asm-ppc/processor.h @@ -687,7 +687,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long)(p),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) /* in process.c - for early bootup debug -- Cort */ int ll_printk(const char *, ...); diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index fd4792961..d6509ea2f 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -41,7 +41,7 @@ extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[1024]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) #endif /* !__ASSEMBLY__ */ /* Certain architectures need to do special things when PTEs @@ -272,7 +272,6 @@ extern inline int pte_none(pte_t pte) { return ((pte_val(pte) & (_PAGE extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; } #define PTE_INIT(x) pte_clear(x) -extern inline int pte_pagenr(pte_t pte) { return ((unsigned long)((pte_val(pte) >> PAGE_SHIFT))); } extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; } extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); } @@ -337,7 +336,7 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; } #define page_address(page) ((page)->virtual) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT))) #define pmd_page(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) diff --git 
a/include/asm-s390/processor.h b/include/asm-s390/processor.h index d55567d94..5cb89c8a3 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -149,7 +149,7 @@ unsigned long get_wchan(struct task_struct *p); #define alloc_task_struct() \ ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) #define free_task_struct(p) free_pages((unsigned long)(p),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index d7da3b727..867cf06ac 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -62,7 +62,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg); * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[1024]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) #endif /* !__ASSEMBLY__ */ @@ -156,7 +156,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd); #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) -#define pte_pagenr(x) ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT))) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -169,7 +168,7 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd); */ #define page_address(page) ((page)->virtual) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+(unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT))) /* * The following only work if pte_present() is true. 
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h index cf59dbb88..77cbe9c1e 100644 --- a/include/asm-sh/processor.h +++ b/include/asm-sh/processor.h @@ -217,7 +217,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE (2*PAGE_SIZE) extern struct task_struct * alloc_task_struct(void); extern void free_task_struct(struct task_struct *); -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index 5e1dc8b63..5fc507132 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -204,11 +204,11 @@ extern unsigned long empty_zero_page; #define SIZEOF_PTR_LOG2 2 -BTFIXUPDEF_CALL_CONST(unsigned long, pte_pagenr, pte_t) +BTFIXUPDEF_CALL_CONST(unsigned long, sparc_pte_pagenr, pte_t) BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t) BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t) -#define pte_pagenr(pte) BTFIXUP_CALL(pte_pagenr)(pte) +#define sparc_pte_pagenr(pte) BTFIXUP_CALL(sparc_pte_pagenr)(pte) #define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd) #define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd) @@ -308,7 +308,7 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t) /* Permanent address of a page. 
*/ #define page_address(page) ((page)->virtual) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+sparc_pte_pagenr(x)) /* * Conversion functions: convert a page and protection to a page entry, diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index 41cd7a7dd..b81b9cee4 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -125,7 +125,7 @@ extern struct pgtable_cache_struct { extern __inline__ void free_pgd_fast(pgd_t *pgd) { - struct page *page = mem_map + MAP_NR(pgd); + struct page *page = virt_to_page(pgd); if (!page->pprev_hash) { (unsigned long *)page->next_hash = pgd_quicklist; diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 6a03c66d8..3318578f3 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -177,7 +177,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) #define pgd_set(pgdp, pmdp) \ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL)) -#define pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT)) +#define sparc64_pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT)) #define pmd_page(pmd) ((unsigned long) __va((pmd_val(pmd)<<11UL))) #define pgd_page(pgd) ((unsigned long) __va((pgd_val(pgd)<<11UL))) #define pte_none(pte) (!pte_val(pte)) @@ -209,7 +209,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) #define __page_address(page) ((page)->virtual) #define page_address(page) ({ __page_address(page); }) -#define pte_page(x) (mem_map+pte_pagenr(x)) +#define pte_page(x) (mem_map+sparc64_pte_pagenr(x)) /* Be very careful when you change these three, they are delicate. 
*/ #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index b7b124a56..37433eace 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -259,7 +259,7 @@ __out: __ret; \ /* Allocation and freeing of task_struct and kernel stack. */ #define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1)) #define free_task_struct(tsk) free_pages((unsigned long)(tsk),1) -#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count) +#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) #define init_task (init_task_union.task) #define init_stack (init_task_union.stack) diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 9a2f2cbe6..bde781475 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -11,7 +11,6 @@ extern struct page *highmem_start_page; #include <asm/highmem.h> /* declarations for linux/mm/highmem.c */ -extern unsigned long highmem_mapnr; FASTCALL(unsigned int nr_free_highpages(void)); extern struct page * prepare_highmem_swapout(struct page *); diff --git a/include/linux/mm.h b/include/linux/mm.h index a248e939f..94e30f7c7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -338,6 +338,9 @@ extern unsigned long FASTCALL(get_zeroed_page(int gfp_mask)); #define __get_dma_pages(gfp_mask, order) \ __get_free_pages((gfp_mask) | GFP_DMA,(order)) +#define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) +#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) + /* * The old interface name will be removed in 2.5: */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a1176b978..a3b897378 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -36,7 +36,7 @@ /* * From a kernel address, get the "struct page *" */ -#define page_cache_entry(x) (mem_map + MAP_NR(x)) +#define page_cache_entry(x) virt_to_page(x) extern unsigned int 
page_hash_bits; #define PAGE_HASH_BITS (page_hash_bits) diff --git a/include/linux/wrapper.h b/include/linux/wrapper.h index 36d80ef1e..edf8ef153 100644 --- a/include/linux/wrapper.h +++ b/include/linux/wrapper.h @@ -29,8 +29,8 @@ #define vma_get_end(v) v->vm_end #define vma_get_page_prot(v) v->vm_page_prot -#define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags) -#define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags) -#define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count)) -#define mem_map_dec_count(p) atomic_dec(&(mem_map[p].count)) +#define mem_map_reserve(p) set_bit(PG_reserved, &p->flags) +#define mem_map_unreserve(p) clear_bit(PG_reserved, &p->flags) +#define mem_map_inc_count(p) atomic_inc(&(p->count)) +#define mem_map_dec_count(p) atomic_dec(&(p->count)) #endif diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 6c5d1f143..a749bb501 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -24,7 +24,6 @@ static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, u pgd_t * pgdir; pmd_t * pgmiddle; pte_t * pgtable; - unsigned long mapnr; unsigned long maddr; struct page *page; @@ -42,11 +41,10 @@ repeat: pgtable = pte_offset(pgmiddle, addr); if (!pte_present(*pgtable)) goto fault_in_page; - mapnr = pte_pagenr(*pgtable); if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable))) goto fault_in_page; - page = mem_map + mapnr; - if ((mapnr >= max_mapnr) || PageReserved(page)) + page = pte_page(*pgtable); + if ((!VALID_PAGE(page)) || PageReserved(page)) return 0; flush_cache_page(vma, addr); diff --git a/mm/bootmem.c b/mm/bootmem.c index 0e11fe9ed..fbcb2bb06 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -246,7 +246,7 @@ static unsigned long __init free_all_bootmem_core(int nid, bootmem_data_t *bdata * Now free the allocator bitmap itself, it's not * needed anymore: */ - page = mem_map + MAP_NR(bdata->node_bootmem_map); + page = virt_to_page(bdata->node_bootmem_map); count = 0; for (i = 0; i < 
((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) { count++; diff --git a/mm/highmem.c b/mm/highmem.c index 411f20c52..6208e347d 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -22,8 +22,6 @@ #include <linux/swap.h> #include <linux/slab.h> -unsigned long highmem_mapnr; - /* * Take one locked page, return another low-memory locked page. */ @@ -61,7 +59,7 @@ struct page * prepare_highmem_swapout(struct page * page) * we stored its data into the new regular_page. */ page_cache_release(page); - new_page = mem_map + MAP_NR(regular_page); + new_page = virt_to_page(regular_page); LockPage(new_page); return new_page; } diff --git a/mm/memory.c b/mm/memory.c index 0d93216b5..83fc97cb3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -210,7 +210,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK; do { pte_t pte = *src_pte; - unsigned long page_nr; + struct page *ptepage; /* copy_one_pte */ @@ -221,9 +221,9 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK; set_pte(dst_pte, pte); goto cont_copy_pte_range; } - page_nr = pte_pagenr(pte); - if (page_nr >= max_mapnr || - PageReserved(mem_map+page_nr)) { + ptepage = pte_page(pte); + if ((!VALID_PAGE(ptepage)) || + PageReserved(ptepage)) { set_pte(dst_pte, pte); goto cont_copy_pte_range; } @@ -236,7 +236,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK; if (vma->vm_flags & VM_SHARED) pte = pte_mkclean(pte); set_pte(dst_pte, pte_mkold(pte)); - get_page(mem_map + page_nr); + get_page(ptepage); cont_copy_pte_range: address += PAGE_SIZE; if (address >= end) @@ -262,14 +262,14 @@ nomem: static inline int free_pte(pte_t page) { if (pte_present(page)) { - unsigned long nr = pte_pagenr(page); - if (nr >= max_mapnr || PageReserved(mem_map+nr)) + struct page *ptpage = pte_page(page); + if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage)) return 0; /* * free_page() used to be able to clear swap cache * entries. We may now have to do it manually. 
*/ - free_page_and_swap_cache(mem_map+nr); + free_page_and_swap_cache(ptpage); return 1; } swap_free(pte_to_swp_entry(page)); @@ -409,7 +409,7 @@ static struct page * follow_page(unsigned long address) static inline struct page * get_page_map(struct page *page) { - if (page > (mem_map + max_mapnr)) + if (!VALID_PAGE(page)) return 0; return page; } @@ -711,12 +711,12 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned if (end > PMD_SIZE) end = PMD_SIZE; do { - unsigned long mapnr; + struct page *page; pte_t oldpage = *pte; pte_clear(pte); - mapnr = MAP_NR(__va(phys_addr)); - if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr)) + page = virt_to_page(__va(phys_addr)); + if ((!VALID_PAGE(page)) || PageReserved(page)) set_pte(pte, mk_pte_phys(phys_addr, prot)); forget_pte(oldpage); address += PAGE_SIZE; @@ -818,13 +818,11 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * old_page static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t pte) { - unsigned long map_nr; struct page *old_page, *new_page; - map_nr = pte_pagenr(pte); - if (map_nr >= max_mapnr) + old_page = pte_page(pte); + if (!VALID_PAGE(old_page)) goto bad_wp_page; - old_page = mem_map + map_nr; /* * We can avoid the copy if: @@ -883,7 +881,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, bad_wp_page: spin_unlock(&mm->page_table_lock); - printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr); + printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page); return -1; } @@ -920,7 +918,7 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address) return; flush_cache_page(vma, address); page = pte_page(pte); - if ((page-mem_map >= max_mapnr) || PageReserved(page)) + if ((!VALID_PAGE(page)) || PageReserved(page)) return; offset = address & ~PAGE_MASK; memclear_highpage_flush(page, offset, 
PAGE_SIZE - offset); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 420f91f92..8b74a73db 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -86,7 +86,7 @@ static void __free_pages_ok (struct page *page, unsigned long order) BUG(); if (page->mapping) BUG(); - if (page-mem_map >= max_mapnr) + if (!VALID_PAGE(page)) BUG(); if (PageSwapCache(page)) BUG(); @@ -350,14 +350,14 @@ void __free_pages(struct page *page, unsigned long order) void free_pages(unsigned long addr, unsigned long order) { - unsigned long map_nr; + struct page *fpage; #ifdef CONFIG_DISCONTIGMEM if (addr == 0) return; #endif - map_nr = MAP_NR(addr); - if (map_nr < max_mapnr) - __free_pages(mem_map + map_nr, order); + fpage = virt_to_page(addr); + if (VALID_PAGE(fpage)) + __free_pages(fpage, order); } /* diff --git a/mm/page_io.c b/mm/page_io.c index b2b6359d0..25ed62221 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -126,7 +126,7 @@ void rw_swap_page(int rw, struct page *page, int wait) */ void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait) { - struct page *page = mem_map + MAP_NR(buf); + struct page *page = virt_to_page(buf); if (!PageLocked(page)) PAGE_BUG(page); @@ -496,7 +496,7 @@ static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags) static inline void kmem_freepages (kmem_cache_t *cachep, void *addr) { unsigned long i = (1<<cachep->gfporder); - struct page *page = mem_map + MAP_NR(addr); + struct page *page = virt_to_page(addr); /* free_pages() does not clear the type bit - we do that. * The pages have been unlinked from their cache-slab, @@ -1115,7 +1115,7 @@ static int kmem_cache_grow (kmem_cache_t * cachep, int flags) /* Nasty!!!!!! I hope this is OK. 
*/ i = 1 << cachep->gfporder; - page = mem_map + MAP_NR(objp); + page = virt_to_page(objp); do { SET_PAGE_CACHE(page, cachep); SET_PAGE_SLAB(page, slabp); @@ -1321,9 +1321,9 @@ alloc_new_slab_nolock: */ #if DEBUG -# define CHECK_NR(nr) \ +# define CHECK_NR(pg) \ do { \ - if (nr >= max_mapnr) { \ + if (!VALID_PAGE(pg)) { \ printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \ (unsigned long)objp); \ BUG(); \ @@ -1331,6 +1331,7 @@ alloc_new_slab_nolock: } while (0) # define CHECK_PAGE(page) \ do { \ + CHECK_NR(page); \ if (!PageSlab(page)) { \ printk(KERN_ERR "kfree: bad ptr %lxh.\n", \ (unsigned long)objp); \ @@ -1339,23 +1340,21 @@ alloc_new_slab_nolock: } while (0) #else -# define CHECK_NR(nr) do { } while (0) -# define CHECK_PAGE(nr) do { } while (0) +# define CHECK_PAGE(pg) do { } while (0) #endif static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp) { slab_t* slabp; - CHECK_NR(MAP_NR(objp)); - CHECK_PAGE(mem_map + MAP_NR(objp)); + CHECK_PAGE(virt_to_page(objp)); /* reduces memory footprint * if (OPTIMIZE(cachep)) slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1))); else */ - slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp)); + slabp = GET_PAGE_SLAB(virt_to_page(objp)); #if DEBUG if (cachep->flags & SLAB_DEBUG_INITIAL) @@ -1452,8 +1451,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp) #ifdef CONFIG_SMP cpucache_t *cc = cc_data(cachep); - CHECK_NR(MAP_NR(objp)); - CHECK_PAGE(mem_map + MAP_NR(objp)); + CHECK_PAGE(virt_to_page(objp)); if (cc) { int batchcount; if (cc->avail < cc->limit) { @@ -1536,9 +1534,8 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp) { unsigned long flags; #if DEBUG - CHECK_NR(MAP_NR(objp)); - CHECK_PAGE(mem_map + MAP_NR(objp)); - if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp))) + CHECK_PAGE(virt_to_page(objp)); + if (cachep != GET_PAGE_CACHE(virt_to_page(objp))) BUG(); #endif @@ -1562,9 +1559,8 @@ void kfree (const void *objp) if (!objp) return; local_irq_save(flags); - 
CHECK_NR(MAP_NR(objp)); - CHECK_PAGE(mem_map + MAP_NR(objp)); - c = GET_PAGE_CACHE(mem_map + MAP_NR(objp)); + CHECK_PAGE(virt_to_page(objp)); + c = GET_PAGE_CACHE(virt_to_page(objp)); __kmem_cache_free(c, (void*)objp); local_irq_restore(flags); } diff --git a/mm/swap_state.c b/mm/swap_state.c index 72f3eaca4..506160354 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -220,7 +220,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait) new_page_addr = __get_free_page(GFP_USER); if (!new_page_addr) goto out_free_swap; /* Out of memory */ - new_page = mem_map + MAP_NR(new_page_addr); + new_page = virt_to_page(new_page_addr); /* * Check the swap cache again, in case we stalled above. diff --git a/mm/swapfile.c b/mm/swapfile.c index a84e73f2f..fa4cb133e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -645,7 +645,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags) goto bad_swap; } - lock_page(mem_map + MAP_NR(swap_header)); + lock_page(virt_to_page(swap_header)); rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1); if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10)) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 57f3ca56c..817a3966b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -41,10 +41,9 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo if (pte_none(page)) continue; if (pte_present(page)) { - unsigned long map_nr = pte_pagenr(page); - if ((map_nr < max_mapnr) && - (!PageReserved(mem_map + map_nr))) - __free_page(mem_map + map_nr); + struct page *ptpage = pte_page(page); + if (VALID_PAGE(ptpage) && (!PageReserved(ptpage))) + __free_page(ptpage); continue; } printk(KERN_CRIT "Whee.. 
Swapped out page in kernel page table\n"); diff --git a/mm/vmscan.c b/mm/vmscan.c index 4dda15dd1..95098e4d1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -45,7 +45,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un if (!pte_present(pte)) goto out_failed; page = pte_page(pte); - if ((page-mem_map >= max_mapnr) || PageReserved(page)) + if ((!VALID_PAGE(page)) || PageReserved(page)) goto out_failed; if (mm->swap_cnt) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b0ea8b730..862cc7027 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1564,11 +1564,11 @@ static void free_pg_vec(unsigned long *pg_vec, unsigned order, unsigned len) for (i=0; i<len; i++) { if (pg_vec[i]) { - unsigned long map, mapend; + struct page *page, *pend; - mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1); - for (map = MAP_NR(pg_vec[i]); map <= mapend; map++) - clear_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1); + for (page = virt_to_page(pg_vec[i]); page <= pend; page++) + mem_map_unreserve(page); free_pages(pg_vec[i], order); } } @@ -1616,14 +1616,14 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing memset(pg_vec, 0, req->tp_block_nr*sizeof(unsigned long*)); for (i=0; i<req->tp_block_nr; i++) { - unsigned long map, mapend; + struct page *page, *pend; pg_vec[i] = __get_free_pages(GFP_KERNEL, order); if (!pg_vec[i]) goto out_free_pgvec; - mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1); - for (map = MAP_NR(pg_vec[i]); map <= mapend; map++) - set_bit(PG_reserved, &mem_map[map].flags); + pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1); + for (page = virt_to_page(pg_vec[i]); page <= pend; page++) + mem_map_reserve(page); } /* Page vector is allocated */ |