author | Ralf Baechle <ralf@linux-mips.org> | 2000-01-29 01:41:54 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-01-29 01:41:54 +0000 |
commit | f969d69ba9f952e5bdd38278e25e26a3e4a61a70 (patch) | |
tree | b3530d803df59d726afaabebc6626987dee1ca05 /include/linux/pagemap.h | |
parent | a10ce7ef2066b455d69187643ddf2073bfc4db24 (diff) | |
Merge with 2.3.27.
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r-- | include/linux/pagemap.h | 44 |
1 files changed, 17 insertions, 27 deletions
```diff
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index de5514574..66b558627 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1,8 +1,6 @@
 #ifndef _LINUX_PAGEMAP_H
 #define _LINUX_PAGEMAP_H
 
-#include <asm/system.h>
-
 /*
  * Page-mapping primitive inline functions
  *
@@ -11,17 +9,10 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
-#include <linux/highmem.h>
 #include <linux/list.h>
 
-extern inline pte_t get_pagecache_pte(struct page *page)
-{
-	/*
-	 * the pagecache is still machineword sized. The rest of the VM
-	 * can deal with arbitrary sized ptes.
-	 */
-	return __pte(page->offset);
-}
+#include <asm/system.h>
+#include <asm/pgtable.h>
 
 /*
  * The page cache can done in larger chunks than
@@ -36,7 +27,7 @@ extern inline pte_t get_pagecache_pte(struct page *page)
 #define PAGE_CACHE_MASK PAGE_MASK
 #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
 
-#define page_cache_alloc() __get_pages(GFP_USER, 0)
+#define page_cache_alloc() alloc_pages(GFP_HIGHUSER, 0)
 #define page_cache_free(x) __free_page(x)
 #define page_cache_release(x) __free_page(x)
 
@@ -57,43 +48,42 @@ extern void page_cache_init(unsigned long);
 /*
  * We use a power-of-two hash table to avoid a modulus,
  * and get a reasonable hash by knowing roughly how the
- * inode pointer and offsets are distributed (ie, we
+ * inode pointer and indexes are distributed (ie, we
  * roughly know which bits are "significant")
  *
  * For the time being it will work for struct address_space too (most of
  * them sitting inside the inodes). We might want to change it later.
  */
-extern inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long offset)
+extern inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
 {
 #define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
-#define o (offset >> PAGE_SHIFT)
 #define s(x) ((x)+((x)>>PAGE_HASH_BITS))
-	return s(i+o) & (PAGE_HASH_SIZE-1);
+	return s(i+index) & (PAGE_HASH_SIZE-1);
 #undef i
 #undef o
 #undef s
 }
 
-#define page_hash(mapping,offset) (page_hash_table+_page_hashfn(mapping,offset))
+#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))
 
 extern struct page * __find_get_page (struct address_space *mapping,
-				unsigned long offset, struct page **hash);
-#define find_get_page(mapping, offset) \
-		__find_get_page(mapping, offset, page_hash(mapping, offset))
+				unsigned long index, struct page **hash);
+#define find_get_page(mapping, index) \
+		__find_get_page(mapping, index, page_hash(mapping, index))
 extern struct page * __find_lock_page (struct address_space * mapping,
-				unsigned long offset, struct page **hash);
+				unsigned long index, struct page **hash);
 extern void lock_page(struct page *page);
-#define find_lock_page(mapping, offset) \
-		__find_lock_page(mapping, offset, page_hash(mapping, offset))
+#define find_lock_page(mapping, index) \
+		__find_lock_page(mapping, index, page_hash(mapping, index))
 
 extern void __add_page_to_hash_queue(struct page * page, struct page **p);
 
-extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long offset);
-extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long offset, struct page **hash);
+extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
+extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
 
-extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
 {
-	__add_page_to_hash_queue(page, page_hash(&inode->i_data,offset));
+	__add_page_to_hash_queue(page, page_hash(&inode->i_data,index));
 }
 
 extern inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
```
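The comment above `_page_hashfn` carries the key idea in this hunk: the page-cache hash table has a power-of-two size, so a bucket is chosen by folding the high bits of (scaled mapping pointer + page index) back into the low bits and masking with `PAGE_HASH_SIZE - 1`, with no modulus on the lookup path. Below is a minimal standalone sketch of that idea; `PAGE_HASH_BITS`, the pointer scaling, and `main()` are simplified stand-ins for illustration, not the kernel's definitions.

```c
/*
 * Standalone sketch of the power-of-two hashing idea used by _page_hashfn.
 * PAGE_HASH_BITS and the pointer scaling below are illustrative stand-ins,
 * not the kernel's values.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_HASH_BITS 11                       /* assumed: 2^11 buckets */
#define PAGE_HASH_SIZE (1UL << PAGE_HASH_BITS)

static unsigned long page_hashfn(const void *mapping, unsigned long index)
{
	/* Scale the pointer down so distinct mappings spread over buckets. */
	unsigned long i = (unsigned long)(uintptr_t)mapping / sizeof(unsigned long);
	unsigned long x = i + index;

	/*
	 * Fold the bits above PAGE_HASH_BITS back in, then mask.  Masking
	 * with (size - 1) replaces a modulus because the size is 2^n.
	 */
	return (x + (x >> PAGE_HASH_BITS)) & (PAGE_HASH_SIZE - 1);
}

int main(void)
{
	int dummy;		/* stand-in for a struct address_space */

	printf("bucket for index 42: %lu\n", page_hashfn(&dummy, 42));
	return 0;
}
```

This is also why the switch from byte offsets to page indexes simplifies the function: once callers pass an index directly, the old `o` helper macro (`offset >> PAGE_SHIFT`) is unnecessary, which is exactly what the hunk removes.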