Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 90 ++++++++++++++++------------------------
 1 file changed, 37 insertions(+), 53 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b413095d3..2413bfedf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -11,6 +11,7 @@
extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
+extern int page_cluster;
#include <asm/page.h>
#include <asm/atomic.h>
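The newly exported page_cluster tunable sizes swap clusters; as later kernels document it, the value is an exponent, so (1 << page_cluster) pages are read or written around a faulting swap entry, though this header alone only declares the variable. A toy userspace model of that interpretation:

    #include <stdio.h>

    /* Assumed semantics: page_cluster is an exponent, so the swap code
     * moves (1 << page_cluster) pages per cluster. Value illustrative. */
    int page_cluster = 3;

    static unsigned long swap_cluster_pages(void)
    {
            return 1UL << page_cluster;
    }

    int main(void)
    {
            printf("%lu pages per swap cluster\n", swap_cluster_pages());
            return 0;
    }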
@@ -34,10 +35,17 @@ struct vm_area_struct {
struct mm_struct * vm_mm; /* VM area parameters */
unsigned long vm_start;
unsigned long vm_end;
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next;
+
pgprot_t vm_page_prot;
unsigned short vm_flags;
- struct vm_area_struct *vm_next;
- struct vm_area_struct **vm_pprev;
+
+ /* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
/* For areas with inode, the list inode->i_mmap, for shm areas,
* the list of attaches, otherwise unused.
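The ordering fields are now split in two: every VMA remains on a singly linked list sorted by address for cheap full traversals, while the new vm_avl_* links support O(log n) address lookups. A standalone sketch of the list side, using stub types rather than the real kernel structs:

    #include <stdio.h>

    /* Stub mirroring the list field above; illustrative only. */
    struct vm_area_struct {
            unsigned long vm_start, vm_end;
            struct vm_area_struct *vm_next;         /* sorted by address */
    };

    /* A /proc/<pid>/maps-style dump is a plain walk of the sorted list. */
    static void dump_vmas(const struct vm_area_struct *vma)
    {
            for (; vma; vma = vma->vm_next)
                    printf("%08lx-%08lx\n", vma->vm_start, vma->vm_end);
    }

    int main(void)
    {
            struct vm_area_struct text = { 0x08048000, 0x0804c000, 0 };
            struct vm_area_struct lib  = { 0x40000000, 0x40010000, 0 };

            text.vm_next = &lib;
            dump_vmas(&text);
            return 0;
    }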
@@ -97,7 +105,7 @@ struct vm_operations_struct {
unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
unsigned long page);
- int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ int (*swapout)(struct vm_area_struct *, struct page *);
pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
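The swapout hook now receives the struct page directly instead of an (address, pte) pair, so implementations no longer have to re-derive the page themselves. A compile-only sketch against the new signature; the body and the return convention are assumptions, not part of this patch:

    struct page;
    struct vm_area_struct;

    /* Hypothetical implementation matching the new hook type. */
    static int example_swapout(struct vm_area_struct *vma, struct page *page)
    {
            /* A real driver would write `page` back to its backing
             * store here (a mapped file, shm segment, ...). */
            (void)vma;
            (void)page;
            return 0;                       /* assumed: 0 on success */
    }

    /* Wiring it into a vm_operations_struct-shaped table: */
    struct example_vm_ops {
            int (*swapout)(struct vm_area_struct *, struct page *);
    } example_ops = { example_swapout };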
@@ -117,32 +125,32 @@ typedef struct page {
unsigned long offset;
struct page *next_hash;
atomic_t count;
- unsigned int unused;
unsigned long flags; /* atomic flags, some possibly updated asynchronously */
struct wait_queue *wait;
struct page **pprev_hash;
struct buffer_head * buffers;
- unsigned long map_nr; /* page->map_nr == page - mem_map */
} mem_map_t;
/* Page flag bit values */
#define PG_locked 0
#define PG_error 1
#define PG_referenced 2
-#define PG_uptodate 3
-#define PG_free_after 4
-#define PG_decr_after 5
-#define PG_swap_unlock_after 6
-#define PG_DMA 7
-#define PG_Slab 8
-#define PG_swap_cache 9
-#define PG_skip 10
+#define PG_dirty 3
+#define PG_uptodate 4
+#define PG_free_after 5
+#define PG_decr_after 6
+#define PG_swap_unlock_after 7
+#define PG_DMA 8
+#define PG_Slab 9
+#define PG_swap_cache 10
+#define PG_skip 11
#define PG_reserved 31
/* Make it prettier to test the above... */
#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
#define PageError(page) (test_bit(PG_error, &(page)->flags))
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
@@ -154,12 +162,17 @@ typedef struct page {
#define PageSetSlab(page) (set_bit(PG_Slab, &(page)->flags))
#define PageSetSwapCache(page) (set_bit(PG_swap_cache, &(page)->flags))
+
+#define PageTestandSetDirty(page) \
+ (test_and_set_bit(PG_dirty, &(page)->flags))
#define PageTestandSetSwapCache(page) \
(test_and_set_bit(PG_swap_cache, &(page)->flags))
#define PageClearSlab(page) (clear_bit(PG_Slab, &(page)->flags))
#define PageClearSwapCache(page) (clear_bit(PG_swap_cache, &(page)->flags))
+#define PageTestandClearDirty(page) \
+ (test_and_clear_bit(PG_dirty, &(page)->flags))
#define PageTestandClearSwapCache(page) \
(test_and_clear_bit(PG_swap_cache, &(page)->flags))
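The test-and-set/test-and-clear variants matter because they are atomic: a writeback path can claim the dirty bit exactly once even if another CPU redirties the page concurrently. A userspace model of the pattern using C11 atomics (not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PG_dirty 3

    static atomic_ulong flags;              /* models page->flags */

    /* Models PageTestandClearDirty(): returns the old bit, clears it. */
    static int test_and_clear_dirty(void)
    {
            unsigned long old = atomic_fetch_and(&flags, ~(1UL << PG_dirty));
            return (old >> PG_dirty) & 1;
    }

    int main(void)
    {
            atomic_fetch_or(&flags, 1UL << PG_dirty);   /* dirty the page */

            if (test_and_clear_dirty())
                    puts("first caller owns the writeback");
            if (!test_and_clear_dirty())
                    puts("second caller sees the page already clean");
            return 0;
    }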
@@ -249,6 +262,8 @@ extern inline unsigned long get_free_page(int gfp_mask)
return page;
}
+extern int low_on_memory;
+
/* memory.c & swap.c*/
#define free_page(addr) free_pages((addr),0)
@@ -260,7 +275,7 @@ extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
unsigned long address);
extern void free_page_tables(struct mm_struct * mm);
-extern void clear_page_tables(struct task_struct * tsk);
+extern void clear_page_tables(struct mm_struct *, unsigned long, int);
extern int new_page_tables(struct task_struct * tsk);
extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
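clear_page_tables() now takes the mm plus an explicit range instead of a task. Assuming the two new arguments are the first page-directory slot and a slot count (a reading of the bare prototype, not something this header states), a teardown path might call it as in this compile-only sketch:

    struct mm_struct;
    extern void clear_page_tables(struct mm_struct *, unsigned long, int);

    #define FIRST_USER_PGD_NR 0     /* arch constant; value assumed */
    #define USER_PTRS_PER_PGD 768   /* e.g. i386 with a 3 GB user split */

    static void example_exit_mmap(struct mm_struct *mm)
    {
            /* Free only the user portion of the page tables. */
            clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
    }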
@@ -285,6 +300,7 @@ extern void si_meminfo(struct sysinfo * val);
extern void vma_init(void);
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);
@@ -307,15 +323,18 @@ extern void put_cached_page(unsigned long);
#define __GFP_LOW 0x02
#define __GFP_MED 0x04
#define __GFP_HIGH 0x08
+#define __GFP_IO 0x10
+#define __GFP_SWAP 0x20
#define __GFP_UNCACHED 0x40
#define __GFP_DMA 0x80
#define GFP_BUFFER (__GFP_LOW | __GFP_WAIT)
#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_USER (__GFP_LOW | __GFP_WAIT)
-#define GFP_KERNEL (__GFP_MED | __GFP_WAIT)
-#define GFP_NFS (__GFP_HIGH | __GFP_WAIT)
+#define GFP_USER (__GFP_LOW | __GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL (__GFP_MED | __GFP_WAIT | __GFP_IO)
+#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
+#define GFP_KSWAPD (__GFP_IO | __GFP_SWAP)
/* Flag - indicates that the buffer should be allocated uncached as for an
architecture where the caches don't snoop DMA access. This is an even
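The practical effect of __GFP_IO is that reclaim may only start disk I/O when the caller's mask permits it; GFP_ATOMIC carries no __GFP_IO, so interrupt-context allocations can never end up waiting on the disk. A userspace model of the gating, reusing the mask values defined above:

    #include <stdio.h>

    #define __GFP_WAIT 0x01
    #define __GFP_LOW  0x02
    #define __GFP_MED  0x04
    #define __GFP_HIGH 0x08
    #define __GFP_IO   0x10

    #define GFP_ATOMIC (__GFP_HIGH)
    #define GFP_KERNEL (__GFP_MED | __GFP_WAIT | __GFP_IO)

    /* Models the check a reclaim path would make before doing I/O. */
    static int may_do_io(int gfp_mask)
    {
            return (gfp_mask & __GFP_IO) != 0;
    }

    int main(void)
    {
            printf("GFP_KERNEL may do I/O: %d\n", may_do_io(GFP_KERNEL));
            printf("GFP_ATOMIC may do I/O: %d\n", may_do_io(GFP_ATOMIC));
            return 0;
    }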
@@ -359,22 +378,7 @@ static inline int expand_stack(struct vm_area_struct * vma, unsigned long addres
}
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
-static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
-{
- struct vm_area_struct *vma = NULL;
-
- if (mm) {
- /* Check the cache first. */
- vma = mm->mmap_cache;
- if(!vma || (vma->vm_end <= addr) || (vma->vm_start > addr)) {
- vma = mm->mmap;
- while(vma && vma->vm_end <= addr)
- vma = vma->vm_next;
- mm->mmap_cache = vma;
- }
- }
- return vma;
-}
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
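Taking find_vma() out of line lets the slow path grow beyond what an inline body should carry. A compile-only sketch of what the out-of-line version presumably looks like: the one-entry mmap_cache from the deleted inline survives, and the miss path descends the new AVL tree (assumed keyed on vm_end, matching the address-sorted list) instead of walking the list. Stub types stand in for the kernel structs:

    #include <stddef.h>

    struct vm_area_struct {
            unsigned long vm_start, vm_end;
            struct vm_area_struct *vm_avl_left, *vm_avl_right;
    };

    struct mm_struct {
            struct vm_area_struct *mmap_avl;        /* AVL root */
            struct vm_area_struct *mmap_cache;      /* last lookup hit */
    };

    struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma = NULL;

            if (mm) {
                    /* Fast path: the cached VMA still covers addr. */
                    vma = mm->mmap_cache;
                    if (!vma || vma->vm_end <= addr || vma->vm_start > addr) {
                            struct vm_area_struct *tree = mm->mmap_avl;

                            /* Find the smallest vm_end greater than addr. */
                            vma = NULL;
                            while (tree) {
                                    if (addr < tree->vm_end) {
                                            vma = tree;
                                            tree = tree->vm_avl_left;
                                    } else {
                                            tree = tree->vm_avl_right;
                                    }
                            }
                            mm->mmap_cache = vma;
                    }
            }
            return vma;
    }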
@@ -389,28 +393,8 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
#define buffer_under_min() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.min_percent * num_physpages)
-#define buffer_under_borrow() ((buffermem >> PAGE_SHIFT) * 100 < \
- buffer_mem.borrow_percent * num_physpages)
-#define buffer_under_max() ((buffermem >> PAGE_SHIFT) * 100 < \
- buffer_mem.max_percent * num_physpages)
-#define buffer_over_min() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.min_percent * num_physpages)
-#define buffer_over_borrow() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.borrow_percent * num_physpages)
-#define buffer_over_max() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.max_percent * num_physpages)
#define pgcache_under_min() (page_cache_size * 100 < \
page_cache.min_percent * num_physpages)
-#define pgcache_under_borrow() (page_cache_size * 100 < \
- page_cache.borrow_percent * num_physpages)
-#define pgcache_under_max() (page_cache_size * 100 < \
- page_cache.max_percent * num_physpages)
-#define pgcache_over_min() (page_cache_size * 100 > \
- page_cache.min_percent * num_physpages)
-#define pgcache_over_borrow() (page_cache_size * 100 > \
- page_cache.borrow_percent * num_physpages)
-#define pgcache_over_max() (page_cache_size * 100 > \
- page_cache.max_percent * num_physpages)
#endif /* __KERNEL__ */
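The surviving watermarks compare a page count, scaled by 100, against min_percent of all physical pages; the borrow/max variants and the "over" directions were dropped, presumably as unused. A userspace model of the remaining page-cache check, with illustrative numbers:

    #include <stdio.h>

    static unsigned long num_physpages   = 32768;   /* 128 MB of 4 KB pages */
    static unsigned long page_cache_size = 2000;    /* pages in the cache */

    static struct { int min_percent; } page_cache = { 10 };

    #define pgcache_under_min() \
            (page_cache_size * 100 < page_cache.min_percent * num_physpages)

    int main(void)
    {
            printf("page cache under min: %d\n", pgcache_under_min() ? 1 : 0);
            return 0;
    }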