Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h | 146
 1 file changed, 121 insertions(+), 25 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9226ce0a5..05317e725 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -65,13 +65,16 @@ struct swap_info_struct {
extern int nr_swap_pages;
FASTCALL(unsigned int nr_free_pages(void));
+FASTCALL(unsigned int nr_inactive_clean_pages(void));
FASTCALL(unsigned int nr_free_buffer_pages(void));
-FASTCALL(unsigned int nr_free_highpages(void));
-extern int nr_lru_pages;
+extern int nr_active_pages;
+extern int nr_inactive_dirty_pages;
extern atomic_t nr_async_pages;
extern struct address_space swapper_space;
extern atomic_t page_cache_size;
extern atomic_t buffermem_pages;
+extern spinlock_t pagecache_lock;
+extern void __remove_inode_page(struct page *);
/* Incomplete types for prototype declarations: */
struct task_struct;
@@ -83,9 +86,30 @@ struct zone_t;
extern int shm_swap(int, int);
/* linux/mm/swap.c */
+extern int memory_pressure;
+extern void age_page_up(struct page *);
+extern void age_page_up_nolock(struct page *);
+extern void age_page_down(struct page *);
+extern void age_page_down_nolock(struct page *);
+extern void age_page_down_ageonly(struct page *);
+extern void deactivate_page(struct page *);
+extern void deactivate_page_nolock(struct page *);
+extern void activate_page(struct page *);
+extern void activate_page_nolock(struct page *);
+extern void lru_cache_add(struct page *);
+extern void __lru_cache_del(struct page *);
+extern void lru_cache_del(struct page *);
+extern void recalculate_vm_stats(void);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
+extern struct page * reclaim_page(zone_t *);
+extern wait_queue_head_t kswapd_wait;
+extern wait_queue_head_t kreclaimd_wait;
+extern int page_launder(int, int);
+extern int free_shortage(void);
+extern int inactive_shortage(void);
+extern void wakeup_kswapd(int);
extern int try_to_free_pages(unsigned int gfp_mask);
/* linux/mm/page_io.c */
@@ -161,30 +185,102 @@ static inline int is_page_shared(struct page *page)
extern spinlock_t pagemap_lru_lock;
/*
- * Helper macros for lru_pages handling.
+ * Page aging defines.
+ * Since we do exponential decay of the page age, we
+ * can choose a fairly large maximum.
*/
-#define lru_cache_add(page) \
-do { \
- spin_lock(&pagemap_lru_lock); \
- list_add(&(page)->lru, &lru_cache); \
- nr_lru_pages++; \
- spin_unlock(&pagemap_lru_lock); \
-} while (0)
-
-#define __lru_cache_del(page) \
-do { \
- list_del(&(page)->lru); \
- nr_lru_pages--; \
-} while (0)
-
-#define lru_cache_del(page) \
-do { \
- if (!PageLocked(page)) \
- BUG(); \
- spin_lock(&pagemap_lru_lock); \
- __lru_cache_del(page); \
- spin_unlock(&pagemap_lru_lock); \
-} while (0)
+#define PAGE_AGE_START 2
+#define PAGE_AGE_ADV 3
+#define PAGE_AGE_MAX 64
+
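For context, a minimal sketch of how these constants drive the aging helpers declared above (an assumption based on mm/swap.c of this era, with struct page assumed to carry an age field): a referenced page gains PAGE_AGE_ADV, capped at PAGE_AGE_MAX, while an unreferenced page has its age halved, which is the exponential decay the comment refers to.

	/* Sketch only, not the verbatim kernel code. */
	static inline void sketch_age_page_up(struct page *page)
	{
		page->age += PAGE_AGE_ADV;
		if (page->age > PAGE_AGE_MAX)
			page->age = PAGE_AGE_MAX;
	}

	static inline void sketch_age_page_down(struct page *page)
	{
		page->age /= 2;	/* exponential decay of the age */
	}
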
+/*
+ * List add/del helper macros. These must be called
+ * with the pagemap_lru_lock held!
+ */
+#define DEBUG_ADD_PAGE \
+ if (PageActive(page) || PageInactiveDirty(page) || \
+ PageInactiveClean(page)) BUG();
+
+#define ZERO_PAGE_BUG \
+ if (page_count(page) == 0) BUG();
+
+#define add_page_to_active_list(page) { \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+ SetPageActive(page); \
+ list_add(&(page)->lru, &active_list); \
+ nr_active_pages++; \
+}
+
+#define add_page_to_inactive_dirty_list(page) { \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+ SetPageInactiveDirty(page); \
+ list_add(&(page)->lru, &inactive_dirty_list); \
+ nr_inactive_dirty_pages++; \
+ page->zone->inactive_dirty_pages++; \
+}
+
+#define add_page_to_inactive_clean_list(page) { \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+ SetPageInactiveClean(page); \
+ list_add(&(page)->lru, &page->zone->inactive_clean_list); \
+ page->zone->inactive_clean_pages++; \
+}
+
+#define del_page_from_active_list(page) { \
+ list_del(&(page)->lru); \
+ ClearPageActive(page); \
+ nr_active_pages--; \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+}
+
+#define del_page_from_inactive_dirty_list(page) { \
+ list_del(&(page)->lru); \
+ ClearPageInactiveDirty(page); \
+ nr_inactive_dirty_pages--; \
+ page->zone->inactive_dirty_pages--; \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+}
+
+#define del_page_from_inactive_clean_list(page) { \
+ list_del(&(page)->lru); \
+ ClearPageInactiveClean(page); \
+ page->zone->inactive_clean_pages--; \
+ DEBUG_ADD_PAGE \
+ ZERO_PAGE_BUG \
+}
+
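As a usage illustration, here is roughly how a helper such as deactivate_page_nolock() would combine these macros; this is a sketch under the locking rule stated above (the age reset is an assumption), not the kernel's exact code:

	/* Caller must hold pagemap_lru_lock, per the comment above. */
	static void sketch_deactivate_page_nolock(struct page *page)
	{
		if (PageActive(page)) {
			del_page_from_active_list(page);
			add_page_to_inactive_dirty_list(page);
			page->age = 0;	/* fully aged: candidate for reclaim */
		}
	}
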
+/*
+ * In mm/swap.c::recalculate_vm_stats(), we subtract
+ * inactive_target from memory_pressure every second.
+ * This means that memory_pressure is smoothed over
+ * 64 (1 << INACTIVE_SHIFT) seconds.
+ */
+#define INACTIVE_SHIFT 6
+#define inactive_min(a,b) ((a) < (b) ? (a) : (b))
+#define inactive_target inactive_min((memory_pressure >> INACTIVE_SHIFT), \
+ (num_physpages / 4))
+
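Worked example with hypothetical numbers: given memory_pressure = 128000 and num_physpages = 32768, inactive_target = min(128000 >> 6, 32768 / 4) = min(2000, 8192) = 2000 pages. The num_physpages / 4 term only caps the target when memory_pressure exceeds sixteen times the number of physical pages.
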
+/*
+ * Ugly ugly ugly HACK to make sure the inactive lists
+ * don't fill up with unfreeable ramdisk pages. We really
+ * want to fix the ramdisk driver to mark its pages as
+ * unfreeable instead of using dirty buffer magic, but the
+ * next code-change time is when 2.5 is forked...
+ */
+#ifndef _LINUX_KDEV_T_H
+#include <linux/kdev_t.h>
+#endif
+#ifndef _LINUX_MAJOR_H
+#include <linux/major.h>
+#endif
+
+#define page_ramdisk(page) \
+ (page->buffers && (MAJOR(page->buffers->b_dev) == RAMDISK_MAJOR))
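
A hedged sketch of how a scanner such as page_launder() might consume this test (the exact call site is an assumption): ramdisk-backed pages found on the inactive_dirty list are moved back to the active list instead of being written out, since their dirty buffers can never be freed.

	/* Sketch: inside a loop over the inactive_dirty list,
	 * with pagemap_lru_lock held. */
	if (page_ramdisk(page)) {
		del_page_from_inactive_dirty_list(page);
		add_page_to_active_list(page);
		continue;
	}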
extern spinlock_t swaplock;