summary | refs | log | tree | commit | diff | stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     88
-rw-r--r--  mm/highmem.c      2
-rw-r--r--  mm/memory.c       8
-rw-r--r--  mm/page_alloc.c   6
-rw-r--r--  mm/slab.c         4
-rw-r--r--  mm/swap_state.c  16
-rw-r--r--  mm/swapfile.c     2
-rw-r--r--  mm/vmscan.c       4
8 files changed, 56 insertions, 74 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index acafb3353..81f7d7ab9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -109,54 +109,41 @@ void remove_inode_page(struct page *page)
spin_unlock(&pagecache_lock);
}
-#define ITERATIONS 100
+/**
+ * invalidate_inode_pages - Invalidate all the unlocked pages of one inode
+ * @inode: the inode which pages we want to invalidate
+ *
+ * This function only removes the unlocked pages, if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ */
void invalidate_inode_pages(struct inode * inode)
{
struct list_head *head, *curr;
struct page * page;
- int count;
head = &inode->i_mapping->pages;
- while (head != head->next) {
- spin_lock(&pagecache_lock);
- spin_lock(&pagemap_lru_lock);
- head = &inode->i_mapping->pages;
- curr = head->next;
- count = 0;
-
- while ((curr != head) && (count++ < ITERATIONS)) {
- page = list_entry(curr, struct page, list);
- curr = curr->next;
-
- /* We cannot invalidate a locked page */
- if (TryLockPage(page))
- continue;
+ spin_lock(&pagecache_lock);
+ spin_lock(&pagemap_lru_lock);
+ curr = head->next;
- __lru_cache_del(page);
- __remove_inode_page(page);
- UnlockPage(page);
- page_cache_release(page);
- }
+ while (curr != head) {
+ page = list_entry(curr, struct page, list);
+ curr = curr->next;
- /* At this stage we have passed through the list
- * once, and there may still be locked pages. */
+ /* We cannot invalidate a locked page */
+ if (TryLockPage(page))
+ continue;
- if (head->next!=head) {
- page = list_entry(head->next, struct page, list);
- get_page(page);
- spin_unlock(&pagemap_lru_lock);
- spin_unlock(&pagecache_lock);
- /* We need to block */
- lock_page(page);
- UnlockPage(page);
- page_cache_release(page);
- } else {
- spin_unlock(&pagemap_lru_lock);
- spin_unlock(&pagecache_lock);
- }
+ __lru_cache_del(page);
+ __remove_inode_page(page);
+ UnlockPage(page);
+ page_cache_release(page);
}
+
+ spin_unlock(&pagemap_lru_lock);
+ spin_unlock(&pagecache_lock);
}
/*
@@ -187,13 +174,13 @@ repeat:
/* page wholly truncated - free it */
if (offset >= start) {
if (TryLockPage(page)) {
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
wait_on_page(page);
page_cache_release(page);
goto repeat;
}
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
if (!page->buffers || block_flushpage(page, 0))
@@ -237,7 +224,7 @@ repeat:
spin_unlock(&pagecache_lock);
goto repeat;
}
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
@@ -252,9 +239,6 @@ repeat:
*/
UnlockPage(page);
page_cache_release(page);
- get_page(page);
- wait_on_page(page);
- put_page(page);
goto repeat;
}
spin_unlock(&pagecache_lock);
@@ -312,7 +296,7 @@ int shrink_mmap(int priority, int gfp_mask)
spin_unlock(&pagemap_lru_lock);
/* avoid freeing the page while it's locked */
- get_page(page);
+ page_cache_get(page);
/*
* Is it a buffer page? Try to clean it up regardless
@@ -376,7 +360,7 @@ cache_unlock_continue:
unlock_continue:
spin_lock(&pagemap_lru_lock);
UnlockPage(page);
- put_page(page);
+ page_cache_release(page);
dispose_continue:
list_add(page_lru, dispose);
}
@@ -386,7 +370,7 @@ made_inode_progress:
page_cache_release(page);
made_buffer_progress:
UnlockPage(page);
- put_page(page);
+ page_cache_release(page);
ret = 1;
spin_lock(&pagemap_lru_lock);
/* nr_lru_pages needs the spinlock */
@@ -474,7 +458,7 @@ static int do_buffer_fdatasync(struct inode *inode, unsigned long start, unsigne
if (page->index < start)
continue;
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
lock_page(page);
@@ -516,7 +500,7 @@ void add_to_page_cache_locked(struct page * page, struct address_space *mapping,
if (!PageLocked(page))
BUG();
- get_page(page);
+ page_cache_get(page);
spin_lock(&pagecache_lock);
page->index = index;
add_page_to_inode_queue(mapping, page);
@@ -541,7 +525,7 @@ static inline void __add_to_page_cache(struct page * page,
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
- get_page(page);
+ page_cache_get(page);
page->index = offset;
add_page_to_inode_queue(mapping, page);
__add_page_to_hash_queue(page, hash);
@@ -683,7 +667,7 @@ repeat:
spin_lock(&pagecache_lock);
page = __find_page_nolock(mapping, offset, *hash);
if (page)
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
/* Found the page, sleep if locked. */
@@ -733,7 +717,7 @@ repeat:
spin_lock(&pagecache_lock);
page = __find_page_nolock(mapping, offset, *hash);
if (page)
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
/* Found the page, sleep if locked. */
@@ -1091,7 +1075,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
if (!page)
goto no_cached_page;
found_page:
- get_page(page);
+ page_cache_get(page);
spin_unlock(&pagecache_lock);
if (!Page_Uptodate(page))
@@ -1594,7 +1578,7 @@ static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
set_pte(ptep, pte_mkclean(pte));
flush_tlb_page(vma, address);
page = pte_page(pte);
- get_page(page);
+ page_cache_get(page);
} else {
if (pte_none(pte))
return 0;
diff --git a/mm/highmem.c b/mm/highmem.c
index 3e028dced..11e03521e 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -108,7 +108,7 @@ struct page * replace_with_highmem(struct page * page)
* n means that there are (n-1) current users of it.
*/
static int pkmap_count[LAST_PKMAP];
-static unsigned int last_pkmap_nr = 0;
+static unsigned int last_pkmap_nr;
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;
pte_t * pkmap_page_table;
diff --git a/mm/memory.c b/mm/memory.c
index f0baed69f..e5a548925 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -48,9 +48,9 @@
#include <linux/pagemap.h>
-unsigned long max_mapnr = 0;
-unsigned long num_physpages = 0;
-void * high_memory = NULL;
+unsigned long max_mapnr;
+unsigned long num_physpages;
+void * high_memory;
struct page *highmem_start_page;
/*
@@ -861,7 +861,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
* Ok, we need to copy. Oh, well..
*/
spin_unlock(&mm->page_table_lock);
- new_page = alloc_page(GFP_HIGHUSER);
+ new_page = page_cache_alloc();
if (!new_page)
return -1;
spin_lock(&mm->page_table_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c3ea96efc..926364499 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -24,9 +24,9 @@
#define NUMNODES numnodes
#endif
-int nr_swap_pages = 0;
-int nr_lru_pages = 0;
-pg_data_t *pgdat_list = (pg_data_t *)0;
+int nr_swap_pages;
+int nr_lru_pages;
+pg_data_t *pgdat_list;
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128, };
diff --git a/mm/slab.c b/mm/slab.c
index 055282872..7dbc443fb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -401,12 +401,12 @@ static struct semaphore cache_chain_sem;
static kmem_cache_t *clock_searchp = &cache_cache;
/* Internal slab management cache, for when slab management is off-slab. */
-static kmem_cache_t *cache_slabp = NULL;
+static kmem_cache_t *cache_slabp;
/* Max number of objs-per-slab for caches which use bufctl's.
* Needed to avoid a possible looping condition in kmem_cache_grow().
*/
-static unsigned long bufctl_limit = 0;
+static unsigned long bufctl_limit;
/* Initialisation - setup the `cache' cache. */
void __init kmem_cache_init(void)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ad686e4c3..347f87372 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -31,10 +31,10 @@ struct address_space swapper_space = {
};
#ifdef SWAP_CACHE_INFO
-unsigned long swap_cache_add_total = 0;
-unsigned long swap_cache_del_total = 0;
-unsigned long swap_cache_find_total = 0;
-unsigned long swap_cache_find_success = 0;
+unsigned long swap_cache_add_total;
+unsigned long swap_cache_del_total;
+unsigned long swap_cache_find_total;
+unsigned long swap_cache_find_success;
void show_swap_cache_info(void)
{
@@ -136,7 +136,7 @@ void free_page_and_swap_cache(struct page *page)
}
UnlockPage(page);
}
- __free_page(page);
+ page_cache_release(page);
}
@@ -172,7 +172,7 @@ repeat:
*/
if (!PageSwapCache(found)) {
UnlockPage(found);
- __free_page(found);
+ page_cache_release(found);
goto repeat;
}
if (found->mapping != &swapper_space)
@@ -187,7 +187,7 @@ repeat:
out_bad:
printk (KERN_ERR "VM: Found a non-swapper swap page!\n");
UnlockPage(found);
- __free_page(found);
+ page_cache_release(found);
return 0;
}
@@ -237,7 +237,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
return new_page;
out_free_page:
- __free_page(new_page);
+ page_cache_release(new_page);
out_free_swap:
swap_free(entry);
out:
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c5f8db242..c4b4733b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -18,7 +18,7 @@
#include <asm/pgtable.h>
spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
-unsigned int nr_swapfiles = 0;
+unsigned int nr_swapfiles;
struct swap_list_t swap_list = {-1, -1};
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2c07830d0..8734cc459 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -542,14 +542,12 @@ int kswapd(void *unused)
continue;
something_to_do = 1;
do_try_to_free_pages(GFP_KSWAPD);
- if (tsk->need_resched)
- schedule();
}
run_task_queue(&tq_disk);
pgdat = pgdat->node_next;
} while (pgdat);
- if (!something_to_do) {
+ if (tsk->need_resched || !something_to_do) {
tsk->state = TASK_INTERRUPTIBLE;
interruptible_sleep_on(&kswapd_wait);
}