Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c   6
-rw-r--r--  mm/highmem.c   2
-rw-r--r--  mm/mmap.c     20
-rw-r--r--  mm/slab.c     46
4 files changed, 24 insertions(+), 50 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 58f642b30..107f9d8a9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -414,7 +414,7 @@ static int writeout_one_page(struct page *page)
if (buffer_locked(bh) || !buffer_dirty(bh) || !buffer_uptodate(bh))
continue;
- bh->b_flushtime = 0;
+ bh->b_flushtime = jiffies;
ll_rw_block(WRITE, 1, &bh);
} while ((bh = bh->b_this_page) != head);
return 0;
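
Both b_flushtime hunks in this patch (here and in mm/highmem.c below) replace the magic value 0 with a real timestamp, presumably so the field always carries a valid jiffies reading. In 2.4, b_flushtime is the jiffies value after which bdflush treats a dirty buffer as due for writeback, and comparisons against it must survive counter wraparound. A minimal user-space model of that aging test (buffer_due() and the struct are illustrative, not the kernel's):

#include <assert.h>

/* Wraparound-safe "a is before b" on free-running tick counters,
 * modeled on the kernel's time_before() macro. */
#define time_before(a, b) ((long)((a) - (b)) < 0)

struct buffer {
	unsigned long flushtime;	/* tick after which writeback is due */
	int dirty;
};

/* A dirty buffer is due once the clock has reached its flushtime. */
static int buffer_due(const struct buffer *bh, unsigned long now)
{
	return bh->dirty && !time_before(now, bh->flushtime);
}

int main(void)
{
	struct buffer bh = { .flushtime = 1000, .dirty = 1 };
	assert(!buffer_due(&bh, 999));	/* not yet due */
	assert(buffer_due(&bh, 1000));	/* due exactly at flushtime */
	bh.flushtime = 0;		/* the old magic value... */
	assert(buffer_due(&bh, 12345));	/* ...looks due at any reading */
	return 0;
}
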
@@ -513,7 +513,6 @@ static inline void __add_to_page_cache(struct page * page,
struct address_space *mapping, unsigned long offset,
struct page **hash)
{
- struct page *alias;
unsigned long flags;
if (PageLocked(page))
@@ -526,9 +525,6 @@ static inline void __add_to_page_cache(struct page * page,
add_page_to_inode_queue(mapping, page);
__add_page_to_hash_queue(page, hash);
lru_cache_add(page);
- alias = __find_page_nolock(mapping, offset, *hash);
- if (alias != page)
- BUG();
}
void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
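
The two hunks above also drop a debug self-check from __add_to_page_cache(): right after hashing the page, the old code looked it up again and hit BUG() if the lookup returned a different page. Removing it saves a hash-chain walk on every page-cache insertion. A toy single-bucket model of the insert-then-verify pattern being deleted (names are illustrative):

#include <assert.h>
#include <stddef.h>

struct page {
	unsigned long offset;
	struct page *next_hash;		/* hash-chain link */
};

/* Push the page onto the head of its hash chain. */
static void add_to_hash(struct page **bucket, struct page *p)
{
	p->next_hash = *bucket;
	*bucket = p;
}

/* Walk the chain looking for a page at the given offset. */
static struct page *find_in_hash(struct page *bucket, unsigned long offset)
{
	for (struct page *p = bucket; p; p = p->next_hash)
		if (p->offset == offset)
			return p;
	return NULL;
}

int main(void)
{
	struct page *bucket = NULL;
	struct page p = { .offset = 42 };
	add_to_hash(&bucket, &p);
	/* The removed check: the page just inserted must be the one found. */
	assert(find_in_hash(bucket, 42) == &p);
	return 0;
}
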
diff --git a/mm/highmem.c b/mm/highmem.c
index 6208e347d..3be601c6f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -339,7 +339,7 @@ repeat_page:
bh->b_count = bh_orig->b_count;
bh->b_rdev = bh_orig->b_rdev;
bh->b_state = bh_orig->b_state;
- bh->b_flushtime = 0;
+ bh->b_flushtime = jiffies;
bh->b_next_free = NULL;
bh->b_prev_free = NULL;
/* bh->b_this_page */
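
This is the bounce-buffer clone path: when a device cannot DMA to a highmem page, the kernel copies the buffer_head bookkeeping into a low-memory buffer, and the hunk applies the same b_flushtime fix as in mm/filemap.c above. A rough user-space sketch of the cloning step (struct and field names are simplified stand-ins for the kernel's buffer_head):

#include <stdlib.h>
#include <string.h>

struct buf {
	int count;			/* reference count */
	unsigned long state;		/* dirty/uptodate/locked bits */
	unsigned long flushtime;	/* writeback deadline in ticks */
	size_t size;
	char *data;
};

/* Clone bookkeeping into a bounce buffer and, for writes, copy the
 * payload so the device can DMA from low memory. */
static struct buf *make_bounce(const struct buf *orig, unsigned long now,
			       int is_write)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->count = orig->count;
	b->state = orig->state;
	b->flushtime = now;		/* stamp with the current time, not 0 */
	b->size = orig->size;
	b->data = malloc(b->size);
	if (!b->data) {
		free(b);
		return NULL;
	}
	if (is_write)
		memcpy(b->data, orig->data, b->size);
	return b;
}

int main(void)
{
	char payload[4] = "abc";
	struct buf orig = { .count = 1, .state = 0x5, .flushtime = 0,
			    .size = sizeof(payload), .data = payload };
	struct buf *b = make_bounce(&orig, 1000, 1);
	return (b && b->data[0] == 'a') ? 0 : 1;
}
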
diff --git a/mm/mmap.c b/mm/mmap.c
index d3e596d25..9667d19db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,9 +36,6 @@ pgprot_t protection_map[16] = {
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
-/* SLAB cache for vm_area_struct's. */
-kmem_cache_t *vm_area_cachep;
-
int sysctl_overcommit_memory;
/* Check that a process has enough memory to allocate a
@@ -994,20 +991,3 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
mpnt = prev;
}
}
-
-void __init vma_init(void)
-{
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if(!vm_area_cachep)
- panic("vma_init: Cannot alloc vm_area_struct cache.");
-
- mm_cachep = kmem_cache_create("mm_struct",
- sizeof(struct mm_struct),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if(!mm_cachep)
- panic("vma_init: Cannot alloc mm_struct cache.");
-}
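
vma_init() built the boot-time slab caches for vm_area_struct and mm_struct. This diff only covers mm/, so its new home is not shown; presumably the caches are now created with the other core caches during kernel initialization. For reference, a toy user-space model of the create/alloc/free pattern that kmem_cache_create() provides (a free-list sketch, not the kernel allocator):

#include <stdio.h>
#include <stdlib.h>

struct obj_cache {
	const char *name;
	size_t objsize;
	void *freelist;		/* list threaded through freed objects */
};

static struct obj_cache *cache_create(const char *name, size_t objsize)
{
	struct obj_cache *c = malloc(sizeof(*c));
	if (!c)
		return NULL;
	c->name = name;
	/* Objects must be big enough to hold the free-list link. */
	c->objsize = objsize < sizeof(void *) ? sizeof(void *) : objsize;
	c->freelist = NULL;
	return c;
}

static void *cache_alloc(struct obj_cache *c)
{
	if (c->freelist) {		/* reuse a freed object */
		void *obj = c->freelist;
		c->freelist = *(void **)obj;
		return obj;
	}
	return malloc(c->objsize);	/* grow the cache */
}

static void cache_free(struct obj_cache *c, void *obj)
{
	*(void **)obj = c->freelist;	/* push onto the free list */
	c->freelist = obj;
}

int main(void)
{
	struct obj_cache *vma_cache = cache_create("vm_area_struct", 64);
	void *vma = cache_alloc(vma_cache);
	cache_free(vma_cache, vma);
	/* a freed object is recycled on the next allocation */
	if (cache_alloc(vma_cache) != vma)
		return 1;
	printf("%s cache ok\n", vma_cache->name);
	return 0;
}

The point of such a cache is cheap recycling of fixed-size objects: a freed vm_area_struct is handed straight back on the next allocation instead of round-tripping through the page allocator.
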
diff --git a/mm/slab.c b/mm/slab.c
index 815430698..ed5d018f1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -843,21 +843,17 @@ static kmem_cache_t *cache_to_drain = NULL;
static DECLARE_WAIT_QUEUE_HEAD(cache_drain_wait);
unsigned long slab_cache_drain_mask;
-static void drain_cpu_caches(kmem_cache_t *cachep)
+/*
+ * Waits for all CPUs to execute slab_drain_local_cache().
+ * Caller must be holding cache_drain_sem.
+ */
+static void slab_drain_all_sync(void)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long cpu_mask = 0;
- int i;
-
- for (i = 0; i < smp_num_cpus; i++)
- cpu_mask |= (1UL << cpu_logical_map(i));
-
- down(&cache_drain_sem);
-
- cache_to_drain = cachep;
- slab_cache_drain_mask = cpu_mask;
+ local_irq_disable();
slab_drain_local_cache();
+ local_irq_enable();
add_wait_queue(&cache_drain_wait, &wait);
current->state = TASK_UNINTERRUPTIBLE;
while (slab_cache_drain_mask != 0UL)
@@ -865,7 +861,21 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&cache_drain_wait, &wait);
+}
+
+static void drain_cpu_caches(kmem_cache_t *cachep)
+{
+ unsigned long cpu_mask = 0;
+ int i;
+ for (i = 0; i < smp_num_cpus; i++)
+ cpu_mask |= (1UL << cpu_logical_map(i));
+
+ down(&cache_drain_sem);
+
+ cache_to_drain = cachep;
+ slab_cache_drain_mask = cpu_mask;
+ slab_drain_all_sync();
cache_to_drain = NULL;
up(&cache_drain_sem);
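
The hunks above factor the rendezvous out of drain_cpu_caches() into slab_drain_all_sync(), which do_ccupdate() further down now reuses instead of open-coding the same wait loop. The protocol: publish the work (cache_to_drain or ccupdate_state), set one bit per CPU in slab_cache_drain_mask, run the drain locally, and sleep until the per-CPU timer interrupt on every other CPU has cleared its bit. The explicit local_irq_disable()/local_irq_enable() pair is new here because slab_drain_local_cache() no longer disables interrupts itself (see the next hunks). A rough pthreads model of the mask-and-wait rendezvous (all names invented for the sketch):

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static unsigned long drain_mask;	/* bit per "CPU" with work pending */

/* Per-CPU work: drain the local cache, then clear our bit; the last
 * CPU to finish wakes the coordinator (the kernel uses a wait queue). */
static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;
	pthread_mutex_lock(&lock);
	/* ... drain this CPU's local cache here ... */
	drain_mask &= ~(1UL << cpu);
	if (drain_mask == 0)
		pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Coordinator: set every CPU's bit, then sleep until all are cleared. */
static void drain_all_sync(void)
{
	pthread_t t[NCPUS];
	long i;

	pthread_mutex_lock(&lock);
	drain_mask = (1UL << NCPUS) - 1;
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);

	pthread_mutex_lock(&lock);
	while (drain_mask != 0)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("all CPUs drained\n");
}

int main(void)
{
	drain_all_sync();
	return 0;
}
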
@@ -1594,7 +1604,6 @@ static ccupdate_struct_t *ccupdate_state = NULL;
/* Called from per-cpu timer interrupt. */
void slab_drain_local_cache(void)
{
- local_irq_disable();
if (ccupdate_state != NULL) {
ccupdate_struct_t *new = ccupdate_state;
cpucache_t *old = cc_data(new->cachep);
@@ -1610,7 +1619,6 @@ void slab_drain_local_cache(void)
cc->avail = 0;
}
}
- local_irq_enable();
clear_bit(smp_processor_id(), &slab_cache_drain_mask);
if (slab_cache_drain_mask == 0)
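
With these two deletions, slab_drain_local_cache() relies on its calling context for interrupt exclusion: the timer-interrupt path presumably already runs with interrupts disabled, and the synchronous path now wraps the call in local_irq_disable()/local_irq_enable() inside slab_drain_all_sync(). One way to keep such a caller-side precondition honest is an assertion at function entry; a sketch, with the interrupt state modeled as a thread-local flag rather than a real CPU flag:

#include <assert.h>

/* Stand-in for a real "are interrupts off on this CPU?" test. */
static _Thread_local int irqs_off;

static void local_irq_disable(void) { irqs_off = 1; }
static void local_irq_enable(void)  { irqs_off = 0; }

/* Must run with interrupts disabled; assert the precondition instead
 * of disabling them here, as the patched kernel function now does. */
static void drain_local_cache(void)
{
	assert(irqs_off);
	/* ... empty the per-CPU object array ... */
}

int main(void)
{
	local_irq_disable();	/* caller provides the exclusion */
	drain_local_cache();
	local_irq_enable();
	return 0;
}
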
@@ -1619,7 +1627,6 @@ void slab_drain_local_cache(void)
static void do_ccupdate(ccupdate_struct_t *data)
{
- DECLARE_WAITQUEUE(wait, current);
unsigned long cpu_mask = 0;
int i;
@@ -1630,16 +1637,7 @@ static void do_ccupdate(ccupdate_struct_t *data)
ccupdate_state = data;
slab_cache_drain_mask = cpu_mask;
-
- slab_drain_local_cache();
-
- add_wait_queue(&cache_drain_wait, &wait);
- current->state = TASK_UNINTERRUPTIBLE;
- while (slab_cache_drain_mask != 0UL)
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&cache_drain_wait, &wait);
-
+ slab_drain_all_sync();
ccupdate_state = NULL;
up(&cache_drain_sem);
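
do_ccupdate() now follows the same shape as drain_cpu_caches(): publish ccupdate_state, set the mask, call slab_drain_all_sync(), clear the state. The per-CPU step differs: as the slab_drain_local_cache() hunk above shows, each CPU swaps its own cpucache pointer for the replacement and returns the old one through the same slot, which is why the update must execute on every CPU rather than once under a lock. A minimal single-threaded model of that publish-and-swap (types simplified from the kernel's):

#include <stdio.h>

#define NCPUS 2

struct cpucache { int avail; };

/* What the coordinator publishes before the rendezvous. */
struct ccupdate {
	struct cpucache *new_data[NCPUS];	/* replacement caches */
};

static struct cpucache *cc_data[NCPUS];		/* live per-CPU pointers */
static struct ccupdate *ccupdate_state;		/* pending update, if any */

/* Runs on each CPU: swap in the new cpucache and hand the old one back
 * through the same slot so the coordinator can flush and free it. */
static void update_local_cache(int cpu)
{
	if (ccupdate_state) {
		struct cpucache *old = cc_data[cpu];
		cc_data[cpu] = ccupdate_state->new_data[cpu];
		ccupdate_state->new_data[cpu] = old;
	}
}

int main(void)
{
	struct cpucache a = { 1 }, b = { 2 }, n0 = { 0 }, n1 = { 0 };
	struct ccupdate upd = { { &n0, &n1 } };
	int cpu;

	cc_data[0] = &a;
	cc_data[1] = &b;

	ccupdate_state = &upd;
	for (cpu = 0; cpu < NCPUS; cpu++)	/* kernel: via rendezvous */
		update_local_cache(cpu);
	ccupdate_state = NULL;

	/* old caches came back through new_data for disposal */
	printf("old avail: %d %d\n", upd.new_data[0]->avail,
	       upd.new_data[1]->avail);
	return 0;
}

Handing the old pointers back through new_data lets the coordinator dispose of the retired per-CPU caches after the rendezvous completes.
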