summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-07-08 00:53:00 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-07-08 00:53:00 +0000
commitb8553086288629b4efb77e97f5582e08bc50ad65 (patch)
tree0a19bd1c21e148f35c7a0f76baa4f7a056b966b0 /mm
parent75b6d92f2dd5112b02f4e78cf9f35f9825946ef0 (diff)
Merge with 2.4.0-test3-pre4.
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/vmscan.c106
2 files changed, 83 insertions, 31 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 74eb0ef83..7f226eef3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1741,12 +1741,14 @@ static int msync_interval(struct vm_area_struct * vma,
{
if (vma->vm_file && vma->vm_ops && vma->vm_ops->sync) {
int error;
+ lock_kernel();
error = vma->vm_ops->sync(vma, start, end-start, flags);
if (!error && (flags & MS_SYNC)) {
struct file * file = vma->vm_file;
if (file && file->f_op && file->f_op->fsync)
error = file->f_op->fsync(file, file->f_dentry, 1);
}
+ unlock_kernel();
return error;
}
return 0;
@@ -1759,7 +1761,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
int unmapped_error, error = -EINVAL;
down(&current->mm->mmap_sem);
- lock_kernel();
if (start & ~PAGE_MASK)
goto out;
len = (len + ~PAGE_MASK) & PAGE_MASK;
@@ -1805,7 +1806,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
vma = vma->vm_next;
}
out:
- unlock_kernel();
up(&current->mm->mmap_sem);
return error;
}
@@ -2021,13 +2021,9 @@ static long madvise_dontneed(struct vm_area_struct * vma,
if (vma->vm_flags & VM_LOCKED)
return -EINVAL;
- lock_kernel(); /* is this really necessary? */
-
flush_cache_range(vma->vm_mm, start, end);
zap_page_range(vma->vm_mm, start, end - start);
flush_tlb_range(vma->vm_mm, start, end);
-
- unlock_kernel();
return 0;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1919c0961..be600ec49 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -419,6 +419,48 @@ out:
}
/*
+ * Check if there is any memory pressure (free_pages < pages_low)
+ */
+static inline int memory_pressure(void)
+{
+ pg_data_t *pgdat = pgdat_list;
+
+ do {
+ int i;
+ for(i = 0; i < MAX_NR_ZONES; i++) {
+ zone_t *zone = pgdat->node_zones+ i;
+ if (zone->size &&
+ zone->free_pages < zone->pages_low)
+ return 1;
+ }
+ pgdat = pgdat->node_next;
+ } while (pgdat);
+
+ return 0;
+}
+
+/*
+ * Check if any zone still has its kswapd wakeup flag (zone_wake_kswapd) set
+ */
+static inline int keep_kswapd_awake(void)
+{
+ pg_data_t *pgdat = pgdat_list;
+
+ do {
+ int i;
+ for(i = 0; i < MAX_NR_ZONES; i++) {
+ zone_t *zone = pgdat->node_zones+ i;
+ if (zone->size &&
+ zone->zone_wake_kswapd)
+ return 1;
+ }
+ pgdat = pgdat->node_next;
+ } while (pgdat);
+
+ return 0;
+}
+
+/*
* We need to make the locks finer granularity, but right
* now we need this so that we can do page allocations
* without holding the kernel lock etc.
@@ -442,11 +484,23 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
priority = 64;
do {
+ if (current->need_resched) {
+ schedule();
+ /* time has passed - pressure too? */
+ if (!memory_pressure())
+ goto done;
+ }
+
while (shrink_mmap(priority, gfp_mask)) {
if (!--count)
goto done;
}
+ /* not (been) low on memory - it is
+ * pointless to try to swap out.
+ */
+ if (!keep_kswapd_awake())
+ goto done;
/* Try to get rid of some shared memory pages.. */
if (gfp_mask & __GFP_IO) {
@@ -457,8 +511,18 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
*/
count -= shrink_dcache_memory(priority, gfp_mask);
count -= shrink_icache_memory(priority, gfp_mask);
- if (count <= 0)
+ /*
+ * Not currently working, see fixme in shrink_?cache_memory
+ * In the inner functions there is a comment:
+ * "To help debugging, a zero exit status indicates
+ * all slabs were released." (-arca?)
+ * let's handle it in a primitive but working way...
+ * if (count <= 0)
+ * goto done;
+ */
+ if (!keep_kswapd_awake())
goto done;
+
while (shm_swap(priority, gfp_mask)) {
if (!--count)
goto done;
@@ -477,7 +541,8 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
if (--swap_count < 0)
break;
- } while (--priority >= 0);
+ priority--;
+ } while (priority >= 0);
/* Always end on a shrink_mmap.. */
while (shrink_mmap(0, gfp_mask)) {
@@ -486,7 +551,7 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
}
/* We return 1 if we are freed some page */
return (count != FREE_COUNT);
-
+
done:
return 1;
}
@@ -530,29 +595,14 @@ int kswapd(void *unused)
tsk->flags |= PF_MEMALLOC;
for (;;) {
- pg_data_t *pgdat;
- int something_to_do = 0;
-
- pgdat = pgdat_list;
- do {
- int i;
- for(i = 0; i < MAX_NR_ZONES; i++) {
- zone_t *zone = pgdat->node_zones+ i;
- if (tsk->need_resched)
- schedule();
- if (!zone->size || !zone->zone_wake_kswapd)
- continue;
- if (zone->free_pages < zone->pages_low)
- something_to_do = 1;
- do_try_to_free_pages(GFP_KSWAPD);
- }
- pgdat = pgdat->node_next;
- } while (pgdat);
-
- if (!something_to_do) {
- tsk->state = TASK_INTERRUPTIBLE;
- interruptible_sleep_on(&kswapd_wait);
+ if (!keep_kswapd_awake()) {
+ /* wake up regularly to do an early attempt to free
+ * pages - pages will not actually be freed.
+ */
+ interruptible_sleep_on_timeout(&kswapd_wait, HZ);
}
+
+ do_try_to_free_pages(GFP_KSWAPD);
}
}
@@ -580,6 +630,12 @@ int try_to_free_pages(unsigned int gfp_mask)
retval = do_try_to_free_pages(gfp_mask);
current->flags &= ~PF_MEMALLOC;
}
+ else {
+ /* make sure kswapd runs */
+ if (waitqueue_active(&kswapd_wait))
+ wake_up_interruptible(&kswapd_wait);
+ }
+
return retval;
}