path: root/mm
author	Ralf Baechle <ralf@linux-mips.org>	1997-06-17 13:20:30 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	1997-06-17 13:20:30 +0000
commit	7acb77a6e7bddd4c4c5aa975bbf976927c013798 (patch)
tree	4139829ec6edb85f73774bb95cdec376758bfc73 /mm
parent	64d58d4c8cd6a89ee218301ec0dc0ebfec91a4db (diff)
Merge with 2.1.43.
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	3
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/mlock.c	6
-rw-r--r--	mm/mmap.c	4
-rw-r--r--	mm/mprotect.c	6
-rw-r--r--	mm/mremap.c	2
-rw-r--r--	mm/page_alloc.c	14
-rw-r--r--	mm/page_io.c	4
-rw-r--r--	mm/slab.c	106
-rw-r--r--	mm/swap_state.c	30
-rw-r--r--	mm/swapfile.c	14
-rw-r--r--	mm/vmscan.c	4
12 files changed, 109 insertions, 86 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 88c2fd49d..56aa1b486 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -921,6 +921,7 @@ static inline int do_write_page(struct inode * inode, struct file * file,
retval = -EIO;
if (size == file->f_op->write(inode, file, (const char *) page, size))
retval = 0;
+ /* inode->i_status |= ST_MODIFIED is willingly *not* done here */
set_fs(old_fs);
return retval;
}
@@ -1195,7 +1196,7 @@ int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_s
inode->i_dirt = 1;
}
vma->vm_inode = inode;
- inode->i_count++;
+ atomic_inc(&inode->i_count);
vma->vm_ops = ops;
return 0;
}
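
Note: this hunk, and the matching ones in mlock.c, mmap.c, mprotect.c, mremap.c and swapfile.c below, convert the plain integer bumps of inode->i_count into atomic operations so that concurrent mappers on SMP cannot lose a reference. A minimal user-space sketch of the pattern, using C11 atomics in place of the kernel's atomic_t (the fake_inode struct and helper names are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel inode; only the refcount matters here. */
    struct fake_inode {
        atomic_int i_count;
    };

    /* Old style: "i_count++" compiles to load/add/store and can lose an
     * increment when two CPUs race.  New style: one indivisible RMW op. */
    static void grab_inode(struct fake_inode *inode)
    {
        atomic_fetch_add(&inode->i_count, 1);   /* kernel: atomic_inc(&inode->i_count) */
    }

    static void drop_inode(struct fake_inode *inode)
    {
        atomic_fetch_sub(&inode->i_count, 1);   /* kernel: atomic_dec(&inode->i_count) */
    }

    int main(void)
    {
        struct fake_inode ino;
        atomic_init(&ino.i_count, 1);           /* start with one reference */
        grab_inode(&ino);                       /* e.g. generic_file_mmap() taking a ref */
        drop_inode(&ino);                       /* e.g. merge_segments() dropping it */
        printf("i_count = %d\n", atomic_load(&ino.i_count));
        return 0;
    }

The middle-split cases in mlock.c and mprotect.c take two references at once, which the patch expresses as a single atomic_add(2, ...) rather than two separate increments.
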
diff --git a/mm/memory.c b/mm/memory.c
index 4bcf1a74e..81022e770 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -195,7 +195,7 @@ static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
}
if (cow)
pte = pte_wrprotect(pte);
- if (delete_from_swap_cache(page_nr))
+ if (delete_from_swap_cache(&mem_map[page_nr]))
pte = pte_mkdirty(pte);
set_pte(new_pte, pte_mkold(pte));
set_pte(old_pte, pte);
diff --git a/mm/mlock.c b/mm/mlock.c
index 27bff13ff..5a69e4b55 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -39,7 +39,7 @@ static inline int mlock_fixup_start(struct vm_area_struct * vma,
vma->vm_offset += vma->vm_start - n->vm_start;
n->vm_flags = newflags;
if (n->vm_inode)
- n->vm_inode->i_count++;
+ atomic_inc(&n->vm_inode->i_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -60,7 +60,7 @@ static inline int mlock_fixup_end(struct vm_area_struct * vma,
n->vm_offset += n->vm_start - vma->vm_start;
n->vm_flags = newflags;
if (n->vm_inode)
- n->vm_inode->i_count++;
+ atomic_inc(&n->vm_inode->i_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -90,7 +90,7 @@ static inline int mlock_fixup_middle(struct vm_area_struct * vma,
right->vm_offset += right->vm_start - left->vm_start;
vma->vm_flags = newflags;
if (vma->vm_inode)
- vma->vm_inode->i_count += 2;
+ atomic_add(2, &vma->vm_inode->i_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
diff --git a/mm/mmap.c b/mm/mmap.c
index 13b19bec0..af8cd0a4a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -409,7 +409,7 @@ static void unmap_fixup(struct vm_area_struct *area,
mpnt->vm_offset += (end - area->vm_start);
mpnt->vm_start = end;
if (mpnt->vm_inode)
- mpnt->vm_inode->i_count++;
+ atomic_inc(&mpnt->vm_inode->i_count);
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
area->vm_end = addr; /* Truncate area */
@@ -646,7 +646,7 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
}
remove_shared_vm_struct(mpnt);
if (mpnt->vm_inode)
- mpnt->vm_inode->i_count--;
+ atomic_dec(&mpnt->vm_inode->i_count);
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = prev;
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7f5e26243..2e46ca142 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -111,7 +111,7 @@ static inline int mprotect_fixup_start(struct vm_area_struct * vma,
n->vm_flags = newflags;
n->vm_page_prot = prot;
if (n->vm_inode)
- n->vm_inode->i_count++;
+ atomic_inc(&n->vm_inode->i_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -134,7 +134,7 @@ static inline int mprotect_fixup_end(struct vm_area_struct * vma,
n->vm_flags = newflags;
n->vm_page_prot = prot;
if (n->vm_inode)
- n->vm_inode->i_count++;
+ atomic_inc(&n->vm_inode->i_count);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
insert_vm_struct(current->mm, n);
@@ -166,7 +166,7 @@ static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
vma->vm_flags = newflags;
vma->vm_page_prot = prot;
if (vma->vm_inode)
- vma->vm_inode->i_count += 2;
+ atomic_add(2, &vma->vm_inode->i_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
vma->vm_ops->open(right);
diff --git a/mm/mremap.c b/mm/mremap.c
index dfe826847..a52db58de 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,7 +141,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
new_vma->vm_end = new_addr+new_len;
new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
if (new_vma->vm_inode)
- new_vma->vm_inode->i_count++;
+ atomic_inc(&new_vma->vm_inode->i_count);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e32f1a92e..07264f81e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -97,7 +97,9 @@ static inline void remove_mem_queue(struct page * entry)
*
* Hint: -mask = 1+~mask
*/
+#ifdef __SMP__
static spinlock_t page_alloc_lock;
+#endif
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
@@ -131,9 +133,8 @@ static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
void __free_page(struct page *page)
{
if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
- unsigned long map_nr = page->map_nr;
- delete_from_swap_cache(map_nr);
- free_pages_ok(map_nr, 0);
+ delete_from_swap_cache(page);
+ free_pages_ok(page->map_nr, 0);
}
}
@@ -146,7 +147,7 @@ void free_pages(unsigned long addr, unsigned long order)
if (PageReserved(map))
return;
if (atomic_dec_and_test(&map->count)) {
- delete_from_swap_cache(map_nr);
+ delete_from_swap_cache(map);
free_pages_ok(map_nr, order);
return;
}
@@ -278,8 +279,7 @@ __initfunc(unsigned long free_area_init(unsigned long start_mem, unsigned long e
min_free_pages = i;
free_pages_low = i + (i>>1);
free_pages_high = i + i;
- start_mem = init_swap_cache(start_mem, end_mem);
- mem_map = (mem_map_t *) start_mem;
+ mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
p = mem_map + MAP_NR(end_mem);
start_mem = LONG_ALIGN((unsigned long) p);
memset(mem_map, 0, start_mem - (unsigned long) mem_map);
@@ -334,7 +334,7 @@ void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
}
vma->vm_mm->rss++;
tsk->maj_flt++;
- if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
+ if (!write_access && add_to_swap_cache(&mem_map[MAP_NR(page)], entry)) {
/* keep swap page allocated for the moment (swap cache) */
set_pte(page_table, mk_pte(page, vma->vm_page_prot));
return;
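
Note: the page_alloc.c hunks track the swap-cache API change made in swap_state.c further down: callers hand over the struct page itself rather than a bare map number, using the usual &mem_map[MAP_NR(addr)] idiom, and the now-redundant init_swap_cache() call drops out of free_area_init(). The #ifdef __SMP__ added around page_alloc_lock simply keeps the lock definition out of non-SMP builds. A rough user-space model of the mem_map indexing idiom (the PAGE_SHIFT value, array size and field names are invented for illustration):

    #include <stdio.h>

    #define FAKE_PAGE_SHIFT 12                      /* 4 KiB pages, as on i386 */
    #define FAKE_MAP_NR(addr) ((addr) >> FAKE_PAGE_SHIFT)

    struct fake_page {
        unsigned long map_nr;                       /* index of this page in mem_map */
        unsigned long swap_entry;                   /* models page->pg_swap_entry */
    };

    static struct fake_page fake_mem_map[16];       /* one descriptor per physical page */

    int main(void)
    {
        unsigned long addr = 0x3000;                /* a page-aligned physical address */
        struct fake_page *page = &fake_mem_map[FAKE_MAP_NR(addr)];

        page->map_nr = FAKE_MAP_NR(addr);
        printf("addr %#lx -> mem_map index %lu\n", addr, page->map_nr);
        return 0;
    }
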
diff --git a/mm/page_io.c b/mm/page_io.c
index 6a16ccee8..30d0c882e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -83,7 +83,9 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
set_bit(PG_free_after, &page->flags);
set_bit(PG_decr_after, &page->flags);
set_bit(PG_swap_unlock_after, &page->flags);
- page->swap_unlock_entry = entry;
+ /* swap-cache shouldn't be set, but play safe */
+ PageClearSwapCache(page);
+ page->pg_swap_entry = entry;
atomic_inc(&nr_async_pages);
}
ll_rw_page(rw,p->swap_device,offset,buf);
diff --git a/mm/slab.c b/mm/slab.c
index e4db9e9e3..6277739d4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -769,6 +769,7 @@ kmem_cache_create(const char *name, size_t size, size_t offset,
printk("%sForcing size word alignment - %s\n", func_nm, name);
}
+ cachep->c_org_size = size;
#if SLAB_DEBUG_SUPPORT
if (flags & SLAB_RED_ZONE) {
/* There is no point trying to honour cache alignment when redzoning. */
@@ -776,7 +777,6 @@ kmem_cache_create(const char *name, size_t size, size_t offset,
size += 2*BYTES_PER_WORD; /* words for redzone */
}
#endif /* SLAB_DEBUG_SUPPORT */
- cachep->c_org_size = size;
align = BYTES_PER_WORD;
if (flags & SLAB_HWCACHE_ALIGN)
@@ -1250,18 +1250,18 @@ opps2:
opps1:
kmem_freepages(cachep, objp);
failed:
+ spin_lock_irq(&cachep->c_spinlock);
if (local_flags != SLAB_ATOMIC && cachep->c_gfporder) {
/* For large order (>0) slabs, we try again.
* Needed because the gfp() functions are not good at giving
* out contigious pages unless pushed (but do not push too hard).
*/
- spin_lock_irq(&cachep->c_spinlock);
if (cachep->c_failures++ < 4 && cachep->c_freep == kmem_slab_end(cachep))
goto re_try;
cachep->c_failures = 1; /* Memory is low, don't try as hard next time. */
- cachep->c_growing--;
- spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
}
+ cachep->c_growing--;
+ spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
return 0;
}
@@ -1467,16 +1467,14 @@ __kmem_cache_free(kmem_cache_t *cachep, void *objp)
goto null_addr;
#if SLAB_DEBUG_SUPPORT
- if (cachep->c_flags & SLAB_RED_ZONE)
- objp -= BYTES_PER_WORD;
-#endif /* SLAB_DEBUG_SUPPORT */
-
-
-#if SLAB_DEBUG_SUPPORT
/* A verify func is called without the cache-lock held. */
if (cachep->c_flags & SLAB_DEBUG_INITIAL)
goto init_state_check;
finished_initial:
+
+ if (cachep->c_flags & SLAB_RED_ZONE)
+ goto red_zone;
+return_red:
#endif /* SLAB_DEBUG_SUPPORT */
spin_lock_irqsave(&cachep->c_spinlock, save_flags);
@@ -1511,25 +1509,24 @@ passed_extra:
slabp->s_inuse--;
bufp->buf_nextp = slabp->s_freep;
slabp->s_freep = bufp;
- if (slabp->s_inuse) {
- if (bufp->buf_nextp) {
+ if (bufp->buf_nextp) {
+ if (slabp->s_inuse) {
/* (hopefully) The most common case. */
finished:
#if SLAB_DEBUG_SUPPORT
- /* Need to poision the obj while holding the lock. */
- if (cachep->c_flags & SLAB_POISION)
+ if (cachep->c_flags & SLAB_POISION) {
+ if (cachep->c_flags & SLAB_RED_ZONE)
+ objp += BYTES_PER_WORD;
kmem_poision_obj(cachep, objp);
- if (cachep->c_flags & SLAB_RED_ZONE)
- goto red_zone;
-return_red:
+ }
#endif /* SLAB_DEBUG_SUPPORT */
spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
return;
}
- kmem_cache_one_free(cachep, slabp);
+ kmem_cache_full_free(cachep, slabp);
goto finished;
}
- kmem_cache_full_free(cachep, slabp);
+ kmem_cache_one_free(cachep, slabp);
goto finished;
}
@@ -1563,20 +1560,20 @@ extra_checks:
}
goto passed_extra;
red_zone:
- /* We hold the cache-lock while checking the red-zone, just incase
- * some tries to take this obj from us...
+ /* We do not hold the cache-lock while checking the red-zone.
*/
+ objp -= BYTES_PER_WORD;
if (xchg((unsigned long *)objp, SLAB_RED_MAGIC1) != SLAB_RED_MAGIC2) {
/* Either write before start of obj, or a double free. */
kmem_report_free_err("Bad front redzone", objp, cachep);
}
- objp += BYTES_PER_WORD;
- if (xchg((unsigned long *)(objp+cachep->c_org_size), SLAB_RED_MAGIC1) != SLAB_RED_MAGIC2) {
+ if (xchg((unsigned long *)(objp+cachep->c_org_size+BYTES_PER_WORD), SLAB_RED_MAGIC1) != SLAB_RED_MAGIC2) {
/* Either write past end of obj, or a double free. */
kmem_report_free_err("Bad rear redzone", objp, cachep);
}
goto return_red;
#endif /* SLAB_DEBUG_SUPPORT */
+
bad_slab:
/* Slab doesn't contain the correct magic num. */
if (slabp->s_magic == SLAB_MAGIC_DESTROYED) {
@@ -1713,24 +1710,49 @@ kmem_cache_reap(int pri, int dma, int wait)
kmem_slab_t *slabp;
kmem_cache_t *searchp;
kmem_cache_t *best_cachep;
- unsigned long scan;
- unsigned long reap_level;
+ unsigned int scan;
+ unsigned int reap_level;
+ static unsigned long call_count = 0;
if (in_interrupt()) {
printk("kmem_cache_reap() called within int!\n");
return 0;
}
- scan = 9-pri;
- reap_level = pri >> 1;
/* We really need a test semphore op so we can avoid sleeping when
* !wait is true.
*/
down(&cache_chain_sem);
+
+ scan = 10-pri;
+ if (pri == 6 && !dma) {
+ if (++call_count == 199) {
+ /* Hack Alert!
+ * Occassionally we try hard to reap a slab.
+ */
+ call_count = 0UL;
+ reap_level = 0;
+ scan += 2;
+ } else
+ reap_level = 3;
+ } else {
+ if (pri >= 5) {
+ /* We also come here for dma==1 at pri==6, just
+ * to try that bit harder (assumes that there are
+ * less DMAable pages in a system - not always true,
+ * but this doesn't hurt).
+ */
+ reap_level = 2;
+ } else
+ reap_level = 0;
+ }
+
best_cachep = NULL;
searchp = clock_searchp;
do {
- unsigned long full_free;
+ unsigned int full_free;
+ unsigned int dma_flag;
+
/* It's safe to test this without holding the cache-lock. */
if (searchp->c_flags & SLAB_NO_REAP)
goto next;
@@ -1747,6 +1769,7 @@ kmem_cache_reap(int pri, int dma, int wait)
printk(KERN_ERR "kmem_reap: Corrupted cache struct for %s\n", searchp->c_name);
goto next;
}
+ dma_flag = 0;
full_free = 0;
/* Count num of fully free slabs. Hopefully there are not many,
@@ -1756,9 +1779,14 @@ kmem_cache_reap(int pri, int dma, int wait)
while (!slabp->s_inuse && slabp != kmem_slab_end(searchp)) {
slabp = slabp->s_prevp;
full_free++;
+ if (slabp->s_dma)
+ dma_flag++;
}
spin_unlock_irq(&searchp->c_spinlock);
+ if (dma && !dma_flag)
+ goto next;
+
if (full_free) {
if (full_free >= 10) {
best_cachep = searchp;
@@ -1769,10 +1797,8 @@ kmem_cache_reap(int pri, int dma, int wait)
* more than one page per slab (as it can be difficult
* to get high orders from gfp()).
*/
- if (pri == 6) { /* magic '6' from try_to_free_page() */
- if (searchp->c_ctor)
- full_free--;
- if (full_free && searchp->c_gfporder)
+ if (pri == 6) { /* magic '6' from try_to_free_page() */
+ if (searchp->c_gfporder || searchp->c_ctor)
full_free--;
}
if (full_free >= reap_level) {
@@ -1797,8 +1823,21 @@ next:
spin_lock_irq(&best_cachep->c_spinlock);
if (!best_cachep->c_growing && !(slabp = best_cachep->c_lastp)->s_inuse && slabp != kmem_slab_end(best_cachep)) {
+ if (dma) {
+ do {
+ if (slabp->s_dma)
+ goto good_dma;
+ slabp = slabp->s_prevp;
+ } while (!slabp->s_inuse && slabp != kmem_slab_end(best_cachep));
+
+ /* Didn't found a DMA slab (there was a free one -
+ * must have been become active).
+ */
+ goto dma_fail;
+good_dma:
+ }
if (slabp == best_cachep->c_freep)
- best_cachep->c_freep = kmem_slab_end(best_cachep);
+ best_cachep->c_freep = slabp->s_nextp;
kmem_slab_unlink(slabp);
SLAB_STATS_INC_REAPED(best_cachep);
@@ -1809,6 +1848,7 @@ next:
kmem_slab_destroy(best_cachep, slabp);
return 1;
}
+dma_fail:
spin_unlock_irq(&best_cachep->c_spinlock);
return 0;
}
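
Note: the largest rework is in slab.c. c_org_size is now recorded before the red-zone words are added to the object size, __kmem_cache_free() checks the red zone without holding the cache lock (adjusting objp only inside the red_zone path), and kmem_cache_reap() gets a priority-to-effort mapping plus DMA awareness: when dma is set it skips caches whose fully-free slabs contain no DMA-capable pages. The layout implied by the new offsets is one guard word in front of the object and one behind it. A small user-space sketch of that check (the magic values and helper names are invented; the kernel uses xchg() with its SLAB_RED_MAGIC constants):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define WORD         sizeof(unsigned long)
    #define RED_ACTIVE   0xA5A5A5A5UL   /* stand-in for "object in use" magic */
    #define RED_INACTIVE 0x5A5A5A5AUL   /* stand-in for "object free" magic   */

    /* Buffer layout: [front guard word][object of org_size bytes][rear guard word]. */
    static void *alloc_redzoned(size_t org_size)
    {
        unsigned char *buf = malloc(org_size + 2 * WORD);
        *(unsigned long *)buf = RED_ACTIVE;
        *(unsigned long *)(buf + WORD + org_size) = RED_ACTIVE;
        return buf + WORD;                          /* caller sees only the object */
    }

    static void free_redzoned(void *objp, size_t org_size)
    {
        unsigned char *front = (unsigned char *)objp - WORD;   /* objp -= BYTES_PER_WORD */
        unsigned long *rear  = (unsigned long *)(front + WORD + org_size);

        if (*(unsigned long *)front != RED_ACTIVE)
            fprintf(stderr, "Bad front redzone (underrun or double free)\n");
        if (*rear != RED_ACTIVE)
            fprintf(stderr, "Bad rear redzone (overrun or double free)\n");
        *(unsigned long *)front = RED_INACTIVE;     /* mark free, catches double frees */
        *rear = RED_INACTIVE;
        free(front);
    }

    int main(void)
    {
        char *obj = alloc_redzoned(32);
        memset(obj, 0, 32);     /* staying inside the object keeps both guards intact */
        free_redzoned(obj, 32);
        return 0;
    }

This matches the new rear-zone offset in the diff: after objp is pulled back by one word, the rear word sits at objp + c_org_size + BYTES_PER_WORD.
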
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f3ffa46d5..e0cfe1fef 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -24,15 +24,6 @@
#include <asm/bitops.h>
#include <asm/pgtable.h>
-/*
- * To save us from swapping out pages which have just been swapped in and
- * have not been modified since then, we keep in swap_cache[page>>PAGE_SHIFT]
- * the swap entry which was last used to fill the page, or zero if the
- * page does not currently correspond to a page in swap. PAGE_DIRTY makes
- * this info useless.
- */
-unsigned long *swap_cache;
-
#ifdef SWAP_CACHE_INFO
unsigned long swap_cache_add_total = 0;
unsigned long swap_cache_add_success = 0;
@@ -50,7 +41,7 @@ void show_swap_cache_info(void)
}
#endif
-int add_to_swap_cache(unsigned long index, unsigned long entry)
+int add_to_swap_cache(struct page *page, unsigned long entry)
{
struct swap_info_struct * p = &swap_info[SWP_TYPE(entry)];
@@ -58,10 +49,9 @@ int add_to_swap_cache(unsigned long index, unsigned long entry)
swap_cache_add_total++;
#endif
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
- entry = xchg(swap_cache + index, entry);
- if (entry) {
- printk("swap_cache: replacing non-NULL entry\n");
- }
+ page->pg_swap_entry = entry;
+ if (PageTestandSetSwapCache(page))
+ printk("swap_cache: replacing non-empty entry\n");
#ifdef SWAP_CACHE_INFO
swap_cache_add_success++;
#endif
@@ -70,18 +60,6 @@ int add_to_swap_cache(unsigned long index, unsigned long entry)
return 0;
}
-__initfunc(unsigned long init_swap_cache(unsigned long mem_start,
- unsigned long mem_end))
-{
- unsigned long swap_cache_size;
-
- mem_start = (mem_start + 15) & ~15;
- swap_cache = (unsigned long *) mem_start;
- swap_cache_size = MAP_NR(mem_end);
- memset(swap_cache, 0, swap_cache_size * sizeof (unsigned long));
- return (unsigned long) (swap_cache + swap_cache_size);
-}
-
void swap_duplicate(unsigned long entry)
{
struct swap_info_struct * p;
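
Note: swap_state.c holds the data-structure change that drives most of the other hunks. The global swap_cache[] array (one slot per physical page, formerly sized and zeroed by init_swap_cache()) is gone, and the swap entry now lives in the struct page itself, guarded by a page flag. A condensed user-space model of add_to_swap_cache() under that scheme (the flag bit, helper names and struct layout are illustrative; the kernel uses PageTestandSetSwapCache() and page->pg_swap_entry, and only proceeds when the swap device is marked SWP_WRITEOK):

    #include <stdio.h>

    #define PG_SWAP_CACHE (1UL << 0)     /* stand-in for the real swap-cache page flag */

    struct fake_page {
        unsigned long flags;
        unsigned long swap_entry;        /* models page->pg_swap_entry */
    };

    /* Record which swap slot backs this page; warn if one was already recorded. */
    static int fake_add_to_swap_cache(struct fake_page *page, unsigned long entry)
    {
        page->swap_entry = entry;
        if (page->flags & PG_SWAP_CACHE)
            printf("swap_cache: replacing non-empty entry\n");
        page->flags |= PG_SWAP_CACHE;    /* the kernel does this as one test-and-set */
        return 1;
    }

    static int fake_in_swap_cache(const struct fake_page *page)
    {
        return (page->flags & PG_SWAP_CACHE) != 0;
    }

    int main(void)
    {
        struct fake_page page = { 0, 0 };
        fake_add_to_swap_cache(&page, 0x1234);
        printf("in swap cache: %d, entry: %#lx\n",
               fake_in_swap_cache(&page), page.swap_entry);
        return 0;
    }

Because the lookup key is now the page descriptor, delete_from_swap_cache(), in_swap_cache() and find_in_swap_cache() change signature as well, which is why memory.c, page_alloc.c, swapfile.c and vmscan.c pass struct page pointers (typically &mem_map[MAP_NR(page)]) instead of map numbers.
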
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 32a5ed8b0..819ae7aa8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -176,14 +176,16 @@ static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
if (pte_none(pte))
return 0;
if (pte_present(pte)) {
+ struct page *pg;
unsigned long page_nr = MAP_NR(pte_page(pte));
if (page_nr >= max_mapnr)
return 0;
- if (!in_swap_cache(page_nr))
+ pg = mem_map + page_nr;
+ if (!in_swap_cache(pg))
return 0;
- if (SWP_TYPE(in_swap_cache(page_nr)) != type)
+ if (SWP_TYPE(in_swap_cache(pg)) != type)
return 0;
- delete_from_swap_cache(page_nr);
+ delete_from_swap_cache(pg);
set_pte(dir, pte_mkdirty(pte));
return 0;
}
@@ -332,7 +334,7 @@ asmlinkage int sys_swapoff(const char * specialfile)
lock_kernel();
if (!suser())
goto out;
- err = namei(specialfile,&inode);
+ err = namei(NAM_FOLLOW_LINK, specialfile, &inode);
if (err)
goto out;
prev = -1;
@@ -486,12 +488,12 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
} else {
p->prio = --least_priority;
}
- error = namei(specialfile,&swap_inode);
+ error = namei(NAM_FOLLOW_LINK, specialfile, &swap_inode);
if (error)
goto bad_swap_2;
p->swap_file = swap_inode;
error = -EBUSY;
- if (swap_inode->i_count != 1)
+ if (atomic_read(&swap_inode->i_count) != 1)
goto bad_swap_2;
error = -EINVAL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 875f668ee..21c178159 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,7 +87,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
/* Deal with page aging. Pages age from being unused; they
* rejuvenate on being accessed. Only swap old pages (age==0
* is oldest). */
- if ((pte_dirty(pte) && delete_from_swap_cache(MAP_NR(page)))
+ if ((pte_dirty(pte) && delete_from_swap_cache(page_map))
|| pte_young(pte)) {
set_pte(page_table, pte_mkold(pte));
touch_page(page_map);
@@ -117,7 +117,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
free_page(page);
return 1; /* we slept: the process may not exist any more */
}
- if ((entry = find_in_swap_cache(MAP_NR(page)))) {
+ if ((entry = find_in_swap_cache(page_map))) {
if (atomic_read(&page_map->count) != 1) {
set_pte(page_table, pte_mkdirty(pte));
printk("Aiee.. duplicated cached swap-cache entry\n");