author     Ralf Baechle <ralf@linux-mips.org>  2000-06-21 01:44:39 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-06-21 01:44:39 +0000
commit     5205a16d8870cdd4cc524589de3e09ad176d129a (patch)
tree       6deddf1269b9e6f13f2fa00529cd4674c3b2a3fa /mm
parent     e8b2e78e4f14d329f2cdfb8ef7ed3582c71454e5 (diff)
Merge with Linux 2.4.0-ac22-riel.
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c        6
-rw-r--r--   mm/slab.c         115
-rw-r--r--   mm/swap_state.c     4
-rw-r--r--   mm/vmscan.c        23
4 files changed, 131 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index ba0048cb8..5e9e6e00b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -564,7 +564,7 @@ void add_to_page_cache_locked(struct page * page, struct address_space *mapping,
/*
* This adds a page to the page cache, starting out as locked,
- * owned by us, referenced, but not uptodate and with no errors.
+ * owned by us, but not uptodate and with no errors.
*/
static inline void __add_to_page_cache(struct page * page,
struct address_space *mapping, unsigned long offset,
@@ -576,8 +576,8 @@ static inline void __add_to_page_cache(struct page * page,
if (PageLocked(page))
BUG();
- flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
- page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
+ flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
+ page->flags = flags | (1 << PG_locked);
page_cache_get(page);
page->index = offset;
add_page_to_inode_queue(mapping, page);
diff --git a/mm/slab.c b/mm/slab.c
index f3d04da8e..cccc16c58 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -666,9 +666,33 @@ kmem_cache_cal_waste(unsigned long gfporder, size_t size, size_t extra,
return (wastage + gfporder + (extra * *num));
}
-/* Create a cache:
+/**
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @offset: The offset to use within the page.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ * @dtor: A destructor for the objects.
+ *
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache
+ * and the @dtor is run before the pages are handed back.
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
+ * memory pressure.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
*/
kmem_cache_t *
kmem_cache_create(const char *name, size_t size, size_t offset,
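
The hunk above documents the cache-creation flags; what follows is a minimal
usage sketch against the 2.4-era slab API (the cache name, struct foo and
foo_ctor are invented for illustration and are not part of this patch):

#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int state;
	struct foo *next;
};

static kmem_cache_t *foo_cachep;

/* Constructor: run once per object when the cache grabs new pages. */
static void foo_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	struct foo *f = obj;

	f->state = 0;
	f->next = NULL;
}

static int foo_cache_init(void)
{
	/* Align objects to a hardware cacheline; no destructor needed. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}
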
@@ -1044,7 +1068,11 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
return ret;
}
-/* Shrink a cache. Releases as many slabs as possible for a cache.
+/**
+ * kmem_cache_shrink - Shrink a cache.
+ * @cachep: The cache to shrink.
+ *
+ * Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
int
@@ -1060,9 +1088,12 @@ kmem_cache_shrink(kmem_cache_t *cachep)
return __kmem_cache_shrink(cachep);
}
-/*
- * Remove a kmem_cache_t object from the slab cache. When returns 0 it
- * completed succesfully. -arca
+/**
+ * kmem_cache_destroy - delete a cache
+ * @cachep: the cache to destroy
+ *
+ * Remove a kmem_cache_t object from the slab cache.
+ * Returns 0 on success.
*
* It is expected this function will be called by a module when it is
* unloaded. This will remove the cache completely, and avoid a duplicate
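
Continuing the hypothetical foo_cachep example, the teardown side documented
here would typically be called from a module's unload path:

#include <linux/kernel.h>

static void foo_cache_exit(void)
{
	/* Give back any empty slabs still held by the cache. */
	kmem_cache_shrink(foo_cachep);

	/* Remove the cache entirely; a non-zero return means objects
	 * were still live, which is a bug in the caller. */
	if (kmem_cache_destroy(foo_cachep))
		printk(KERN_ERR "foo: objects still in use on unload\n");
}
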
@@ -1670,18 +1701,55 @@ null_addr:
return;
}
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache. The flags are only relevant
+ * if the cache has no available objects.
+ */
void *
kmem_cache_alloc(kmem_cache_t *cachep, int flags)
{
return __kmem_cache_alloc(cachep, flags);
}
+/**
+ * kmem_cache_free - Deallocate an object
+ * @cachep: The cache the allocation was from.
+ * @objp: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
void
kmem_cache_free(kmem_cache_t *cachep, void *objp)
{
__kmem_cache_free(cachep, objp);
}
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel. The @flags argument may be one of:
+ *
+ * %GFP_BUFFER - XXX
+ *
+ * %GFP_ATOMIC - allocation will not sleep. Use inside interrupt handlers.
+ *
+ * %GFP_USER - allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - allocate normal kernel ram. May sleep.
+ *
+ * %GFP_NFS - has a slightly lower probability of sleeping than %GFP_KERNEL.
+ * Don't use unless you're in the NFS code.
+ *
+ * %GFP_KSWAPD - Don't use unless you're modifying kswapd.
+ */
void *
kmalloc(size_t size, int flags)
{
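
A sketch of the allocate/free pair documented in this hunk, again using the
invented foo_cachep cache from the fragment above:

static struct foo *foo_get(void)
{
	/* GFP_KERNEL: the caller may sleep while a fresh slab is allocated. */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
	/* Objects must be returned to the cache they were allocated from. */
	kmem_cache_free(foo_cachep, f);
}
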
@@ -1696,6 +1764,13 @@ kmalloc(size_t size, int flags)
return NULL;
}
+/**
+ * kfree - free previously allocated memory
+ * @objp: pointer returned by kmalloc.
+ *
+ * Don't free memory not originally allocated by kmalloc()
+ * or you will run into trouble.
+ */
void
kfree(const void *objp)
{
@@ -1741,6 +1816,17 @@ null_ptr:
return;
}
+/**
+ * kfree_s - free previously allocated memory
+ * @objp: pointer returned by kmalloc.
+ * @size: size of object which is being freed.
+ *
+ * This function performs the same task as kfree() except
+ * that it can use the extra information to speed up deallocation
+ * or perform additional tests.
+ * Don't free memory not originally allocated by kmalloc()
+ * or allocated with a different size, or you will run into trouble.
+ */
void
kfree_s(const void *objp, size_t size)
{
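
For the generic kmalloc()/kfree() interface documented above, a minimal
sketch (the 128-byte scratch buffer is purely illustrative):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int make_scratch(void)
{
	char *buf;

	/* GFP_KERNEL: normal kernel ram, the caller may sleep. */
	buf = kmalloc(128, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, 128);

	/* kfree_s() lets the allocator skip looking up the object's size;
	 * plain kfree(buf) would work just as well. */
	kfree_s(buf, 128);
	return 0;
}
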
@@ -1788,7 +1874,11 @@ kmem_find_general_cachep(size_t size)
}
-/* Called from try_to_free_page().
+/**
+ * kmem_cache_reap - Reclaim memory from caches.
+ * @gfp_mask: the type of memory required.
+ *
+ * Called from try_to_free_page().
* This function _cannot_ be called within a int, but it
* can be interrupted.
*/
@@ -1951,8 +2041,17 @@ kmem_self_test(void)
#endif /* SLAB_SELFTEST */
#if defined(CONFIG_PROC_FS)
-/* /proc/slabinfo
- * cache-name num-active-objs total-objs num-active-slabs total-slabs num-pages-per-slab
+/**
+ * get_slabinfo - generates /proc/slabinfo
+ * @buf: the buffer to write it into
+ *
+ * The contents of the buffer are
+ * cache-name
+ * num-active-objs
+ * total-objs
+ * num-active-slabs
+ * total-slabs
+ * num-pages-per-slab
*/
int
get_slabinfo(char *buf)
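
For illustration only (the numbers are invented), one line of the resulting
/proc/slabinfo output would look like:

vm_area_struct      1206   1320     30     33      1

i.e. cache name, active objects, total objects, active slabs, total slabs
and pages per slab, in the order listed in the comment above.
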
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 87ecc0c10..00000843a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -58,8 +58,8 @@ void add_to_swap_cache(struct page *page, swp_entry_t entry)
BUG();
if (page->mapping)
BUG();
- flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty));
- page->flags = flags | (1 << PG_referenced) | (1 << PG_uptodate);
+ flags = page->flags & ~(1 << PG_error);
+ page->flags = flags | (1 << PG_uptodate);
add_to_page_cache_locked(page, &swapper_space, entry.val);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 597a1b093..4e7ad6ab7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -444,6 +444,7 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
int priority;
int count = FREE_COUNT;
int swap_count = 0;
+ int made_progress = 0;
int ret = 0;
/* Always trim SLAB caches when memory gets low. */
@@ -452,7 +453,7 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
priority = 64;
do {
while (shrink_mmap(priority, gfp_mask)) {
- ret = 1;
+ made_progress = 1;
if (!--count)
goto done;
}
@@ -468,11 +469,11 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
count -= shrink_dcache_memory(priority, gfp_mask);
count -= shrink_icache_memory(priority, gfp_mask);
if (count <= 0) {
- ret = 1;
+ made_progress = 1;
goto done;
}
while (shm_swap(priority, gfp_mask)) {
- ret = 1;
+ made_progress = 1;
if (!--count)
goto done;
}
@@ -493,11 +494,25 @@ static int do_try_to_free_pages(unsigned int gfp_mask)
*/
swap_count += count;
while (swap_out(priority, gfp_mask)) {
+ made_progress = 1;
if (--swap_count < 0)
break;
}
- } while (--priority >= 0);
+ /*
+ * If we made progress at the current priority, the next
+ * loop will also be done at this priority level. There's
+ * absolutely no reason to drop to a lower priority and
+ * potentially upset the balance between shrink_mmap and
+ * swap_out.
+ */
+ if (made_progress) {
+ made_progress = 0;
+ ret = 1;
+ } else {
+ priority--;
+ }
+ } while (priority >= 0);
/* Always end on a shrink_mmap.. */
while (shrink_mmap(0, gfp_mask)) {
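
The effect of the vmscan changes above can be summarised by this standalone
model of the new loop (free_something() is a made-up stand-in for
shrink_mmap(), shm_swap() and swap_out()):

/* Hypothetical shrinker: only succeeds at aggressive (low) priorities. */
static int free_something(int priority)
{
	return priority < 8;
}

static int free_pages_model(int count)
{
	int priority = 64;
	int ret = 0;

	do {
		int made_progress = 0;

		while (free_something(priority)) {
			made_progress = 1;
			if (!--count)
				return 1;
		}

		if (made_progress)
			ret = 1;	/* stay at this priority and retry */
		else
			priority--;	/* nothing freed: try harder */
	} while (priority >= 0);

	return ret;
}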