Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 19dd69a11..2d6466656 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -111,6 +111,10 @@
#include <asm/atomic.h>
#include <asm/smp_lock.h>
#include <asm/spinlock.h>
+#ifdef __mips__
+#include <asm/pgtable.h>
+#include <asm/addrspace.h>
+#endif
/* If there is a different PAGE_SIZE around, and it works with this allocator,
* then change the following.
@@ -1610,11 +1614,19 @@ void *
kmalloc(size_t size, int flags)
{
	cache_sizes_t *csizep = cache_sizes;
+	unsigned long addr;

	for (; csizep->cs_size; csizep++) {
		if (size > csizep->cs_size)
			continue;
-		return __kmem_cache_alloc(csizep->cs_cachep, flags);
+		addr = (unsigned long) __kmem_cache_alloc(csizep->cs_cachep, flags);
+#ifdef __mips__
+		if (addr && (flags & GFP_UNCACHED)) {
+			flush_cache_all();	/* Ouch ... */
+			addr = KSEG1ADDR(addr);
+		}
+#endif /* __mips__ */
+		return (void *) addr;
	}
	printk(KERN_ERR "kmalloc: Size (%lu) too large\n", (unsigned long) size);
	return NULL;
@@ -1628,6 +1640,10 @@ kfree(const void *objp)
	if (!objp)
		goto null_ptr;
+#ifdef __mips__
+	if (KSEGX(objp) == KSEG1)
+		objp = (const void *) KSEG0ADDR(objp);
+#endif /* __mips__ */
	nr = MAP_NR(objp);
	if (nr >= max_mapnr)
		goto bad_ptr;
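
For reference, a minimal caller-side sketch of how an allocation made with the new GFP_UNCACHED flag would be used and released on MIPS. The driver structure, the names uncached_desc/uncached_ring/alloc_uncached_ring/free_uncached_ring, the descriptor layout, and the header choices are illustrative assumptions for a kernel of this generation, not part of this commit; only kmalloc(), kfree(), and GFP_UNCACHED come from the code above.

#include <linux/malloc.h>	/* kmalloc()/kfree() declarations in kernels of this era (assumed) */
#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical DMA descriptor; the layout is illustrative only. */
struct uncached_desc {
	unsigned long status;
	unsigned long buffer;
};

static struct uncached_desc *uncached_ring;

static int alloc_uncached_ring(int ndesc)
{
	/* GFP_UNCACHED makes kmalloc() return a KSEG1 (uncached) alias,
	 * so CPU stores are visible to a DMA engine without per-access
	 * cache maintenance; the one-time flush happens inside kmalloc(). */
	uncached_ring = kmalloc(ndesc * sizeof(struct uncached_desc),
				GFP_KERNEL | GFP_UNCACHED);
	if (!uncached_ring)
		return -ENOMEM;
	memset(uncached_ring, 0, ndesc * sizeof(struct uncached_desc));
	return 0;
}

static void free_uncached_ring(void)
{
	/* The KSEG1 pointer can be passed straight back: kfree() converts
	 * it to KSEG0 itself (see the KSEGX()/KSEG0ADDR() hunk above). */
	kfree(uncached_ring);
	uncached_ring = NULL;
}

Whether GFP_UNCACHED is OR-ed with GFP_KERNEL or passed on its own depends on how the flag is defined in this tree; the kmalloc() hunk only tests the bit, so combining it with a normal allocation type as above is an assumption made here.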