author    Ralf Baechle <ralf@linux-mips.org>  1999-06-17 13:25:08 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-06-17 13:25:08 +0000
commit    59223edaa18759982db0a8aced0e77457d10c68e (patch)
tree      89354903b01fa0a447bffeefe00df3044495db2e  /arch/mips/mm
parent    db7d4daea91e105e3859cf461d7e53b9b77454b2  (diff)
Merge with Linux 2.3.6. Sorry, this isn't tested on silicon, I don't
have a MIPS box at hand.
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/loadmmu.c  |  3
-rw-r--r--  arch/mips/mm/r4xx0.c    | 10
2 files changed, 11 insertions, 2 deletions
diff --git a/arch/mips/mm/loadmmu.c b/arch/mips/mm/loadmmu.c
index 6142d82c5..8538ff2dc 100644
--- a/arch/mips/mm/loadmmu.c
+++ b/arch/mips/mm/loadmmu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: loadmmu.c,v 1.8 1999/04/11 17:13:56 harald Exp $
+ * $Id: loadmmu.c,v 1.9 1999/06/13 16:30:35 ralf Exp $
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -31,6 +31,7 @@ void (*flush_page_to_ram)(unsigned long page);
/* DMA cache operations. */
void (*dma_cache_wback_inv)(unsigned long start, unsigned long size);
+void (*dma_cache_wback)(unsigned long start, unsigned long size);
void (*dma_cache_inv)(unsigned long start, unsigned long size);
/* TLB operations. */
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index 1b2ac3674..31e76cead 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -1,4 +1,4 @@
-/* $Id: r4xx0.c,v 1.20 1998/10/14 23:40:45 ralf Exp $
+/* $Id: r4xx0.c,v 1.21 1999/01/04 16:03:54 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -2172,6 +2172,12 @@ r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
}
}
+static void
+r4k_dma_cache_wback(unsigned long addr, unsigned long size)
+{
+ panic("r4k_dma_cache called - should not happen.\n");
+}
+
/*
* While we're protected against bad userland addresses we don't care
* very much about what happens in that case. Usually a segmentation
@@ -2652,6 +2658,7 @@ __initfunc(static void setup_noscache_funcs(void))
break;
}
dma_cache_wback_inv = r4k_dma_cache_wback_inv_pc;
+ dma_cache_wback = r4k_dma_cache_wback;
dma_cache_inv = r4k_dma_cache_inv_pc;
}
@@ -2735,6 +2742,7 @@ __initfunc(static void setup_scache_funcs(void))
break;
}
dma_cache_wback_inv = r4k_dma_cache_wback_inv_sc;
+ dma_cache_wback = r4k_dma_cache_wback;
dma_cache_inv = r4k_dma_cache_inv_sc;
}
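
The functional change in arch/mips/mm is the new dma_cache_wback hook installed alongside the existing dma_cache_wback_inv and dma_cache_inv pointers. A minimal usage sketch follows, assuming a hypothetical driver; the extern declarations and function names below are illustrative and not part of this commit. The idea: write dirty cache lines back before a device reads a DMA buffer, and invalidate the lines covering a buffer the device has written before the CPU reads it.

/* Illustrative sketch only -- not part of this commit. */
extern void (*dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*dma_cache_inv)(unsigned long start, unsigned long size);

/* CPU filled buf: flush dirty lines to memory before the device DMAs it out. */
static void example_prepare_dma_to_device(unsigned long buf, unsigned long len)
{
	dma_cache_wback(buf, len);
}

/* Device DMA'd into buf: discard stale lines so the CPU reads the new data. */
static void example_finish_dma_from_device(unsigned long buf, unsigned long len)
{
	dma_cache_inv(buf, len);
}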