author    Ralf Baechle <ralf@linux-mips.org>  2000-12-14 21:39:50 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-12-14 21:39:50 +0000
commit    873d7498bd7efba53e22e2db70ebbc11bf686d07 (patch)
tree      6cd350493a7bbd9adc739e4193af44f2d1765040 /include
parent    920c6058e2a57774263231a6a2c76c4f8b633eaa (diff)
MIPS32 patches from MIPS.
Diffstat (limited to 'include')
-rw-r--r--  include/asm-mips/mips32_cache.h       288
-rw-r--r--  include/asm-mips/mipsregs.h            26
-rw-r--r--  include/asm-mips/semaphore-helper.h    11
-rw-r--r--  include/asm-mips/semaphore.h            3
4 files changed, 319 insertions(+), 9 deletions(-)
diff --git a/include/asm-mips/mips32_cache.h b/include/asm-mips/mips32_cache.h
new file mode 100644
index 000000000..2de18bd7c
--- /dev/null
+++ b/include/asm-mips/mips32_cache.h
@@ -0,0 +1,288 @@
+/*
+ * mips32_cache.h
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * Inline assembly cache operations.
+ *
+ * This file is the original r4cache.c file, with modifications that make
+ * the cache handling more generic.
+ *
+ * FIXME: Handle split L2 caches.
+ *
+ */
+#ifndef _MIPS_R4KCACHE_H
+#define _MIPS_R4KCACHE_H
+
+#include <asm/asm.h>
+#include <asm/cacheops.h>
+
+extern inline void flush_icache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Invalidate_I));
+}
+
+extern inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Writeback_Inv_D));
+}
+
+extern inline void flush_scache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Writeback_Inv_SD));
+}
+
+extern inline void flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_I));
+}
+
+extern inline void flush_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_Inv_D));
+}
+
+extern inline void invalidate_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_D));
+}
+
+extern inline void invalidate_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_SD));
+}
+
+extern inline void flush_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_Inv_SD));
+}
+
+/*
+ * The next two are for badland addresses like signal trampolines.
+ */
+extern inline void protected_flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set mips0\n\t"
+ ".set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ STR(PTR)"\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_I));
+}
+
+extern inline void protected_writeback_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set mips0\n\t"
+ ".set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ STR(PTR)"\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_D));
+}
+
+#define cache_unroll(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ .set mips3; \
+ cache %1, (%0); \
+ .set mips0; \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+
+extern inline void blast_dcache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + dcache_size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_dcache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Hit_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_dcache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_icache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + icache_size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_icache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Hit_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_icache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Index_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_scache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+extern inline void blast_scache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache_unroll(start,Hit_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+extern inline void blast_scache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+#endif /* !(_MIPS_R4KCACHE_H) */
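
The blast_*() routines above all follow one pattern: step through an address
range one cache line at a time, issuing one CACHE instruction per line through
the cache_unroll() macro. A minimal open-coded equivalent, shown purely as
illustration (example_writeback_inv_page() is a hypothetical name, not part of
this patch):

	/* Illustration only: open-coded equivalent of blast_dcache_page().
	 * Issues one Hit_Writeback_Inv_D cache op per D-cache line in the
	 * page, stepping by the probed D-cache line size. */
	static inline void example_writeback_inv_page(unsigned long page)
	{
		unsigned long addr;

		for (addr = page; addr < page + PAGE_SIZE; addr += dc_lsize)
			cache_unroll(addr, Hit_Writeback_Inv_D);
	}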
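The protected_*() variants merit a note: the 1:/2: labels together with the
__ex_table entry let the kernel's exception fixup machinery skip a CACHE op
that faults on an unmapped user address. A typical (hypothetical) use is
flushing a signal trampoline just written into user space:

	/* Illustration only: flushing a user-space signal trampoline.
	 * The address may not be mapped, so the protected variants are
	 * used; if the CACHE op takes a TLB fault, the __ex_table fixup
	 * simply skips the instruction. */
	static inline void example_flush_user_line(unsigned long uaddr)
	{
		protected_writeback_dcache_line(uaddr);	/* push data out */
		protected_flush_icache_line(uaddr);	/* drop stale I-line */
	}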
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a3c3f64ba..cbedf0ae5 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -6,9 +6,11 @@
* Copyright (C) 1994, 1995, 1996, 1997, 2000 by Ralf Baechle
* Copyright (C) 2000 Silicon Graphics, Inc.
* Modified for further R[236]000 support by Paul M. Antoine, 1996.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
-#ifndef __ASM_MIPS_MIPSREGS_H
-#define __ASM_MIPS_MIPSREGS_H
+#ifndef _ASM_MIPSREGS_H
+#define _ASM_MIPSREGS_H
#include <linux/linkage.h>
@@ -187,6 +189,24 @@
"dmtc0\t%0,"STR(register)"\n\t" \
".set\tmips0" \
: : "r" (value))
+
+#ifdef CONFIG_CPU_MIPS32
+/*
+ * This should be changed when we get a compiler that supports the MIPS32 ISA.
+ */
+#define read_mips32_cp0_config1() \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ ".set\tnoat\n\t" \
+ ".word\t0x40018001\n\t" \
+ "move\t%0,$1\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+:"=r" (__res)); \
+ __res;})
+#endif
+
/*
* R4x00 interrupt enable / cause bits
*/
@@ -421,4 +441,4 @@ extern asmlinkage unsigned int read_perf_cntl(unsigned int counter);
extern asmlinkage void write_perf_cntl(unsigned int counter, unsigned int val);
#endif
-#endif /* __ASM_MIPS_MIPSREGS_H */
+#endif /* _ASM_MIPSREGS_H */
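
The magic word 0x40018001 above is the hand-assembled encoding of
mfc0 $1, $16, 1, a read of CP0 register 16 select 1 (Config1), which
assemblers of the day could not emit because they lacked the MIPS32 select
field. Per the MIPS32 architecture spec, Config1 describes the cache
geometry; a hypothetical sketch of decoding the line sizes from it
(example_probe_line_sizes() is not part of this patch):

	/* Illustration only: decode MIPS32 Config1 cache line sizes.
	 * IL (bits 21:19) and DL (bits 12:10) encode the I- and D-cache
	 * line size as 2 << L bytes; L == 0 means no cache present. */
	static void example_probe_line_sizes(void)
	{
		unsigned int config1 = read_mips32_cp0_config1();
		unsigned int il = (config1 >> 19) & 7;
		unsigned int dl = (config1 >> 10) & 7;

		ic_lsize = il ? (2UL << il) : 0; /* feeds blast_icache() */
		dc_lsize = dl ? (2UL << dl) : 0; /* feeds blast_dcache() */
	}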
diff --git a/include/asm-mips/semaphore-helper.h b/include/asm-mips/semaphore-helper.h
index c183834e7..c281a4f3e 100644
--- a/include/asm-mips/semaphore-helper.h
+++ b/include/asm-mips/semaphore-helper.h
@@ -1,10 +1,11 @@
/*
* SMP- and interrupt-safe semaphores helper functions.
*
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- * (C) Copyright 1999 Ralf Baechle
- * (C) Copyright 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996 Linus Torvalds
+ * Copyright (C) 1999 Andrea Arcangeli
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
*/
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H
@@ -19,7 +20,7 @@ static inline void wake_one_more(struct semaphore * sem)
atomic_inc(&sem->waking);
}
-#if !defined(CONFIG_CPU_HAS_LLSC)
+#if !defined(CONFIG_CPU_HAS_LLSC) || defined(CONFIG_CPU_MIPS32)
/*
* It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
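
On CPUs routed down this path, atomicity for the semaphore bookkeeping comes
from masking interrupts around the read-modify-write rather than from ll/sc
sequences. A minimal sketch of that pattern using the 2.4-era
save_and_cli()/restore_flags() interface (illustrative; not the literal body
of this patch, which also has to consider SMP):

	/* Illustration only: the no-LL/SC fallback pattern.  The test and
	 * decrement of sem->waking become atomic because no interrupt can
	 * intervene while IRQs are masked (sufficient on uniprocessor). */
	static inline int example_waking_non_zero(struct semaphore *sem)
	{
		unsigned long flags;
		int ret = 0;

		save_and_cli(flags);		/* save IRQ state, mask IRQs */
		if (atomic_read(&sem->waking) > 0) {
			atomic_dec(&sem->waking);
			ret = 1;
		}
		restore_flags(flags);		/* restore previous IRQ state */
		return ret;
	}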
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index 19fb03efc..ebd77bd39 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -8,6 +8,7 @@
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1998, 1999, 2000 Ralf Baechle
* (C) Copyright 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H
@@ -104,7 +105,7 @@ static inline int down_interruptible(struct semaphore * sem)
return ret;
}
-#if !defined(CONFIG_CPU_HAS_LLSC)
+#if !defined(CONFIG_CPU_HAS_LLSC) || defined(CONFIG_CPU_MIPS32)
static inline int down_trylock(struct semaphore * sem)
{