path: root/arch/sh
author     Ralf Baechle <ralf@linux-mips.org>   2000-10-05 01:18:40 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-10-05 01:18:40 +0000
commit     012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree       87efc733f9b164e8c85c0336f92c8fb7eff6d183 /arch/sh
parent     625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation; I had a number
of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I am committing this anyway, since I found that the last CVS versions had the same problem.
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/boot/compressed/Makefile    2
-rw-r--r--  arch/sh/config.in                   2
-rw-r--r--  arch/sh/kernel/entry.S            236
-rw-r--r--  arch/sh/kernel/head.S               2
-rw-r--r--  arch/sh/kernel/io.c                 2
-rw-r--r--  arch/sh/kernel/irq_imask.c          7
-rw-r--r--  arch/sh/kernel/irq_ipr.c            2
-rw-r--r--  arch/sh/kernel/process.c           26
-rw-r--r--  arch/sh/kernel/setup_cqreek.c      64
-rw-r--r--  arch/sh/kernel/sh_bios.c           12
-rw-r--r--  arch/sh/kernel/sh_ksyms.c          29
-rw-r--r--  arch/sh/kernel/signal.c             2
-rw-r--r--  arch/sh/kernel/time.c              34
-rw-r--r--  arch/sh/kernel/traps.c             13
-rw-r--r--  arch/sh/lib/checksum.S             10
-rw-r--r--  arch/sh/mm/cache.c                124
-rw-r--r--  arch/sh/mm/fault.c                179
-rw-r--r--  arch/sh/mm/init.c                   3
-rw-r--r--  arch/sh/vmlinux.lds.S               5
19 files changed, 485 insertions, 269 deletions
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index bd419991b..7e34e14da 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -36,7 +36,7 @@ piggy.o: $(SYSTEM)
$(OBJCOPY) -R .empty_zero_page $(SYSTEM) $$tmppiggy; \
gzip -f -9 < $$tmppiggy > $$tmppiggy.gz; \
echo "SECTIONS { .data : { input_len = .; LONG(input_data_end - input_data) input_data = .; *(.data) input_data_end = .; }}" > $$tmppiggy.lnk; \
- $(LD) -r -o piggy.o -b binary $$tmppiggy.gz -b elf32-shl -T $$tmppiggy.lnk; \
+ $(LD) -r -o piggy.o -b binary $$tmppiggy.gz -b elf32-sh-linux -T $$tmppiggy.lnk; \
rm -f $$tmppiggy $$tmppiggy.gz $$tmppiggy.lnk
clean:
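
The generated $$tmppiggy.lnk script above places the symbols input_len, input_data and input_data_end around the embedded gzip image. A minimal C sketch of how a decompressor can pick these up (decompress_kernel and gunzip are illustrative placeholders, not code from this patch):

extern unsigned char input_data[];      /* start of the gzip'ed kernel image */
extern unsigned char input_data_end[];  /* one byte past its end */
extern int input_len;                   /* the LONG() word: image size in bytes */

extern void gunzip(const unsigned char *src, int len);  /* stand-in for the inflate routine */

void decompress_kernel(void)
{
        gunzip(input_data, input_len);
}
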
diff --git a/arch/sh/config.in b/arch/sh/config.in
index fa36d92f7..f0a51f8d6 100644
--- a/arch/sh/config.in
+++ b/arch/sh/config.in
@@ -123,6 +123,8 @@ source drivers/mtd/Config.in
source drivers/block/Config.in
+source drivers/md/Config.in
+
if [ "$CONFIG_NET" = "y" ]; then
source net/Config.in
fi
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index 7335803da..3b74e2d8e 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -14,7 +14,12 @@
#include <linux/linkage.h>
#include <linux/config.h>
-#define COMPAT_OLD_SYSCALL_ABI 1
+
+/*
+ * Define this to turn on compatibility with the previous
+ * system call ABI. This feature is not properly maintained.
+ */
+#undef COMPAT_OLD_SYSCALL_ABI
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -28,6 +33,12 @@
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ * jmp @$k0 ! control-transfer instruction
+ * ldc $k1, $ssr ! delay slot
+ *
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
@@ -58,6 +69,7 @@ PT_TRACESYS = 0x00000002
PF_USEDFPU = 0x00100000
ENOSYS = 38
+EINVAL = 22
#if defined(__sh3__)
TRA = 0xffffffd0
@@ -76,7 +88,14 @@ MMU_TEA = 0xff00000c ! TLB Exception Address Register
#endif
/* Offsets to the stack */
-R0 = 0 /* Return value */
+R0 = 0 /* Return value. New ABI also arg4 */
+R1 = 4 /* New ABI: arg5 */
+R2 = 8 /* New ABI: arg6 */
+R3 = 12 /* New ABI: syscall_nr */
+R4 = 16 /* New ABI: arg0 */
+R5 = 20 /* New ABI: arg1 */
+R6 = 24 /* New ABI: arg2 */
+R7 = 28 /* New ABI: arg3 */
SP = (15*4)
SR = (16*4+8)
SYSCALL_NR = (16*4+6*4)
@@ -132,7 +151,6 @@ SYSCALL_NR = (16*4+6*4)
tlb_miss_load:
mov.l 2f, $r0
mov.l @$r0, $r6
- STI()
mov $r15, $r4
mov.l 1f, $r0
jmp @$r0
@@ -142,7 +160,6 @@ tlb_miss_load:
tlb_miss_store:
mov.l 2f, $r0
mov.l @$r0, $r6
- STI()
mov $r15, $r4
mov.l 1f, $r0
jmp @$r0
@@ -152,7 +169,6 @@ tlb_miss_store:
initial_page_write:
mov.l 2f, $r0
mov.l @$r0, $r6
- STI()
mov $r15, $r4
mov.l 1f, $r0
jmp @$r0
@@ -162,7 +178,6 @@ initial_page_write:
tlb_protection_violation_load:
mov.l 2f, $r0
mov.l @$r0, $r6
- STI()
mov $r15, $r4
mov.l 1f, $r0
jmp @$r0
@@ -172,14 +187,13 @@ tlb_protection_violation_load:
tlb_protection_violation_store:
mov.l 2f, $r0
mov.l @$r0, $r6
- STI()
mov $r15, $r4
mov.l 1f, $r0
jmp @$r0
mov #1, $r5
.align 2
-1: .long SYMBOL_NAME(do_page_fault)
+1: .long SYMBOL_NAME(__do_page_fault)
2: .long MMU_TEA
#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
@@ -249,9 +263,6 @@ error:
.align 2
1: .long SYMBOL_NAME(do_exception_error)
-badsys: mov #-ENOSYS, $r0
- rts ! go to ret_from_syscall..
- mov.l $r0, @(R0,$r15)
!
!
@@ -291,7 +302,7 @@ ENTRY(ret_from_fork)
*/
system_call:
- mov.l 1f, $r9
+ mov.l __TRA, $r9
mov.l @$r9, $r8
!
! Is the trap argument >= 0x20? (TRA will be >= 0x80)
@@ -304,122 +315,160 @@ system_call:
mov #SYSCALL_NR, $r14
add $r15, $r14
!
- mov #0x40, $r9
#ifdef COMPAT_OLD_SYSCALL_ABI
+ mov #0x40, $r9
cmp/hs $r9, $r8
- mov $r0, $r10
- bf/s 0f
- mov $r0, $r9
+ bf/s old_abi_system_call
+ nop
#endif
! New Syscall ABI
add #-0x40, $r8
shlr2 $r8
shll8 $r8
- shll8 $r8
+ shll8 $r8 ! $r8 = num_args<<16
mov $r3, $r10
or $r8, $r10 ! Encode syscall # and # of arguments
- !
- mov $r3, $r9
- mov #0, $r8
-0:
mov.l $r10, @$r14 ! set syscall_nr
STI()
- mov.l __n_sys, $r10
- cmp/hs $r10, $r9
- bt badsys
!
-#ifdef COMPAT_OLD_SYSCALL_ABI
- ! Build the stack frame if TRA > 0
- mov $r8, $r10
- cmp/pl $r10
- bf 0f
- mov.l @(SP,$r15), $r0 ! get original stack
-7: add #-4, $r10
-4: mov.l @($r0,$r10), $r1 ! May cause address error exception..
- mov.l $r1, @-$r15
- cmp/pl $r10
- bt 7b
-#endif
-0: stc $k_current, $r11
- mov.l @(tsk_ptrace,$r11), $r10 ! Is it trace?
+ stc $k_current, $r11
+ mov.l @(tsk_ptrace,$r11), $r10 ! Is current PTRACE_SYSCALL'd?
mov #PT_TRACESYS, $r11
tst $r11, $r10
bt 5f
- ! Trace system call
- mov #-ENOSYS, $r11
- mov.l $r11, @(R0,$r15)
- ! Push up $R0--$R2, and $R4--$R7
- mov.l $r0, @-$r15
- mov.l $r1, @-$r15
- mov.l $r2, @-$r15
- mov.l $r4, @-$r15
- mov.l $r5, @-$r15
- mov.l $r6, @-$r15
- mov.l $r7, @-$r15
- !
- mov.l 2f, $r11
- jsr @$r11
+ ! Yes it is traced.
+ mov.l __syscall_trace, $r11 ! Call syscall_trace() which notifies
+ jsr @$r11 ! superior (will chomp $R[0-7])
nop
- ! Pop down $R0--$R2, and $R4--$R7
- mov.l @$r15+, $r7
- mov.l @$r15+, $r6
- mov.l @$r15+, $r5
- mov.l @$r15+, $r4
- mov.l @$r15+, $r2
- mov.l @$r15+, $r1
- mov.l @$r15+, $r0
- !
+ ! Reload $R0-$R4 from kernel stack, where the
+ ! parent may have modified them using
+ ! ptrace(POKEUSR). (Note that $R0-$R2 are
+ ! used by the system call handler directly
+ ! from the kernel stack anyway, so don't need
+ ! to be reloaded here.) This allows the parent
+ ! to rewrite system calls and args on the fly.
+ mov.l @(R4,$r15), $r4 ! arg0
+ mov.l @(R5,$r15), $r5
+ mov.l @(R6,$r15), $r6
+ mov.l @(R7,$r15), $r7 ! arg3
+ mov.l @(R3,$r15), $r3 ! syscall_nr
+ ! Arrange for syscall_trace() to be called
+ ! again as the system call returns.
mov.l __syscall_ret_trace, $r10
bra 6f
lds $r10, $pr
- !
+ ! No it isn't traced.
+ ! Arrange for normal system call return.
5: mov.l __syscall_ret, $r10
lds $r10, $pr
- !
-6: mov $r9, $r10
- shll2 $r10 ! x4
+ ! Call the system call handler through the table.
+ ! (both normal and ptrace'd)
+ ! First check for bad syscall number
+6: mov $r3, $r9
+ mov.l __n_sys, $r10
+ cmp/hs $r10, $r9
+ bf 2f
+ ! Bad syscall number
+ rts ! go to syscall_ret or syscall_ret_trace
+ mov #-ENOSYS, $r0
+ ! Good syscall number
+2: shll2 $r9 ! x4
mov.l __sct, $r11
- add $r11, $r10
- mov.l @$r10, $r11
- jmp @$r11
+ add $r11, $r9
+ mov.l @$r9, $r11
+ jmp @$r11 ! jump to specific syscall handler
nop
! In case of trace
- .align 2
-3:
-#ifdef COMPAT_OLD_SYSCALL_ABI
- add $r8, $r15 ! pop off the arguments
-#endif
+syscall_ret_trace:
mov.l $r0, @(R0,$r15) ! save the return value
- mov.l 2f, $r1
+ mov.l __syscall_trace, $r1
mova SYMBOL_NAME(ret_from_syscall), $r0
- jmp @$r1
- lds $r0, $pr
- .align 2
-1: .long TRA
-2: .long SYMBOL_NAME(syscall_trace)
-__n_sys: .long NR_syscalls
-__sct: .long SYMBOL_NAME(sys_call_table)
-__syscall_ret_trace:
- .long 3b
-__syscall_ret:
- .long SYMBOL_NAME(syscall_ret)
+ jmp @$r1 ! Call syscall_trace() which notifies superior
+ lds $r0, $pr ! Then return to ret_from_syscall()
+
+
#ifdef COMPAT_OLD_SYSCALL_ABI
+! Handle old ABI system call.
+! Note that ptrace(SYSCALL) is not supported for the old ABI.
+! At this point:
+! $r0, $r4-7 as per ABI
+! $r8 = value of TRA register (= num_args<<2)
+! $r14 = points to SYSCALL_NR in stack frame
+old_abi_system_call:
+ mov $r0, $r9 ! Save system call number in $r9
+ ! ! arrange for return which pops stack
+ mov.l __old_abi_syscall_ret, $r10
+ lds $r10, $pr
+ ! Build the stack frame if TRA > 0
+ mov $r8, $r10
+ cmp/pl $r10
+ bf 0f
+ mov.l @(SP,$r15), $r0 ! get original user stack
+7: add #-4, $r10
+4: mov.l @($r0,$r10), $r1 ! May cause address error exception..
+ mov.l $r1, @-$r15
+ cmp/pl $r10
+ bt 7b
+0:
+ mov.l $r9, @$r14 ! set syscall_nr
+ STI()
+ ! Call the system call handler through the table.
+ ! First check for bad syscall number
+ mov.l __n_sys, $r10
+ cmp/hs $r10, $r9
+ bf 2f
+ ! Bad syscall number
+ rts ! return to old_abi_syscall_ret
+ mov #-ENOSYS, $r0
+ ! Good syscall number
+2: shll2 $r9 ! x4
+ mov.l __sct, $r11
+ add $r11, $r9
+ mov.l @$r9, $r11
+ jmp @$r11 ! call specific syscall handler,
+ nop
+
+ .align 2
+__old_abi_syscall_ret:
+ .long old_abi_syscall_ret
+
+ ! This code gets called on address error exception when copying
+ ! syscall arguments from user stack to kernel stack. It is
+ ! supposed to return -EINVAL through old_abi_syscall_ret, but it
+ ! appears to have been broken for a long time in that the $r0
+ ! return value will be saved into the kernel stack relative to $r15
+ ! but the value of $r15 is not correct partway through the loop.
+ ! So the user prog is returned its old $r0 value, not -EINVAL.
+ ! Greg Banks 28 Aug 2000.
.section .fixup,"ax"
fixup_syscall_argerr:
+ ! First get $r15 back to
rts
- mov.l 1f, $r0
-1: .long -22 ! -EINVAL
-.previous
+ mov #-EINVAL, $r0
+ .previous
.section __ex_table, "a"
.align 2
.long 4b,fixup_syscall_argerr
-.previous
+ .previous
#endif
.align 2
+__TRA: .long TRA
+__syscall_trace:
+ .long SYMBOL_NAME(syscall_trace)
+__n_sys:.long NR_syscalls
+__sct: .long SYMBOL_NAME(sys_call_table)
+__syscall_ret_trace:
+ .long syscall_ret_trace
+__syscall_ret:
+ .long SYMBOL_NAME(syscall_ret)
+
+
+
+ .align 2
reschedule:
mova SYMBOL_NAME(ret_from_syscall), $r0
mov.l 1f, $r1
@@ -454,10 +503,12 @@ __INV_IMASK:
.long 0xffffff0f ! ~(IMASK)
.align 2
-syscall_ret:
#ifdef COMPAT_OLD_SYSCALL_ABI
+old_abi_syscall_ret:
add $r8, $r15 ! pop off the arguments
+ /* fall through */
#endif
+syscall_ret:
mov.l $r0, @(R0,$r15) ! save the return value
/* fall through */
@@ -707,7 +758,7 @@ handle_exception:
#endif
8: /* User space to kernel */
mov #0x20, $k1
- shll8 $k1 ! $k1 <= 8192
+ shll8 $k1 ! $k1 <= 8192 == THREAD_SIZE
add $current, $k1
mov $k1, $r15 ! change to kernel stack
!
@@ -1107,6 +1158,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_mincore)
.long SYMBOL_NAME(sys_madvise)
.long SYMBOL_NAME(sys_getdents64) /* 220 */
+ .long SYMBOL_NAME(sys_fcntl64)
/*
* NOTE!! This doesn't have to be exact - we just have
@@ -1114,7 +1166,7 @@ ENTRY(sys_call_table)
* entries. Don't panic if you notice that this hasn't
* been shrunk every time we add a new system call.
*/
- .rept NR_syscalls-220
+ .rept NR_syscalls-221
.long SYMBOL_NAME(sys_ni_syscall)
.endr
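
A minimal sketch of the "new" system call ABI that this entry.S rework documents, as seen from user space: the syscall number goes in r3, arguments in r4-r7, the trap number is 0x10 plus the argument count (so TRA reads back as 0x40 + nargs*4), and the result comes back in r0. The write(2) example and its syscall number 4 are assumptions for illustration, not part of the patch:

static inline long my_write(int fd, const void *buf, long count)
{
        register long r3 __asm__("r3") = 4;           /* __NR_write */
        register long r4 __asm__("r4") = fd;          /* arg0 */
        register long r5 __asm__("r5") = (long) buf;  /* arg1 */
        register long r6 __asm__("r6") = count;       /* arg2 */
        register long r0 __asm__("r0");               /* return value */

        __asm__ __volatile__("trapa #0x13"            /* 0x10 + 3 arguments */
                             : "=z" (r0)
                             : "r" (r3), "r" (r4), "r" (r5), "r" (r6)
                             : "memory");
        return r0;
}
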
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index db3e8b0a3..f1ac6fd17 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -21,9 +21,9 @@ ENTRY(empty_zero_page)
.long 0x00360000 /* INITRD_START */
.long 0x000a0000 /* INITRD_SIZE */
.long 0
+ .balign 4096,0,4096
.text
- .balign 4096,0,4096
/*
* Condition at the entry of _stext:
*
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index fca718ece..80f50d9ad 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/sh/kernel/io_generic.c
+ * linux/arch/sh/kernel/io.c
*
* Copyright (C) 2000 Stuart Menefy
*
diff --git a/arch/sh/kernel/irq_imask.c b/arch/sh/kernel/irq_imask.c
index 8ac95823b..27d91b372 100644
--- a/arch/sh/kernel/irq_imask.c
+++ b/arch/sh/kernel/irq_imask.c
@@ -41,7 +41,7 @@ static void end_imask_irq(unsigned int irq);
static unsigned int startup_imask_irq(unsigned int irq)
{
- enable_imask_irq(irq);
+ /* Nothing to do */
return 0; /* never anything pending */
}
@@ -71,7 +71,8 @@ void static inline set_interrupt_registers(int ip)
"ldc %0, $sr\n"
"1:"
: "=&z" (__dummy)
- : "r" (~0xf0), "r" (ip << 4));
+ : "r" (~0xf0), "r" (ip << 4)
+ : "t");
}
static void disable_imask_irq(unsigned int irq)
@@ -103,7 +104,7 @@ static void end_imask_irq(unsigned int irq)
static void shutdown_imask_irq(unsigned int irq)
{
- disable_imask_irq(irq);
+ /* Nothing to do */
}
void make_imask_irq(unsigned int irq)
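
The "t" entry added to the clobber list above matters because cmp and tst instructions overwrite the SH T (condition) bit, so the compiler must not assume a comparison result survives across the asm. An illustrative fragment showing the same pattern (not code from this patch):

static inline int low_byte_is_zero(unsigned long v)
{
        int r;

        __asm__("tst    #0xff, %1\n\t"  /* T = ((v & 0xff) == 0) */
                "movt   %0"             /* copy T into the result */
                : "=r" (r)
                : "z" (v)
                : "t");                 /* tell the compiler T was trashed */
        return r;
}
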
diff --git a/arch/sh/kernel/irq_ipr.c b/arch/sh/kernel/irq_ipr.c
index f229e8a12..918e010e9 100644
--- a/arch/sh/kernel/irq_ipr.c
+++ b/arch/sh/kernel/irq_ipr.c
@@ -128,12 +128,14 @@ void __init init_IRQ(void)
#ifdef SCIF_ERI_IRQ
make_ipr_irq(SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
make_ipr_irq(SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
+ make_ipr_irq(SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
make_ipr_irq(SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
#endif
#ifdef IRDA_ERI_IRQ
make_ipr_irq(IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
make_ipr_irq(IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
+ make_ipr_irq(IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
make_ipr_irq(IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
#endif
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 60205379c..c7511093b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -136,11 +136,12 @@ void free_task_struct(struct task_struct *p)
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{ /* Don't use this in BL=1(cli). Or else, CPU resets! */
- register unsigned long __sc0 __asm__ ("$r3") = __NR_clone;
- register unsigned long __sc4 __asm__ ("$r4") = (long) flags | CLONE_VM;
- register unsigned long __sc5 __asm__ ("$r5") = 0;
- register unsigned long __sc8 __asm__ ("$r8") = (long) arg;
- register unsigned long __sc9 __asm__ ("$r9") = (long) fn;
+ register unsigned long __sc0 __asm__ ("r0");
+ register unsigned long __sc3 __asm__ ("r3") = __NR_clone;
+ register unsigned long __sc4 __asm__ ("r4") = (long) flags | CLONE_VM;
+ register unsigned long __sc5 __asm__ ("r5") = 0;
+ register unsigned long __sc8 __asm__ ("r8") = (long) arg;
+ register unsigned long __sc9 __asm__ ("r9") = (long) fn;
__asm__("trapa #0x12\n\t" /* Linux/SH system call */
"tst #0xff, $r0\n\t" /* child or parent? */
@@ -148,13 +149,13 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
"jsr @$r9\n\t" /* call fn */
" mov $r8, $r4\n\t" /* push argument */
"mov $r0, $r4\n\t" /* return value to arg of exit */
- "mov %2, $r3\n\t" /* exit */
+ "mov %1, $r3\n\t" /* exit */
"trapa #0x11\n"
"1:"
: "=z" (__sc0)
- : "0" (__sc0), "i" (__NR_exit),
- "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
- : "memory");
+ : "i" (__NR_exit), "r" (__sc3), "r" (__sc4), "r" (__sc5),
+ "r" (__sc8), "r" (__sc9)
+ : "memory", "t");
return __sc0;
}
@@ -211,6 +212,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
asmlinkage void ret_from_fork(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
@@ -292,7 +294,7 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.regs[15], &regs);
+ return do_fork(SIGCHLD, regs.regs[15], &regs, 0);
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
@@ -301,7 +303,7 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
{
if (!newsp)
newsp = regs.regs[15];
- return do_fork(clone_flags, newsp, &regs);
+ return do_fork(clone_flags, newsp, &regs, 0);
}
/*
@@ -318,7 +320,7 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs, 0);
}
/*
diff --git a/arch/sh/kernel/setup_cqreek.c b/arch/sh/kernel/setup_cqreek.c
index ea5dd7ece..af7dfe568 100644
--- a/arch/sh/kernel/setup_cqreek.c
+++ b/arch/sh/kernel/setup_cqreek.c
@@ -1,4 +1,4 @@
-/* $Id: setup_cqreek.c,v 1.1 2000/08/05 06:25:23 gniibe Exp $
+/* $Id: setup_cqreek.c,v 1.5 2000/09/18 05:51:24 gniibe Exp $
*
* arch/sh/kernel/setup_cqreek.c
*
@@ -44,15 +44,24 @@ static unsigned long cqreek_port2addr(unsigned long port)
return ISA_OFFSET + port;
}
+struct cqreek_irq_data {
+ unsigned short mask_port; /* Port of Interrupt Mask Register */
+ unsigned short stat_port; /* Port of Interrupt Status Register */
+ unsigned short bit; /* Value of the bit */
+};
+static struct cqreek_irq_data cqreek_irq_data[NR_IRQS];
+
static void disable_cqreek_irq(unsigned int irq)
{
unsigned long flags;
unsigned short mask;
+ unsigned short mask_port = cqreek_irq_data[irq].mask_port;
+ unsigned short bit = cqreek_irq_data[irq].bit;
save_and_cli(flags);
/* Disable IRQ */
- mask = inw(BRIDGE_ISA_INTR_MASK) & ~(1 << irq);
- outw_p(mask, BRIDGE_ISA_INTR_MASK);
+ mask = inw(mask_port) & ~bit;
+ outw_p(mask, mask_port);
restore_flags(flags);
}
@@ -60,32 +69,29 @@ static void enable_cqreek_irq(unsigned int irq)
{
unsigned long flags;
unsigned short mask;
+ unsigned short mask_port = cqreek_irq_data[irq].mask_port;
+ unsigned short bit = cqreek_irq_data[irq].bit;
save_and_cli(flags);
/* Enable IRQ */
- mask = inw(BRIDGE_ISA_INTR_MASK) | (1 << irq);
- outw_p(mask, BRIDGE_ISA_INTR_MASK);
+ mask = inw(mask_port) | bit;
+ outw_p(mask, mask_port);
restore_flags(flags);
}
-#define CLEAR_AT_ACCEPT
-
static void mask_and_ack_cqreek(unsigned int irq)
{
- inw(BRIDGE_ISA_INTR_STAT);
+ unsigned short stat_port = cqreek_irq_data[irq].stat_port;
+ unsigned short bit = cqreek_irq_data[irq].bit;
+
+ inw(stat_port);
disable_cqreek_irq(irq);
-#ifdef CLEAR_AT_ACCEPT
/* Clear IRQ (it might be edge IRQ) */
- outw_p((1<<irq), BRIDGE_ISA_INTR_STAT);
-#endif
+ outw_p(bit, stat_port);
}
static void end_cqreek_irq(unsigned int irq)
{
-#ifndef CLEAR_AT_ACCEPT
- /* Clear IRQ (it might be edge IRQ) */
- outw_p((1<<irq), BRIDGE_ISA_INTR_STAT);
-#endif
enable_cqreek_irq(irq);
}
@@ -101,7 +107,7 @@ static void shutdown_cqreek_irq(unsigned int irq)
}
static struct hw_interrupt_type cqreek_irq_type = {
- "CQREEK-IRQ",
+ "CqREEK-IRQ",
startup_cqreek_irq,
shutdown_cqreek_irq,
enable_cqreek_irq,
@@ -116,10 +122,24 @@ static int has_ide, has_isa;
What we really need is virtualized IRQ and demultiplexer like HP600 port */
void __init init_cqreek_IRQ(void)
{
- if (has_ide)
- make_ipr_irq(14, IDE_OFFSET+BRIDGE_IDE_INTR_LVL, 0, 0x0f-14);
+ if (has_ide) {
+ cqreek_irq_data[14].mask_port = BRIDGE_IDE_INTR_MASK;
+ cqreek_irq_data[14].stat_port = BRIDGE_IDE_INTR_STAT;
+ cqreek_irq_data[14].bit = 1;
+
+ irq_desc[14].handler = &cqreek_irq_type;
+ irq_desc[14].status = IRQ_DISABLED;
+ irq_desc[14].action = 0;
+ irq_desc[14].depth = 1;
+
+ disable_cqreek_irq(14);
+ }
if (has_isa) {
+ cqreek_irq_data[10].mask_port = BRIDGE_ISA_INTR_MASK;
+ cqreek_irq_data[10].stat_port = BRIDGE_ISA_INTR_STAT;
+ cqreek_irq_data[10].bit = (1 << 10);
+
/* XXX: Err... we may need demultiplexer for ISA irq... */
irq_desc[10].handler = &cqreek_irq_type;
irq_desc[10].status = IRQ_DISABLED;
@@ -135,10 +155,17 @@ void __init init_cqreek_IRQ(void)
*/
void __init setup_cqreek(void)
{
+ extern void disable_hlt(void);
int i;
/* udelay is not available at setup time yet... */
#define DELAY() do {for (i=0; i<10000; i++) ctrl_inw(0xa0000000);} while(0)
+ /*
+ * XXX: I don't know the reason, but it becomes so fragile with
+ * "sleep", so we need to stop sleeping.
+ */
+ disable_hlt();
+
if ((inw (BRIDGE_FEATURE) & 1)) { /* We have IDE interface */
outw_p(0, BRIDGE_IDE_INTR_LVL);
outw_p(0, BRIDGE_IDE_INTR_MASK);
@@ -219,7 +246,6 @@ struct sh_machine_vector mv_cqreek __initmv = {
mv_init_arch: setup_cqreek,
mv_init_irq: init_cqreek_IRQ,
- mv_port2addr: cqreek_port2addr,
mv_isa_port2addr: cqreek_port2addr,
};
ALIAS_MV(cqreek)
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 12f716249..81a56b960 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,4 +1,4 @@
-/* $Id: sh_bios.c,v 1.2 2000/07/26 04:37:32 gniibe Exp $
+/* $Id: sh_bios.c,v 1.3 2000/09/30 03:43:30 gniibe Exp $
*
* linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
@@ -20,11 +20,11 @@
static __inline__ long sh_bios_call(long func, long arg0, long arg1, long arg2, long arg3)
{
- register long r0 __asm__("$r0") = func;
- register long r4 __asm__("$r4") = arg0;
- register long r5 __asm__("$r5") = arg1;
- register long r6 __asm__("$r6") = arg2;
- register long r7 __asm__("$r7") = arg3;
+ register long r0 __asm__("r0") = func;
+ register long r4 __asm__("r4") = arg0;
+ register long r5 __asm__("r5") = arg1;
+ register long r6 __asm__("r6") = arg2;
+ register long r7 __asm__("r7") = arg3;
__asm__ __volatile__("trapa #0x3f"
: "=z" (r0)
: "0" (r0), "r" (r4), "r" (r5), "r" (r6), "r" (r7)
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 73d971982..de03cfb19 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -17,6 +17,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/irq.h>
+#include <asm/pgtable.h>
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(elf_fpregset_t *);
@@ -35,7 +36,35 @@ EXPORT_SYMBOL(csum_partial_copy);
EXPORT_SYMBOL(strtok);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strlen);
+
+/* mem exports */
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+
+/* this is not provided by arch/sh/lib/*.S but is
+ potentially needed by modules (af_packet.o/unix.o
+ use memcmp, for instance) */
+EXPORT_SYMBOL(memcmp);
#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif
+
+
+#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL_NOVERS(name)
+
+/* These symbols are generated by the compiler itself */
+#ifdef __SH4__
+
+DECLARE_EXPORT(__udivsi3_i4);
+DECLARE_EXPORT(__sdivsi3_i4);
+DECLARE_EXPORT(__movstr_i4_even);
+DECLARE_EXPORT(__movstr_i4_odd);
+DECLARE_EXPORT(__ashrdi3);
+DECLARE_EXPORT(__ashldi3);
+
+/* needed by some modules */
+EXPORT_SYMBOL(flush_dcache_page);
+#endif
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 9bdddc9d9..fe2f1b319 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -672,7 +672,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
/* FALLTHRU */
default:
- sigaddset(&current->signal, signr);
+ sigaddset(&current->pending.signal, signr);
recalc_sigpending(current);
current->flags |= PF_SIGNALED;
do_exit(exit_code);
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index f1e8d28bf..5af1e6de8 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -274,37 +274,6 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_unlock(&xtime_lock);
}
-/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
- * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
- *
- * [For the Julian calendar (which was used in Russia before 1917,
- * Britain & colonies before 1752, anywhere else before 1582,
- * and is still in use by some communities) leave out the
- * -year/100+year/400 terms, and add 10.]
- *
- * This algorithm was first published by Gauss (I think).
- *
- * WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines were long is 32-bit! (However, as time_t is signed, we
- * will already get problems at other places on 2038-01-19 03:14:08)
- */
-static inline unsigned long mktime(unsigned int year, unsigned int mon,
- unsigned int day, unsigned int hour,
- unsigned int min, unsigned int sec)
-{
- if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
- mon += 12; /* Puts Feb last since it has leap day */
- year -= 1;
- }
- return (((
- (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
- year*365 - 719499
- )*24 + hour /* now have hours */
- )*60 + min /* now have minutes */
- )*60 + sec; /* finally seconds */
-}
-
static unsigned long get_rtc_time(void)
{
unsigned int sec, min, hr, wk, day, mon, yr, yr100;
@@ -373,7 +342,8 @@ static __init unsigned int get_cpu_mhz(void)
"bt/s 1b\n\t"
" add #1,%0"
: "=r"(count), "=z" (__dummy)
- : "0" (0), "1" (0));
+ : "0" (0), "1" (0)
+ : "t");
cli();
/*
* SH-3:
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a9775f306..db11c1247 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -131,9 +131,16 @@ void dump_stack(void)
asm("mov $r15, %0" : "=r" (start));
asm("stc $r7_bank, %0" : "=r" (end));
- end += 8192;
+ end += 8192/4;
printk("%08lx:%08lx\n", (unsigned long)start, (unsigned long)end);
- for (p=start; p < end; p++)
- printk("%08lx\n", *p);
+ for (p=start; p < end; p++) {
+ extern long _text, _etext;
+ unsigned long v=*p;
+
+ if ((v >= (unsigned long )&_text)
+ && (v <= (unsigned long )&_etext)) {
+ printk("%08lx\n", v);
+ }
+ }
}
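
The 8192 -> 8192/4 change above is a pointer-arithmetic fix: end is a long *, so adding a count advances by that many longs, not bytes, and on sh (4-byte long) the old code scanned 32KB past the stack base instead of the 8KB thread area. A small stand-alone illustration of the scaling (host-side, not kernel code):

#include <stdio.h>

int main(void)
{
        /* dump_stack()'s `end' is a long *, so "+ n" advances n longs, not n bytes */
        printf("+ 8192   advances %zu bytes\n", 8192 * sizeof(long));
        printf("+ 8192/4 advances %zu bytes\n", (8192 / 4) * sizeof(long));
        return 0;
}
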
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 3317b3ccc..c7e96a394 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -159,14 +159,14 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
* them all but there's no guarantee.
*/
-#define SRC(y...) \
- 9999: y; \
+#define SRC(x,y) \
+ 9999: x,y; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(y...) \
- 9999: y; \
+#define DST(x,y) \
+ 9999: x,y; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
@@ -276,7 +276,7 @@ DST( mov.l r0,@r5 )
DST( mov.l r1,@r5 )
add #4,r5
-SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index af494b9a3..ab63535e4 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -64,9 +64,9 @@ static struct _cache_system_info cache_system_info = {0,};
#define CACHE_IC_WAY_SHIFT 13
#define CACHE_OC_ENTRY_SHIFT 5
#define CACHE_IC_ENTRY_SHIFT 5
-#define CACHE_OC_ENTRY_MASK 0x3fe0
-#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0
-#define CACHE_IC_ENTRY_MASK 0x1fe0
+#define CACHE_OC_ENTRY_MASK 0x3fe0
+#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0
+#define CACHE_IC_ENTRY_MASK 0x1fe0
#define CACHE_IC_NUM_ENTRIES 256
#define CACHE_OC_NUM_ENTRIES 512
#define CACHE_OC_NUM_WAYS 1
@@ -92,7 +92,8 @@ static inline void cache_wback_all(void)
addr = CACHE_OC_ADDRESS_ARRAY|(j<<CACHE_OC_WAY_SHIFT)|
(i<<CACHE_OC_ENTRY_SHIFT);
data = ctrl_inl(addr);
- if (data & CACHE_UPDATED) {
+ if ((data & (CACHE_UPDATED|CACHE_VALID))
+ == (CACHE_UPDATED|CACHE_VALID)) {
data &= ~CACHE_UPDATED;
ctrl_outl(data, addr);
}
@@ -114,17 +115,25 @@ detect_cpu_and_cache_system(void)
*/
addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
+
+ /* First, write back & invalidate */
data0 = ctrl_inl(addr0);
- data0 ^= 0x00000001;
- ctrl_outl(data0,addr0);
+ ctrl_outl(data0&~(CACHE_VALID|CACHE_UPDATED), addr0);
+ data1 = ctrl_inl(addr1);
+ ctrl_outl(data1&~(CACHE_VALID|CACHE_UPDATED), addr1);
+
+ /* Next, check if there's shadow or not */
+ data0 = ctrl_inl(addr0);
+ data0 ^= CACHE_VALID;
+ ctrl_outl(data0, addr0);
data1 = ctrl_inl(addr1);
- data2 = data1 ^ 0x00000001;
- ctrl_outl(data2,addr1);
+ data2 = data1 ^ CACHE_VALID;
+ ctrl_outl(data2, addr1);
data3 = ctrl_inl(addr0);
- /* Invaliate them, in case the cache has been enabled already. */
- ctrl_outl(data0&~0x00000001, addr0);
- ctrl_outl(data2&~0x00000001, addr1);
+ /* Lastly, invalidate them. */
+ ctrl_outl(data0&~CACHE_VALID, addr0);
+ ctrl_outl(data2&~CACHE_VALID, addr1);
back_to_P1();
if (data0 == data1 && data2 == data3) { /* Shadow */
@@ -150,8 +159,6 @@ void __init cache_init(void)
detect_cpu_and_cache_system();
ccr = ctrl_inl(CCR);
- if (ccr == CCR_CACHE_VAL)
- return;
jump_to_P2();
if (ccr & CCR_CACHE_ENABLE)
/*
@@ -380,29 +387,114 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
}
/*
+ * Write-back & invalidate the cache.
+ *
* After accessing the memory from kernel space (P1-area), we need to
- * write back the cache line to maintain DMA coherency.
+ * write back the cache line.
*
* We search the D-cache to see if we have the entries corresponding to
* the page, and if found, write back them.
*/
+void __flush_page_to_ram(void *kaddr)
+{
+ unsigned long phys, addr, data, i;
+
+ /* Physical address of this page */
+ phys = PHYSADDR(kaddr);
+
+ jump_to_P2();
+ /* Loop all the D-cache */
+ for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) {
+ addr = CACHE_OC_ADDRESS_ARRAY| (i<<CACHE_OC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & CACHE_VALID) && (data&PAGE_MASK) == phys) {
+ data &= ~(CACHE_UPDATED|CACHE_VALID);
+ ctrl_outl(data, addr);
+ }
+ }
+ back_to_P1();
+}
+
void flush_page_to_ram(struct page *pg)
{
+ unsigned long phys;
+
+ /* Physical address of this page */
+ phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
+ __flush_page_to_ram(phys_to_virt(phys));
+}
+
+/*
+ * Check entries of the I-cache & D-cache of the page.
+ * (To see "alias" issues)
+ */
+void check_cache_page(struct page *pg)
+{
unsigned long phys, addr, data, i;
+ unsigned long kaddr;
+ unsigned long cache_line_index;
+ int bingo = 0;
/* Physical address of this page */
phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
+ kaddr = phys + PAGE_OFFSET;
+ cache_line_index = (kaddr&CACHE_OC_ENTRY_MASK)>>CACHE_OC_ENTRY_SHIFT;
jump_to_P2();
/* Loop all the D-cache */
for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) {
addr = CACHE_OC_ADDRESS_ARRAY| (i<<CACHE_OC_ENTRY_SHIFT);
data = ctrl_inl(addr);
- if ((data & CACHE_UPDATED) && (data&PAGE_MASK) == phys) {
- data &= ~CACHE_UPDATED;
+ if ((data & (CACHE_UPDATED|CACHE_VALID))
+ == (CACHE_UPDATED|CACHE_VALID)
+ && (data&PAGE_MASK) == phys) {
+ data &= ~(CACHE_VALID|CACHE_UPDATED);
ctrl_outl(data, addr);
+ if ((i^cache_line_index)&0x180)
+ bingo = 1;
+ }
+ }
+
+ cache_line_index &= 0xff;
+ /* Loop all the I-cache */
+ for (i=0; i<CACHE_IC_NUM_ENTRIES; i++) {
+ addr = CACHE_IC_ADDRESS_ARRAY| (i<<CACHE_IC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & CACHE_VALID) && (data&PAGE_MASK) == phys) {
+ data &= ~CACHE_VALID;
+ ctrl_outl(data, addr);
+ if (((i^cache_line_index)&0x80))
+ bingo = 2;
}
}
back_to_P1();
+
+ if (bingo) {
+ extern void dump_stack(void);
+
+ if (bingo ==1)
+ printk("BINGO!\n");
+ else
+ printk("Bingo!\n");
+ dump_stack();
+ printk("--------------------\n");
+ }
+}
+
+/* Page is 4K, OC size is 16K, there are four lines. */
+#define CACHE_ALIAS 0x00003000
+
+void clear_user_page(void *to, unsigned long address)
+{
+ clear_page(to);
+ if (((address ^ (unsigned long)to) & CACHE_ALIAS))
+ __flush_page_to_ram(to);
+}
+
+void copy_user_page(void *to, void *from, unsigned long address)
+{
+ copy_page(to, from);
+ if (((address ^ (unsigned long)to) & CACHE_ALIAS))
+ __flush_page_to_ram(to);
}
#endif
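
The new CACHE_ALIAS constant encodes the aliasing rule used by clear_user_page() and copy_user_page() above: the operand cache is 16KB and virtually indexed while pages are 4KB, so address bits 12-13 select one of four cache "colours", and a user mapping and the kernel mapping of the same physical page only need a flush when those bits differ. A stand-alone sketch of that check, reusing the value from the patch (the sample addresses are made up):

#include <stdio.h>

#define CACHE_ALIAS 0x00003000UL   /* bits 12-13: 16KB cache / 4KB page = 4 colours */

static int needs_flush(unsigned long user_vaddr, unsigned long kernel_vaddr)
{
        return ((user_vaddr ^ kernel_vaddr) & CACHE_ALIAS) != 0;
}

int main(void)
{
        printf("%d\n", needs_flush(0x00401000UL, 0x8c001000UL)); /* same colour  -> 0 */
        printf("%d\n", needs_flush(0x00402000UL, 0x8c001000UL)); /* other colour -> 1 */
        return 0;
}
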
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index f2e342f0b..9c06626da 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -28,9 +28,9 @@
#include <asm/mmu_context.h>
extern void die(const char *,struct pt_regs *,long);
-static void __flush_tlb_page(struct mm_struct *mm, unsigned long page);
+static void __flush_tlb_page(unsigned long asid, unsigned long page);
#if defined(__SH4__)
-static void __flush_tlb_phys(struct mm_struct *mm, unsigned long phys);
+static void __flush_tlb_phys(unsigned long phys);
#endif
/*
@@ -85,42 +85,6 @@ bad_area:
return 0;
}
-static void handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
-{
- pgd_t *dir;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
-
- dir = pgd_offset_k(address);
- pmd = pmd_offset(dir, address);
- if (pmd_none(*pmd)) {
- printk(KERN_ERR "vmalloced area %08lx bad\n", address);
- return;
- }
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- pte = pte_offset(pmd, address);
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry) || !pte_write(entry)) {
- printk(KERN_ERR "vmalloced area %08lx bad\n", address);
- return;
- }
-
-#if defined(__SH4__)
- /*
- * ITLB is not affected by "ldtlb" instruction.
- * So, we need to flush the entry by ourselves.
- */
- if (mm)
- __flush_tlb_page(mm, address&PAGE_MASK);
-#endif
- update_mmu_cache(NULL, address, entry);
-}
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -138,11 +102,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
tsk = current;
mm = tsk->mm;
- if (address >= VMALLOC_START && address < VMALLOC_END) {
- handle_vmalloc_fault(mm, address);
- return;
- }
-
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -272,6 +231,67 @@ do_sigbus:
goto no_context;
}
+static int __do_page_fault1(struct pt_regs *regs, unsigned long writeaccess,
+ unsigned long address)
+{
+ pgd_t *dir;
+ pmd_t *pmd;
+ pte_t *pte;
+ pte_t entry;
+
+ if (address >= VMALLOC_START && address < VMALLOC_END)
+ /* We can change the implementation of P3 area pte entries.
+ set_pgdir and such. */
+ dir = pgd_offset_k(address);
+ else
+ dir = pgd_offset(current->mm, address);
+
+ pmd = pmd_offset(dir, address);
+ if (pmd_none(*pmd))
+ return 1;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return 1;
+ }
+ pte = pte_offset(pmd, address);
+ entry = *pte;
+ if (pte_none(entry) || !pte_present(entry)
+ || (writeaccess && !pte_write(entry)))
+ return 1;
+
+ if (writeaccess)
+ entry = pte_mkdirty(entry);
+ entry = pte_mkyoung(entry);
+#if defined(__SH4__)
+ /*
+ * ITLB is not affected by "ldtlb" instruction.
+ * So, we need to flush the entry by ourselves.
+ */
+ __flush_tlb_page(get_asid(), address&PAGE_MASK);
+#endif
+ set_pte(pte, entry);
+ update_mmu_cache(NULL, address, entry);
+ return 0;
+}
+
+/*
+ * Called with interrupt disabled.
+ */
+asmlinkage void __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
+ unsigned long address)
+{
+ /*
+ * XXX: Could you please implement this (calling __do_page_fault1)
+ * in assembler language in entry.S?
+ */
+ if (__do_page_fault1(regs, writeaccess, address) == 0)
+ /* Done. */
+ return;
+ sti();
+ do_page_fault(regs, writeaccess, address);
+}
+
void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
@@ -282,28 +302,30 @@ void update_mmu_cache(struct vm_area_struct * vma,
save_and_cli(flags);
#if defined(__SH4__)
- if (vma && (vma->vm_flags & VM_SHARED)) {
+ if (pte_shared(pte)) {
struct page *pg;
pteval = pte_val(pte);
pteval &= PAGE_MASK; /* Physicall page address */
- __flush_tlb_phys(vma->vm_mm, pteval);
+ __flush_tlb_phys(pteval);
pg = virt_to_page(__va(pteval));
flush_dcache_page(pg);
}
#endif
- /* Set PTEH register */
- if (vma) {
- pteaddr = (address & MMU_VPN_MASK) |
- (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
- ctrl_outl(pteaddr, MMU_PTEH);
+ /* Ptrace may call this routine. */
+ if (vma && current->active_mm != vma->vm_mm) {
+ restore_flags(flags);
+ return;
}
+ /* Set PTEH register */
+ pteaddr = (address & MMU_VPN_MASK) | get_asid();
+ ctrl_outl(pteaddr, MMU_PTEH);
+
/* Set PTEL register */
pteval = pte_val(pte);
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
- pteval |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */
ctrl_outl(pteval, MMU_PTEL);
/* Load the TLB */
@@ -311,24 +333,16 @@ void update_mmu_cache(struct vm_area_struct * vma,
restore_flags(flags);
}
-static void __flush_tlb_page(struct mm_struct *mm, unsigned long page)
+static void __flush_tlb_page(unsigned long asid, unsigned long page)
{
- unsigned long addr, data, asid;
- unsigned long saved_asid = MMU_NO_ASID;
-
- if (mm->context == NO_CONTEXT)
- return;
-
- asid = mm->context & MMU_CONTEXT_ASID_MASK;
- if (mm != current->mm) {
- saved_asid = get_asid();
- /*
- * We need to set ASID of the target entry to flush,
- * because TLB is indexed by (ASID and PAGE).
- */
- set_asid(asid);
- }
+ unsigned long addr, data;
+ /*
+ * NOTE: PTEH.ASID should be set to this MM
+ * _AND_ we need to write ASID to the array.
+ *
+ * It would be simple if we didn't need to set PTEH.ASID...
+ */
#if defined(__sh3__)
addr = MMU_TLB_ADDRESS_ARRAY |(page & 0x1F000)| MMU_PAGE_ASSOC_BIT;
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
@@ -340,12 +354,10 @@ static void __flush_tlb_page(struct mm_struct *mm, unsigned long page)
ctrl_outl(data, addr);
back_to_P1();
#endif
- if (saved_asid != MMU_NO_ASID)
- set_asid(saved_asid);
}
#if defined(__SH4__)
-static void __flush_tlb_phys(struct mm_struct *mm, unsigned long phys)
+static void __flush_tlb_phys(unsigned long phys)
{
int i;
unsigned long addr, data;
@@ -373,12 +385,22 @@ static void __flush_tlb_phys(struct mm_struct *mm, unsigned long phys)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- unsigned long flags;
+ if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ unsigned long asid;
+ unsigned long saved_asid = MMU_NO_ASID;
- if (vma->vm_mm) {
+ asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
page &= PAGE_MASK;
+
save_and_cli(flags);
- __flush_tlb_page(vma->vm_mm, page);
+ if (vma->vm_mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+ __flush_tlb_page(asid, page);
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
restore_flags(flags);
}
}
@@ -397,13 +419,22 @@ void flush_tlb_range(struct mm_struct *mm, unsigned long start,
if (mm == current->mm)
activate_context(mm);
} else {
+ unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+ unsigned long saved_asid = MMU_NO_ASID;
+
start &= PAGE_MASK;
end += (PAGE_SIZE - 1);
end &= PAGE_MASK;
+ if (mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
while (start < end) {
- __flush_tlb_page(mm, start);
+ __flush_tlb_page(asid, start);
start += PAGE_SIZE;
}
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
}
restore_flags(flags);
}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 5e0632a86..8568afb31 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -227,7 +227,7 @@ void __init paging_init(void)
zones_size[ZONE_DMA] = max_dma - start_pfn;
zones_size[ZONE_NORMAL] = low - max_dma;
}
- free_area_init_node(0, 0, zones_size, __MEMORY_START, 0);
+ free_area_init_node(0, 0, 0, zones_size, __MEMORY_START, 0);
}
}
@@ -241,6 +241,7 @@ void __init mem_init(void)
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
+ flush_page_to_ram(virt_to_page(empty_zero_page));
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
diff --git a/arch/sh/vmlinux.lds.S b/arch/sh/vmlinux.lds.S
index ad1fc1c1b..93ea453cb 100644
--- a/arch/sh/vmlinux.lds.S
+++ b/arch/sh/vmlinux.lds.S
@@ -4,9 +4,9 @@
*/
#include <linux/config.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
-OUTPUT_FORMAT("elf32-shl", "elf32-shl", "elf32-shl")
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
#else
-OUTPUT_FORMAT("elf32-sh", "elf32-sh", "elf32-sh")
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
#endif
OUTPUT_ARCH(sh)
ENTRY(_start)
@@ -89,6 +89,7 @@ SECTIONS
/DISCARD/ : {
*(.text.exit)
*(.data.exit)
+ *(.exitcall.exit)
}
/* Stabs debugging sections. */