author | Ralf Baechle <ralf@linux-mips.org> | 1997-03-25 23:40:36 +0000
---|---|---
committer | <ralf@linux-mips.org> | 1997-03-25 23:40:36 +0000
commit | 7206675c40394c78a90e74812bbdbf8cf3cca1be (patch) |
tree | 251895cf5a0008e2b4ce438cb01ad4d55fb5b97b /arch |
parent | beb116954b9b7f3bb56412b2494b562f02b864b1 (diff) |
Import of Linux/MIPS 2.1.14.2
Diffstat (limited to 'arch')
151 files changed, 15486 insertions, 7204 deletions
diff --git a/arch/mips/Makefile b/arch/mips/Makefile index c7ba5db9b..19a632ebb 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -12,19 +12,17 @@ # # Copyright (C) 1994, 1995, 1996 by Ralf Baechle # DECStation modifications by Paul M. Antoine, 1996 -# ACN MIPS board modifications by Robin Farine (Robin.Farine@info.unine.ch) -# and Didier Frick (dfrick@dial.eunet.ch), copyright (C) 1996 by ACN S.A. # ifdef CONFIG_CPU_LITTLE_ENDIAN -cross-target = mipsel-linux- +CROSS_COMPILE = mipsel- ifdef CONFIG_MIPS_ECOFF oformat = ecoff-littlemips else -oformat = a.out-mips-little-linux +oformat = a.out-mips-big-linux endif else -cross-target = mips-linux- +CROSS_COMPILE = mips- ifdef CONFIG_MIPS_ECOFF oformat = ecoff-bigmips else @@ -32,65 +30,37 @@ oformat = a.out-mips-big-linux endif endif -ifdef CONFIG_CROSS_COMPILE -CROSS_COMPILE := $(cross-target) +ifdef CONFIG_EXTRA_ELF_COMPILER +CROSS_COMPILE := $(CROSS_COMPILE)linuxelf- else -CROSS_COMPILE := +CROSS_COMPILE := $(CROSS_COMPILE)linux- endif -LINKFLAGS = -N +LINKFLAGS = -static -N # -# The GCC uses -G 0 -mabicalls -fpic as default. We don't need PIC +# The new ELF GCC uses -G0 -mabicalls -fpic as default. We don't need PIC # code in the kernel since it only slows down the whole thing. For the # old GCC these options are just the defaults. At some point we might # make use of global pointer optimizations. # -# We also pass -G 0 to the linker to avoid generation of a .scommon section. -# # The DECStation requires an ECOFF kernel for remote booting, other MIPS -# machines may also. We build an ELF kernel and them convert it into an -# ECOFF kernel. +# machines may also. # +ifdef CONFIG_ELF_KERNEL CFLAGS += -G 0 -mno-abicalls -fno-pic LINKFLAGS += -G 0 +endif +ifdef CONFIG_ECOFF_KERNEL +CFLAGS += -G 0 -mno-abicalls -fno-pic +LINKFLAGS += -G 0 -oformat ecoff-littlemips +endif ifdef CONFIG_REMOTE_DEBUG CFLAGS := $(CFLAGS) -g endif -# -# CPU dependand compiler/assembler options for optimization. 
-# -ifdef CONFIG_CPU_R3000 -CFLAGS := $(CFLAGS) -mcpu=r3000 -mips1 -SUBDIRS += arch/mips/mips1 -cpu-core = arch/mips/mips1/mips.o -endif -ifdef CONFIG_CPU_R6000 -CFLAGS := $(CFLAGS) -mcpu=r6000 -mips2 -SUBDIRS += arch/mips/mips2 -cpu-core = arch/mips/mips2/mips.o -endif -ifdef CONFIG_CPU_R4X00 -ifdef CONFIG_OPTIMIZE_R4600 -CFLAGS := $(CFLAGS) -mcpu=r4600 -mips3 -else -CFLAGS := $(CFLAGS) -mcpu=r4400 -mips3 -endif -SUBDIRS += arch/mips/mips3 -cpu-core = arch/mips/mips3/mips.o -endif -ifdef CONFIG_CPU_R8000 -CFLAGS := $(CFLAGS) -mcpu=r8000 -mips4 -SUBDIRS += arch/mips/mips3 -cpu-core = arch/mips/mips3/mips.o -endif -ifdef CONFIG_CPU_R10000 -CFLAGS := $(CFLAGS) -mcpu=r8000 -mips4 -SUBDIRS += arch/mips/mips3 -cpu-core = arch/mips/mips3/mips.o -endif +CFLAGS := $(CFLAGS) -mips2 # # Board dependand options and extra files @@ -98,78 +68,72 @@ endif ifdef CONFIG_ACER_PICA_61 ARCHIVES += arch/mips/jazz/jazz.o SUBDIRS += arch/mips/jazz -LINKSCRIPT += arch/mips/ld.script LOADADDR += 0x80000000 endif -ifdef CONFIG_ACN_MIPS_BOARD -ARCHIVES += arch/mips/acn/acn.o -SUBDIRS += arch/mips/acn -LINKSCRIPT += arch/mips/ld.script -LOADADDR += 0x80000000 -endif -ifdef CONFIG_MIPS_DECSTATION +ifdef CONFIG_DECSTATION ARCHIVES += arch/mips/dec/dec.o SUBDIRS += arch/mips/dec -LINKSCRIPT += arch/mips/ld.script +LINKSCRIPT += arch/mips/dec/ld.script LOADADDR += 0x80000000 endif ifdef CONFIG_DESKSTATION_RPC44 ARCHIVES += arch/mips/deskstation/deskstation.o SUBDIRS += arch/mips/deskstation -LINKSCRIPT += arch/mips/ld.script -LOADADDR += 0x80000000 +LOADADDR += 0x80100000 endif ifdef CONFIG_DESKSTATION_TYNE ARCHIVES += arch/mips/deskstation/deskstation.o SUBDIRS += arch/mips/deskstation -LINKSCRIPT += arch/mips/ld.script LOADADDR += 0x80000000 endif ifdef CONFIG_MIPS_MAGNUM_3000 -LINKFLAGS += -Ttext 0x80100000 -endif -ifdef CONFIG_MIPS_MAGNUM_4000 -ARCHIVES += arch/mips/jazz/jazz.o -SUBDIRS += arch/mips/jazz -LINKSCRIPT += arch/mips/ld.script LOADADDR += 0x80000000 endif -ifdef CONFIG_OLIVETTI_M700 +ifdef CONFIG_MIPS_MAGNUM_4000 ARCHIVES += arch/mips/jazz/jazz.o SUBDIRS += arch/mips/jazz -LINKSCRIPT += arch/mips/ld.script LOADADDR += 0x80000000 endif ifdef CONFIG_SNI_RM200_PCI ARCHIVES += arch/mips/sni/sni.o SUBDIRS += arch/mips/sni -LINKSCRIPT += arch/mips/ld.script LOADADDR += 0x80000000 endif +ifdef CONFIG_SGI +ARCHIVES += arch/mips/sgi/kernel/sgikern.a arch/mips/sgi/prom/promlib.a +SUBDIRS += arch/mips/sgi/kernel arch/mips/sgi/prom +LOADADDR += 0x88069000 +HOSTCC = cc +endif # # Choosing incompatible machines durings configuration will result in -# error messages during linking +# error messages during linking. Select a default linkscript if +# none has been choosen above. # -ifdef LINKSCRIPT -LINKFLAGS += -T $(word 1,$(LINKSCRIPT)) +ifndef LINKSCRIPT +ifndef CONFIG_CPU_LITTLE_ENDIAN +LINKSCRIPT = arch/mips/ld.script.big +else +LINKSCRIPT = arch/mips/ld.script.little +endif endif +LINKFLAGS += -T $(word 1,$(LINKSCRIPT)) ifdef LOADADDR LINKFLAGS += -Ttext $(word 1,$(LOADADDR)) endif # -# The pipe options is bad for low-mem machines -# Uncomment this if you want this. Helps most on diskless -# Linux machines. +# The pipe options is bad for my low-mem machine +# Uncomment this if you want this. 
# -CFLAGS += #-pipe +#CFLAGS += -pipe HEAD := arch/mips/kernel/head.o -SUBDIRS := $(SUBDIRS) arch/mips/kernel arch/mips/mm arch/mips/lib -ARCHIVES := $(cpu-core) arch/mips/kernel/kernel.o arch/mips/mm/mm.o $(ARCHIVES) +SUBDIRS := $(SUBDIRS) $(addprefix arch/mips/, kernel mm lib tools) +ARCHIVES := arch/mips/kernel/kernel.o arch/mips/mm/mm.o $(ARCHIVES) LIBS := arch/mips/lib/lib.a $(LIBS) arch/mips/lib/lib.a MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot @@ -185,6 +149,7 @@ zdisk: vmlinux archclean: @$(MAKEBOOT) clean $(MAKE) -C arch/$(ARCH)/kernel clean + $(MAKE) -C arch/$(ARCH)/tools clean archdep: @$(MAKEBOOT) dep diff --git a/arch/mips/TODO b/arch/mips/TODO deleted file mode 100644 index c5df56d0c..000000000 --- a/arch/mips/TODO +++ /dev/null @@ -1,166 +0,0 @@ - - Kernel: - - fd_set_dma_addr() Virtuelle Adressen -> Bus adressen - - __pa() und __va() Makro Benutzer in include/asm-mips/ aendern - - The PICs in some RISC PC class machines are cascaded, in others not. - Yuck. If they're using PC crap part they should do it right ... - - Print a warning for R4600 V1.0 users; there is a problem with the - caches that isn't handled yet in the kernel. - - IP checksums overhaul. (Dave said he did for the SGI port) - - The networking related constants still aren't indentical with IRIX/ABI. - - Check the definitions for F_EXLCK and F_SHLCK in include/asm-mips/fcntl.h - for correctness and compatibility with MIPS ABI. - - Check the definitions for O_NDELAY in include/asm-mips/fcntl.h for - correctness and compatibility with MIPS ABI. - - What are the fields l_sysid and pad in struct flock supposed to contain? - Do we need to handle them in the kernel? - - Check the resource limits defines in asm-mips/resource.h - - Recheck struct stat in asm-mips/stat.h - - Use timestruc_t in struct stat in asm/stat.h instead of time_t - - asm-mips/termios.h is pretty hacked; it should be verified for ABI - conformance by someone who has the documentation. - - Cleanup include files. To many files include too many others. - - Fix asm-mips/elf.h. This is just good enough to make the thing compile. - - asm-mips/socket.h needs further fixes for ABI conformance. - - linux/types.h / asm-mips/types.h: FD_SETSIZE should be 1024, not 256. - 1024 is required by the ABI. - - asm-mips/socket.h: check the FIO* and SIO* defines. - - linux/sockios.h: The sockios aren't ABI compatible. - - For performance reasons the missing inline functions in asm-mips/string.h - should be implemented. - - For performance reasons the tlbflush code should be rewritten. - - Module support for the Sonic driver. - - From 1.3.17 on the kernel does no more compile with network support but - without procfs support included. - - /proc/cpuinfo should contain more information that just CPU model, - computer model and damn BogoMIPS. - - The sysmips(2) syscall needs to be tested. Some of the subcommands - are still missing. - - The whole kernel is junked with #if 0 ... #endif debug code. Cleanup. - - Use set_pte hook in page.h. It's the more effective and elegant way - than my solution which was just optimized just to better survives other - peoples kernel patches without rejects ... - - Build the machine vector in arch/mips/kernel/setup.c table using - constructors, not static initialization. - - Can the virtual address spaces of the kernel virtual memory and page - tables colide with each other? (Yes, when the kernel uses excessive - amounts of kernel virtual memory.) - - Test the memory protection with nuclear warheads model W-crashme ... 
- - die_if_kernel() should try to handle branch delay slots. - - Try to reduce dependencies between header files. - - Unroll some of the memset type assembler loops. - - Replace memcpy and friends by a highly optimized routine. Will probably - speed up scrolling by another 100 %. Certainly not required by the Acer, - but Magnum and Olivetti ... - - Hmm... Think that the i386 version of the kernel has a bug. It doesn't - set the carry flag for syscalls > NR_syscalls. Really unimportant. - - The size_t definitions in <linux/stddef.h> and <asm/types.h> are - incompatible. - - Cleanup the parameter passing to new execve-ed ELF/a.out executables. - - Cleanup drivers/char/keyboard.c and send it to Linus. - - No SMP support yet. - - Caches: - - sys_cacheflush should check for illegal flags - - The way cache flushing is being done in floppy.c is dangerous because - the flushing is actually a writeback invalidate. Could corrupt data. - - The kernel is very conservative about flushing the TLB/caches. Try - to eleminate flushing as far as possible. - - page colouring NEEDS to be implemented for R4000+ to circument the - page aliasing problem due to the stupid R4000 cache. There are - alternatives but they'd be dog slow. - - Allocate the swapper pgd dynamically. - - it is impossible to send signals >= 32 (send_sig() in kernel/exit.c) - - Which is the unit of RLIMIT_CPU? - - Eleminate invalid_pte_table. It contains only zeros just like - empty_zero_page. - - Writing to floppies doesn't work in 1.3.62. - - Cleanup the arch/mips/ directory structure. Split up kernel/ and mm/ - in cpu dependand and machine dependand directories. Do this also for - the configfile. - - FIXME: resume() assumes current == prev. - - The timer interrupt runs with interrupts disabled. This means that - sometimes interrupts are off for a long time. Use some other strategy - for the jiffies stuff. - - Scrollback in the VGA console is broken. - - Modify pte_alloc() the same way as for Intel. - - Binutils 2.7: - - 2.6 introduces a new machine instruction waiti for the 4010. Is this - the same machine instruction as wait for R4200/R4300i/R4600? At least - the opcodes are the sames. - - strip --remove-section= with no other options also removes the symbols. - - Shouldn't the binutils support an ulwu macro? - - uld, ulw generate bad code for the special case when both registers are - the same. - - MIPS support in gprof is missing. - - When explicitly giving an nonexistant entry point as -e <entry> point - during loading a shared lib, ld should complain. - - The assembler dies when the argument to .gpword is a extern symbol. - - This source breaks GAS in binutils 2.7: - .macro ins - .if (3 | 16) - .endif - .endm - - ins - - GNU libc 951215: - - There are lots of collisions between structures in the kernel header - files and the includes generated by the kernel. - - setjmp & friends fail for the 32 double register model of > MIPS3. - - R4400: - - Per once told me he found something he believes is a CPU bug. Ask him if - it's really a bug. - - R8000 in arch/mips/config.in: - - Can the R8000 do a TLB shutdown like the R4000 or is it like the R10000? - - Manpages: - - The sysmips(2) documentation needs to be completed. - - The cacheflush(2) and cachectl(2) documentation needs to be proof read - by a native speaker. - - Translate the manpages into as many foreign languages as possible. - - Send the corrected/translated manpages to the LDP/Andries Brouwer. - - GCC 2.7.2: - - The info files lack descriptions of some MIPS options like -mips4. 
- - The multilib support has no way to specify incompatible options - like -mabicalls and -fno-PIC. For such option pairs library compilation - is nonsense and breaks anyway. - - The following macros work but generate good code only for certain - data types. We only use get_unaligned() for accessing unaligned ints - and then GCC makes full score anyway ... - -#define get_unaligned(ptr) \ - ({ \ - struct __unal { \ - __typeof__(*(ptr)) __x __attribute__((packed)); \ - }; \ - \ - ((struct __unal *)(ptr))->__x; \ - }) - -#define put_unaligned(ptr,val) \ - ({ \ - struct __unal { \ - __typeof__(*(ptr)) __x __attribute__((packed)); \ - }; \ - \ - ((struct __unal *)(ptr))->__x = (val); \ - }) - -int -foo(int *p) -{ - return get_unaligned(p); -} - -int -bar(int *p, int var) -{ - put_unaligned(p, var); -} - -struct blah { - char x1 __attribute__((packed)); - int x2 __attribute__((packed)); -}; - -fasel(struct blah *p, int var) -{ - p->x2 = var; -} diff --git a/arch/mips/acn/Makefile b/arch/mips/acn/Makefile deleted file mode 100644 index 3821d7b62..000000000 --- a/arch/mips/acn/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -# -# Makefile for the SNI specific part of the kernel -# -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. DON'T put your own dependencies here -# unless it's something special (ie not a .c file). -# - -.S.s: - $(CPP) $(CFLAGS) $< -o $*.s -.S.o: - $(CC) $(CFLAGS) -c $< -o $*.o - -all: sni.o -O_TARGET := sni.o -O_OBJS := hw-access.o int-handler.o reset.o setup.o - -int-handler.o: int-handler.S - -clean: - -include $(TOPDIR)/Rules.make diff --git a/arch/mips/acn/reset.c b/arch/mips/acn/reset.c deleted file mode 100644 index 149fedd9d..000000000 --- a/arch/mips/acn/reset.c +++ /dev/null @@ -1,12 +0,0 @@ -/* - * linux/arch/mips/sni/process.c - * - * Reset the ACN board. - */ -#include <asm/io.h> -#include <asm/system.h> - -void -acn_hard_reset_now(void) -{ -} diff --git a/arch/mips/acn/setup.c b/arch/mips/acn/setup.c deleted file mode 100644 index 5fe8731e5..000000000 --- a/arch/mips/acn/setup.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Setup pointers to hardware dependand routines. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1996 by Ralf Baechle - */ -#include <asm/ptrace.h> -#include <linux/ioport.h> -#include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/timex.h> -#include <asm/bootinfo.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/vector.h> - -extern void acn_hard_reset_now(void); - -void -sni_rm200_pci_setup(void) -{ - hard_reset_now = acn_hard_reset_now; -} diff --git a/arch/mips/boot/.cvsignore b/arch/mips/boot/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/boot/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 3a8698d34..fd445c865 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -34,7 +34,7 @@ mkboot: mkboot.c $(HOSTCC) -o $@ $^ zdisk: zImage - mcopy -n zImage a:vmlinux + mcopy -o zImage a:vmlinux dep: $(CPP) -M *.[cS] > .depend diff --git a/arch/mips/boot/compressed/cache.S b/arch/mips/boot/compressed/cache.S index 3e6a3d57d..7cc97b53e 100644 --- a/arch/mips/boot/compressed/cache.S +++ b/arch/mips/boot/compressed/cache.S @@ -18,10 +18,7 @@ #include <asm/asm.h> #include <asm/cachectl.h> #include <asm/mipsregs.h> -#include <asm/uaccess.h> - -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) +#include <asm/segment.h> /* * Some bits in the config register @@ -29,8 +26,6 @@ #define CONFIG_IB (1<<5) #define CONFIG_DB (1<<4) -#define CACHEBLOCKS 32 - /* * Flush instruction/data caches * @@ -45,14 +40,16 @@ .set noreorder LEAF(cacheflush) andi t1,a2,DCACHE + beqz t1,do_icache - li t0,KSEG0 # delay slot + li t0,KSEG0 /* * Writeback data cache, even lines */ - li t1,CACHEBLOCKS-1 -1: cache Index_Writeback_Inv_D,0(t0) + li t1,CACHELINES-1 +1: + cache Index_Writeback_Inv_D,0(t0) cache Index_Writeback_Inv_D,32(t0) cache Index_Writeback_Inv_D,64(t0) cache Index_Writeback_Inv_D,96(t0) @@ -69,8 +66,9 @@ cache Index_Writeback_Inv_D,448(t0) cache Index_Writeback_Inv_D,480(t0) addiu t0,512 + bnez t1,1b - subu t1,1 + subu t1,1 /* * Writeback data cache, odd lines @@ -79,8 +77,9 @@ mfc0 t1,CP0_CONFIG andi t1,CONFIG_IB bnez t1,do_icache - li t1,CACHEBLOCKS-1 -1: cache Index_Writeback_Inv_D,16(t0) + li t1,CACHELINES-1 +1: + cache Index_Writeback_Inv_D,16(t0) cache Index_Writeback_Inv_D,48(t0) cache Index_Writeback_Inv_D,80(t0) cache Index_Writeback_Inv_D,112(t0) @@ -97,18 +96,21 @@ cache Index_Writeback_Inv_D,464(t0) cache Index_Writeback_Inv_D,496(t0) addiu t0,512 + bnez t1,1b - subu t1,1 + subu t1,1 -do_icache: andi t1,a2,ICACHE +do_icache: + andi t1,a2,ICACHE beqz t1,done + lui t0,0x8000 /* * Flush instruction cache, even lines */ - lui t0,0x8000 - li t1,CACHEBLOCKS-1 -1: cache Index_Invalidate_I,0(t0) + li t1,CACHELINES-1 +1: + cache Index_Invalidate_I,0(t0) cache Index_Invalidate_I,32(t0) cache Index_Invalidate_I,64(t0) cache Index_Invalidate_I,96(t0) @@ -125,8 +127,9 @@ do_icache: andi t1,a2,ICACHE cache Index_Invalidate_I,448(t0) cache Index_Invalidate_I,480(t0) addiu t0,512 + bnez t1,1b - subu t1,1 + subu t1,1 /* * Flush instruction cache, even lines @@ -135,8 +138,10 @@ do_icache: andi t1,a2,ICACHE mfc0 t1,CP0_CONFIG andi t1,CONFIG_DB bnez t1,done - li t1,CACHEBLOCKS-1 -1: cache Index_Invalidate_I,16(t0) + li t1,CACHELINES-1 + +1: + cache Index_Invalidate_I,16(t0) cache Index_Invalidate_I,48(t0) cache Index_Invalidate_I,80(t0) cache Index_Invalidate_I,112(t0) @@ -153,13 +158,11 @@ do_icache: andi t1,a2,ICACHE cache Index_Invalidate_I,464(t0) cache Index_Invalidate_I,496(t0) addiu t0,512 + bnez 
t1,1b - subu t1,1 + subu t1,1 -done: j ra - nop +done: + j ra + nop END(sys_cacheflush) - -#else -#error "No R3000 cacheflushing implemented yet!" -#endif diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S index 0ca599563..dad90855e 100644 --- a/arch/mips/boot/compressed/head.S +++ b/arch/mips/boot/compressed/head.S @@ -28,16 +28,18 @@ la t0,_edata la t1,_end sw zero,(t0) -1: addiu t0,4 +1: + addiu t0,4 + bnel t0,t1,1b - sw zero,(t0) + sw zero,(t0) END(kernel_entry) /* * Do the decompression, and jump to the new kernel.. */ jal C_LABEL(decompress_kernel) - nop + nop /* * Flush caches @@ -49,4 +51,4 @@ */ la t0,KSEG0 jr t0 - nop + nop diff --git a/arch/mips/boot/compressed/misc.c b/arch/mips/boot/compressed/misc.c index 625a75cd0..1e3bb5f82 100644 --- a/arch/mips/boot/compressed/misc.c +++ b/arch/mips/boot/compressed/misc.c @@ -12,7 +12,7 @@ #include "gzip.h" #include "lzw.h" -#include <asm/uaccess.h> +#include <asm/segment.h> /* * These are set up by the setup-routine at boot-time: diff --git a/arch/mips/boot/mkboot.c b/arch/mips/boot/mkboot.c index c34332f99..48f27113e 100644 --- a/arch/mips/boot/mkboot.c +++ b/arch/mips/boot/mkboot.c @@ -381,7 +381,7 @@ main(argc, argv) fprintf(stderr, "Fix mkboot: sizeof(Elf32_Addr) != 4\n"); exit(1); } - + if (argc != 3) usage(argv[0]); @@ -597,7 +597,6 @@ main(argc, argv) put_word(ahdr + 8, 0); /* data size */ put_word(ahdr + 12, bss); /* bss size */ put_word(ahdr + 16, 2 * 12); /* size of symbol table */ -// put_word(ahdr + 16, 0); /* size of symbol table */ put_word(ahdr + 20, entry); /* base address */ put_word(ahdr + 24, 0); /* size of text relocations */ put_word(ahdr + 28, 0); /* size of data relocations */ diff --git a/arch/mips/config.in b/arch/mips/config.in index 3dd4c00c0..e2065779e 100644 --- a/arch/mips/config.in +++ b/arch/mips/config.in @@ -11,24 +11,17 @@ endmenu mainmenu_option next_comment comment 'Machine selection' - bool 'Support for Acer PICA 1 chipset' CONFIG_ACER_PICA_61 if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - bool 'Support for ACN MIPS Board' CONFIG_ACN_MIPS_BOARD -fi -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - bool 'Support for DECstation' CONFIG_MIPS_DECSTATION -fi -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + bool 'Support for DECstation' CONFIG_DECSTATION bool 'Support for Deskstation RPC44' CONFIG_DESKSTATION_RPC44 bool 'Support for Deskstation Tyne' CONFIG_DESKSTATION_TYNE bool 'Support for Mips Magnum 3000' CONFIG_MIPS_MAGNUM_3000 fi bool 'Support for Mips Magnum 4000' CONFIG_MIPS_MAGNUM_4000 bool 'Support for Olivetti M700-10' CONFIG_OLIVETTI_M700 -if [ "$CONFIG_MIPS_MAGNUM_4000" = "y" -o \ - "$CONFIG_OLIVETTI_M700" = "y" ]; then - define_bool CONFIG_VIDEO_G364 y +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + bool 'Support for SGI workstations' CONFIG_SGI fi bool 'Support for SNI RM200 PCI' CONFIG_SNI_RM200_PCI if [ "$CONFIG_DESKSTATION_RPC44" = "y" -o \ @@ -40,57 +33,23 @@ if [ "$CONFIG_ACER_PICA_61" = "y" -o \ "$CONFIG_MIPS_MAGNUM_4000" = "y" ]; then define_bool CONFIG_MIPS_JAZZ y fi -if [ "$CONFIG_ACN_MIPS_BOARD" = "y" ]; then - define_bool CONFIG_SERIAL_ONLY_CONSOLE y - define_bool CONFIG_NO_SWAPPER y - define_bool CONFIG_CUSTOM_UART y - define_bool CONFIG_CUSTOM_TIMER y -fi -endmenu - -mainmenu_option next_comment -comment 'CPU selection' - -choice 'CPU type' \ - "R3000 CONFIG_CPU_R3000 \ - R6000 CONFIG_CPU_R6000 \ - R4x00 CONFIG_CPU_R4X00 \ - R8000 CONFIG_CPU_R8000 \ - R10000 CONFIG_CPU_R10000" R4x00 -if [ "$CONFIG_CPU_R3000" = "y" -o \ - "$CONFIG_CPU_R6000" = "y" -o \ - 
"$CONFIG_CPU_R4X00" = "y" -o \ - "$CONFIG_CPU_R8000" = "y" ]; then - define_bool CONFIG_TLB_SHUTDOWN y -fi -if [ "$CONFIG_CPU_R4X00" = "y" ]; then - bool "Optimize for R4600 only" CONFIG_OPTIMIZE_R4600 -fi endmenu mainmenu_option next_comment comment 'General setup' -define_bool CONFIG_BINFMT_ELF y -define_bool CONFIG_BINFMT_AOUT n -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA -fi -if [ "$CONFIG_MIPS_DECSTATION" = "y" ]; then +if [ "$CONFIG_DECSTATION" = "y" ]; then bool 'Compile the kernel into the ECOFF object format' CONFIG_ECOFF_KERNEL - comment 'Assuming little endian code required.' define_bool CONFIG_CPU_LITTLE_ENDIAN y else define_bool CONFIG_ELF_KERNEL y bool 'Generate little endian code' CONFIG_CPU_LITTLE_ENDIAN fi +if [ "$CONFIG_CPU_LITTLE_ENDIAN" = "n" ]; then + define_bool CONFIG_BINFMT_IRIX y +fi +define_bool CONFIG_BINFMT_ELF y +define_bool CONFIG_BINFMT_AOUT n bool 'Networking support' CONFIG_NET -bool 'Limit memory to low 16MB' CONFIG_MAX_16M -bool 'PCI bios support' CONFIG_PCI -#if [ "$CONFIG_PCI" = "y" ]; then -# if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then -# bool ' PCI bridge optimization (experimental)' CONFIG_PCI_OPTIMIZE -# fi -#fi bool 'System V IPC' CONFIG_SYSVIPC endmenu @@ -103,7 +62,11 @@ if [ "$CONFIG_MODULES" = "y" ]; then fi endmenu -source drivers/block/Config.in +# All SGI block devices are SCSI based AFAIK. -davem +# +if [ "$CONFIG_SGI" != "y" ]; then + source drivers/block/Config.in +fi if [ "$CONFIG_NET" = "y" ]; then source net/Config.in @@ -115,7 +78,27 @@ comment 'SCSI support' tristate 'SCSI support' CONFIG_SCSI if [ "$CONFIG_SCSI" != "n" ]; then - source drivers/scsi/Config.in + if [ "$CONFIG_SGI" = "n" ]; then + source drivers/scsi/Config.in + else + comment 'SCSI support type (disk, tape, CDrom)' + + dep_tristate 'SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI + dep_tristate 'SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI + dep_tristate 'SCSI CDROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI + dep_tristate 'SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI + + comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs' + + bool 'Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN + + bool 'Verbose SCSI error reporting' CONFIG_SCSI_CONSTANTS + + mainmenu_option next_comment + comment 'SCSI low-level drivers' + + dep_tristate 'SGI wd93 Scsi Driver' CONFIG_SCSI_SGIWD93 $CONFIG_SCSI + fi fi endmenu @@ -125,50 +108,74 @@ if [ "$CONFIG_NET" = "y" ]; then bool 'Network device support' CONFIG_NETDEVICES if [ "$CONFIG_NETDEVICES" = "y" ]; then - source drivers/net/Config.in + if [ "$CONFIG_SGI" != "y" ]; then + source drivers/net/Config.in + else + tristate 'Dummy net driver support' CONFIG_DUMMY + tristate 'SLIP (serial line) support' CONFIG_SLIP + if [ "$CONFIG_SLIP" != "n" ]; then + bool ' CSLIP compressed headers' CONFIG_SLIP_COMPRESSED + bool ' Keepalive and linefill' CONFIG_SLIP_SMART + fi + tristate 'PPP (point-to-point) support' CONFIG_PPP + if [ ! "$CONFIG_PPP" = "n" ]; then + comment 'CCP compressors for PPP are only built as modules.' 
+ fi + bool 'SGI Seeq ethernet controller support' CONFIG_SGISEEQ + fi fi endmenu fi -mainmenu_option next_comment -comment 'ISDN subsystem' +if [ "$CONFIG_SGI" = "n" ]; then + mainmenu_option next_comment + comment 'ISDN subsystem' -tristate 'ISDN support' CONFIG_ISDN -if [ "$CONFIG_ISDN" != "n" ]; then - source drivers/isdn/Config.in -fi -endmenu + tristate 'ISDN support' CONFIG_ISDN + if [ "$CONFIG_ISDN" != "n" ]; then + source drivers/isdn/Config.in + fi + endmenu -mainmenu_option next_comment -comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)' + mainmenu_option next_comment + comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)' -bool 'Support non-SCSI/IDE/ATAPI drives' CONFIG_CD_NO_IDESCSI -if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then - source drivers/cdrom/Config.in + bool 'Support non-SCSI/IDE/ATAPI drives' CONFIG_CD_NO_IDESCSI + if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then + source drivers/cdrom/Config.in + fi + endmenu fi -endmenu source fs/Config.in -source drivers/char/Config.in +if [ "$CONFIG_SGI" = "n" ]; then + source drivers/char/Config.in -mainmenu_option next_comment -comment 'Sound' + mainmenu_option next_comment + comment 'Sound' -tristate 'Sound card support' CONFIG_SOUND -if [ "$CONFIG_SOUND" != "n" ]; then - source drivers/sound/Config.in + tristate 'Sound card support' CONFIG_SOUND + if [ "$CONFIG_SOUND" != "n" ]; then + source drivers/sound/Config.in + fi + endmenu +else + comment 'SGI Character Devices' + tristate 'PS/2 mouse (aka "auxiliary device") support' CONFIG_PSMOUSE + bool 'SGI Zilog85C30 serial support' CONFIG_SGI_SERIAL + if [ "$CONFIG_SGI_SERIAL" != "n" ]; then + define_bool CONFIG_SERIAL y + fi fi -endmenu mainmenu_option next_comment comment 'Kernel hacking' #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC -#bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG +bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG bool 'Kernel profiling support' CONFIG_PROFILE if [ "$CONFIG_PROFILE" = "y" ]; then int ' Profile shift count' CONFIG_PROFILE_SHIFT 2 fi -bool 'Cross compilation' CONFIG_CROSS_COMPILE endmenu diff --git a/arch/mips/defconfig b/arch/mips/defconfig index bb3635c25..201e66b94 100644 --- a/arch/mips/defconfig +++ b/arch/mips/defconfig @@ -17,26 +17,14 @@ CONFIG_SNI_RM200_PCI=y CONFIG_MIPS_JAZZ=y # -# CPU selection -# -# CONFIG_CPU_R3000 is not set -# CONFIG_CPU_R6000 is not set -CONFIG_CPU_R4X00=y -# CONFIG_CPU_R8000 is not set -# CONFIG_CPU_R10000 is not set -CONFIG_TLB_SHUTDOWN=y -# CONFIG_OPTIMIZE_R4600 is not set - -# # General setup # -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_AOUT is not set CONFIG_ELF_KERNEL=y CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set CONFIG_NET=y -# CONFIG_MAX_16M is not set -# CONFIG_SYSVIPC is not set +CONFIG_SYSVIPC=y # # Loadable module support @@ -53,7 +41,7 @@ CONFIG_BLK_DEV_IDE=y # Please see Documentation/ide.txt for help/info on IDE drives # # CONFIG_BLK_DEV_HD_IDE is not set -# CONFIG_BLK_DEV_IDEDISK is not set +CONFIG_BLK_DEV_IDEDISK=y CONFIG_BLK_DEV_IDECD=y # CONFIG_BLK_DEV_IDETAPE is not set # CONFIG_BLK_DEV_IDEFLOPPY is not set @@ -87,7 +75,7 @@ CONFIG_INET=y # CONFIG_INET_RARP is not set # CONFIG_NO_PATH_MTU_DISCOVERY is not set CONFIG_IP_NOSR=y -CONFIG_SKB_LARGE=y +# CONFIG_SKB_LARGE is not set # # @@ -99,50 +87,7 @@ CONFIG_SKB_LARGE=y # # SCSI support # -CONFIG_SCSI=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -# CONFIG_BLK_DEV_SR_VENDOR is not set -# CONFIG_CHR_DEV_SG is not set - -# -# 
Some SCSI devices (e.g. CD jukebox) support multiple LUNs -# -# CONFIG_SCSI_MULTI_LUN is not set -CONFIG_SCSI_CONSTANTS=y - -# -# SCSI low-level drivers -# -# CONFIG_SCSI_7000FASST is not set -# CONFIG_SCSI_AHA152X is not set -# CONFIG_SCSI_AHA1542 is not set -# CONFIG_SCSI_AHA1740 is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_IN2000 is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_DTC3280 is not set -CONFIG_SCSI_EATA_DMA=y -# CONFIG_SCSI_EATA_PIO is not set -# CONFIG_SCSI_EATA is not set -# CONFIG_SCSI_FUTURE_DOMAIN is not set -# CONFIG_SCSI_GENERIC_NCR5380 is not set -# CONFIG_SCSI_NCR53C406A is not set -# CONFIG_SCSI_NCR53C8XX is not set -# CONFIG_SCSI_PPA is not set -# CONFIG_SCSI_PAS16 is not set -# CONFIG_SCSI_QLOGIC_FAS is not set -# CONFIG_SCSI_SEAGATE is not set -# CONFIG_SCSI_T128 is not set -# CONFIG_SCSI_U14_34F is not set -# CONFIG_SCSI_ULTRASTOR is not set +# CONFIG_SCSI is not set # # Network device support @@ -152,22 +97,12 @@ CONFIG_NETDEVICES=y # CONFIG_DUMMY is not set # CONFIG_EQUALIZER is not set CONFIG_NET_ETHERNET=y +CONFIG_MIPS_JAZZ_SONIC=y # CONFIG_NET_VENDOR_3COM is not set CONFIG_LANCE=y CONFIG_LANCE32=y # CONFIG_NET_VENDOR_SMC is not set -CONFIG_NET_ISA=y -# CONFIG_E2100 is not set -# CONFIG_DEPCA is not set -# CONFIG_EWRK3 is not set -# CONFIG_EEXPRESS is not set -# CONFIG_HPLAN_PLUS is not set -# CONFIG_HPLAN is not set -# CONFIG_HP100 is not set -# CONFIG_NE2000 is not set -# CONFIG_NI52 is not set -# CONFIG_NI65 is not set -# CONFIG_SK_G16 is not set +# CONFIG_NET_ISA is not set # CONFIG_NET_EISA is not set # CONFIG_NET_POCKET is not set # CONFIG_FDDI is not set @@ -176,17 +111,6 @@ CONFIG_NET_ISA=y # CONFIG_NET_RADIO is not set # CONFIG_SLIP is not set # CONFIG_TR is not set -CONFIG_MIPS_JAZZ_SONIC=y - -# -# ISDN subsystem -# -# CONFIG_ISDN is not set - -# -# CD-ROM drivers (not for SCSI or IDE/ATAPI drives) -# -# CONFIG_CD_NO_IDESCSI is not set # # Filesystems @@ -194,7 +118,7 @@ CONFIG_MIPS_JAZZ_SONIC=y # CONFIG_QUOTA is not set # CONFIG_MINIX_FS is not set # CONFIG_EXT_FS is not set -# CONFIG_EXT2_FS is not set +CONFIG_EXT2_FS=y # CONFIG_XIA_FS is not set # CONFIG_FAT_FS is not set # CONFIG_MSDOS_FS is not set @@ -213,29 +137,13 @@ CONFIG_ISO9660_FS=y # CONFIG_UFS_FS is not set # -# Character devices -# -CONFIG_SERIAL=y -# CONFIG_DIGI is not set -# CONFIG_CYCLADES is not set -# CONFIG_STALDRV is not set -# CONFIG_RISCOM8 is not set -# CONFIG_PRINTER is not set -# CONFIG_MOUSE is not set -# CONFIG_UMISC is not set -# CONFIG_QIC02_TAPE is not set -# CONFIG_FTAPE is not set -# CONFIG_APM is not set -# CONFIG_WATCHDOG is not set -# CONFIG_RTC is not set - -# -# Sound +# SGI Character Devices # -# CONFIG_SOUND is not set +# CONFIG_PSMOUSE is not set +# CONFIG_SGI_SERIAL is not set # # Kernel hacking # +# CONFIG_REMOTE_DEBUG is not set # CONFIG_PROFILE is not set -CONFIG_CROSS_COMPILE=y diff --git a/arch/mips/deskstation/Makefile b/arch/mips/deskstation/Makefile index 8e1b6bb13..903a73eb0 100644 --- a/arch/mips/deskstation/Makefile +++ b/arch/mips/deskstation/Makefile @@ -13,7 +13,7 @@ all: deskstation.o O_TARGET := deskstation.o -O_OBJS := hw-access.o int-handler.o setup.o +O_OBJS := hw-access.o int-handler.o reset.o setup.o int-handler.o: int-handler.S diff --git a/arch/mips/deskstation/hw-access.c b/arch/mips/deskstation/hw-access.c index 2d79e6f77..418bac5f6 100644 --- a/arch/mips/deskstation/hw-access.c +++ b/arch/mips/deskstation/hw-access.c @@ -13,16 +13,17 
@@ #include <linux/linkage.h> #include <linux/types.h> #include <asm/bootinfo.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/irq.h> -#include <asm/mc146818rtc.h> #include <asm/vector.h> extern int FLOPPY_IRQ; extern int FLOPPY_DMA; +asmlinkage extern void deskstation_handle_int(void); + /* * How to access the FDC's registers. */ @@ -110,24 +111,21 @@ fd_disable_irq(void) void deskstation_fd_cacheflush(const void *addr, size_t size) { - cacheflush(addr, size, CF_DCACHE|CF_ALL); + flush_cache_all(); } /* - * RTC stuff (This is a guess on how Deskstation handles this ...) + * RTC stuff */ -static unsigned char -rtc_read_data(unsigned long addr) +static unsigned char * +rtc_read_data() { - outb_p(addr, RTC_PORT(0)); - return inb_p(RTC_PORT(1)); + return 0; } static void -rtc_write_data(unsigned char data, unsigned long addr) +rtc_write_data(unsigned char data) { - outb_p(addr, RTC_PORT(0)); - outb_p(data, RTC_PORT(1)); } /* diff --git a/arch/mips/deskstation/int-handler.S b/arch/mips/deskstation/int-handler.S index a52df711f..d7ff36d0f 100644 --- a/arch/mips/deskstation/int-handler.S +++ b/arch/mips/deskstation/int-handler.S @@ -15,9 +15,9 @@ .set noreorder .set noat .align 5 - NESTED(deskstation_handle_int, FR_SIZE, sp) + NESTED(deskstation_handle_int, PT_SIZE, sp) SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) + REG_S sp,PT_OR2(sp) CLI .set at lui s0,%hi(PORT_BASE) diff --git a/arch/mips/deskstation/reset.c b/arch/mips/deskstation/reset.c new file mode 100644 index 000000000..7fb387631 --- /dev/null +++ b/arch/mips/deskstation/reset.c @@ -0,0 +1,15 @@ +/* + * linux/arch/mips/deskstation/process.c + * + * Reset a Deskstation. + */ +#include <asm/io.h> +#include <asm/system.h> + +void +jazz_hard_reset_now(void) +{ + printk("Implement jazz_hard_reset_now().\n"); + printk("Press reset to continue.\n"); + while(1); +} diff --git a/arch/mips/deskstation/setup.c b/arch/mips/deskstation/setup.c index fedab9c84..59d3da401 100644 --- a/arch/mips/deskstation/setup.c +++ b/arch/mips/deskstation/setup.c @@ -17,7 +17,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/mipsregs.h> -#include <asm/processor.h> #include <asm/vector.h> /* @@ -34,6 +33,7 @@ extern asmlinkage void deskstation_handle_int(void); extern asmlinkage void deskstation_fd_cacheflush(const void *addr, size_t size); extern struct feature deskstation_tyne_feature; extern struct feature deskstation_rpc44_feature; +extern void deskstation_hard_reset_now(void); #ifdef CONFIG_DESKSTATION_TYNE unsigned long mips_dma_cache_size = 0; @@ -43,6 +43,10 @@ static void tyne_irq_setup(void) { set_except_vector(0, deskstation_handle_int); + /* set the clock to 100 Hz */ + outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ + outb_p(LATCH & 0xff , 0x40); /* LSB */ + outb(LATCH >> 8 , 0x40); /* MSB */ request_region(0x20,0x20, "pic1"); request_region(0xa0,0x20, "pic2"); setup_x86_irq(2, &irq2); @@ -58,6 +62,10 @@ rpc44_irq_setup(void) * future, we need to consider merging the two -- imp */ set_except_vector(0, deskstation_handle_int); + /* set the clock to 100 Hz */ + outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */ + outb_p(LATCH & 0xff , 0x40); /* LSB */ + outb(LATCH >> 8 , 0x40); /* MSB */ request_region(0x20,0x20, "pic1"); request_region(0xa0,0x20, "pic2"); setup_x86_irq(2, &irq2); @@ -65,46 +73,9 @@ rpc44_irq_setup(void) } #endif -void (*board_time_init)(struct irqaction *irq); - -static void deskstation_time_init(struct irqaction *irq) -{ - /* set the clock to 100 Hz */ - 
outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ - outb_p(LATCH & 0xff , 0x40); /* LSB */ - outb(LATCH >> 8 , 0x40); /* MSB */ - setup_x86_irq(0, irq); -} - void deskstation_setup(void) { - tag *atag; - - /* - * We just check if a tag_screen_info can be gathered - * in setup_arch(), if yes we don't proceed futher... - */ - atag = bi_TagFind(tag_screen_info); - if (!atag) { - /* - * If no, we try to find the tag_arc_displayinfo which is - * always created by Milo for an ARC box (for now Milo only - * works on ARC boxes :) -Stoned. - */ - atag = bi_TagFind(tag_arcdisplayinfo); - if (atag) { - screen_info.orig_x = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->cursor_x; - screen_info.orig_y = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->cursor_y; - screen_info.orig_video_cols = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->columns; - screen_info.orig_video_lines = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->lines; - } - } - switch(mips_machtype) { #ifdef CONFIG_DESKSTATION_TYNE case MACH_DESKSTATION_TYNE: @@ -115,9 +86,8 @@ deskstation_setup(void) memcpy(&mips_dma_cache_base, TAGVALPTR(atag), atag->size); irq_setup = tyne_irq_setup; - feature = &deskstation_tyne_feature; // Will go away - port_base = PORT_BASE_TYNE; - isa_slot_offset = 0xe3000000; + feature = &deskstation_tyne_feature; + isa_slot_offset = 0xe3000000; // Will go away break; #endif #ifdef CONFIG_DESKSTATION_RPC44 @@ -125,17 +95,12 @@ deskstation_setup(void) irq_setup = rpc44_irq_setup; mips_memory_upper = KSEG0 + (32 << 20); /* xxx fixme imp */ feature = &deskstation_rpc44_feature; // Will go away - port_base = PORT_BASE_RPC44; isa_slot_offset = 0xa0000000; break; #endif } - board_time_init = deskstation_time_init; fd_cacheflush = deskstation_fd_cacheflush; request_region(0x00,0x20,"dma1"); request_region(0x40,0x20,"timer"); request_region(0x70,0x10,"rtc"); - - if (mips_machtype == MACH_DESKSTATION_RPC44) - EISA_bus = 1; } diff --git a/arch/mips/doc/mmimpl/openbsd b/arch/mips/doc/mmimpl/openbsd new file mode 100644 index 000000000..f393b2b98 --- /dev/null +++ b/arch/mips/doc/mmimpl/openbsd @@ -0,0 +1,300 @@ +Each instance of software process mapping state holds: + + Reference count + + Access control lock + + Statistic info + + pmap ASID + + global ASID generation comparator + + array of pages of R4K PTE's + +The control structure looks like the following: + +typedef struct pmap { + int pm_count; /* pmap reference count */ + simple_lock_data_t pm_lock; /* lock on pmap */ + struct pmap_statistics pm_stats; /* pmap statistics */ + int pm_tlbpid; /* address space tag */ + u_int pm_tlbgen; /* TLB PID generation number */ + struct segtab *pm_segtab; /* pointers to pages of PTEs */ +} *pmap_t; + +The PTE array is sized only to be able to map userspace, it looks +like this: + +#define PMAP_SEGTABSIZE 512 + +typedef union pt_entry { + unsigned int pt_entry; /* for copying, etc. */ + struct pte pt_pte; /* for getting to bits by name */ +} pt_entry_t; /* Mach page table entry */ + +struct segtab { + union pt_entry *seg_tab[PMAP_SEGTABSIZE]; +}; + +All user processes have pm_segtab point to a block of memory for this +array. The special kernel pmap has a NULL pm_segtab, this is how you +can tell if you are adding/removing mappings to the kernel or not. + +At boot time INIT gets pmap ASID 1 and the current global tlbpid_gen +value for it's generation comparator. All other new processes get a +pmap ASID and comparator of 0 which will always require the allocation +of a new ASID when this process is first scheduled. 
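The generation trick above (a fresh pmap carries pm_tlbgen == 0, so its first pass through switch() always fails the comparison and is forced to pick up a new ASID) is walked through in detail below. As a compact illustration, here is a minimal C sketch of that check, reusing the pmap_t fields and the tlbpid_gen/tlbpid_cnt globals named in this document; MAX_ASID and tlb_flush_all() are assumed placeholders, and the real pmap_alloc_tlbpid() is described as taking the proc being switched to rather than the pmap itself.

/*
 * Sketch of the lazy, generation-based ASID allocation described here.
 * MAX_ASID and tlb_flush_all() stand in for the MMU-dependent ASID limit
 * and the full TLB flush; everything else follows the names in the text.
 */
#define MAX_ASID 255                            /* assumed limit, MMU-dependent */

extern unsigned int tlbpid_gen;                 /* bumped on every full TLB flush */
extern unsigned int tlbpid_cnt;                 /* next ASID to hand out */
extern void tlb_flush_all(void);                /* placeholder for the full flush */

void pmap_alloc_tlbpid(pmap_t pmap)
{
        if (pmap->pm_tlbgen == tlbpid_gen)
                return;                         /* ASID from this generation still valid */

        if (tlbpid_cnt > MAX_ASID) {
                tlb_flush_all();                /* wipe the whole TLB ... */
                tlbpid_gen++;                   /* ... and everyone else's ASID with it */
                tlbpid_cnt = 1;                 /* restart numbering at 1 */
        }
        pmap->pm_tlbpid = tlbpid_cnt++;         /* hand this address space a new ASID */
        pmap->pm_tlbgen = tlbpid_gen;           /* valid until the next full flush */
}

Under this scheme a task keeps its ASID across context switches until the counter overflows, at which point a single flush invalidates every mapping at once instead of hunting down stale entries task by task.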
+ +To find a PTE within the R4K PTE array given a user virtual address +and a software pmap extract like this: + + -------------------------------------------- +User VADDR | 0 | array elem | PTE within PAGE | 0 | + -------------------------------------------- + 31 30 22 21 12 11 0 +For example: + +pte_t *vaddr_to_pte(pmap_t pmap, unsigned long vaddr) +{ + int entry; + pte_t *pte; + + entry = (vaddr >> 22) & 0x1ff; + pte = pmap->pm_segtab->seg_tab[entry]; + return pte + ((vaddr >> 12) & 0x3ff); +} + +To destroy a process mapping. + +1) Decrement pmap reference count + +2) If reference count is now zero and pmap has pm_segtab + + a) check each seg_tab array entry + + b) if non-NULL, flush the page from the cache, + free the page, and mark seg_tab[xxx] to NULL + + c) Free pm_segtab array and set pm_segtab to NULL + +ASID allocation + + This happens only at switch() time, pmap_alloc_tlbpid() + is called and is passed a ptr to the proc being switched + to. + + The global tlbpid_gen counter is compared against the + pm_tlbgen in the pmap for this process. When the generation + changes and the pmap gen does not match, a new asid needs + to be allocated to the process. + + The idea is that when the entire tlb is flushed, you go to + another generation. So you see things go like this: + +Boot time: tlbpid_gen = 1 + tlbpid_cnt = 2 + +INIT task: pm_tlbpid = 1 + pm_tlbgen = tlbpid_gen + + When INIT hits the cpu for the first time, it's pm_tlbgen will + match tlbpid_gen and therefore it's pm_tlbpid of 1 will be + used as the ASID when control is passed back to switch(). + Let's say another task is forked off my init. + +New task: pm_tlbpid = 0 + pm_tlbgen = 0 + + When this task hits the cpu for the first time, since a tlbgen + of zero will never match tlbpid_gen, it is allocated a new ASID + which is the current value of tlbpid_cnt. tlbpid_cnt is now + incremented. When tlbpid_cnt grows larger than the number + of ASIDS supported by the MMU, the entire TLB is flushed and + this task instead gets a tlbpid of 1 and tlbpid_gen is + incremented causing all other tasks to require a new ASID + when they are switch()'d to next. + + The idea is that reallocating an ASID for a task would be too + expensive if it required searching for the previous ASID in the + current set of TLB entires. It is cheaper just to flush the + entire TLB and require everyone to get a new ASID when this + overflow happens. But in between overflows, and thus while + tlbpid_gen is the same, a process retains it's ASID for each + invocation of switch() for which it is scheduled. + +Adding a new entry into a processes pmap is pretty straight forward: + + a) Given a pmap, a virtual address, a physical page, + a set of protections and a wired true/false value + we decide what to do. + + b) If this is real physical RAM (as opposed to device + memory, IS_VM_PHYSADDR(pa) tells the case) we do + the following. + + Set up the pte page permissions: + + a) if the protections passed do not indicate + write protection, make it a read-only pte + which is MIPS terms is + + (PG_RDONLY | PG_VALID | PG_CACHED) + + The PG_RDONLY is a software bit which is masked + out at tlb refill time (discussed later). + + b) if the protections do indicate the mapping + to be entered is indeed writable we setup + the pte based upon whether this is going into + the kernel map or a user map + + For kernel map we just allow the page to be + written to from the get go, and clear the PG_CLEAN + flag for the page_struct this physical page is + represented by, end of story. 
+ + For a user, we only allow writes from the start + if the page_struct is already not clean, else + we don't set the MIPS pte dirty bit. + + The page is marked valid and cachable no matter what. + + Enter the new mapping into the pv_list (discussed later). + + c) If this is a mapped device then use PG_IOPAGE permissions, + if not writable clear the dirty MIPS pte bit, clear the global + bit (which is set in the PG_IOPAGE expansion) in all cases. + + d) If this is an executable page, push the page out of + the instruction cache. + + MIPS is funny in that all cache operations perform an + address translation, so you have to be careful. OpenBSD + uses the KSEG0 address (which does not go through the + TLB to be translated) for these ICACHE flushes. The indexed + primary icache flush is used to remove the lines from the + cache. + + e) If this is a kernel mapping (pmap->pm_segtab is NULL), get + the pte_t ptr from kernel_regmap storage, or in the physical + page number and (PG_ROPAGE | PG_G) (XXX why all the time?), + set the wired bit and increment the wired count if the wired + boolean arg was true, increment the resident count if the + pte was previously invalid, call tlbupdate to get rid of + any previous mapping, and set pte->pt_entry to this new pte + value. + + f) For user we need to check first if the PTE array points to + a page yet. If not we need to get a zero page. Calculate + the offset into the appropriate page based upon virtual + address, or in the virtual page in the new pte value, + increment wired and resident count if necessary, and + set pte->pte_entry to this new pte value. Finally, if + the process has (potentially) a valid ASID (and therefore + entries in the TLB right now, ie. pmap->pm_tlbgen == + tlbpid_gen) then remote any matching enties in the TLB + for this processes virtual-page/ASID pair. + +The kernel keeps a pv_list, it has one entry for each managed physical +page in the system. Off each entry is a linked list, one for each +virtual page to which the entries physical page is mapped, the list +head counts as the first entry. This list is used to detect cache +aliasing problems with virtual caches. + +When the kernel adds a new element to a physical pages pv_list entry, +it checks whether this new virtual mapping could cause a cache alias, +if so then it marks all of the virtual pages in the list uncacheable. +The reason this is done is simple: + + Suppose a process has physical page X mapped at two virtual + addresses within it's address space, called Y and Z. Y and + Z could potentially be in the cache at the same time due to + the way their two addresses index entries in the virtual + cache. The process could bring both pages into the cache, + write to mapping Y, then read the same datum from mapping + Z and it would see the old data. + +Also, when a mapping is removed (discussed in a bit) the kernel +rechecks the pv_list to see if the physical pages mappings were marked +uncachable and if so, it runs through the list without the mapping now +being removed to see if the alias is no longer present. If no alias +exists any more, all the virtual pages in the pv_list are mapped +cachable again. + +The pv_list is also checked when the kernel changes permissions +on an extent of user virtual address space within a pmap. + +Mappings are removed from a processes pmap in the following manner: + + Kernel is told the pmap, beginning virtual address, and + ending virtual address in which to perform the de-map + operation. First the kernel case. 
+ + For each pte within the given range, if the pte for that + page is not marked valid we skip it. If it is valid, first + we decrement the resident count unconditionally, and decrement + the wired count if the entry was marked with the wired + attribute. Next the pv_list is consulted as discussed above, + and if the mapping was the last pv_list element for the + assosciated physical page, then the cache is flushed of the + data. Finally, the pte is marked invalid, retaining the + global bit, and the tlb is flushed of the entry if still + present within the mmu hardware. + + On the user end of things, do the same as the kernel case, + except that the mmu TLB hardware is only flushed of each + entry if the pmap in question (potentially) holds a valid + ASID by way of pmap->pm_tlbgen being equal to tlbpid_gen. + +Changes occur to a range of virtual addresses within a processes pmap +in two slightly different ways. In on way, the protection of a single +page is lowered from what it is currently. The second way moves the +protection of a virtual address region to an arbitrary direction, +either more strict or less strict. + +In the first case, the kernel is given a physical address and a new +protection value. If the protection is full blast read/write/execute, +or just read+write, nothing is done because the existing protections +will always be equal to this new protection (this procedure is only +invoked to lower permissions). If a read only type protection is +being requested, the pv_list for this physical address is walked and +each virtual/pmap mapping is set to the requested protection. +Finally, if the read attribute is not set in the new protection, all +virtual mappings in the physical pages pv_list are removed one at a +time via the method described above. This first case, when just +changing protections and not removing them, calls the next procedure +to do the actual work on each mapping. + +Next, we are given a pmap, a virtual range extent, and the new +protections to apply to that particular range. Since this can be +called externally and not just by the per-page protection lowering +method just described, we handle the null protection request by +removing the mappings completely from the pmap. For the kernel pmap +we cycle throught the virtual addresses, and change the software copy +of the valid pte's to have the new protection then update the mmu TLB +hardware. For the user, we act similarly except that the TLB hardware +update is only performed if the pm_tlbgen for the pmap matches the +global tlbpid_gen comparator. + +The kernel can be asked to change the cachable attribute for an +arbitrarily mapped physical page. This is implemented just like the +page protection code just described, the pv_list is walked down and +the cachable "protection" bit(s) are modified as asked. This is +mainly used by the pv_list alias detection code to fix mappings which +will end up causing aliases, or are detected to no longer cause an +alias due to one of the virtual mappings being removed. + +The physical address backed by a given pmap/virtual-address pair can +be asked for as well. There is a method which performs this as well, +retaining the non-page offset bits in the return value if a virtual +to physical translation can be found, else NULL is returned. + +Finally, two methods are provided to control the copying and +zero-clearing out of pages which will be (or already are) mapped +within someone(s) per-process pmap. 
This can be used when it is +necessary to create a temporary mapping for the operation or do +special things to keep caches consistant for example. + + + diff --git a/arch/mips/doc/pagetables.txt b/arch/mips/doc/pagetables.txt deleted file mode 100644 index 3d900a194..000000000 --- a/arch/mips/doc/pagetables.txt +++ /dev/null @@ -1,87 +0,0 @@ -Format and handling of Linux/MIPS pagetables -============================================ - -This file describes the MIPS specific parts of the Linux pagetable handling. - -Opposed to other architecures like i386 or m68k architecture all MIPS -CPUs only implement the TLB itself and a small set of functions to -maintain it as hardware. The actual maintenance of the TLB's contents -is implemented in software only. - -The TLB has a relativly small number of entries. This limits the -maximum address space that can mapped by the TLB using 4kb pages and -without consideration of wired entries to a maximum of 512kb for the -R1000, 384kb for the R4000/4400 and 256kb for the R2000/R3000. This -actual size of mappable space is even smaller due to the wired entries. - -Especially for processes with a huge working set of pages it is therefore -important to make the process of reloading entries into the TLB as -efficient as possible. This means: - - - Choosing a data structure that can be handled as efficient as - possible. - - The implementation of the low level pagefault handling has to be - implemented in a efficient way. - -The Linux kernel itself implements three level page tables as a tree -structure. Linux implementations that don't need three levels of page -tables can fold one level of the page tables so that effectivly a two -level page table remains. The exact size and content of the entries -is upto the implementation. - -Opposed to this the MIPS hardware architecture implies by the data -provided in the c0_context/c0_xcontext registers a simple array of -4 byte elements (for R2000/R3000/R6000) or 8 byte elements (for the -other 64bit members of the CPU family). - -The page tables are mapped to the address TLBMAP (which is usually -defined as 0xe4000000 in <asm/mipsconfig.h). The page which contains -the root of the page table of the current process, the "page directory" -and is therefore mapped at (TLBMAP + (TLBMAP >> (12-2))) (this is the -value of the define TLB_ROOT which is defined as 0xe4390000). That -way the kernel itself can access the page tables as a tree structure -while the exception handlers can work with maxiumum efficiency accessing -the page tables as simple array. - -The tlb refill handler itself is very simple. For the R4x00 family it -has just 14 instruction, for the R4600 and derivatives it can be -optimized to 12 instruction, even further for the R10000. This -exception handler is very simple and fast and therefore doesn't any -checking for errors or special cases. - -It can therefore happen that the entry that is attempted to be reloaded -isn't mapped via the pagetables thus resulting in a double tlb refill -exception. Due to the EXL flag set in c0_status this exception goes -through the general exception vector and from there to handle_tlbl. -Handle_tlbl is a more complex exception handler that is - compared -to the first handler - complex and called far less often. It features -handling of special cases and some error checking for debugging. This -second handler still doesn't reenable interrupts, change to the kernel -stack or save registers to be as efficient as possible. 
Therefore -only the two registers k0/k1 are available for use. All this is only -done when do_page_fault() in arch/mips/mm/fault.c is called. For the -normal case this handler just reloads the entry mapping the pte table -which again contains the entries to be loaded in the tlb. Since the -original fault address has been lost this exception handler cannot -complete the job. So it just returns to the main program which after -taking another exception via the first tlb refill handler reloads the -originally missing entry into the TLB and continues normal execution. - -Another special in the Linux/MIPS page handling is the handling of -pages in non-existant branches of the page tables. To avoid that -the exception handlers have to handle this special case the kernel -maps these ptes (page table entries) to invalid_pte_table. This is a -4kb page full of invalid entries. On an attempted access to such an -invalid page the kernel then reloads - eventuall via a double fault -this invalid entry into the tlb. The CPU then takes a tlb invalid -exception resulting in a call to do_page_fault() which usually will -take the apropriate measures like sending SIGSEGV. - -Downsides of this implementation are it's complexity and the faster -handling of the majority of exceptions is bought at the expense of -having to handle page aliasing problems with the page tables (which -are accessed at TLBMAP and in KSEG1) itself. This is done using -uncached accesses which are especially on older machines with slow -memory subsystems painfully slow. The implementation is done this -way because for the original hardware which Linux/MIPS was intended for -had a blindingly fast memory interface. diff --git a/arch/mips/jazz/Makefile b/arch/mips/jazz/Makefile index 3ee478fee..3868b60cf 100644 --- a/arch/mips/jazz/Makefile +++ b/arch/mips/jazz/Makefile @@ -13,7 +13,7 @@ all: jazz.o O_TARGET := jazz.o -O_OBJS := hw-access.o int-handler.o jazzdma.o setup.o +O_OBJS := hw-access.o int-handler.o jazzdma.o reset.o setup.o ifdef CONFIG_VIDEO_G364 O_OBJS += g364.o diff --git a/arch/mips/jazz/hw-access.c b/arch/mips/jazz/hw-access.c index 112941275..52a61165f 100644 --- a/arch/mips/jazz/hw-access.c +++ b/arch/mips/jazz/hw-access.c @@ -10,11 +10,12 @@ #include <linux/delay.h> #include <linux/linkage.h> #include <linux/types.h> +#include <linux/mm.h> #include <asm/addrspace.h> -#include <asm/cache.h> #include <asm/vector.h> #include <asm/jazz.h> #include <asm/jazzdma.h> +#include <asm/pgtable.h> #include <asm/mc146818rtc.h> static unsigned char @@ -102,7 +103,7 @@ fd_disable_irq(void) void jazz_fd_cacheflush(const void *addr, size_t size) { - cacheflush((unsigned long)addr, size, CF_DCACHE|CF_ALL); + flush_cache_all(); } static unsigned char diff --git a/arch/mips/jazz/int-handler.S b/arch/mips/jazz/int-handler.S index 03c999124..7be6b8457 100644 --- a/arch/mips/jazz/int-handler.S +++ b/arch/mips/jazz/int-handler.S @@ -22,10 +22,10 @@ */ .set noreorder - NESTED(jazz_handle_int, FR_SIZE, ra) + NESTED(jazz_handle_int, PT_SIZE, ra) .set noat SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) + REG_S sp,PT_OR2(sp) CLI .set at @@ -89,7 +89,7 @@ ll_local_dev: lbu t0,JAZZ_IO_IRQ_SOURCE * whistles and bells and we're aware of the problem. */ ll_isa_irq: lw a0,JAZZ_EISA_IRQ_ACK - lui s0,%hi(PORT_BASE_JAZZ) + lui s0,%hi(JAZZ_PORT_BASE) li s1,1 andi t0,a0,8 # which pic? 
bnez t0,ack_second @@ -98,17 +98,17 @@ ll_isa_irq: lw a0,JAZZ_EISA_IRQ_ACK /* * Acknowledge first pic */ - lb t2,%lo(PORT_BASE_JAZZ)+0x21(s0) + lb t2,%lo(JAZZ_PORT_BASE)+0x21(s0) lui s4,%hi(cache_21) lb t0,%lo(cache_21)(s4) sllv s1,s1,a0 or t0,s1 sb t0,%lo(cache_21)(s4) - sb t0,%lo(PORT_BASE_JAZZ)+0x21(s0) + sb t0,%lo(JAZZ_PORT_BASE)+0x21(s0) lui s3,%hi(intr_count) lw t0,%lo(intr_count)(s3) li t2,0x20 - sb t2,%lo(PORT_BASE_JAZZ)+0x20(s0) + sb t2,%lo(JAZZ_PORT_BASE)+0x20(s0) /* * Now call the real handler */ @@ -123,7 +123,7 @@ ll_isa_irq: lw a0,JAZZ_EISA_IRQ_ACK /* * Unblock first pic */ - lbu a0,%lo(PORT_BASE_JAZZ)+0x21(s0) + lbu a0,%lo(JAZZ_PORT_BASE)+0x21(s0) lb a0,%lo(cache_21)(s4) subu t0,1 sw t0,%lo(intr_count)(s3) @@ -131,24 +131,24 @@ ll_isa_irq: lw a0,JAZZ_EISA_IRQ_ACK and a0,s1 sb a0,%lo(cache_21)(s4) jr v0 - sb a0,%lo(PORT_BASE_JAZZ)+0x21(s0) # delay slot + sb a0,%lo(JAZZ_PORT_BASE)+0x21(s0) # delay slot .align 5 ack_second: /* * Acknowledge second pic */ - lbu t2,%lo(PORT_BASE_JAZZ)+0xa1(s0) + lbu t2,%lo(JAZZ_PORT_BASE)+0xa1(s0) lui s4,%hi(cache_A1) lb t3,%lo(cache_A1)(s4) sllv s1,s1,a0 or t3,s1 sb t3,%lo(cache_A1)(s4) - sb t3,%lo(PORT_BASE_JAZZ)+0xa1(s0) + sb t3,%lo(JAZZ_PORT_BASE)+0xa1(s0) li t3,0x20 - sb t3,%lo(PORT_BASE_JAZZ)+0xa0(s0) + sb t3,%lo(JAZZ_PORT_BASE)+0xa0(s0) lui s3,%hi(intr_count) lw t0,%lo(intr_count)(s3) - sb t3,%lo(PORT_BASE_JAZZ)+0x20(s0) + sb t3,%lo(JAZZ_PORT_BASE)+0x20(s0) /* * Now call the real handler */ @@ -165,7 +165,7 @@ ack_second: /* /* * Unblock second pic */ - lb a0,%lo(PORT_BASE_JAZZ)+0xa1(s0) + lb a0,%lo(JAZZ_PORT_BASE)+0xa1(s0) lb a0,%lo(cache_A1)(s4) subu t0,1 sw t0,%lo(intr_count)(s3) @@ -173,7 +173,7 @@ ack_second: /* and a0,s1 sb a0,%lo(cache_A1)(s4) jr v0 - sb a0,%lo(PORT_BASE_JAZZ)+0xa1(s0) # delay slot + sb a0,%lo(JAZZ_PORT_BASE)+0xa1(s0) # delay slot /* * Hmm... This is not just a plain PC clone so the question is diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 3663f33d8..470185266 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -11,7 +11,7 @@ */ #include <linux/kernel.h> #include <linux/errno.h> -#include <asm/cache.h> +#include <linux/mm.h> #include <asm/mipsregs.h> #include <asm/mipsconfig.h> #include <asm/jazz.h> @@ -19,6 +19,7 @@ #include <asm/uaccess.h> #include <asm/dma.h> #include <asm/jazzdma.h> +#include <asm/pgtable.h> /* * Set this to one to enable additional vdma debug code. @@ -41,8 +42,7 @@ static int debuglvl = 3; * entries to be unused. Using this method will at least * allow some early device driver operations to work. */ -static __inline__ void -vdma_pgtbl_init(void) +static inline void vdma_pgtbl_init(void) { int i; unsigned long paddr = 0; @@ -69,7 +69,7 @@ unsigned long vdma_init(unsigned long memory_start, unsigned long memory_end) */ vdma_pagetable_start = KSEG1ADDR((memory_start + 4095) & ~4095); vdma_pagetable_end = vdma_pagetable_start + VDMA_PGTBL_SIZE; - cacheflush(vdma_pagetable_start, VDMA_PGTBL_SIZE, CF_DCACHE|CF_ALL); + flush_cache_all(); /* * Clear the R4030 translation table diff --git a/arch/mips/jazz/reset.c b/arch/mips/jazz/reset.c new file mode 100644 index 000000000..a0accc3f6 --- /dev/null +++ b/arch/mips/jazz/reset.c @@ -0,0 +1,15 @@ +/* + * linux/arch/mips/acn/process.c + * + * Reset a Jazz machine. 
+ */ +#include <asm/io.h> +#include <asm/system.h> + +void +jazz_hard_reset_now(void) +{ + printk("Implement jazz_hard_reset_now().\n"); + printk("Press reset to continue.\n"); + while(1); +} diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c index c5f5b1d77..6aaa2af75 100644 --- a/arch/mips/jazz/setup.c +++ b/arch/mips/jazz/setup.c @@ -11,10 +11,8 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/interrupt.h> -#include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/jazz.h> -#include <asm/processor.h> #include <asm/vector.h> #include <asm/io.h> @@ -31,8 +29,7 @@ static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL}; extern asmlinkage void jazz_handle_int(void); extern asmlinkage void jazz_fd_cacheflush(const void *addr, size_t size); extern struct feature jazz_feature; -extern void (*ibe_board_handler)(struct pt_regs *regs); -extern void (*dbe_board_handler)(struct pt_regs *regs); +extern void jazz_hard_reset_now(void); static void jazz_irq_setup(void) @@ -47,91 +44,22 @@ jazz_irq_setup(void) r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */ r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */ set_cp0_status(ST0_IM, IE_IRQ4 | IE_IRQ3 | IE_IRQ2 | IE_IRQ1); + /* set the clock to 100 Hz */ + r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9); request_region(0x20, 0x20, "pic1"); request_region(0xa0, 0x20, "pic2"); setup_x86_irq(2, &irq2); } -void (*board_time_init)(struct irqaction *irq); - -static void jazz_time_init(struct irqaction *irq) -{ - /* set the clock to 100 Hz */ - r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9); - setup_x86_irq(0, irq); -} - -/* - * The ibe/dbe exceptions are signaled by onboard hardware and should get - * a board specific handlers to get maximum available information. Bus - * errors are always symptom of hardware malfunction or a kernel error. - * We should try to handle this case a bit more gracefully than just - * zapping the process ... - */ -static void jazz_be_board_handler(struct pt_regs *regs) -{ - u32 jazz_is, jazz_ia; - - /* - * Give some debugging aid ... - */ - jazz_is = r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE); - jazz_ia = r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); - printk("Interrupt Source == %08x\n", jazz_is); - printk("Invalid Address Register == %08x\n", jazz_ia); - show_regs(regs); - - /* - * Assume it would be too dangerous to continue ... - */ - force_sig(SIGBUS, current); -} - void jazz_setup(void) { - tag *atag; - - /* - * we just check if a tag_screen_info can be gathered - * in setup_arch(), if yes we don't proceed futher... - */ - atag = bi_TagFind(tag_screen_info); - if (!atag) { - /* - * If no, we try to find the tag_arc_displayinfo which is - * always created by Milo for an ARC box (for now Milo only - * works on ARC boxes :) -Stoned. 
- */ - atag = bi_TagFind(tag_arcdisplayinfo); - if (atag) { - screen_info.orig_x = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->cursor_x; - screen_info.orig_y = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->cursor_y; - screen_info.orig_video_cols = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->columns; - screen_info.orig_video_lines = - ((mips_arc_DisplayInfo*)TAGVALPTR(atag))->lines; - } - } irq_setup = jazz_irq_setup; - board_time_init = jazz_time_init; fd_cacheflush = jazz_fd_cacheflush; feature = &jazz_feature; // Will go away - port_base = PORT_BASE_JAZZ; isa_slot_offset = 0xe3000000; request_region(0x00,0x20,"dma1"); request_region(0x40,0x20,"timer"); /* The RTC is outside the port address space */ - - if (mips_machtype == MACH_MIPS_MAGNUM_4000 - && mips_machtype == MACH_OLIVETTI_M700) - EISA_bus = 1; - /* - * The Jazz hardware provides additional information for - * bus errors, so we use an special handler. - */ - ibe_board_handler = jazz_be_board_handler; - dbe_board_handler = jazz_be_board_handler; + hard_reset_now = jazz_hard_reset_now; } diff --git a/arch/mips/kernel/.cvsignore b/arch/mips/kernel/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/kernel/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index b76c723b2..e537ac73e 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -14,9 +14,24 @@ all: kernel.o head.o EXTRA_ASFLAGS = -mips3 -mcpu=r4000 O_TARGET := kernel.o -O_OBJS := branch.o process.o signal.o entry.o traps.o irq.o ptrace.o vm86.o \ - ioport.o setup.o syscall.o sysmips.o time.o bios32.o ipc.o ksyms.o \ - unaligned.o tags.o +O_OBJS := branch.o process.o signal.o entry.o traps.o ptrace.o vm86.o \ + ioport.o setup.o syscall.o sysmips.o bios32.o ipc.o ksyms.o \ + r4k_switch.o r4k_misc.o r4k_scall.o r4k_fpu.o r2300_switch.o \ + r2300_misc.o r2300_scall.o r2300_fpu.o r6000_fpu.o unaligned.o + +# +# SGI's have very different interrupt/timer hardware. +# +ifndef CONFIG_SGI +O_OBJS += irq.o time.o +endif + +# +# Do we want to be able to execute IRIX elf binaries? +# +ifdef CONFIG_BINFMT_IRIX +O_OBJS += irixelf.o irixioctl.o irixsig.o sysirix.o +endif # # Kernel debugging @@ -43,9 +58,27 @@ O_OBJS := $(sort $(O_OBJS)) all: kernel.o head.o entry.o: entry.S -exception.o: exception.S + head.o: head.S +#r4k_switch.o: r4k_switch.S +# +#r4k_misc.o: r4k_misc.S +# +#r4k_scall.o: r4k_scall.S +# +#r4k_fpu.o: r4k_fpu.S +# +#r2300_switch.o: r2300_switch.S +# +#r2300_misc.o: r2300_misc.S +# +#r2300_scall.o: r2300_scall.S +# +#r2300_fpu.o: r2300_fpu.S +# +#r6000_fpu.o: r6000_fpu.S + clean: include $(TOPDIR)/Rules.make diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 32705b320..a1e302711 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Copyright (C) 1996 by Ralf Baechle + * Copyright (C) 1996, 1997 by Ralf Baechle */ #include <linux/kernel.h> #include <linux/sched.h> diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 6072afae2..515f9af13 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -18,8 +18,10 @@ #include <asm/asm.h> #include <asm/errno.h> +#include <asm/segment.h> #include <asm/mipsregs.h> #include <asm/mipsconfig.h> +#include <asm/page.h> #include <asm/pgtable.h> #include <asm/stackframe.h> #include <asm/processor.h> @@ -39,115 +41,98 @@ flags = 20 errno = 24 exec_domain = 60 -#ifdef __SMP__ -#error "Fix this for SMP" -#else -#define current current_set -#endif - /* * Heia ... The %lo, %hi and %HI stuff is too strong for the ELF assembler * and the ABI to cope with ... */ .text .set noreorder + .set mips3 .align 4 handle_bottom_half: lui s0,%hi(intr_count) lw s1,%lo(intr_count)(s0) mfc0 s3,CP0_STATUS # Enable IRQs - addiu s2,s1,1 + addiu s2,s1, 1 sw s2,%lo(intr_count)(s0) - ori t0,s3,0x1f + ori t0,s3, 0x1f xori t0,0x1e + jal do_bottom_half - mtc0 t0,CP0_STATUS # delay slot + mtc0 t0,CP0_STATUS + mtc0 s3,CP0_STATUS # Restore old IRQ state + b 9f - sw s1,%lo(intr_count)(s0) # delay slot + sw s1,%lo(intr_count)(s0) + +reschedule: + jal schedule + nop -reschedule: jal schedule - nop # delay slot EXPORT(ret_from_sys_call) lw t0,intr_count # bottom half bnez t0,return - -9: lw t0,bh_mask # delay slot +9: + lw t0,bh_mask lw t1,bh_active # unused delay slot and t0,t1 bnez t0,handle_bottom_half + lw t0,PT_STATUS(sp) # returning to kernel mode? - lw t0,FR_STATUS(sp) # returning to kernel mode? - andi t1,t0,0x10 + andi t1,t0, 0x10 beqz t1,return # -> yes + mfc0 t0,CP0_STATUS - mfc0 t0,CP0_STATUS # delay slot lw t1,need_resched ori t0,0x1f # enable irqs xori t0,0x1e bnez t1,reschedule - mtc0 t0,CP0_STATUS # delay slot + mtc0 t0,CP0_STATUS - lw s0,current + lw s0,current_set lw t0,task lw a0,blocked(s0) + beq s0,t0,return # task[0] cannot have signals - # save blocked in a0 for - # signal handling - lw t0,signal(s0) # delay slot + lw t0,signal(s0) # save blocked in a0 for signals + nor t1,zero,a0 and t1,t0,t1 beqz t1,return - nop + nop jal do_signal - move a1,sp # delay slot + move a1,sp .set noat EXPORT(return) RESTORE_ALL - ERET + eret .set at /* - * Beware: timer_interrupt, interrupt, fast_interrupt and bad_interrupt - * have unusual calling conventions to speedup the mess. + * Beware: interrupt, fast_interrupt and bad_interrupt have unusual + * calling conventions to speedup the mess. * * a0 - interrupt number * s2 - destroyed * return values: * v0 - return routine - * - * The timer interrupt is handled specially to insure that the jiffies - * variable is updated at all times. Specifically, the timer interrupt is - * just like the complete handlers except that it is invoked with interrupts - * disabled and should never re-enable them. If other interrupts were - * allowed to be processed while the timer interrupt is active, then the - * other interrupts would have to avoid using the jiffies variable for delay - * and interval timing operations to avoid hanging the system. 
*/ .text .set at .align 5 -NESTED(timer_interrupt, FR_SIZE, sp) - move s2,ra - jal do_IRQ - move a1,sp # delay slot - .set reorder - la v0,ret_from_sys_call - jr s2 - .set noreorder - END(timer_interrupt) - - .align 5 -NESTED(interrupt, FR_SIZE, sp) +NESTED(interrupt, PT_SIZE, sp) move s2,ra mfc0 t0,CP0_STATUS # enable IRQs ori t0,0x1f xori t0,0x1e mtc0 t0,CP0_STATUS + jal do_IRQ - move a1,sp # delay slot + move a1,sp + mfc0 t0,CP0_STATUS # disable IRQs ori t0,1 xori t0,1 @@ -159,10 +144,12 @@ NESTED(interrupt, FR_SIZE, sp) END(interrupt) .align 5 -NESTED(fast_interrupt, FR_SIZE, sp) - .set reorder +NESTED(fast_interrupt, PT_SIZE, sp) move s2,ra jal do_fast_IRQ + nop + + .set reorder la v0,return jr s2 .set noreorder @@ -172,12 +159,11 @@ NESTED(fast_interrupt, FR_SIZE, sp) * Don't return & unblock the pic */ LEAF(bad_interrupt) - .set reorder lw t0,%lo(intr_count)(s3) subu t0,1 - .set noreorder + j return - sw t0,%lo(intr_count)(s3) # delay slot + sw t0,%lo(intr_count)(s3) END(bad_interrupt) .text @@ -191,8 +177,9 @@ LEAF(spurious_interrupt) lw t0,%lo(spurious_count)(t1) la v0,return addiu t0,1 + jr ra - sw t0,%lo(spurious_count)(t1) + sw t0,%lo(spurious_count)(t1) END(spurious_interrupt) /* @@ -200,41 +187,28 @@ LEAF(spurious_interrupt) * special handlers. If you didn't know yet - I *like* playing games with * the C preprocessor ... */ -#define __BUILD_clear_none(exception) \ - REG_S sp,FR_ORIG_REG2(sp); /* sp < 0 */ -#define __BUILD_clear_sys(exception) \ - REG_S v0,FR_ORIG_REG2(sp); \ - REG_S a3,FR_ORIG_REG7(sp); +#define __BUILD_clear_none(exception) #define __BUILD_clear_fpe(exception) \ - REG_S sp,FR_ORIG_REG2(sp); /* sp < 0 */ \ cfc1 a1,fcr31; \ li a2,~(0x3f<<12); \ and a2,a1; \ ctc1 a2,fcr31; -#define __BUILD_clear_watch(exception) \ - REG_S sp,FR_ORIG_REG2(sp); /* sp < 0 */ \ - mtc0 zero,CP0_WATCHLO; \ - mtc0 zero,CP0_WATCHHI #define __BUILD_clear_ade(exception) \ - REG_S sp,FR_ORIG_REG2(sp); /* sp < 0 */ \ MFC0 t0,CP0_BADVADDR; \ - REG_S t0,FR_BADVADDR(sp); + REG_S t0,PT_BVADDR(sp); #define __BUILD_silent(exception) -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) -#define fmt "Got %s at %08x.\n" -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) + #define fmt "Got %s at %016Lx.\n" -#endif + #define __BUILD_verbose(exception) \ la a1,8f; \ TEXT (#exception); \ - REG_L a2,FR_EPC(sp); \ + REG_L a2,PT_EPC(sp); \ PRINT(fmt) #define __BUILD_count(exception) \ .set reorder; \ lw t0,exception_count_##exception; \ - addiu t0,1; \ + addiu t0, 1; \ sw t0,exception_count_##exception; \ .set noreorder; \ .data; \ @@ -244,18 +218,19 @@ EXPORT(exception_count_##exception); \ #define BUILD_HANDLER(exception,handler,clear,verbose) \ .text; \ .align 5; \ - NESTED(handle_##exception, FR_SIZE, sp); \ + NESTED(handle_##exception, PT_SIZE, sp); \ .set noat; \ SAVE_ALL; \ __BUILD_clear_##clear(exception); \ STI; \ .set at; \ __BUILD_##verbose(exception); \ - REG_S sp,FR_ORIG_REG2(sp); /* not a sys call */ \ + li t0,-1; /* not a sys call */ \ + REG_S t0,PT_OR2(sp); \ jal do_##handler; \ - move a0,sp; /* delay slot */ \ + move a0,sp; \ j ret_from_sys_call; \ - nop; /* delay slot */ \ + nop; \ END(handle_##exception) BUILD_HANDLER(adel,ade,ade,silent) /* #4 */ @@ -270,7 +245,7 @@ EXPORT(exception_count_##exception); \ BUILD_HANDLER(tr,tr,none,silent) /* #13 */ BUILD_HANDLER(vcei,vcei,none,verbose) /* #14 */ BUILD_HANDLER(fpe,fpe,fpe,silent) /* #15 */ - BUILD_HANDLER(watch,watch,watch,verbose) /* #23 */ + BUILD_HANDLER(watch,watch,none,verbose) /* #23 */ 
BUILD_HANDLER(vced,vced,none,verbose) /* #31 */ BUILD_HANDLER(reserved,reserved,none,verbose) /* others */ @@ -295,32 +270,43 @@ EXPORT(IRQ_vectors) .data .align PTRLOG EXPORT(sys_call_table) - /* - * Reserved space for all the SVR4, SVR, BSD43 and POSIX - * flavoured syscalls. - */ - .space (__NR_Linux)*PTRSIZE - - /* - * Linux flavoured syscalls. - */ #define SYS(call, narg) PTR call + + /* Reserved space for all SVR4 syscalls. */ + .space (1000)*PTRSIZE + +#ifdef CONFIG_BINFMT_IRIX + /* 32bit IRIX5 system calls. */ +#include "irix5sys.h" +#else + .space (1000)*PTRSIZE /* No IRIX syscalls */ +#endif + + /* Reserved space for all the BSD43 and POSIX syscalls. */ + .space (2000)*PTRSIZE + + /* Linux flavoured syscalls. */ #include "syscalls.h" /* * Number of arguments of each syscall - * FIXME: This table contains huge empty areas wasting memory. */ EXPORT(sys_narg_table) - /* - * Reserved space for all the SVR4, SVR, BSD43 and POSIX - * flavoured syscalls. - */ - .space (__NR_Linux) - - /* - * Linux flavoured syscalls. - */ #undef SYS #define SYS(call, narg) .byte narg + + /* Reserved space for all SVR4 flavoured syscalls. */ + .space (1000) + +#ifdef CONFIG_BINFMT_IRIX + /* 32bit IRIX5 system calls. */ +#include "irix5sys.h" +#else + .space (1000) /* No IRIX syscalls */ +#endif + + /* Reserved space for all the BSD43 and POSIX syscalls. */ + .space (2000) + + /* Linux flavoured syscalls. */ #include "syscalls.h" diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S index 9b948a845..9bc35400b 100644 --- a/arch/mips/kernel/gdb-low.S +++ b/arch/mips/kernel/gdb-low.S @@ -9,6 +9,7 @@ #include <linux/sys.h> #include <asm/asm.h> +#include <asm/segment.h> #include <asm/mipsregs.h> #include <asm/mipsconfig.h> #include <asm/regdef.h> @@ -33,7 +34,8 @@ */ lui k1,%hi(kernelsp) lw k1,%lo(kernelsp)(k1) -1: move k0,sp +1: + move k0,sp subu sp,k1,GDB_FR_SIZE sw k0,GDB_FR_REG29(sp) sw v0,GDB_FR_REG2(sp) @@ -100,8 +102,9 @@ mfc0 v0,CP0_STATUS /* check if the FPU is enabled */ srl v0,v0,16 andi v0,v0,(ST0_CU1 >> 16) + beqz v0,2f /* disabled, skip */ - nop + nop swc1 $0,GDB_FR_FPR0(sp) swc1 $1,GDB_FR_FPR1(sp) @@ -149,7 +152,8 @@ * current stack frame ptr */ -2: sw sp,GDB_FR_FRP(sp) +2: + sw sp,GDB_FR_FRP(sp) /* * CP0 registers (R4000/R4400 unused registers skipped) @@ -179,8 +183,9 @@ */ move a0,sp + jal handle_exception - nop + nop /* * restore all writable registers, in reverse order @@ -207,8 +212,9 @@ mfc0 v0,CP0_STATUS /* check if the FPU is enabled */ srl v0,v0,16 andi v0,v0,(ST0_CU1 >> 16) + beqz v0,3f /* disabled, skip */ - nop + nop lwc1 $31,GDB_FR_FPR31(sp) lwc1 $30,GDB_FR_FPR30(sp) @@ -247,7 +253,8 @@ * Now the CP0 and integer registers */ -3: mfc0 t0,CP0_STATUS +3: + mfc0 t0,CP0_STATUS ori t0,0x1f xori t0,0x1f mtc0 t0,CP0_STATUS @@ -292,7 +299,7 @@ lw $1,GDB_FR_REG1(sp) lw sp,GDB_FR_REG29(sp) /* Deallocate stack */ - ERET + eret .set at .set reorder END(trap_low) diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c index 9a3240152..13bf353ff 100644 --- a/arch/mips/kernel/gdb-stub.c +++ b/arch/mips/kernel/gdb-stub.c @@ -68,10 +68,10 @@ #include <linux/signal.h> #include <linux/kernel.h> -#include <asm/addrspace.h> #include <asm/asm.h> #include <asm/mipsregs.h> -#include <asm/cache.h> +#include <asm/segment.h> +#include <asm/cachectl.h> #include <asm/system.h> #include <asm/gdb-stub.h> @@ -326,7 +326,10 @@ static struct hard_trap_info void set_debug_traps(void) { struct hard_trap_info *ht; + unsigned long flags; + unsigned char c; + save_flags(flags); cli(); for 
(ht = hard_trap_info; ht->tt && ht->signo; ht++) set_except_vector(ht->tt, trap_low); @@ -334,9 +337,14 @@ void set_debug_traps(void) * In case GDB is started before us, ack any packets * (presumably "$?#xx") sitting there. */ + while((c = getDebugChar()) != '$'); + while((c = getDebugChar()) != '#'); + c = getDebugChar(); /* eat first csum byte */ + c = getDebugChar(); /* eat second csum byte */ + putDebugChar('+'); /* ack it */ - putDebugChar ('+'); initialized = 1; + restore_flags(flags); breakpoint(); } @@ -605,7 +613,7 @@ void handle_exception (struct gdb_regs *regs) * NB: We flush both caches, just to be sure... */ - cacheflush((void *)KSEG0, KSEG1-KSEG0, CF_BCACHE|CF_ALL); + flush_cache_all(); return; /* NOTREACHED */ break; diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 82de12ff5..fa73c95bb 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -4,439 +4,621 @@ * Copyright (C) 1994, 1995 Waldorf Electronics, 1996 Paul M. Antoine * Written by Ralf Baechle and Andreas Busse * Modified for DECStation and hence R3000 support by Paul M. Antoine - * Additional R3000 support by Didier Frick <dfrick@dial.eunet.ch> - * for ACN S.A, Copyright (C) 1996 by ACN S.A + * Further modifications by David S. Miller * * Head.S contains the MIPS exception handler and startup code. - * - * FIXME: Note that the #ifdef's for R4X00 assume R3000 is the #else - * case, which is a little naughty. We also do NOT need the - * dec_entry goo at the begining of all this - PMA - * FIXME: This #ifdef stuff is ugly and I should move the tlb/exception - * handler code out into some other file - Ralf - * Take the zillions of (_MIPS_ISA == _MIPS_ISA_MIPSx) as a temporary - * solution. I know how they look ... */ -#include <linux/config.h> /* For the DECstation hacks */ +#include <linux/config.h> #include <linux/tasks.h> -#include <asm/addrspace.h> #include <asm/asm.h> +#include <asm/offset.h> #include <asm/processor.h> #include <asm/regdef.h> -#include <asm/cache.h> +#include <asm/segment.h> +#include <asm/cachectl.h> #include <asm/mipsregs.h> #include <asm/mipsconfig.h> #include <asm/stackframe.h> #include <asm/bootinfo.h> - -#define PAGE_SIZE 0x1000 - -/* - * FIXME: I still think the following should be in an include file (see - * also the reference in arch/mips/mips1/r3000.S - PMA - */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) -#define MODE_GLOBAL 0x0100 /* shared for all processes */ -#define MODE_ALIAS 0x00e0 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) -#define MODE_GLOBAL 0x0001 /* shared for all processes */ -#define MODE_ALIAS 0x0016 /* uncachable */ -#endif - -/* - * The two symbols begin_except and end_except mark the range that is copied - * to KSEG0 on startup. - */ -EXPORT(begin_except) - .text +#include <asm/cpu.h> + + .text + /* + * Reserved space for exception handlers. + * Necessary for machines which link their kernels at KSEG0. + */ + .fill 512 /* * This is space for the interrupt handlers. * After trap_init() they are located at virtual address KSEG0. * - * For some machine where the kernel doesn't get directly loaded to KSEG0 - * the exceptionhandler get copied to KSEG0. They therefore must be - * relocatable code. + * These handlers much be written in a relocatable manner + * because based upon the cpu type an arbitrary one of the + * following pieces of code will be copied to the KSEG0 + * vector location. 
*/ - /* - * TLB refill, EXL == 0 - */ - .set noreorder - .set noat - LEAF(except_vec0) -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - mfc0 k1,CP0_CONTEXT - nop - lw k0,(k1) # May cause another exception - mfc0 k1,CP0_EPC # Get the return address - srl k0,12 # Convert to EntryLo format - mtc0 k0,CP0_ENTRYLO0 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) - .set mips3 - dmfc0 k1,CP0_CONTEXT - dsra k1,1 - lwu k0,(k1) # May cause another exception - lwu k1,4(k1) - dsrl k0,6 # Convert to EntryLo format - dsrl k1,6 # Convert to EntryLo format - dmtc0 k0,CP0_ENTRYLO0 - dmtc0 k1,CP0_ENTRYLO1 -#endif -#ifndef CONFIG_OPTIMIZE_R4600 - nop # Needed for R4[04]00 pipeline -#endif - tlbwr - nop # Needed for R4[04]00 pipeline - nop -#ifndef CONFIG_OPTIMIZE_R4600 - nop -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - jr k1 - rfe -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) - eret - /* - * Partial workaround for R4000 bug. For explanation see - * MIPS docs. Note that this that obscure that it wont - * almost never happen. Well, but Mips writes about it's bugs. - */ - nop - eret -#endif - END(except_vec0) - -/******************************************************************************/ - - /* - * XTLB refill, EXL == 0 - * Should never be reached on R4000. - */ - .org except_vec0+0x80 - NESTED(except_vec1, 0, sp) - .set noat - /* - * Register saving is delayed as long as we don't know - * which registers really need to be saved. - */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - mfc0 k1,CP0_CONTEXT - nop - lw k0,(k1) # May cause another exception - mfc0 k1,CP0_EPC # Get the return address - srl k0,12 # Convert to EntryLo format - mtc0 k0,CP0_ENTRYLO0 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) - mfc0 k1,CP0_CONTEXT - dsra k1,1 - lwu k0,(k1) # May cause another exception - lwu k1,4(k1) - dsrl k0,6 # Convert to EntryLo format - dsrl k1,6 # Convert to EntryLo format - dmtc0 k0,CP0_ENTRYLO0 - dmtc0 k1,CP0_ENTRYLO1 -#endif - nop # Needed for R4[04]00 pipeline - tlbwr - nop # Needed for R4[04]00 pipeline - nop -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - jr k1 - rfe -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) - nop - eret -#endif - /* - * Partial workaround for R4000 bug. For explanation see - * MIPS docs. Note that this that obscure that it wont - * almost never happen. Well, but Mips writes about it's bugs. - */ - nop - eret - END(except_vec1) - -/******************************************************************************/ - - /* - * Cache Error - */ - .org except_vec1+0x80 - LEAF(except_vec2) -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - /* - * On the R3000, this is the "Uncached TLB Miss" handler. - */ - j except_vec0 - nop -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS5) - /* - * Famous last words: unreached - */ - mfc0 a1,CP0_ERROREPC - PRINT("Cache error exception: c0_errorepc == %08x\n") -1: j 1b - nop -#endif - END(except_vec2) - -/******************************************************************************/ - - /* - * General exception vector. 
- */ - .org except_vec2+0x80 - NESTED(except_vec3, 0, sp) - .set noat - /* - * Register saving is delayed as long as we don't know - * which registers really need to be saved. - * Except for k1 which MUST be preserved to allow - * nested TLB refill exceptions on the R3000. - */ - mfc0 k1,CP0_CAUSE - la k0,exception_handlers - /* - * Next lines assumes that the used CPU type has max. - * 32 different types of exceptions. We might use this - * to implement software exceptions in the future. - */ - andi k1,0x7c - addu k0,k1 - lw k0,(k0) - NOP - jr k0 - nop - END(except_vec3) - .set at - -EXPORT(end_except) - -/******************************************************************************/ + /* TLB refill, EXL == 0, R4xx0, non-R4600 version */ + .set noreorder + .set noat + LEAF(except_vec0_r4000) + .set mips3 + mfc0 k0, CP0_BADVADDR # Get faulting address + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) # get current task ptr + srl k0, k0, 22 # get pgd only bits + lw k1, THREAD_PGDIR(k1) # get task pg_dir + sll k0, k0, 2 + addu k1, k1, k0 # add in pgd offset + mfc0 k0, CP0_CONTEXT # get context reg + lw k1, (k1) + srl k0, k0, 1 # get pte offset + and k0, k0, 0xff8 + addu k1, k1, k0 # add in offset + lw k0, 0(k1) # get even pte + lw k1, 4(k1) # get odd pte + srl k0, k0, 6 # convert to entrylo0 + mtc0 k0, CP0_ENTRYLO0 # load it + srl k1, k1, 6 # convert to entrylo1 + mtc0 k1, CP0_ENTRYLO1 # load it + b 1f + tlbwr # write random tlb entry +1: + nop + eret # return from trap + END(except_vec0_r4000) + + /* TLB refill, EXL == 0, R4600 version */ + LEAF(except_vec0_r4600) + .set mips3 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xff8 + addu k1, k1, k0 + lw k0, 0(k1) + lw k1, 4(k1) + srl k0, k0, 6 + mtc0 k0, CP0_ENTRYLO0 + srl k1, k1, 6 + mtc0 k1, CP0_ENTRYLO1 + nop + tlbwr + nop + eret + END(except_vec0_r4600) + + /* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version */ + LEAF(except_vec0_r45k_bvahwbug) + .set mips3 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xff8 + addu k1, k1, k0 + lw k0, 0(k1) + lw k1, 4(k1) + tlbp + srl k0, k0, 6 + mtc0 k0, CP0_ENTRYLO0 + srl k1, k1, 6 + mfc0 k0, CP0_INDEX + mtc0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r45k_bvahwbug) + +#ifdef __SMP__ + /* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version */ + LEAF(except_vec0_r4k_mphwbug) + .set mips3 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xff8 + addu k1, k1, k0 + lw k0, 0(k1) + lw k1, 4(k1) + tlbp + srl k0, k0, 6 + mtc0 k0, CP0_ENTRYLO0 + srl k1, k1, 6 + mfc0 k0, CP0_INDEX + mtc0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_mphwbug) +#endif + + /* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version */ + LEAF(except_vec0_r4k_250MHZhwbug) + .set mips3 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xff8 + addu k1, 
k1, k0 + lw k0, 0(k1) + lw k1, 4(k1) + srl k0, k0, 6 + mtc0 zero, CP0_ENTRYLO0 + mtc0 k0, CP0_ENTRYLO0 + srl k1, k1, 6 + mtc0 zero, CP0_ENTRYLO1 + mtc0 k1, CP0_ENTRYLO1 + b 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_250MHZhwbug) + +#ifdef __SMP__ + /* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version */ + LEAF(except_vec0_r4k_MP250MHZhwbug) + .set mips3 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xff8 + addu k1, k1, k0 + lw k0, 0(k1) + lw k1, 4(k1) + tlbp + srl k0, k0, 6 + mtc0 zero, CP0_ENTRYLO0 + mtc0 k0, CP0_ENTRYLO0 + mfc0 k0, CP0_INDEX + srl k1, k1, 6 + mtc0 zero, CP0_ENTRYLO1 + mtc0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_MP250MHZhwbug) +#endif + + /* TLB refill, EXL == 0, R[23]00 version */ + LEAF(except_vec0_r2300) + .set mips1 + mfc0 k0, CP0_BADVADDR + lui k1, %hi(current_set) + lw k1, %lo(current_set)(k1) + srl k0, k0, 22 + lw k1, THREAD_PGDIR(k1) + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + srl k0, k0, 1 + and k0, k0, 0xffc + addu k1, k1, k0 + lw k0, (k1) + srl k0, k0, 12 + mtc0 k0, CP0_ENTRYLO0 + mfc0 k1, CP0_EPC + tlbwr + nop + nop + nop + nop + jr k1 + rfe + END(except_vec0_r2300) + + + /* XTLB refill, EXL == 0, R4xx0 cpus only use this... */ + NESTED(except_vec1_generic, 0, sp) + .set noat + .set mips3 + /* Register saving is delayed as long as we don't know + * which registers really need to be saved. + */ + mfc0 k1, CP0_CONTEXT + dsra k1, 1 + lwu k0, (k1) # May cause another exception + lwu k1, 4(k1) + dsrl k0, 6 # Convert to EntryLo format + dsrl k1, 6 # Convert to EntryLo format + dmtc0 k0, CP0_ENTRYLO0 + dmtc0 k1, CP0_ENTRYLO1 + nop # Needed for R4[04]00 pipeline + tlbwr + nop # Needed for R4[04]00 pipeline + nop + nop + eret + nop /* Workaround for R4000 bug. */ + eret + END(except_vec1_generic) + + /* Cache Error */ + LEAF(except_vec2_generic) + /* Famous last words: unreached */ + mfc0 a1,CP0_ERROREPC + PRINT("Cache error exception: c0_errorepc == %08x\n") +1: + j 1b + nop + END(except_vec2_generic) + + /* General exception vector R4000 version. */ + NESTED(except_vec3_r4000, 0, sp) + .set noat + mfc0 k1, CP0_CAUSE + + /* XXX Have to check for VCE's _before_ we do a load or store. */ + + la k0, exception_handlers + andi k1, k1, 0x7c + addu k0, k0, k1 + lw k0, (k0) + nop + jr k0 + nop + END(except_vec3_r4000) + .set at + + /* General exception vector. */ + NESTED(except_vec3_generic, 0, sp) + .set noat + mfc0 k1, CP0_CAUSE + la k0, exception_handlers + andi k1, k1, 0x7c + addu k0, k0, k1 + lw k0, (k0) + nop + jr k0 + nop + END(except_vec3_generic) + .set at /* - * Kernel entry + * Kernel entry point */ - .set noreorder - - NESTED(kernel_entry, 16, sp) - /* - * The following two symbols are used for kernel profiling. - */ - EXPORT(stext) - EXPORT(_stext) - - /* - * Initialize the global pointer, if required. - */ - LOAD_GP - - /* - * First setup stack for kernel and init - */ - la sp,init_user_stack+(KERNEL_STACK_SIZE-4*SZREG) - la t0,init_kernel_stack+(KERNEL_STACK_SIZE) - LONG_S t0,kernelsp - - /* - * Clear BSS first so that there are no surprises... 
- */ - la t0,_edata - la t1,_end - sb zero,(t0) -1: addiu t0,1 -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) - /* - * Paul, this clears one word too much - Ralf - */ - bne t0,t1,1b -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS3) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS4) - bnel t0,t1,1b -#endif - sb zero,(t0) # delay slot - - /* - * Get the memory upper limit the bootloader passed to us - * in a0 - */ - sw a0,mips_memory_upper - - /* - * Get the very one tags we need early in the boot process - */ - jal bi_EarlySnarf - nop - - /* - * Initialize low level part of memory management - * First flush the TLB to make sure that we don't get a - * TLB shutdown during wire_mappings. - */ - jal tlbflush -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - nop -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - mtc0 zero,CP0_WIRED # delay slot -#endif - jal wire_mappings - nop - - /* - * Disable coprocessors - */ - mfc0 t0,CP0_STATUS - li t1,~(ST0_CU1|ST0_CU2|ST0_CU3) - and t0,t1 - li t1,ST0_CU0 - or t0,ST0_CU0 - mtc0 t0,CP0_STATUS - -1: jal start_kernel - nop # delay slot - /* - * Main should never return here, but - * just in case, we know what happens. - */ - b 1b - nop # delay slot - END(kernel_entry) +NESTED(kernel_entry, 16, sp) + .set noreorder + /* The following two symbols are used for kernel profiling. */ + EXPORT(stext) + EXPORT(_stext) + + /* Determine which MIPS variant we are running on. */ + b cpu_probe + nop + +probe_done: + +#ifndef CONFIG_SGI + /* Get the memory upper limit the bootloader passed to us + * in a0 + */ + la t0, mips_memory_upper + nop + sw a0, (t0) +#else + /* On SGI's the firmware/bootloader passes argc/argp/envp + * to us as arguments. But clear bss first because + * the romvec and other important info is stored there + * by prom_init(). + */ + la t0, _edata + sw zero, (t0) + la t1, (_end - 4) +1: + addiu t0, 4 + bne t0, t1, 1b + sw zero, (t0) + + jal prom_init /* prom_init(argc, argv, envp); */ + nop +#endif + /* Get the very one tags we need early in the boot process */ + nop + jal bi_EarlySnarf + nop +#ifndef CONFIG_SGI + /* Clear BSS first so that there are no surprises... */ + la t0, _edata + la t1, (_end - 4) + sw zero, (t0) +1: + addiu t0, 4 + bne t0, t1, 1b + sw zero, (t0) + nop +#endif + /* + * Determine the mmu/cache attached to this machine, + * then flush the tlb and caches. On the r4xx0 + * variants this also sets CP0_WIRED to zero. + */ + jal loadmmu + nop + + la t2, mips_cputype + lw t4, (t2) + li t1, CPU_R2000 + li t2, CPU_R3000 + li t3, CPU_R3000A + beq t4,t1,2f + nop + + beq t4,t2,2f + nop + + beq t4,t3,2f + nop + + jal wire_mappings_r4xx0 + nop + + b 9f + nop -/* - * wire_mappings - used to map hardware registers - */ - LEAF(wire_mappings) - /* - * Get base address of map0 table for the - * the board we're running on - */ - lw t1,mips_machgroup # mips_machgroup is set by - # bi_EarlySnarf() - la t0,map0table - sll t1,PTRLOG # machgroup used as index - addu t0,t1 - lw t1,mips_machtype # mips_machtype is set by - # bi_EarlySnarf() - lw t0,(t0) # load table @ for the group - sll t1,PTRLOG # machtype used as index - addu t0,t1 - lw t0,(t0) # load table @ for the box - nop - /* - * Get number of wired TLB entries and - * loop over selected map0 table. 
- */ - lw t1,(t0) # number of wired TLB entries - move t2,zero # TLB entry counter - addiu t3,t1,1 # wire one additional entry - beqz t1,2f # null, exit -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - nop -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - mtc0 t3,CP0_WIRED # delay slot -#endif - addiu t0,8 -1: lw t4,24(t0) # PageMask - ld t5,0(t0) # entryHi - ld t6,8(t0) # entryLo0 -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - ld t7,16(t0) # entryLo1 -#endif - addiu t2,1 # increment ctr - mtc0 t2,CP0_INDEX # set TLB entry -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - nop - mtc0 t5,CP0_ENTRYHI - nop - mtc0 t6,CP0_ENTRYLO0 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - mtc0 t4,CP0_PAGEMASK - dmtc0 t5,CP0_ENTRYHI - dmtc0 t6,CP0_ENTRYLO0 - dmtc0 t7,CP0_ENTRYLO1 -#endif - addiu t0,32 - bne t1,t2,1b # next TLB entry - tlbwi # delay slot - - /* - * We use only 4k pages. Therefore the PageMask register - * is expected to be setup for 4k pages. - */ 2: -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - li t0,PM_4K - mtc0 t0,CP0_PAGEMASK + jal wire_mappings_r3000 + nop + + /* + * Stack for kernel and init + */ +9: la sp, init_user_stack+(KERNEL_STACK_SIZE-4*SZREG) + la t0, init_kernel_stack+(KERNEL_STACK_SIZE) + sw t0, kernelsp + + /* Disable coprocessors */ + mfc0 t0, CP0_STATUS + li t1, ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX) + and t0, t1 + li t1, ST0_CU0 + or t0, ST0_CU0 + mtc0 t0, CP0_STATUS + +1: jal start_kernel + nop + /* + * Main should never return here, but + * just in case, we know what happens. + */ + b 1b + nop # delay slot + END(kernel_entry) -#endif - /* - * Now map the pagetables - */ - mtc0 zero,CP0_INDEX - la t0,TLB_ROOT -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - mtc0 t0,CP0_ENTRYHI - nop -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - dmtc0 t0,CP0_ENTRYHI -#endif - la t0,swapper_pg_dir-KSEG1 -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - srl t0,12 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - srl t0,6 -#endif - ori t0,(MODE_ALIAS|MODE_GLOBAL) # uncachable, dirty, valid -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - mtc0 t0,CP0_ENTRYLO0 -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - dmtc0 t0,CP0_ENTRYLO0 - li t0,MODE_GLOBAL - dmtc0 t0,CP0_ENTRYLO1 -#endif - nop - tlbwi # delayed - - /* - * Load the context register with a value that allows - * it to be used as fast as possible in tlb exceptions. - * It is expected that this register's content will - * NEVER be changed. - */ - li t0,TLBMAP -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - srl t0,1 # this is a guess! - mtc0 t0,CP0_CONTEXT -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - dsll t0,1 - dmtc0 t0,CP0_CONTEXT -#endif - jr ra # delay slot - nop - END(wire_mappings) +/* + * wire_mappings - used to map hardware registers, r4xx0 version. + */ +LEAF(wire_mappings_r4xx0) + mtc0 zero, CP0_WIRED + nop + nop + nop + j ra + nop + END(wire_mappings_r4xx0) - .data +/* + * R3000 version of wire_mappings. 
+ */ +LEAF(wire_mappings_r3000) + /* + * Get base address of map0 table for the + * the board we're running on + */ + lw t1, mips_machtype + la t0, map0table + sll t1, PTRLOG # machtype used as index + addu t0, t1 + lw t0, (t0) # get base address + nop + /* Get number of wired TLB entries and + * loop over selected map0 table. + */ + lw t1, (t0) # number of wired TLB entries + move t2, zero # TLB entry counter + addiu t3, t1, 1 # wire one additional entry + beqz t1, 2f # null, exit + nop + + addiu t0, 8 +1: + lw t4, 24(t0) # PageMask + ld t5, 0(t0) # entryHi + ld t6, 8(t0) # entryLo0 + addiu t2, 1 # increment ctr + mtc0 t2, CP0_INDEX # set TLB entry + nop + mtc0 t5, CP0_ENTRYHI + nop + mtc0 t6, CP0_ENTRYLO0 + addiu t0, 32 + bne t1, t2, 1b # next TLB entry + tlbwi + + /* We use only 4k pages. Therefore the PageMask register + * is expected to be setup for 4k pages. + */ +2: + /* Now map the pagetables */ + mtc0 zero, CP0_INDEX + la t0, TLB_ROOT + mtc0 t0, CP0_ENTRYHI + nop + la t0, swapper_pg_dir + srl t0, 12 + ori t0, (0x00e0|0x0100) # uncachable, dirty, valid + mtc0 t0, CP0_ENTRYLO0 + nop + tlbwi # delayed + + /* Load the context register with zero. To see why, look + * at how the tlb refill code above works. + */ + mtc0 zero, CP0_CONTEXT + + jr ra + nop + END(wire_mappings_r3000) + + /* CPU type probing code, called at Kernel entry. */ + LEAF(cpu_probe) + mfc0 t0, CP0_PRID + la t3, mips_cputype + andi t1, t0, 0xff00 + li t2, PRID_IMP_R2000 + bne t1, t2, 1f + andi t0, 0x00ff + + li t2, CPU_R2000 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R3000 + bne t1, t2, 1f + nop + + li t2, PRID_REV_R3000A + bne t0, t2, 9f + nop + + li t2, CPU_R3000A + b probe_done + sw t2, (t3) +9: + li t2, CPU_R3000 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R6000 + bne t1, t2, 1f + nop + + li t2, CPU_R6000 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R4000 + bne t1, t2, 1f + nop + + li t2, PRID_REV_R4400 + bne t0, t2, 9f + nop + + li t2, CPU_R4400SC + b probe_done + sw t2, (t3) +9: + li t2, CPU_R4000SC + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R6000A + bne t1, t2, 1f + nop + + li t2, CPU_R6000A + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R10000 + bne t1, t2, 1f + nop + + li t2, CPU_R10000 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R8000 + bne t1, t2, 1f + nop + + li t2, CPU_R8000 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R4600 + bne t1, t2, 1f + nop + + li t2, CPU_R4600 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R4700 + bne t1, t2, 1f + nop + + li t2, CPU_R4700 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R4650 + bne t1, t2, 1f + nop + + li t2, CPU_R4650 + b probe_done + sw t2, (t3) +1: + li t2, PRID_IMP_R5000 + bne t1, t2, 1f + nop + + li t2, CPU_R5000 + b probe_done + sw t2, (t3) +1: + li t2, CPU_UNKNOWN + sw t2, (t3) + + b probe_done + nop + END(cpu_probe) + + .data /* * Build an entry for table of wired entries */ @@ -465,42 +647,17 @@ EXPORT(end_except) * Add your own stuff here but don't forget to define your * target system in bootinfo.h */ -/* First indirection level on the 'group' */ -map0table: PTR map0table_unknown # machgroup = unknown - PTR map0table_jazz # machgroup = JAZZ - PTR map0table_dec # machgroup = DEC - PTR map0table_arc # machgroup = ARC - PTR map0table_sni_rm # machgroup = SNI_RM - PTR map0table_acn # machgroup = ACN - .word 0 # pad - -/* table for group 'unknown' */ -map0table_unknown: PTR map0_dummy # machtype = unknown - .word 0 # pad - -/* table for group 'Jazz' */ -map0table_jazz: PTR map0_pica61 # Acer Pica-61 - PTR 
map0_magnum4000 # MIPS Magnum 4000PC (RC4030) - PTR map0_magnum4000 # Olivetti M700 (*same* table) - .word 0 # pad - -/* table for group 'Dec' */ -map0table_dec: PTR map0_dummy # DEC Personal DECStation 5000/2x (for now) - .word 0 # pad -/* table for group 'ARC' */ -map0table_arc: PTR map0_rpc # Deskstation rPC44 +map0table: PTR map0_dummy # machtype = unknown + PTR map0_rpc # Deskstation rPC44 PTR map0_tyne # Deskstation Tyne + PTR map0_pica61 # Acer Pica-61 + PTR map0_magnum4000 # MIPS Magnum 4000PC (RC4030) + PTR map0_dummy + PTR map0_dummy # DEC Personal DECStation 5000/2x (for now) + PTR map0_sni_rm200_pci # SNI RM200 PCI + PTR map0_dummy # SGI INDY -/* table for group 'SNI_RM' */ -map0table_sni_rm: PTR map0_sni_rm200_pci # SNI RM200 PCI - .word 0 - -/* table for group 'ACN' */ -map0table_acn: PTR map0_dummy # ACN mips board - .word 0 - -/* dummy table */ map0_dummy: .word 0 # 0 entries .align 3 @@ -513,8 +670,8 @@ map0_dummy: .word 0 # 0 entries map0_rpc: .word 2 # no. of wired TLB entries .word 0 # pad for alignment -MAPDATA(0xffffffffe0000000, 0x02800017, 0x00000011, PM_16M) # ISA Memory space -MAPDATA(0xffffffffe2000000, 0x02c00017, 0x00000011, PM_64K) # ISA I/O Space +MAPDATA(0xffffffffe0000000, 0x02800017, 0x00000001, PM_16M) # ISA Memory space +MAPDATA(0xffffffffe2000000, 0x02c00017, 0x00000001, PM_64K) # ISA I/O Space /* * Initial mappings for Deskstation Tyne boards. @@ -522,18 +679,23 @@ MAPDATA(0xffffffffe2000000, 0x02c00017, 0x00000011, PM_64K) # ISA I/O Space map0_tyne: .word 2 # no. of wired TLB entries .word 0 # pad for alignment -MAPDATA(0xffffffffe0000000, 0x04020017, 0x00000011, PM_1M) # VESA DMA cache +MAPDATA(0xffffffffe0000000, 0x04020017, 0x00000001, PM_1M) # VESA DMA cache MAPDATA(0xffffffffe2000000, 0x24000017, 0x04000017, PM_16M) # VESA I/O and memory space /* * Initial mapping for ACER PICA-61 boards. + * FIXME: These are rather preliminary since many drivers, such as serial, + * parallel, scsi and ethernet need some changes to distinguish between "local" + * (built-in) and "optional" (ISA/PCI) I/O hardware. Local video ram is mapped + * to the same location as the bios maps it to. Console driver has been changed + * accordingly (new video type: VIDEO_TYPE_PICA_S3). * FIXME: Remove or merge some of the mappings. */ map0_pica61: .word 7 # no. wired TLB entries .word 0 # dummy -MAPDATA(0xffffffffe0000000, 0x02000017, 0x00000011, PM_64K) # Local I/O space -MAPDATA(0xffffffffe0100000, 0x03c00017, 0x00000011, PM_4K) # Interrupt source register +MAPDATA(0xffffffffe0000000, 0x02000017, 0x00000001, PM_64K) # Local I/O space +MAPDATA(0xffffffffe0100000, 0x03c00017, 0x00000001, PM_4K) # Interrupt source register MAPDATA(0xffffffffe0200000, 0x01800017, 0x01804017, PM_1M) # Local video control MAPDATA(0xffffffffe0400000, 0x01808017, 0x0180c017, PM_1M) # Extended video control MAPDATA(0xffffffffe0800000, 0x01000017, 0x01010017, PM_4M) # Local video memory (BIOS mapping) @@ -542,15 +704,16 @@ MAPDATA(0xffffffffffffe000, 0x00000001, 0x0001ffd7, PM_4K) # PCR (???) /* * Initial mapping for Mips Magnum 4000PC systems. + * Do you believe me now that the Acer and Mips boxes are nearly the same ? :-) * FIXME: Remove or merge some of the mappings. */ map0_magnum4000: .word 8 # no. 
wired TLB entries .word 0 # dummy -MAPDATA(0xffffffffe1000000, 0x03ffc013, 0x00000011, PM_256K) # 0 -MAPDATA(0xffffffffe0000000, 0x02000017, 0x00000011, PM_64K) # 1 local I/O -MAPDATA(0xffffffffe0100000, 0x03c00017, 0x00000011, PM_4K) # 2 IRQ source +MAPDATA(0xffffffffe1000000, 0x03ffc013, 0x00000001, PM_256K) # 0 +MAPDATA(0xffffffffe0000000, 0x02000017, 0x00000001, PM_64K) # 1 local I/O +MAPDATA(0xffffffffe0100000, 0x03c00017, 0x00000001, PM_4K) # 2 IRQ source MAPDATA(0xffffffffe0200000, 0x01800017, 0x01804017, PM_1M) # 3 local video ctrl MAPDATA(0xffffffffe0400000, 0x01808017, 0x0180c017, PM_1M) # 4 ext. video ctrl MAPDATA(0xffffffffe0800000, 0x01000017, 0x01010017, PM_4M) # 5 local video mem. @@ -567,8 +730,7 @@ map0_sni_rm200_pci: .text .org 0x1000 - .globl swapper_pg_dir -swapper_pg_dir = . + (KSEG1-KSEG0) + EXPORT(swapper_pg_dir) .org 0x2000 EXPORT(empty_bad_page) diff --git a/arch/mips/kernel/ipc.c b/arch/mips/kernel/ipc.c index 336965acf..a68a91c21 100644 --- a/arch/mips/kernel/ipc.c +++ b/arch/mips/kernel/ipc.c @@ -5,7 +5,6 @@ * have a non-standard calling sequence on the Linux/MIPS * platform. */ -#include <linux/config.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> @@ -13,6 +12,9 @@ #include <linux/msg.h> #include <linux/shm.h> +#include <asm/ipc.h> +#include <asm/uaccess.h> + /* * sys_ipc() is the de-multiplexer for the SysV IPC calls.. * @@ -20,7 +22,6 @@ */ asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth) { -#ifdef CONFIG_SYSVIPC int version; version = call >> 16; /* hack for backward compatibility */ @@ -39,7 +40,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, return -EINVAL; if ((err = verify_area (VERIFY_READ, ptr, sizeof(long)))) return err; - get_from_user(fourth.__pad, ptr); + get_user(fourth.__pad, (void **) ptr); return sys_semctl (first, second, third, fourth); } default: @@ -59,8 +60,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, return -EINVAL; if ((err = verify_area (VERIFY_READ, ptr, sizeof(tmp)))) return err; - memcpy_fromfs (&tmp,(struct ipc_kludge *) ptr, - sizeof (tmp)); + copy_from_user(&tmp,(struct ipc_kludge *) ptr, sizeof (tmp)); return sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third); } case 1: default: @@ -103,7 +103,4 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, return -EINVAL; } return -EINVAL; -#else /* CONFIG_SYSVIPC */ - return -ENOSYS; -#endif /* CONFIG_SYSVIPC */ } diff --git a/arch/mips/kernel/irix5sys.h b/arch/mips/kernel/irix5sys.h new file mode 100644 index 000000000..a20e619e3 --- /dev/null +++ b/arch/mips/kernel/irix5sys.h @@ -0,0 +1,1024 @@ +/* $Id: irix5sys.h,v 1.8 1996/07/09 08:24:17 dm Exp $ + * irix5sys.h: 32-bit IRIX5 ABI system call table. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +/* This file is being included twice - once to build a list of all + * syscalls and once to build a table of how many arguments each syscall + * accepts. Syscalls that receive a pointer to the saved registers are + * marked as having zero arguments. + */ + +/* Keys: + * V == Valid and should work as expected for most cases. 
+ * HV == Half Valid, some things will work, some likely will not + * IV == InValid, certainly will not work at all yet + * ?V == ?'ably Valid, I have not done enough looking into it + * DC == Don't Care, a rats ass we couldn't give + */ + +SYS(sys_syscall, 0) /* 1000 sysindir() V*/ +SYS(sys_exit, 1) /* 1001 exit() V*/ +SYS(sys_fork, 0) /* 1002 fork() V*/ +SYS(sys_read, 3) /* 1003 read() V*/ +SYS(sys_write, 3) /* 1004 write() V*/ +SYS(sys_open, 3) /* 1005 open() V*/ +SYS(sys_close, 1) /* 1006 close() V*/ +SYS(irix_unimp, 0) /* 1007 (XXX IRIX 4 wait) V*/ +SYS(sys_creat, 2) /* 1008 creat() V*/ +SYS(sys_link, 2) /* 1009 link() V*/ +SYS(sys_unlink, 1) /* 1010 unlink() V*/ +SYS(irix_exec, 0) /* 1011 exec() V*/ +SYS(sys_chdir, 1) /* 1012 chdir() V*/ +SYS(irix_gtime, 0) /* 1013 time() V*/ +SYS(irix_unimp, 0) /* 1014 (XXX IRIX 4 mknod) V*/ +SYS(sys_chmod, 2) /* 1015 chmod() V*/ +SYS(irix_chown, 3) /* 1016 chown() V*/ +SYS(irix_brk, 1) /* 1017 break() V*/ +SYS(irix_unimp, 0) /* 1018 (XXX IRIX 4 stat) V*/ +SYS(sys_lseek, 3) /* 1019 lseek() XXX64bit HV*/ +SYS(irix_getpid, 0) /* 1020 getpid() V*/ +SYS(irix_mount, 6) /* 1021 mount() IV*/ +SYS(sys_umount, 1) /* 1022 umount() V*/ +SYS(sys_setuid, 1) /* 1023 setuid() V*/ +SYS(irix_getuid, 0) /* 1024 getuid() V*/ +SYS(irix_stime, 1) /* 1025 stime() V*/ +SYS(irix_unimp, 4) /* 1026 XXX ptrace() IV*/ +SYS(irix_alarm, 1) /* 1027 alarm() V*/ +SYS(irix_unimp, 0) /* 1028 (XXX IRIX 4 fstat) V*/ +SYS(irix_pause, 0) /* 1029 pause() V*/ +SYS(sys_utime, 2) /* 1030 utime() V*/ +SYS(irix_unimp, 0) /* 1031 nuthin' V*/ +SYS(irix_unimp, 0) /* 1032 nobody home man... V*/ +SYS(sys_access, 2) /* 1033 access() V*/ +SYS(sys_nice, 1) /* 1034 nice() V*/ +SYS(irix_statfs, 2) /* 1035 statfs() V*/ +SYS(sys_sync, 0) /* 1036 sync() V*/ +SYS(sys_kill, 2) /* 1037 kill() V*/ +SYS(irix_fstatfs, 2) /* 1038 fstatfs() V*/ +SYS(irix_setpgrp, 1) /* 1039 setpgrp() V*/ +SYS(irix_syssgi, 0) /* 1040 syssgi() HV*/ +SYS(sys_dup, 1) /* 1041 dup() V*/ +SYS(sys_pipe, 0) /* 1042 pipe() V*/ +SYS(irix_times, 1) /* 1043 times() V*/ +SYS(irix_unimp, 0) /* 1044 XXX profil() IV*/ +SYS(irix_unimp, 0) /* 1045 XXX lock() IV*/ +SYS(sys_setgid, 1) /* 1046 setgid() V*/ +SYS(irix_getgid, 0) /* 1047 getgid() V*/ +SYS(irix_unimp, 0) /* 1048 (XXX IRIX 4 ssig) V*/ +SYS(irix_msgsys, 6) /* 1049 sys_msgsys V*/ +SYS(sys_sysmips, 4) /* 1050 sysmips() HV*/ +SYS(irix_unimp, 0) /* 1051 XXX sysacct() IV*/ +SYS(irix_shmsys, 5) /* 1052 sys_shmsys V*/ +SYS(irix_semsys, 0) /* 1053 sys_semsys V*/ +SYS(irix_ioctl, 3) /* 1054 ioctl() HV*/ +SYS(irix_uadmin, 0) /* 1055 XXX sys_uadmin() HC*/ +SYS(irix_sysmp, 0) /* 1056 sysmp() HV*/ +SYS(irix_utssys, 4) /* 1057 sys_utssys() HV*/ +SYS(irix_unimp, 0) /* 1058 nada enchilada V*/ +SYS(irix_exece, 0) /* 1059 exece() V*/ +SYS(sys_umask, 1) /* 1060 umask() V*/ +SYS(sys_chroot, 1) /* 1061 chroot() V*/ +SYS(irix_fcntl, 3) /* 1062 fcntl() ?V*/ +SYS(irix_ulimit, 2) /* 1063 ulimit() HV*/ +SYS(irix_unimp, 0) /* 1064 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1065 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1066 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1067 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1068 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1069 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1070 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1071 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1072 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1073 XXX AFS shit DC*/ +SYS(irix_unimp, 0) /* 1074 nuttin' V*/ +SYS(irix_unimp, 0) /* 1075 XXX sys_getrlimit64()IV*/ +SYS(irix_unimp, 0) /* 1076 XXX sys_setrlimit64()IV*/ 
+SYS(sys_nanosleep, 2) /* 1077 nanosleep() V*/ +SYS(irix_lseek64, 5) /* 1078 lseek64() ?V*/ +SYS(sys_rmdir, 1) /* 1079 rmdir() V*/ +SYS(sys_mkdir, 2) /* 1080 mkdir() V*/ +SYS(sys_getdents, 3) /* 1081 getdents() V*/ +SYS(irix_sginap, 1) /* 1082 sys_sginap() V*/ +SYS(irix_sgikopt, 3) /* 1083 sys_sgikopt() DC*/ +SYS(sys_sysfs, 3) /* 1084 sysfs() ?V*/ +SYS(irix_unimp, 0) /* 1085 XXX sys_getmsg() DC*/ +SYS(irix_unimp, 0) /* 1086 XXX sys_putmsg() DC*/ +SYS(irix_poll, 3) /* 1087 sys_poll() V*/ +SYS(irix_sigreturn, 0) /* 1088 sigreturn() ?V*/ +SYS(sys_accept, 3) /* 1089 accept() V*/ +SYS(sys_bind, 3) /* 1090 bind() V*/ +SYS(sys_connect, 3) /* 1091 connect() V*/ +SYS(irix_gethostid, 0) /* 1092 sys_gethostid() ?V*/ +SYS(sys_getpeername, 3) /* 1093 getpeername() V*/ +SYS(sys_getsockname, 3) /* 1094 getsockname() V*/ +SYS(sys_getsockopt, 5) /* 1095 getsockopt() V*/ +SYS(sys_listen, 2) /* 1096 listen() V*/ +SYS(sys_recv, 4) /* 1097 recv() V*/ +SYS(sys_recvfrom, 6) /* 1098 recvfrom() V*/ +SYS(sys_recvmsg, 3) /* 1099 recvmsg() V*/ +SYS(sys_select, 5) /* 1100 select() V*/ +SYS(sys_send, 4) /* 1101 send() V*/ +SYS(sys_sendmsg, 3) /* 1102 sendmsg() V*/ +SYS(sys_sendto, 6) /* 1103 sendto() V*/ +SYS(irix_sethostid, 1) /* 1104 sys_sethostid() ?V*/ +SYS(sys_setsockopt, 5) /* 1105 setsockopt() V*/ +SYS(sys_shutdown, 2) /* 1106 shutdown() ?V*/ +SYS(irix_socket, 3) /* 1107 socket() V*/ +SYS(sys_gethostname, 2) /* 1108 sys_gethostname() ?V*/ +SYS(sys_sethostname, 2) /* 1109 sethostname() ?V*/ +SYS(irix_getdomainname, 2) /* 1110 sys_getdomainname() ?V*/ +SYS(sys_setdomainname, 2) /* 1111 setdomainname() ?V*/ +SYS(sys_truncate, 2) /* 1112 truncate() V*/ +SYS(sys_ftruncate, 2) /* 1113 ftruncate() V*/ +SYS(sys_rename, 2) /* 1114 rename() V*/ +SYS(sys_symlink, 2) /* 1115 symlink() V*/ +SYS(sys_readlink, 3) /* 1116 readlink() V*/ +SYS(irix_unimp, 0) /* 1117 XXX IRIX 4 lstat() DC*/ +SYS(irix_unimp, 0) /* 1118 nothin' V*/ +SYS(irix_unimp, 0) /* 1119 XXX nfs_svc() DC*/ +SYS(irix_unimp, 0) /* 1120 XXX nfs_getfh() DC*/ +SYS(irix_unimp, 0) /* 1121 XXX async_daemon() DC*/ +SYS(irix_unimp, 0) /* 1122 XXX exportfs() DC*/ +SYS(sys_setregid, 2) /* 1123 setregid() V*/ +SYS(sys_setreuid, 2) /* 1124 setreuid() V*/ +SYS(sys_getitimer, 2) /* 1125 getitimer() V*/ +SYS(sys_setitimer, 3) /* 1126 setitimer() V*/ +SYS(irix_unimp, 1) /* 1127 XXX adjtime() IV*/ +SYS(irix_gettimeofday, 1) /* 1128 gettimeofday() V*/ +SYS(irix_unimp, 0) /* 1129 XXX sproc() IV*/ +SYS(irix_prctl, 0) /* 1130 prctl() HV*/ +SYS(irix_unimp, 0) /* 1131 XXX procblk() IV*/ +SYS(irix_unimp, 0) /* 1132 XXX sprocsp() IV*/ +SYS(irix_unimp, 0) /* 1133 XXX sgigsc() IV*/ +SYS(irix_mmap32, 6) /* 1134 mmap() XXXflags? 
?V*/ +SYS(sys_munmap, 2) /* 1135 munmap() V*/ +SYS(sys_mprotect, 3) /* 1136 mprotect() V*/ +SYS(sys_msync, 4) /* 1137 msync() V*/ +SYS(irix_madvise, 3) /* 1138 madvise() DC*/ +SYS(irix_pagelock, 3) /* 1139 pagelock() IV*/ +SYS(irix_getpagesize, 0) /* 1140 getpagesize() V*/ +SYS(irix_quotactl, 0) /* 1141 quotactl() V*/ +SYS(irix_unimp, 0) /* 1142 nobody home man V*/ +SYS(sys_getpgid, 1) /* 1143 BSD getpgrp() V*/ +SYS(irix_BSDsetpgrp, 2) /* 1143 BSD setpgrp() V*/ +SYS(sys_vhangup, 0) /* 1144 vhangup() V*/ +SYS(sys_fsync, 1) /* 1145 fsync() V*/ +SYS(sys_fchdir, 1) /* 1146 fchdir() V*/ +SYS(sys_getrlimit, 2) /* 1147 getrlimit() ?V*/ +SYS(sys_setrlimit, 2) /* 1148 setrlimit() ?V*/ +SYS(sys_cacheflush, 3) /* 1150 cacheflush() HV*/ +SYS(sys_cachectl, 3) /* 1151 cachectl() HV*/ +SYS(sys_fchown, 3) /* 1152 fchown() ?V*/ +SYS(sys_fchmod, 2) /* 1153 fchmod() ?V*/ +SYS(irix_unimp, 0) /* 1154 XXX IRIX 4 wait3() V*/ +SYS(sys_socketpair, 4) /* 1155 socketpair() V*/ +SYS(irix_systeminfo, 3) /* 1156 systeminfo() IV*/ +SYS(irix_uname, 1) /* 1157 uname() IV*/ +SYS(irix_xstat, 3) /* 1158 xstat() V*/ +SYS(irix_lxstat, 3) /* 1159 lxstat() V*/ +SYS(irix_fxstat, 3) /* 1160 fxstat() V*/ +SYS(irix_xmknod, 0) /* 1161 xmknod() ?V*/ +SYS(irix_sigaction, 4) /* 1162 sigaction() ?V*/ +SYS(irix_sigpending, 1) /* 1163 sigpending() ?V*/ +SYS(irix_sigprocmask, 3) /* 1164 sigprocmask() ?V*/ +SYS(irix_sigsuspend, 0) /* 1165 sigsuspend() ?V*/ +SYS(irix_sigpoll_sys, 3) /* 1166 sigpoll_sys() IV*/ +SYS(irix_swapctl, 2) /* 1167 swapctl() IV*/ +SYS(irix_getcontext, 0) /* 1168 getcontext() HV*/ +SYS(irix_setcontext, 0) /* 1169 setcontext() HV*/ +SYS(irix_waitsys, 5) /* 1170 waitsys() IV*/ +SYS(irix_sigstack, 2) /* 1171 sigstack() HV*/ +SYS(irix_sigaltstack, 2) /* 1172 sigaltstack() HV*/ +SYS(irix_sigsendset, 2) /* 1173 sigsendset() IV*/ +SYS(irix_statvfs, 2) /* 1174 statvfs() V*/ +SYS(irix_fstatvfs, 2) /* 1175 fstatvfs() V*/ +SYS(irix_unimp, 0) /* 1176 XXX getpmsg() DC*/ +SYS(irix_unimp, 0) /* 1177 XXX putpmsg() DC*/ +SYS(irix_lchown, 3) /* 1178 lchown() V*/ +SYS(irix_priocntl, 0) /* 1179 priocntl() DC*/ +SYS(irix_sigqueue, 4) /* 1180 sigqueue() IV*/ +SYS(sys_readv, 3) /* 1181 readv() V*/ +SYS(sys_writev, 3) /* 1182 writev() V*/ +SYS(irix_truncate64, 4) /* 1183 truncate64() XX32bit HV*/ +SYS(irix_ftruncate64, 4) /* 1184 ftruncate64()XX32bit HV*/ +SYS(irix_mmap64, 0) /* 1185 mmap64() XX32bit HV*/ +SYS(irix_dmi, 0) /* 1186 dmi() DC*/ +SYS(irix_pread, 6) /* 1187 pread() IV*/ +SYS(irix_pwrite, 6) /* 1188 pwrite() IV*/ +SYS(sys_fsync, 1) /* 1189 fdatasync() XXPOSIX HV*/ +SYS(irix_sgifastpath, 7) /* 1190 sgifastpath() WHEEE IV*/ +SYS(irix_unimp, 0) /* 1191 XXX attr_get() DC*/ +SYS(irix_unimp, 0) /* 1192 XXX attr_getf() DC*/ +SYS(irix_unimp, 0) /* 1193 XXX attr_set() DC*/ +SYS(irix_unimp, 0) /* 1194 XXX attr_setf() DC*/ +SYS(irix_unimp, 0) /* 1195 XXX attr_remove() DC*/ +SYS(irix_unimp, 0) /* 1196 XXX attr_removef() DC*/ +SYS(irix_unimp, 0) /* 1197 XXX attr_list() DC*/ +SYS(irix_unimp, 0) /* 1198 XXX attr_listf() DC*/ +SYS(irix_unimp, 0) /* 1199 XXX attr_multi() DC*/ +SYS(irix_unimp, 0) /* 1200 XXX attr_multif() DC*/ +SYS(irix_statvfs64, 2) /* 1201 statvfs64() V*/ +SYS(irix_fstatvfs64, 2) /* 1202 fstatvfs64() V*/ +SYS(irix_getmountid, 2) /* 1203 getmountid()XXXfsids HV*/ +SYS(irix_nsproc, 5) /* 1204 nsproc() IV*/ +SYS(irix_getdents64, 3) /* 1205 getdents64() HV*/ +SYS(irix_unimp, 0) /* 1206 XXX DFS garbage DC*/ +SYS(irix_ngetdents, 4) /* 1207 ngetdents() XXXeop HV*/ +SYS(irix_ngetdents64, 4) /* 1208 ngetdents64() XXXeop HV*/ 
+SYS(irix_unimp, 0) /* 1209 nothin' V*/ +SYS(irix_unimp, 0) /* 1210 XXX pidsprocsp() */ +SYS(irix_unimp, 0) /* 1211 XXX rexec() */ +SYS(irix_unimp, 0) /* 1212 XXX timer_create() */ +SYS(irix_unimp, 0) /* 1213 XXX timer_delete() */ +SYS(irix_unimp, 0) /* 1214 XXX timer_settime() */ +SYS(irix_unimp, 0) /* 1215 XXX timer_gettime() */ +SYS(irix_unimp, 0) /* 1216 XXX timer_setoverrun() */ +SYS(sys_sched_rr_get_interval, 2) /* 1217 sched_rr_get_interval()V*/ +SYS(sys_sched_yield, 0) /* 1218 sched_yield() V*/ +SYS(sys_sched_getscheduler, 1) /* 1219 sched_getscheduler() V*/ +SYS(sys_sched_setscheduler, 3) /* 1220 sched_setscheduler() V*/ +SYS(sys_sched_getparam, 2) /* 1221 sched_getparam() V*/ +SYS(sys_sched_setparam, 2) /* 1222 sched_setparam() V*/ +SYS(irix_unimp, 0) /* 1223 XXX usync_cntl() */ +SYS(irix_unimp, 0) /* 1224 XXX psema_cntl() */ +SYS(irix_unimp, 0) /* 1225 XXX restartreturn() */ + +/* Just to pad things out nicely. */ +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) 
+SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) 
+SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) 
+SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) 
+SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) +SYS(irix_unimp, 0) + +/* YEEEEEEEEEEEEEEEEEE!!!! */ diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c new file mode 100644 index 000000000..ec4f5c449 --- /dev/null +++ b/arch/mips/kernel/irixelf.c @@ -0,0 +1,1374 @@ +/* $Id: irixelf.c,v 1.8 1996/08/24 03:52:25 dm Exp $ + * irixelf.c: Code to load IRIX ELF executables which conform to + * the MIPS ABI. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * + * Based upon work which is: + * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). 
+ */ + +#include <linux/module.h> + +#include <linux/fs.h> +#include <linux/stat.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/a.out.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/binfmts.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/malloc.h> +#include <linux/shm.h> +#include <linux/personality.h> +#include <linux/elfcore.h> + +#include <asm/segment.h> +#include <asm/pgtable.h> + +#include <linux/config.h> + +#define DLINFO_ITEMS 12 + +#include <linux/elf.h> + +#undef DEBUG_ELF + +static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); +static int load_irix_library(int fd); +static int irix_core_dump(long signr, struct pt_regs * regs); +extern int dump_fpu (elf_fpregset_t *); + +static struct linux_binfmt irix_format = { +#ifndef MODULE + NULL, NULL, load_irix_binary, load_irix_library, irix_core_dump +#else + NULL, &mod_use_count_, load_irix_binary, load_irix_library, irix_core_dump +#endif +}; + +#ifdef DEBUG_ELF +/* Debugging routines. */ +static char *get_elf_p_type(Elf32_Word p_type) +{ + int i = (int) p_type; + + switch(i) { + case PT_NULL: return("PT_NULL"); break; + case PT_LOAD: return("PT_LOAD"); break; + case PT_DYNAMIC: return("PT_DYNAMIC"); break; + case PT_INTERP: return("PT_INTERP"); break; + case PT_NOTE: return("PT_NOTE"); break; + case PT_SHLIB: return("PT_SHLIB"); break; + case PT_PHDR: return("PT_PHDR"); break; + case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; + case PT_HIPROC: return("PT_HIPROC"); break; + default: return("PT_BOGUS"); break; + } +} + +static void print_elfhdr(struct elfhdr *ehp) +{ + int i; + + printk("ELFHDR: e_ident<"); + for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); + printk("%x>\n", ehp->e_ident[i]); + printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", + (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, + (unsigned long) ehp->e_version); + printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " + "e_flags[%08lx]\n", + (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, + (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); + printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", + (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, + (unsigned short) ehp->e_phnum); + printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", + (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, + (unsigned short) ehp->e_shstrndx); +} + +static void print_phdr(int i, struct elf_phdr *ep) +{ + printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " + "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), + (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, + (unsigned long) ep->p_paddr); + printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " + "p_align[%08lx]\n", (unsigned long) ep->p_filesz, + (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, + (unsigned long) ep->p_align); +} + +static void dump_phdrs(struct elf_phdr *ep, int pnum) +{ + int i; + + for(i = 0; i < pnum; i++, ep++) { + if((ep->p_type == PT_LOAD) || + (ep->p_type == PT_INTERP) || + (ep->p_type == PT_PHDR)) + print_phdr(i, ep); + } +} +#endif /* (DEBUG_ELF) */ + +static void set_brk(unsigned long start, unsigned long end) +{ + start = PAGE_ALIGN(start); + end = PAGE_ALIGN(end); + if (end <= start) + return; + do_mmap(NULL, start, end - start, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE, 0); +} + + +/* We need to 
explicitly zero any fractional pages + * after the data section (i.e. bss). This would + * contain the junk from the file that should not + * be in memory. + */ +static void padzero(unsigned long elf_bss) +{ + unsigned long nbyte; + char * fpnt; + + nbyte = elf_bss & (PAGE_SIZE-1); + if (nbyte) { + nbyte = PAGE_SIZE - nbyte; + /* FIXME: someone should investigate, why a bad binary + * is allowed to bring a wrong elf_bss until here, + * and how to react. Suffice the plain return? + * rossius@hrz.tu-chemnitz.de + */ + if (verify_area(VERIFY_WRITE, (void *) elf_bss, nbyte)) + return; + + fpnt = (char *) elf_bss; + do { + put_user(0, fpnt++); + } while (--nbyte); + } +} + +unsigned long * create_irix_tables(char * p, int argc, int envc, + struct elfhdr * exec, unsigned int load_addr, + unsigned int interp_load_addr, + struct pt_regs *regs, struct elf_phdr *ephdr) +{ + unsigned long *argv,*envp, *dlinfo; + unsigned long * sp; + unsigned long * csp; + +#ifdef DEBUG_ELF + printk("create_irix_tables: p[%p] argc[%d] envc[%d] " + "load_addr[%08x] interp_load_addr[%08x]\n", + p, argc, envc, load_addr, interp_load_addr); +#endif + sp = (unsigned long *) (0xfffffffc & (unsigned long) p); + + /* Make sure we will be aligned properly at the end of this. */ + csp = sp; + csp -= exec ? DLINFO_ITEMS*2 : 2; + csp -= envc + 1; + csp -= argc+1; + if (!(((unsigned long) csp) & 4)) + sp--; + + sp -= exec ? DLINFO_ITEMS*2 : 2; + dlinfo = sp; + sp -= envc+1; + envp = sp; + sp -= argc+1; + argv = sp; + + put_user((unsigned long)argc, --sp); + +#define NEW_AUX_ENT(id, val) \ + put_user ((id), dlinfo++); \ + put_user ((val), dlinfo++) + +#define INTERP_ALIGN (~((64 * 1024) - 1)) + + if(exec) { + struct elf_phdr * eppnt; + eppnt = (struct elf_phdr *) exec->e_phoff; + + /* Put this here for an ELF program interpreter */ + NEW_AUX_ENT (AT_PHDR, ephdr->p_vaddr); + NEW_AUX_ENT (AT_PHENT, sizeof (struct elf_phdr)); + NEW_AUX_ENT (AT_PHNUM, exec->e_phnum); + NEW_AUX_ENT (AT_PAGESZ, PAGE_SIZE); + NEW_AUX_ENT (AT_BASE, (interp_load_addr & (INTERP_ALIGN))); + NEW_AUX_ENT (AT_FLAGS, 0); + NEW_AUX_ENT (AT_ENTRY, (unsigned long) exec->e_entry); + NEW_AUX_ENT (AT_UID, (unsigned long) current->uid); + NEW_AUX_ENT (AT_EUID, (unsigned long) current->euid); + NEW_AUX_ENT (AT_GID, (unsigned long) current->gid); + NEW_AUX_ENT (AT_EGID, (unsigned long) current->egid); + } + NEW_AUX_ENT (AT_NULL, 0); +#undef NEW_AUX_ENT + + current->mm->arg_start = (unsigned long) p; + while (argc-->0) { + put_user(p,argv++); + while (get_user(p++)) /* nothing */ ; + } + put_user(0,argv); + current->mm->arg_end = current->mm->env_start = (unsigned long) p; + while (envc-->0) { + put_user(p,envp++); + while (get_user(p++)) /* nothing */ ; + } + put_user(0,envp); + current->mm->env_end = (unsigned long) p; + return sp; +} + + +/* This is much more generalized than the library routine read function, + * so we keep this separate. Technically the library read function + * is only provided so that we can read a.out libraries that have + * an ELF header. 
+ */ +static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, + struct inode * interpreter_inode, + unsigned int *interp_load_addr) +{ + struct file * file; + struct elf_phdr *elf_phdata = NULL; + struct elf_phdr *eppnt; + unsigned int len; + unsigned int load_addr; + int elf_exec_fileno; + int elf_bss; + int retval; + unsigned int last_bss; + int error; + int i; + unsigned int k; + + elf_bss = 0; + last_bss = 0; + error = load_addr = 0; + +#ifdef DEBUG_ELF + print_elfhdr(interp_elf_ex); +#endif + + /* First of all, some simple consistency checks */ + if((interp_elf_ex->e_type != ET_EXEC && + interp_elf_ex->e_type != ET_DYN) || + INCOMPATIBLE_MACHINE(interp_elf_ex->e_machine) || + (!interpreter_inode->i_op || + !interpreter_inode->i_op->default_file_ops->mmap)){ + printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type); + return 0xffffffff; + } + + /* Now read in all of the header information */ + if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { + printk("IRIX interp header bigger than a page (%d)\n", + (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); + return 0xffffffff; + } + + elf_phdata = (struct elf_phdr *) + kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, + GFP_KERNEL); + + if(!elf_phdata) { + printk("Cannot kmalloc phdata for IRIX interp.\n"); + return 0xffffffff; + } + + /* If the size of this structure has changed, then punt, since + * we will be doing the wrong thing. + */ + if(interp_elf_ex->e_phentsize != 32) { + printk("IRIX interp e_phentsize == %d != 32 ", + interp_elf_ex->e_phentsize); + kfree(elf_phdata); + return 0xffffffff; + } + + retval = read_exec(interpreter_inode, interp_elf_ex->e_phoff, + (char *) elf_phdata, + sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 1); + +#ifdef DEBUG_ELF + dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); +#endif + + elf_exec_fileno = open_inode(interpreter_inode, O_RDONLY); + if (elf_exec_fileno < 0) { + printk("Could not open IRIX interp inode.\n"); + kfree(elf_phdata); + return 0xffffffff; + } + + file = current->files->fd[elf_exec_fileno]; + + eppnt = elf_phdata; + for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { + if(eppnt->p_type == PT_LOAD) { + int elf_type = MAP_PRIVATE | MAP_DENYWRITE; + int elf_prot = 0; + unsigned long vaddr = 0; + if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; + if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; + if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; + elf_type |= MAP_FIXED; + vaddr = eppnt->p_vaddr; + +#ifdef DEBUG_ELF + printk("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", + file, vaddr, + (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), + (unsigned long) elf_prot, (unsigned long) elf_type, + (unsigned long) (eppnt->p_offset & 0xfffff000)); +#endif + error = do_mmap(file, vaddr, + eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), + elf_prot, elf_type, + eppnt->p_offset & 0xfffff000); + + if(error < 0 && error > -1024) { + printk("Aieee IRIX interp mmap error=%d\n", error); + break; /* Real error */ + } +#ifdef DEBUG_ELF + printk("error=%08lx ", (unsigned long) error); +#endif + if(!load_addr && interp_elf_ex->e_type == ET_DYN) { + load_addr = error; +#ifdef DEBUG_ELF + printk("load_addr = error "); +#endif + } + + /* Find the end of the file mapping for this phdr, and keep + * track of the largest address we see for this. + */ + k = eppnt->p_vaddr + eppnt->p_filesz; + if(k > elf_bss) elf_bss = k; + + /* Do the same thing for the memory mapping - between + * elf_bss and last_bss is the bss section. 
+ */ + k = eppnt->p_memsz + eppnt->p_vaddr; + if(k > last_bss) last_bss = k; +#ifdef DEBUG_ELF + printk("\n"); +#endif + } + } + + /* Now use mmap to map the library into memory. */ + sys_close(elf_exec_fileno); + if(error < 0 && error > -1024) { +#ifdef DEBUG_ELF + printk("got error %d\n", error); +#endif + kfree(elf_phdata); + return 0xffffffff; + } + + /* Now fill out the bss section. First pad the last page up + * to the page boundary, and then perform a mmap to make sure + * that there are zeromapped pages up to and including the last + * bss page. + */ +#ifdef DEBUG_ELF + printk("padzero(%08lx) ", (unsigned long) (elf_bss)); +#endif + padzero(elf_bss); + len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */ + +#ifdef DEBUG_ELF + printk("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss, + (unsigned long) len); +#endif + + /* Map the last of the bss segment */ + if (last_bss > len) { + do_mmap(NULL, len, (last_bss - len), + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_FIXED|MAP_PRIVATE, 0); + } + kfree(elf_phdata); + + *interp_load_addr = load_addr; + return ((unsigned int) interp_elf_ex->e_entry); +} + +/* Check sanity of IRIX elf executable header. */ +static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm) +{ + if (ehp->e_ident[0] != 0x7f || strncmp(&ehp->e_ident[1], "ELF", 3)) { + return -ENOEXEC; + } + + /* First of all, some simple consistency checks */ + if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || + INCOMPATIBLE_MACHINE(ehp->e_machine) || + (!bprm->inode->i_op || !bprm->inode->i_op->default_file_ops || + !bprm->inode->i_op->default_file_ops->mmap)) { + return -ENOEXEC; + } + + /* Only support MIPS ARCH2 or greater IRIX binaries for now. */ + if(!(ehp->e_flags & EF_MIPS_ARCH) && !(ehp->e_flags & 0x04)) { + return -ENOEXEC; + } + + /* XXX Don't support N32 or 64bit binaries yet because they can + * XXX and do execute 64 bit instructions and expect all registers + * XXX to be 64 bit as well. We need to make the kernel save + * XXX all registers as 64bits on cpu's capable of this at + * XXX exception time plus frob the XTLB exception vector. + */ + if((ehp->e_flags & 0x20)) { + return -ENOEXEC; + } + + return 0; /* It's ok. */ +} + +#define IRIX_INTERP_PREFIX "/usr/gnemul/irix" + +/* Look for an IRIX ELF interpreter. */ +static inline int look_for_irix_interpreter(char **name, + struct inode **interpreter_inode, + struct elfhdr *interp_elf_ex, + struct elf_phdr *epp, + struct linux_binprm *bprm, int pnum) +{ + int i, old_fs; + int retval = -EINVAL; + + *name = NULL; + for(i = 0; i < pnum; i++, epp++) { + if(epp->p_type != PT_INTERP) + continue; + + /* It is illegal to have two interpreters for one executable. 
*/ + if(*name != NULL) + goto losing; + + *name = (char *) kmalloc((epp->p_filesz + + strlen(IRIX_INTERP_PREFIX)), + GFP_KERNEL); + if(!*name) + return -ENOMEM; + + strcpy(*name, IRIX_INTERP_PREFIX); + retval = read_exec(bprm->inode, epp->p_offset, (*name + 16), + epp->p_filesz, 1); + if(retval < 0) + goto losing; + + old_fs = get_fs(); set_fs(get_ds()); + retval = namei(*name, interpreter_inode); + set_fs(old_fs); + if(retval < 0) + goto losing; + + retval = read_exec(*interpreter_inode, 0, bprm->buf, 128, 1); + if(retval < 0) + goto losing; + + *interp_elf_ex = *((struct elfhdr *) bprm->buf); + } + return 0; + +losing: + kfree(*name); + return retval; +} + +static inline int verify_irix_interpreter(struct elfhdr *ihp) +{ + if(ihp->e_ident[0] != 0x7f || strncmp(&ihp->e_ident[1], "ELF", 3)) + return -ELIBBAD; + return 0; +} + +#define EXEC_MAP_FLAGS (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE) + +static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnum, + unsigned int *estack, unsigned int *laddr, + unsigned int *scode, unsigned int *ebss, + unsigned int *ecode, unsigned int *edata, + unsigned int *ebrk) +{ + unsigned int tmp; + int i, prot; + + for(i = 0; i < pnum; i++, epp++) { + if(epp->p_type != PT_LOAD) + continue; + + /* Map it. */ + prot = (epp->p_flags & PF_R) ? PROT_READ : 0; + prot |= (epp->p_flags & PF_W) ? PROT_WRITE : 0; + prot |= (epp->p_flags & PF_X) ? PROT_EXEC : 0; + (void) do_mmap(fp, (epp->p_vaddr & 0xfffff000), + (epp->p_filesz + (epp->p_vaddr & 0xfff)), + prot, EXEC_MAP_FLAGS, + (epp->p_offset & 0xfffff000)); + + /* Fixup location tracking vars. */ + if((epp->p_vaddr & 0xfffff000) < *estack) + *estack = (epp->p_vaddr & 0xfffff000); + if(!*laddr) + *laddr = epp->p_vaddr - epp->p_offset; + if(epp->p_vaddr < *scode) + *scode = epp->p_vaddr; + + tmp = epp->p_vaddr + epp->p_filesz; + if(tmp > *ebss) + *ebss = tmp; + if((epp->p_flags & PF_X) && *ecode < tmp) + *ecode = tmp; + if(*edata < tmp) + *edata = tmp; + + tmp = epp->p_vaddr + epp->p_memsz; + if(tmp > *ebrk) + *ebrk = tmp; + } + +} + +static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp, + struct inode *iino, unsigned int *iladdr, + int pnum, int old_fs, + unsigned int *eentry) +{ + int i; + + *eentry = 0xffffffff; + for(i = 0; i < pnum; i++, epp++) { + if(epp->p_type != PT_INTERP) + continue; + + /* We should have fielded this error elsewhere... */ + if(*eentry != 0xffffffff) + return -1; + + set_fs(old_fs); + *eentry = load_irix_interp(ihp, iino, iladdr); + old_fs = get_fs(); + set_fs(get_ds()); + + iput(iino); + + if(*eentry == 0xffffffff) + return -1; + } + return 0; +} + +/* These are the functions used to load ELF style executables and shared + * libraries. There is no binary dependent code anywhere else. 
+ */ +static inline int do_load_irix_binary(struct linux_binprm * bprm, + struct pt_regs * regs) +{ + struct elfhdr elf_ex, interp_elf_ex; + struct inode *interpreter_inode; + struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr; + unsigned int load_addr, elf_bss, elf_brk; + unsigned int elf_entry, interp_load_addr = 0; + unsigned int start_code, end_code, end_data, elf_stack; + int old_fs, elf_exec_fileno, retval, has_interp, has_ephdr, i; + char *elf_interpreter; + + load_addr = 0; + has_interp = has_ephdr = 0; + elf_ihdr = elf_ephdr = 0; + elf_ex = *((struct elfhdr *) bprm->buf); + + if(verify_binary(&elf_ex, bprm)) + return -ENOEXEC; + +#ifdef DEBUG_ELF + print_elfhdr(&elf_ex); +#endif + + /* Now read in all of the header information */ + elf_phdata = (struct elf_phdr *) kmalloc(elf_ex.e_phentsize * + elf_ex.e_phnum, GFP_KERNEL); + if (elf_phdata == NULL) + return -ENOMEM; + + retval = read_exec(bprm->inode, elf_ex.e_phoff, (char *) elf_phdata, + elf_ex.e_phentsize * elf_ex.e_phnum, 1); + if (retval < 0) { + kfree (elf_phdata); + return retval; + } + +#ifdef DEBUG_ELF + dump_phdrs(elf_phdata, elf_ex.e_phnum); +#endif + + /* Set some things for later. */ + for(i = 0; i < elf_ex.e_phnum; i++) { + switch(elf_phdata[i].p_type) { + case PT_INTERP: + has_interp = 1; + elf_ihdr = &elf_phdata[i]; + break; + case PT_PHDR: + has_ephdr = 1; + elf_ephdr = &elf_phdata[i]; + break; + }; + } +#ifdef DEBUG_ELF + printk("\n"); +#endif + + elf_bss = 0; + elf_brk = 0; + elf_exec_fileno = open_inode(bprm->inode, O_RDONLY); + + if (elf_exec_fileno < 0) { + kfree (elf_phdata); + return elf_exec_fileno; + } + + elf_stack = 0xffffffff; + elf_interpreter = NULL; + start_code = 0xffffffff; + end_code = 0; + end_data = 0; + + retval = look_for_irix_interpreter(&elf_interpreter, &interpreter_inode, + &interp_elf_ex, elf_phdata, bprm, + elf_ex.e_phnum); + if(retval) { + kfree(elf_phdata); + sys_close(elf_exec_fileno); + return retval; + } + + if(elf_interpreter) { + retval = verify_irix_interpreter(&interp_elf_ex); + if(retval) { + kfree(elf_interpreter); + kfree(elf_phdata); + sys_close(elf_exec_fileno); + return retval; + } + } + + /* OK, we are done with that, now set up the arg stuff, + * and then start this sucker up. + */ + if (!bprm->sh_bang) { + if (!bprm->p) { + if(elf_interpreter) { + kfree(elf_interpreter); + } + kfree (elf_phdata); + sys_close(elf_exec_fileno); + return -E2BIG; + } + } + + /* OK, This is the point of no return. */ + flush_old_exec(bprm); + + current->mm->end_data = 0; + current->mm->end_code = 0; + current->mm->start_mmap = ELF_START_MMAP; + current->mm->mmap = NULL; + elf_entry = (unsigned int) elf_ex.e_entry; + + /* Do this so that we can load the interpreter, if need be. We will + * change some of these later. + */ + current->mm->rss = 0; + bprm->p = setup_arg_pages(bprm->p, bprm); + current->mm->start_stack = bprm->p; + + /* At this point, we assume that the image should be loaded at + * fixed address, not at a variable address. 
+ */ + old_fs = get_fs(); + set_fs(get_ds()); + + map_executable(current->files->fd[elf_exec_fileno], elf_phdata, + elf_ex.e_phnum, &elf_stack, &load_addr, + &start_code, &elf_bss, &end_code, &end_data, &elf_brk); + + if(elf_interpreter) { + retval = map_interpreter(elf_phdata, &interp_elf_ex, + interpreter_inode, &interp_load_addr, + elf_ex.e_phnum, old_fs, &elf_entry); + kfree(elf_interpreter); + if(retval) { + set_fs(old_fs); + printk("Unable to load IRIX ELF interpreter\n"); + kfree(elf_phdata); + send_sig(SIGSEGV, current, 0); + return 0; + } + } + + set_fs(old_fs); + + kfree(elf_phdata); + sys_close(elf_exec_fileno); + current->personality = PER_IRIX32; + + if (current->exec_domain && current->exec_domain->use_count) + (*current->exec_domain->use_count)--; + if (current->binfmt && current->binfmt->use_count) + (*current->binfmt->use_count)--; + current->exec_domain = lookup_exec_domain(current->personality); + current->binfmt = &irix_format; + if (current->exec_domain && current->exec_domain->use_count) + (*current->exec_domain->use_count)++; + if (current->binfmt && current->binfmt->use_count) + (*current->binfmt->use_count)++; + + current->suid = current->euid = current->fsuid = bprm->e_uid; + current->sgid = current->egid = current->fsgid = bprm->e_gid; + current->flags &= ~PF_FORKNOEXEC; + bprm->p = (unsigned long) + create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc, + (elf_interpreter ? &elf_ex : NULL), + load_addr, interp_load_addr, regs, elf_ephdr); + current->mm->start_brk = current->mm->brk = elf_brk; + current->mm->end_code = end_code; + current->mm->start_code = start_code; + current->mm->end_data = end_data; + current->mm->start_stack = bprm->p; + + /* Calling set_brk effectively mmaps the pages that we need for the + * bss and break sections. + */ + set_brk(elf_bss, elf_brk); + + padzero(elf_bss); + +#ifdef DEBUG_ELF + printk("(start_brk) %08lx\n" , current->mm->start_brk); + printk("(end_code) %08lx\n" , current->mm->end_code); + printk("(start_code) %08lx\n" , current->mm->start_code); + printk("(end_data) %08lx\n" , current->mm->end_data); + printk("(start_stack) %08lx\n" , current->mm->start_stack); + printk("(brk) %08lx\n" , current->mm->brk); +#endif + +#if 0 /* XXX No fucking way dude... */ + /* Why this, you ask??? Well SVr4 maps page 0 as read-only, + * and some applications "depend" upon this behavior. + * Since we do not have the power to recompile these, we + * emulate the SVr4 behavior. Sigh. + */ + (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE, 0); +#endif + + start_thread(regs, elf_entry, bprm->p); + if (current->flags & PF_PTRACED) + send_sig(SIGTRAP, current, 0); + return 0; +} + +static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) +{ + int retval; + + MOD_INC_USE_COUNT; + retval = do_load_irix_binary(bprm, regs); + MOD_DEC_USE_COUNT; + return retval; +} + +/* This is really simpleminded and specialized - we are loading an + * a.out library that is given an ELF header. + */ +static inline int do_load_irix_library(int fd) +{ + struct file * file; + struct elfhdr elf_ex; + struct elf_phdr *elf_phdata = NULL; + struct inode * inode; + unsigned int len; + int elf_bss; + int retval; + unsigned int bss; + int error; + int i,j, k; + + len = 0; + file = current->files->fd[fd]; + inode = file->f_inode; + elf_bss = 0; + + if (!file || !file->f_op) + return -EACCES; + + /* Seek to the beginning of the file. 
*/ + if (file->f_op->lseek) { + if ((error = file->f_op->lseek(inode, file, 0, 0)) != 0) + return -ENOEXEC; + } else + file->f_pos = 0; + + set_fs(KERNEL_DS); + error = file->f_op->read(inode, file, (char *) &elf_ex, sizeof(elf_ex)); + set_fs(USER_DS); + if (error != sizeof(elf_ex)) + return -ENOEXEC; + + if (elf_ex.e_ident[0] != 0x7f || + strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) + return -ENOEXEC; + + /* First of all, some simple consistency checks. */ + if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || + INCOMPATIBLE_MACHINE(elf_ex.e_machine) || + (!inode->i_op || !inode->i_op->default_file_ops->mmap)) + return -ENOEXEC; + + /* Now read in all of the header information. */ + if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) + return -ENOEXEC; + + elf_phdata = (struct elf_phdr *) + kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); + if (elf_phdata == NULL) + return -ENOMEM; + + retval = read_exec(inode, elf_ex.e_phoff, (char *) elf_phdata, + sizeof(struct elf_phdr) * elf_ex.e_phnum, 1); + + j = 0; + for(i=0; i<elf_ex.e_phnum; i++) + if((elf_phdata + i)->p_type == PT_LOAD) j++; + + if(j != 1) { + kfree(elf_phdata); + return -ENOEXEC; + } + + while(elf_phdata->p_type != PT_LOAD) elf_phdata++; + + /* Now use mmap to map the library into memory. */ + error = do_mmap(file, + elf_phdata->p_vaddr & 0xfffff000, + elf_phdata->p_filesz + (elf_phdata->p_vaddr & 0xfff), + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, + elf_phdata->p_offset & 0xfffff000); + + k = elf_phdata->p_vaddr + elf_phdata->p_filesz; + if(k > elf_bss) elf_bss = k; + + if (error != (elf_phdata->p_vaddr & 0xfffff000)) { + kfree(elf_phdata); + return error; + } + + padzero(elf_bss); + + len = (elf_phdata->p_filesz + elf_phdata->p_vaddr+ 0xfff) & 0xfffff000; + bss = elf_phdata->p_memsz + elf_phdata->p_vaddr; + if (bss > len) + do_mmap(NULL, len, bss-len, + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_FIXED|MAP_PRIVATE, 0); + kfree(elf_phdata); + return 0; +} + +static int load_irix_library(int fd) +{ + int retval; + + MOD_INC_USE_COUNT; + retval = do_load_irix_library(fd); + MOD_DEC_USE_COUNT; + return retval; +} + +/* Called through irix_syssgi() to map an elf image given an FD, + * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many + * phdrs there are in the USER_PHDRP array. We return the vaddr the + * first phdr was successfully mapped to. + */ +unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt) +{ + struct elf_phdr *hp; + struct file *filp; + int i, retval; + +#ifdef DEBUG_ELF + printk("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n", + fd, user_phdrp, cnt); +#endif + + /* First get the verification out of the way. */ + hp = user_phdrp; + retval = verify_area(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt)); + if(retval) { +#ifdef DEBUG_ELF + printk("irix_mapelf: verify_area fails!\n"); +#endif + return retval; + } + +#ifdef DEBUG_ELF + dump_phdrs(user_phdrp, cnt); +#endif + + for(i = 0; i < cnt; i++, hp++) + if(hp->p_type != PT_LOAD) { + printk("irix_mapelf: One section is not PT_LOAD!\n"); + return -ENOEXEC; + } + + filp = current->files->fd[fd]; + if(!filp || !filp->f_op) { + printk("irix_mapelf: Bogon filp!\n"); + return -EACCES; + } + + hp = user_phdrp; + for(i = 0; i < cnt; i++, hp++) { + int prot; + + prot = (hp->p_flags & PF_R) ? PROT_READ : 0; + prot |= (hp->p_flags & PF_W) ? PROT_WRITE : 0; + prot |= (hp->p_flags & PF_X) ? 
PROT_EXEC : 0; + retval = do_mmap(filp, (hp->p_vaddr & 0xfffff000), + (hp->p_filesz + (hp->p_vaddr & 0xfff)), + prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), + (hp->p_offset & 0xfffff000)); + + if(retval != (hp->p_vaddr & 0xfffff000)) { + printk("irix_mapelf: do_mmap fails with %d!\n", retval); + return retval; + } + } + +#ifdef DEBUG_ELF + printk("irix_mapelf: Success, returning %08lx\n", user_phdrp->p_vaddr); +#endif + return user_phdrp->p_vaddr; +} + +/* + * ELF core dumper + * + * Modelled on fs/exec.c:aout_core_dump() + * Jeremy Fitzhardinge <jeremy@sw.oz.au> + */ + +/* These are the only things you should do on a core-file: use only these + * functions to write out all the necessary info. + */ +static int dump_write(struct file *file, const void *addr, int nr) +{ + return file->f_op->write(file->f_inode, file, addr, nr) == nr; +} + +static int dump_seek(struct file *file, off_t off) +{ + if (file->f_op->lseek) { + if (file->f_op->lseek(file->f_inode, file, off, 0) != off) + return 0; + } else + file->f_pos = off; + return 1; +} + +/* Decide whether a segment is worth dumping; default is yes to be + * sure (missing info is worse than too much; etc). + * Personally I'd include everything, and use the coredump limit... + * + * I think we should skip something. But I am not sure how. H.J. + */ +static inline int maydump(struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC))) + return 0; +#if 1 + if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN)) + return 1; + if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED)) + return 0; +#endif + return 1; +} + +#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) + +/* An ELF note in memory. */ +struct memelfnote +{ + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +static int notesize(struct memelfnote *en) +{ + int sz; + + sz = sizeof(struct elf_note); + sz += roundup(strlen(en->name), 4); + sz += roundup(en->datasz, 4); + + return sz; +} + +/* #define DEBUG */ + +#define DUMP_WRITE(addr, nr) \ + do { if (!dump_write(file, (addr), (nr))) return 0; } while(0) +#define DUMP_SEEK(off) \ + do { if (!dump_seek(file, (off))) return 0; } while(0) + +static int writenote(struct memelfnote *men, struct file *file) +{ + struct elf_note en; + + en.n_namesz = strlen(men->name); + en.n_descsz = men->datasz; + en.n_type = men->type; + + DUMP_WRITE(&en, sizeof(en)); + DUMP_WRITE(men->name, en.n_namesz); + /* XXX - cast from long long to long to avoid need for libgcc.a */ + DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */ + DUMP_WRITE(men->data, men->datasz); + DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */ + + return 1; +} +#undef DUMP_WRITE +#undef DUMP_SEEK + +#define DUMP_WRITE(addr, nr) \ + if (!dump_write(&file, (addr), (nr))) \ + goto close_coredump; +#define DUMP_SEEK(off) \ + if (!dump_seek(&file, (off))) \ + goto close_coredump; +/* Actual dumper. + * + * This is a two-pass process; first we find the offsets of the bits, + * and then they are actually written out. If we run out of core limit + * we just truncate. 
+ */ +static int irix_core_dump(long signr, struct pt_regs * regs) +{ + int has_dumped = 0; + struct file file; + struct inode *inode; + unsigned short fs; + char corefile[6+sizeof(current->comm)]; + int segs; + int i; + size_t size; + struct vm_area_struct *vma; + struct elfhdr elf; + off_t offset = 0, dataoff; + int limit = current->rlim[RLIMIT_CORE].rlim_cur; + int numnote = 4; + struct memelfnote notes[4]; + struct elf_prstatus prstatus; /* NT_PRSTATUS */ + elf_fpregset_t fpu; /* NT_PRFPREG */ + struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ + + if (!current->dumpable || limit < PAGE_SIZE) + return 0; + current->dumpable = 0; + +#ifndef CONFIG_BINFMT_IRIX + MOD_INC_USE_COUNT; +#endif + + /* Count what's needed to dump, up to the limit of coredump size. */ + segs = 0; + size = 0; + for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { + if (maydump(vma)) + { + int sz = vma->vm_end-vma->vm_start; + + if (size+sz >= limit) + break; + else + size += sz; + } + + segs++; + } +#ifdef DEBUG + printk("irix_core_dump: %d segs taking %d bytes\n", segs, size); +#endif + + /* Set up header. */ + memcpy(elf.e_ident, ELFMAG, SELFMAG); + elf.e_ident[EI_CLASS] = ELFCLASS32; + elf.e_ident[EI_DATA] = ELFDATA2LSB; + elf.e_ident[EI_VERSION] = EV_CURRENT; + memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); + + elf.e_type = ET_CORE; + elf.e_machine = ELF_EM_CPU; + elf.e_version = EV_CURRENT; + elf.e_entry = 0; + elf.e_phoff = sizeof(elf); + elf.e_shoff = 0; + elf.e_flags = 0; + elf.e_ehsize = sizeof(elf); + elf.e_phentsize = sizeof(struct elf_phdr); + elf.e_phnum = segs+1; /* Include notes. */ + elf.e_shentsize = 0; + elf.e_shnum = 0; + elf.e_shstrndx = 0; + + fs = get_fs(); + set_fs(KERNEL_DS); + memcpy(corefile,"core.",5); +#if 0 + memcpy(corefile+5,current->comm,sizeof(current->comm)); +#else + corefile[4] = '\0'; +#endif + if (open_namei(corefile,O_CREAT | 2 | O_TRUNC,0600,&inode,NULL)) { + inode = NULL; + goto end_coredump; + } + if (!S_ISREG(inode->i_mode)) + goto end_coredump; + if (!inode->i_op || !inode->i_op->default_file_ops) + goto end_coredump; + file.f_mode = 3; + file.f_flags = 0; + file.f_count = 1; + file.f_inode = inode; + file.f_pos = 0; + file.f_reada = 0; + file.f_op = inode->i_op->default_file_ops; + if (file.f_op->open) + if (file.f_op->open(inode,&file)) + goto end_coredump; + if (!file.f_op->write) + goto close_coredump; + has_dumped = 1; + current->flags |= PF_DUMPCORE; + + DUMP_WRITE(&elf, sizeof(elf)); + offset += sizeof(elf); /* Elf header. */ + offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers. */ + + /* Set up the notes in similar form to SVR4 core dumps made + * with info from their /proc. 
+ */ + memset(&psinfo, 0, sizeof(psinfo)); + memset(&prstatus, 0, sizeof(prstatus)); + + notes[0].name = "CORE"; + notes[0].type = NT_PRSTATUS; + notes[0].datasz = sizeof(prstatus); + notes[0].data = &prstatus; + prstatus.pr_info.si_signo = prstatus.pr_cursig = signr; + copy_sigbits32(&prstatus.pr_sigpend, current->signal); + copy_sigbits32(&prstatus.pr_sighold, current->blocked); + psinfo.pr_pid = prstatus.pr_pid = current->pid; + psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid; + psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp; + psinfo.pr_sid = prstatus.pr_sid = current->session; + prstatus.pr_utime.tv_sec = CT_TO_SECS(current->utime); + prstatus.pr_utime.tv_usec = CT_TO_USECS(current->utime); + prstatus.pr_stime.tv_sec = CT_TO_SECS(current->stime); + prstatus.pr_stime.tv_usec = CT_TO_USECS(current->stime); + prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->cutime); + prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->cutime); + prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->cstime); + prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->cstime); + if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) { + printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) " + "(%d)\n", sizeof(elf_gregset_t), sizeof(struct pt_regs)); + } else { + *(struct pt_regs *)&prstatus.pr_reg = *regs; + } + + notes[1].name = "CORE"; + notes[1].type = NT_PRPSINFO; + notes[1].datasz = sizeof(psinfo); + notes[1].data = &psinfo; + psinfo.pr_state = current->state; + psinfo.pr_sname = + ((current->state < 0 || current->state > 5) ? + ('.') : ("RSDZTD"[current->state])); + psinfo.pr_zomb = psinfo.pr_sname == 'Z'; + psinfo.pr_nice = current->priority-15; + psinfo.pr_flag = current->flags; + psinfo.pr_uid = current->uid; + psinfo.pr_gid = current->gid; + { + int i, len; + + set_fs(fs); + + len = current->mm->arg_end - current->mm->arg_start; + len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len; + memcpy_fromfs(&psinfo.pr_psargs, + (const char *)current->mm->arg_start, len); + for(i = 0; i < len; i++) + if (psinfo.pr_psargs[i] == 0) + psinfo.pr_psargs[i] = ' '; + psinfo.pr_psargs[len] = 0; + + set_fs(KERNEL_DS); + } + strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); + + notes[2].name = "CORE"; + notes[2].type = NT_TASKSTRUCT; + notes[2].datasz = sizeof(*current); + notes[2].data = current; + + /* Try to dump the fpu. */ + prstatus.pr_fpvalid = dump_fpu (&fpu); + if (!prstatus.pr_fpvalid) { + numnote--; + } else { + notes[3].name = "CORE"; + notes[3].type = NT_PRFPREG; + notes[3].datasz = sizeof(fpu); + notes[3].data = &fpu; + } + + /* Write notes phdr entry. */ + { + struct elf_phdr phdr; + int sz = 0; + + for(i = 0; i < numnote; i++) + sz += notesize(¬es[i]); + + phdr.p_type = PT_NOTE; + phdr.p_offset = offset; + phdr.p_vaddr = 0; + phdr.p_paddr = 0; + phdr.p_filesz = sz; + phdr.p_memsz = 0; + phdr.p_flags = 0; + phdr.p_align = 0; + + offset += phdr.p_filesz; + DUMP_WRITE(&phdr, sizeof(phdr)); + } + + /* Page-align dumped data. */ + dataoff = offset = roundup(offset, PAGE_SIZE); + + /* Write program headers for segments dump. */ + for(vma = current->mm->mmap, i = 0; + i < segs && vma != NULL; vma = vma->vm_next) { + struct elf_phdr phdr; + size_t sz; + + i++; + + sz = vma->vm_end - vma->vm_start; + + phdr.p_type = PT_LOAD; + phdr.p_offset = offset; + phdr.p_vaddr = vma->vm_start; + phdr.p_paddr = 0; + phdr.p_filesz = maydump(vma) ? sz : 0; + phdr.p_memsz = sz; + offset += phdr.p_filesz; + phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; + if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W; + if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X; + phdr.p_align = PAGE_SIZE; + + DUMP_WRITE(&phdr, sizeof(phdr)); + } + + for(i = 0; i < numnote; i++) + if (!writenote(¬es[i], &file)) + goto close_coredump; + + set_fs(fs); + + DUMP_SEEK(dataoff); + + for(i = 0, vma = current->mm->mmap; + i < segs && vma != NULL; + vma = vma->vm_next) { + unsigned long addr = vma->vm_start; + unsigned long len = vma->vm_end - vma->vm_start; + + if (!maydump(vma)) + continue; + i++; +#ifdef DEBUG + printk("elf_core_dump: writing %08lx %lx\n", addr, len); +#endif + DUMP_WRITE((void *)addr, len); + } + + if ((off_t) file.f_pos != offset) { + /* Sanity check. */ + printk("elf_core_dump: file.f_pos (%ld) != offset (%ld)\n", + (off_t) file.f_pos, offset); + } + + close_coredump: + if (file.f_op->release) + file.f_op->release(inode,&file); + + end_coredump: + set_fs(fs); + iput(inode); +#ifndef CONFIG_BINFMT_ELF + MOD_DEC_USE_COUNT; +#endif + return has_dumped; +} + +int init_irix_binfmt(void) { + return register_binfmt(&irix_format); +} + +#ifdef MODULE + +int init_module(void) { + /* Install the COFF, ELF and XOUT loaders. + * N.B. We *rely* on the table being the right size with the + * right number of free slots... + */ + return init_irix_binfmt(); +} + + +void cleanup_module( void) { + /* Remove the IRIX ELF loaders. */ + unregister_binfmt(&irix_format); +} +#endif diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c new file mode 100644 index 000000000..a8498084b --- /dev/null +++ b/arch/mips/kernel/irixioctl.c @@ -0,0 +1,254 @@ +/* $Id: irixioctl.c,v 1.4 1996/07/14 09:36:16 dm Exp $ + * irixioctl.c: A fucking mess... + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/tty.h> + +#include <asm/uaccess.h> +#include <asm/ioctl.h> +#include <asm/ioctls.h> + +#undef DEBUG_IOCTLS + +struct irix_termios { + tcflag_t c_iflag, c_oflag, c_cflag, c_lflag; + cc_t c_cc[NCCS]; +}; + +extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, + unsigned long arg); +extern asmlinkage int sys_write(unsigned int fd,char * buf,unsigned int count); +extern void start_tty(struct tty_struct *tty); + +static struct tty_struct *get_tty(int fd) +{ + struct file *filp; + + if(fd >= NR_OPEN || !(filp = current->files->fd[fd])) + return ((struct tty_struct *) 0); + if(filp->private_data) { + struct tty_struct *ttyp = (struct tty_struct *) filp->private_data; + + if(ttyp->magic == TTY_MAGIC) + return ttyp; + } + return ((struct tty_struct *) 0); +} + +static struct tty_struct *get_real_tty(struct tty_struct *tp) +{ + if(tp->driver.type == TTY_DRIVER_TYPE_PTY && + tp->driver.subtype == PTY_TYPE_MASTER) + return tp->link; + else + return tp; +} + +asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) +{ + unsigned long *data; + struct tty_struct *tp, *rtp; + int error = 0; + int old_fs; + +#ifdef DEBUG_IOCTLS + printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd); +#endif + switch(cmd) { + case 0x00005401: +#ifdef DEBUG_IOCTLS + printk("TCGETA, %08lx) ", arg); +#endif + error = sys_ioctl(fd, TCGETA, arg); + break; + + case 0x0000540d: { + struct termios kt; + struct irix_termios *it = (struct irix_termios *) arg; + +#ifdef DEBUG_IOCTLS + printk("TCGETS, %08lx) ", arg); +#endif + if(!access_ok(VERIFY_WRITE, it, sizeof(*it))) { + error = -EFAULT; + break; + } + old_fs = get_fs(); 
set_fs(get_ds()); + error = sys_ioctl(fd, TCGETS, (unsigned long) &kt); + set_fs(old_fs); + if(error) + break; + __put_user(kt.c_iflag, &it->c_iflag); + __put_user(kt.c_oflag, &it->c_oflag); + __put_user(kt.c_cflag, &it->c_cflag); + __put_user(kt.c_lflag, &it->c_lflag); + for(error = 0; error < NCCS; error++) + __put_user(kt.c_cc[error], &it->c_cc[error]); + error = 0; + break; + } + + case 0x0000540e: { + struct termios kt; + struct irix_termios *it = (struct irix_termios *) arg; + +#ifdef DEBUG_IOCTLS + printk("TCSETS, %08lx) ", arg); +#endif + if(!access_ok(VERIFY_READ, it, sizeof(*it))) { + error = -EFAULT; + break; + } + old_fs = get_fs(); set_fs(get_ds()); + error = sys_ioctl(fd, TCGETS, (unsigned long) &kt); + set_fs(old_fs); + if(error) + break; + __get_user(kt.c_iflag, &it->c_iflag); + __get_user(kt.c_oflag, &it->c_oflag); + __get_user(kt.c_cflag, &it->c_cflag); + __get_user(kt.c_lflag, &it->c_lflag); + for(error = 0; error < NCCS; error++) + __get_user(kt.c_cc[error], &it->c_cc[error]); + old_fs = get_fs(); set_fs(get_ds()); + error = sys_ioctl(fd, TCSETS, (unsigned long) &kt); + set_fs(old_fs); + break; + } + + case 0x0000540f: +#ifdef DEBUG_IOCTLS + printk("TCSETSW, %08lx) ", arg); +#endif + error = sys_ioctl(fd, TCSETSW, arg); + break; + + case 0x00005471: +#ifdef DEBUG_IOCTLS + printk("TIOCNOTTY, %08lx) ", arg); +#endif + error = sys_ioctl(fd, TIOCNOTTY, arg); + break; + + case 0x00007416: +#ifdef DEBUG_IOCTLS + printk("TIOCGSID, %08lx) ", arg); +#endif + tp = get_tty(fd); + if(!tp) { + error = -EINVAL; + break; + } + rtp = get_real_tty(tp); +#ifdef DEBUG_IOCTLS + printk("rtp->session=%d ", rtp->session); +#endif + error = put_user(rtp->session, (unsigned long *) arg); + break; + + case 0x746e: + /* TIOCSTART, same effect as hitting ^Q */ +#ifdef DEBUG_IOCTLS + printk("TIOCSTART, %08lx) ", arg); +#endif + tp = get_tty(fd); + if(!tp) { + error = -EINVAL; + break; + } + rtp = get_real_tty(tp); + start_tty(rtp); + break; + + case 0x20006968: +#ifdef DEBUG_IOCTLS + printk("SIOCGETLABEL, %08lx) ", arg); +#endif + error = -ENOPKG; + break; + + case 0x40047477: +#ifdef DEBUG_IOCTLS + printk("TIOCGPGRP, %08lx) ", arg); +#endif + error = sys_ioctl(fd, TIOCGPGRP, arg); +#ifdef DEBUG_IOCTLS + printk("arg=%d ", *(int *)arg); +#endif + break; + + case 0x40087468: +#ifdef DEBUG_IOCTLS + printk("TIOCGWINSZ, %08lx) ", arg); +#endif + error = sys_ioctl(fd, TIOCGWINSZ, arg); + break; + + case 0x8004667e: +#ifdef DEBUG_IOCTLS + printk("FIONBIO, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, FIONBIO, arg); + break; + + case 0x80047476: +#ifdef DEBUG_IOCTLS + printk("TIOCSPGRP, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, TIOCSPGRP, arg); + break; + + case 0x8020690c: +#ifdef DEBUG_IOCTLS + printk("SIOCSIFADDR, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, SIOCSIFADDR, arg); + break; + + case 0x80206910: +#ifdef DEBUG_IOCTLS + printk("SIOCSIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, SIOCSIFFLAGS, arg); + break; + + case 0xc0206911: +#ifdef DEBUG_IOCTLS + printk("SIOCGIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, SIOCGIFFLAGS, arg); + break; + + case 0xc020691b: +#ifdef DEBUG_IOCTLS + printk("SIOCGIFMETRIC, %08lx) arg=%d ", arg, *(int *)arg); +#endif + error = sys_ioctl(fd, SIOCGIFMETRIC, arg); + break; + + default: { + char *msg = "Unimplemented IOCTL cmd tell dm@engr.sgi.com\n"; + +#ifdef DEBUG_IOCTLS + printk("UNIMP_IOCTL, %08lx)\n", arg); +#endif + old_fs = 
get_fs(); set_fs(get_ds()); + sys_write(2, msg, strlen(msg)); + set_fs(old_fs); + printk("[%s:%d] Does unimplemented IRIX ioctl cmd %08lx\n", + current->comm, current->pid, cmd); + do_exit(255); + } + + }; +#ifdef DEBUG_IOCTLS + printk("error=%d\n", error); +#endif + return error; +} diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c new file mode 100644 index 000000000..94f8004f6 --- /dev/null +++ b/arch/mips/kernel/irixsig.c @@ -0,0 +1,826 @@ +/* $Id: irixsig.c,v 1.5 1996/08/05 09:20:56 dm Exp $ + * irixsig.c: WHEEE, IRIX signals! YOW, am I compatable or what?!?! + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/time.h> + +#include <asm/ptrace.h> +#include <asm/uaccess.h> + +#undef DEBUG_SIG + +#define _S(nr) (1<<((nr)-1)) + +#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP))) + +struct sigctx_irix5 { + u32 rmask, cp0_status; + u64 pc; + u64 regs[32]; + u64 fpregs[32]; + u32 usedfp, fpcsr, fpeir, sstk_flags; + u64 hi, lo; + u64 cp0_cause, cp0_badvaddr, _unused0; + u32 sigset[4]; + u64 weird_fpu_thing; + u64 _unused1[31]; +}; + +#ifdef DEBUG_SIG +/* Debugging */ +static inline void dump_irix5_sigctx(struct sigctx_irix5 *c) +{ + int i; + + printk("misc: rmask[%08lx] status[%08lx] pc[%08lx]\n", + (unsigned long) c->rmask, + (unsigned long) c->cp0_status, + (unsigned long) c->pc); + printk("regs: "); + for(i = 0; i < 16; i++) + printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]); + printk("\nregs: "); + for(i = 16; i < 32; i++) + printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]); + printk("\nfpregs: "); + for(i = 0; i < 16; i++) + printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]); + printk("\nfpregs: "); + for(i = 16; i < 32; i++) + printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]); + printk("misc: usedfp[%d] fpcsr[%08lx] fpeir[%08lx] stk_flgs[%08lx]\n", + (int) c->usedfp, (unsigned long) c->fpcsr, + (unsigned long) c->fpeir, (unsigned long) c->sstk_flags); + printk("misc: hi[%08lx] lo[%08lx] cause[%08lx] badvaddr[%08lx]\n", + (unsigned long) c->hi, (unsigned long) c->lo, + (unsigned long) c->cp0_cause, (unsigned long) c->cp0_badvaddr); + printk("misc: sigset<0>[%08lx] sigset<1>[%08lx] sigset<2>[%08lx] " + "sigset<3>[%08lx]\n", (unsigned long) c->sigset[0], + (unsigned long) c->sigset[1], (unsigned long) c->sigset[2], + (unsigned long) c->sigset[3]); +} +#endif + +static void setup_irix_frame(struct sigaction * sa, struct pt_regs *regs, + int signr, unsigned long oldmask) +{ + unsigned long sp; + struct sigctx_irix5 *ctx; + int i; + + sp = regs->regs[29]; + sp -= sizeof(struct sigctx_irix5); + sp &= ~(0xf); + ctx = (struct sigctx_irix5 *) sp; + if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx))) + do_exit(SIGSEGV); + + __put_user(0, &ctx->weird_fpu_thing); + __put_user(~(0x00000001), &ctx->rmask); + __put_user(0, &ctx->regs[0]); + for(i = 1; i < 32; i++) + __put_user((u64) regs->regs[i], &ctx->regs[i]); + + __put_user((u64) regs->hi, &ctx->hi); + __put_user((u64) regs->lo, &ctx->lo); + __put_user((u64) regs->cp0_epc, &ctx->pc); + __put_user(current->used_math, &ctx->usedfp); + __put_user((u64) regs->cp0_cause, &ctx->cp0_cause); + __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr); + + __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... 
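	   (An editorial reading of the register setup a few lines below — the
	    symbolic names are the usual MIPS o32 ones, not identifiers from
	    this file:
		a0 (regs[4])  = signal number
		a1 (regs[5])  = 0, the not-yet-implemented "sigcode"
		a2 (regs[6])  = sp = pointer to this sigctx_irix5 frame
		a3 (regs[7])  = sa->sa_handler
		t9 (regs[25]) = epc = current->tss.irix_trampoline
	    i.e. the task resumes in the user-side trampoline that IRIX libc
	    registered through sigaction(), and that trampoline is presumably
	    what jumps to the real handler it finds in a3.)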
*/ + + __put_user(0, &ctx->sigset[1]); + __put_user(0, &ctx->sigset[2]); + __put_user(0, &ctx->sigset[3]); + __put_user(oldmask, &ctx->sigset[0]); + +#ifdef DEBUG_SIG + dump_irix5_sigctx(ctx); +#endif + + regs->regs[5] = 0; /* XXX sigcode XXX */ + regs->regs[4] = (unsigned long) signr; + regs->regs[6] = regs->regs[29] = sp; + regs->regs[7] = (unsigned long) sa->sa_handler; + regs->regs[25] = regs->cp0_epc = current->tss.irix_trampoline; +} + +extern asmlinkage int sys_waitpid(pid_t pid,unsigned long * stat_addr, int options); + +asmlinkage int do_irix_signal(unsigned long oldmask, struct pt_regs * regs) +{ + unsigned long mask = ~current->blocked; + unsigned long handler_signal = 0; + unsigned long signr; + struct sigaction * sa; + +#ifdef DEBUG_SIG + printk("[%s:%d] Delivering IRIX signal oldmask=%08lx\n", + current->comm, current->pid, oldmask); +#endif + while ((signr = current->signal & mask)) { + signr = ffz(~signr); + clear_bit(signr, ¤t->signal); + sa = current->sig->action + signr; + signr++; + if ((current->flags & PF_PTRACED) && signr != SIGKILL) { + current->exit_code = signr; + current->state = TASK_STOPPED; + notify_parent(current); + schedule(); + if (!(signr = current->exit_code)) + continue; + current->exit_code = 0; + if (signr == SIGSTOP) + continue; + if (_S(signr) & current->blocked) { + current->signal |= _S(signr); + continue; + } + sa = current->sig->action + signr - 1; + } + if (sa->sa_handler == SIG_IGN) { + if (signr != SIGCHLD) + continue; + /* check for SIGCHLD: it's special */ + while (sys_waitpid(-1,NULL,WNOHANG) > 0) + /* nothing */; + continue; + } + if (sa->sa_handler == SIG_DFL) { + if (current->pid == 1) + continue; + switch (signr) { + case SIGCONT: case SIGCHLD: case SIGWINCH: + continue; + + case SIGSTOP: case SIGTSTP: case SIGTTIN: case SIGTTOU: + if (current->flags & PF_PTRACED) + continue; + current->state = TASK_STOPPED; + current->exit_code = signr; + if (!(current->p_pptr->sig->action[SIGCHLD-1].sa_flags & + SA_NOCLDSTOP)) + notify_parent(current); + schedule(); + continue; + + case SIGQUIT: case SIGILL: case SIGTRAP: + case SIGIOT: case SIGFPE: case SIGSEGV: case SIGBUS: + if (current->binfmt && current->binfmt->core_dump) { + if (current->binfmt->core_dump(signr, regs)) + signr |= 0x80; + } + /* fall through */ + default: + current->signal |= _S(signr & 0x7f); + current->flags |= PF_SIGNALED; + do_exit(signr); + } + } + /* + * OK, we're invoking a handler + */ + if (regs->orig_reg2 >= 0) { + if (regs->regs[2] == ERESTARTNOHAND) { + regs->regs[2] = EINTR; + } else if((regs->regs[2] == ERESTARTSYS && + !(sa->sa_flags & SA_RESTART))) { + regs->regs[2] = regs->orig_reg2; + regs->cp0_epc -= 8; + } + } + handler_signal |= 1 << (signr-1); + mask &= ~*to_k_sigset_t(&sa->sa_mask); + } + /* + * Who's code doesn't conform to the restartable syscall convention + * dies here!!! The li instruction, a single machine instruction, + * must directly be followed by the syscall instruction. 
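 *
 * (Concretely, the convention being demanded of userland is the pair —
 * an illustrative sketch, not quoted from any particular libc:
 *
 *	li	v0, syscall_number
 *	syscall
 *
 * The restart paths above and below do cp0_epc -= 8, i.e. they back the
 * PC up by exactly two 4-byte instructions, so execution resumes at the
 * li, v0 is reloaded with the syscall number it held before being
 * overwritten by the error return, and the syscall is issued again.
 * Any other instruction sitting between the li and the syscall breaks
 * this restart scheme.)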
+ */ + if (regs->orig_reg2 >= 0 && + (regs->regs[2] == ERESTARTNOHAND || + regs->regs[2] == ERESTARTSYS || + regs->regs[2] == ERESTARTNOINTR)) { + regs->regs[2] = regs->orig_reg2; + regs->cp0_epc -= 8; + } + if (!handler_signal) /* no handler will be called - return 0 */ + return 0; + signr = 1; + sa = current->sig->action; + for (mask = 1 ; mask ; sa++,signr++,mask += mask) { + if (mask > handler_signal) + break; + if (!(mask & handler_signal)) + continue; + setup_irix_frame(sa, regs, signr, oldmask); + if (sa->sa_flags & SA_ONESHOT) + sa->sa_handler = NULL; + current->blocked |= *to_k_sigset_t(&sa->sa_mask); + oldmask |= *to_k_sigset_t(&sa->sa_mask); + } + + return 1; +} + +asmlinkage unsigned long irix_sigreturn(struct pt_regs *regs) +{ + struct sigctx_irix5 *context, *magic; + unsigned long umask, mask; + u64 *fregs, res; + int sig, i, base = 0; + + if(regs->regs[2] == 1000) + base = 1; + + context = (struct sigctx_irix5 *) regs->regs[base + 4]; + magic = (struct sigctx_irix5 *) regs->regs[base + 5]; + sig = (int) regs->regs[base + 6]; +#ifdef DEBUG_SIG + printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n", + current->comm, current->pid, context, magic, sig); +#endif + if (!context) + context = magic; + if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5))) + goto badframe; + +#ifdef DEBUG_SIG + dump_irix5_sigctx(context); +#endif + + __get_user(regs->cp0_epc, &context->pc); + umask = context->rmask; mask = 2; + for (i = 1; i < 32; i++, mask <<= 1) { + if(umask & mask) + __get_user(regs->regs[i], &context->regs[i]); + } + __get_user(regs->hi, &context->hi); + __get_user(regs->lo, &context->lo); + + if ((umask & 1) && context->usedfp) { + fregs = (u64 *) ¤t->tss.fpu; + for(i = 0; i < 32; i++) + fregs[i] = (u64) context->fpregs[i]; + __get_user(current->tss.fpu.hard.control, &context->fpcsr); + } + + /* XXX do sigstack crapola here... XXX */ + + regs->orig_reg2 = -1; + __get_user(current->blocked, &context->sigset[0]); + current->blocked &= _BLOCKABLE; + __get_user(res, &context->regs[2]); + return res; + +badframe: + do_exit(SIGSEGV); +} + +struct sigact_irix5 { + int flags; + void (*handler)(int); + u32 sigset[4]; + int _unused0[2]; +}; + +#ifdef DEBUG_SIG +static inline void dump_sigact_irix5(struct sigact_irix5 *p) +{ + printk("<f[%d] hndlr[%08lx] msk[%08lx]>", p->flags, + (unsigned long) p->handler, + (unsigned long) p->sigset[0]); +} +#endif + +static inline void check_pending(int signum) +{ + struct sigaction *p; + + p = signum - 1 + current->sig->action; + if (p->sa_handler == SIG_IGN) { + current->signal &= ~_S(signum); + return; + } + if (p->sa_handler == SIG_DFL) { + if (signum != SIGCONT && signum != SIGCHLD && signum != SIGWINCH) + return; + current->signal &= ~_S(signum); + return; + } +} + +asmlinkage int irix_sigaction(int sig, struct sigact_irix5 *new, + struct sigact_irix5 *old, unsigned long trampoline) +{ + struct sigaction new_sa, *p; + +#ifdef DEBUG_SIG + printk(" (%d,%s,%s,%08lx) ", sig, (!new ? "0" : "NEW"), + (!old ? 
"0" : "OLD"), trampoline); + if(new) { + dump_sigact_irix5(new); printk(" "); + } +#endif + if(sig < 1 || sig > 32) + return -EINVAL; + p = sig - 1 + current->sig->action; + + if(new) { + int err = verify_area(VERIFY_READ, new, sizeof(*new)); + if(err) + return err; + if(sig == SIGKILL || sig == SIGSTOP) + return -EINVAL; + new_sa.sa_flags = new->flags; + new_sa.sa_handler = (__sighandler_t) new->handler; + new_sa.sa_mask.__sigbits[1] = new_sa.sa_mask.__sigbits[2] = + new_sa.sa_mask.__sigbits[3] = 0; + new_sa.sa_mask.__sigbits[0] = new->sigset[0]; + + if(new_sa.sa_handler != SIG_DFL && new_sa.sa_handler != SIG_IGN) { + err = verify_area(VERIFY_READ, new_sa.sa_handler, 1); + if(err) + return err; + } + } + /* Hmmm... methinks IRIX libc always passes a valid trampoline + * value for all invocations of sigaction. Will have to + * investigate. POSIX POSIX, die die die... + */ + current->tss.irix_trampoline = trampoline; + if(old) { + int err = verify_area(VERIFY_WRITE, old, sizeof(*old)); + if(err) + return err; + old->flags = p->sa_flags; + old->handler = (void *) p->sa_handler; + old->sigset[1] = old->sigset[2] = old->sigset[3] = 0; + old->sigset[0] = p->sa_mask.__sigbits[0]; + old->_unused0[0] = old->_unused0[1] = 0; + } + + if(new) { + *p = new_sa; + check_pending(sig); + } + + return 0; +} + +asmlinkage int irix_sigpending(unsigned long *set) +{ + int err; + + err = verify_area(VERIFY_WRITE, set, (sizeof(unsigned long) * 4)); + if(!err) { + set[1] = set[2] = set[3] = 0; + set[0] = (current->blocked & current->signal); + } + return err; +} + +asmlinkage int irix_sigprocmask(int how, unsigned long *new, unsigned long *old) +{ + unsigned long bits, oldbits = current->blocked; + int error; + + if(new) { + error = verify_area(VERIFY_READ, new, (sizeof(unsigned long) * 4)); + if(error) + return error; + bits = new[0] & _BLOCKABLE; + switch(how) { + case 1: + current->blocked |= bits; + break; + + case 2: + current->blocked &= ~bits; + break; + + case 3: + case 256: + current->blocked = bits; + break; + + default: + return -EINVAL; + } + } + if(old) { + error = verify_area(VERIFY_WRITE, old, (sizeof(unsigned long) * 4)); + if(error) + return error; + old[1] = old[2] = old[3] = 0; + old[0] = oldbits; + } + return 0; +} + +asmlinkage int irix_sigsuspend(struct pt_regs *regs) +{ + unsigned int mask; + unsigned long *uset; + int base = 0; + + if(regs->regs[2] == 1000) + base = 1; + + mask = current->blocked; + uset = (unsigned long *) regs->regs[base + 4]; + if(verify_area(VERIFY_READ, uset, (sizeof(unsigned long) * 4))) + return -EFAULT; + current->blocked = uset[0] & _BLOCKABLE; + while(1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if(do_irix_signal(mask, regs)) + return -EINTR; + } +} + +/* hate hate hate... */ +struct irix5_siginfo { + int sig, code, error; + union { + char unused[128 - (3 * 4)]; /* Safety net. 
*/ + struct { + int pid; + union { + int uid; + struct { + int utime, status, stime; + } child; + } procdata; + } procinfo; + + unsigned long fault_addr; + + struct { + int fd; + long band; + } fileinfo; + + unsigned long sigval; + } stuff; +}; + +static inline unsigned long timespectojiffies(struct timespec *value) +{ + unsigned long sec = (unsigned) value->tv_sec; + long nsec = value->tv_nsec; + + if (sec > (LONG_MAX / HZ)) + return LONG_MAX; + nsec += 1000000000L / HZ - 1; + nsec /= 1000000000L / HZ; + return HZ * sec + nsec; +} + +asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, + struct timespec *tp) +{ + unsigned long mask, kset, expire = 0; + int sig, error, timeo = 0; + +#ifdef DEBUG_SIG + printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n", + current->comm, current->pid, set, info, tp); +#endif + + /* Must always specify the signal set. */ + if(!set) + return -EINVAL; + kset = set[0]; + + error = verify_area(VERIFY_READ, set, (sizeof(unsigned long) * 4)); + if(error) + return error; + + if(info) { + error = verify_area(VERIFY_WRITE, info, sizeof(*info)); + if(error) + return error; + memset(info, 0, sizeof(*info)); + } + + if(tp) { + error = verify_area(VERIFY_READ, tp, sizeof(*tp)); + if(error) + return error; + if(!tp->tv_sec && !tp->tv_nsec) + return -EINVAL; + expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec)+jiffies; + current->timeout = expire; + } + + while(1) { + current->state = TASK_INTERRUPTIBLE; schedule(); + if(current->signal & kset) break; + if(tp && expire <= jiffies) { + timeo = 1; + break; + } + if(current->signal & ~(current->blocked)) return -EINTR; + } + + if(timeo) return -EAGAIN; + for(sig = 1, mask = 2; mask; mask <<= 1, sig++) { + if(!(mask & kset)) continue; + if(mask & current->signal) { + /* XXX need more than this... */ + if(info) info->sig = sig; + return 0; + } + } + + /* Should not get here, but do something sane if we do. */ + return -EINTR; +} + +/* This is here because of irix5_siginfo definition. 
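	   (A worked detail from the helper above: timespectojiffies() rounds
	    the timeout up,
		jiffies = sec * HZ + ceil(nsec / (1000000000 / HZ))
	    clamped to LONG_MAX when sec * HZ alone would overflow, so any
	    nonzero fraction of a clock tick still costs a whole tick — an
	    editorial restatement of that arithmetic.)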
*/ +#define P_PID 0 +#define P_PGID 2 +#define P_ALL 7 + +extern int getrusage(struct task_struct *, int, struct rusage *); +extern void release(struct task_struct * p); + +#define W_EXITED 1 +#define W_TRAPPED 2 +#define W_STOPPED 4 +#define W_CONT 8 +#define W_NOHANG 64 + +#define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG) + +asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info, + int options, struct rusage *ru) +{ + int flag, retval; + struct wait_queue wait = { current, NULL }; + struct task_struct *p; + + if(!info) + return -EINVAL; + flag = verify_area(VERIFY_WRITE, info, sizeof(*info)); + if(flag) + return flag; + if(ru) { + flag = verify_area(VERIFY_WRITE, ru, sizeof(*ru)); + if(flag) + return flag; + } + if(options & ~(W_MASK)) + return -EINVAL; + if(type != P_PID && type != P_PGID && type != P_ALL) + return -EINVAL; + add_wait_queue(¤t->wait_chldexit, &wait); +repeat: + flag = 0; + for(p = current->p_cptr; p; p = p->p_osptr) { + if((type == P_PID) && p->pid != pid) + continue; + if((type == P_PGID) && p->pgrp != pid) + continue; + if((p->exit_signal != SIGCHLD)) + continue; + flag = 1; + switch(p->state) { + case TASK_STOPPED: + if (!p->exit_code) + continue; + if (!(options & (W_TRAPPED|W_STOPPED)) && + !(p->flags & PF_PTRACED)) + continue; + if (ru != NULL) + getrusage(p, RUSAGE_BOTH, ru); + info->sig = SIGCHLD; + info->code = 0; + info->stuff.procinfo.pid = p->pid; + info->stuff.procinfo.procdata.child.status = + (p->exit_code >> 8) & 0xff; + info->stuff.procinfo.procdata.child.utime = + p->utime; + info->stuff.procinfo.procdata.child.stime = + p->stime; + p->exit_code = 0; + retval = 0; + goto end_waitsys; + case TASK_ZOMBIE: + current->cutime += p->utime + p->cutime; + current->cstime += p->stime + p->cstime; + if (ru != NULL) + getrusage(p, RUSAGE_BOTH, ru); + info->sig = SIGCHLD; + info->code = 1; /* CLD_EXITED */ + info->stuff.procinfo.pid = p->pid; + info->stuff.procinfo.procdata.child.status = + (p->exit_code >> 8) & 0xff; + info->stuff.procinfo.procdata.child.utime = + p->utime; + info->stuff.procinfo.procdata.child.stime = + p->stime; + retval = 0; + if (p->p_opptr != p->p_pptr) { + REMOVE_LINKS(p); + p->p_pptr = p->p_opptr; + SET_LINKS(p); + notify_parent(p); + } else + release(p); + goto end_waitsys; + default: + continue; + } + } + if(flag) { + retval = 0; + if(options & W_NOHANG) + goto end_waitsys; + retval = -ERESTARTSYS; + if(current->signal & ~current->blocked) + goto end_waitsys; + current->state = TASK_INTERRUPTIBLE; + schedule(); + goto repeat; + } + retval = -ECHILD; +end_waitsys: + remove_wait_queue(¤t->wait_chldexit, &wait); + return retval; +} + +struct irix5_context { + u32 flags; + u32 link; + u32 sigmask[4]; + struct { u32 sp, size, flags; } stack; + int regs[36]; + u32 fpregs[32]; + u32 fpcsr; + u32 _unused0; + u32 _unused1[47]; + u32 weird_graphics_thing; +}; + +asmlinkage int irix_getcontext(struct pt_regs *regs) +{ + int error, i, base = 0; + struct irix5_context *ctx; + + if(regs->regs[2] == 1000) + base = 1; + ctx = (struct irix5_context *) regs->regs[base + 4]; + +#ifdef DEBUG_SIG + printk("[%s:%d] irix_getcontext(%p)\n", + current->comm, current->pid, ctx); +#endif + + error = verify_area(VERIFY_WRITE, ctx, sizeof(*ctx)); + if(error) + return error; + ctx->flags = 0x0f; + ctx->link = current->tss.irix_oldctx; + ctx->sigmask[1] = ctx->sigmask[2] = ctx->sigmask[4] = 0; + ctx->sigmask[0] = current->blocked; + + /* XXX Do sigstack stuff someday... 
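	   (From the way irix_setcontext() consumes ctx->flags further down,
	    the bits appear to mean roughly:
		0x01  signal mask fields valid
		0x02  signal-stack fields valid
		0x04  general registers valid
		0x08  FPU state valid
	    which is why getcontext sets 0x0f here and then clears 0x08 again
	    for a task that never touched the FPU.  Inferred from this file
	    alone; the bit meanings are descriptive guesses, not IRIX's own
	    definitions.)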
*/ + ctx->stack.sp = ctx->stack.size = ctx->stack.flags = 0; + + ctx->weird_graphics_thing = 0; + ctx->regs[0] = 0; + for(i = 1; i < 32; i++) + ctx->regs[i] = regs->regs[i]; + ctx->regs[32] = regs->lo; + ctx->regs[33] = regs->hi; + ctx->regs[34] = regs->cp0_cause; + ctx->regs[35] = regs->cp0_epc; + if(!current->used_math) { + ctx->flags &= ~(0x08); + } else { + /* XXX wheee... */ + printk("Wheee, no code for saving IRIX FPU context yet.\n"); + } + return 0; +} + +asmlinkage unsigned long irix_setcontext(struct pt_regs *regs) +{ + int error, base = 0; + struct irix5_context *ctx; + + if(regs->regs[2] == 1000) + base = 1; + ctx = (struct irix5_context *) regs->regs[base + 4]; + +#ifdef DEBUG_SIG + printk("[%s:%d] irix_setcontext(%p)\n", + current->comm, current->pid, ctx); +#endif + + error = verify_area(VERIFY_READ, ctx, sizeof(*ctx)); + if(error) + return error; + + if(ctx->flags & 0x02) { + /* XXX sigstack garbage, todo... */ + printk("Wheee, cannot do sigstack stuff in setcontext\n"); + } + + if(ctx->flags & 0x04) { + int i; + + /* XXX extra control block stuff... todo... */ + for(i = 1; i < 32; i++) + regs->regs[i] = ctx->regs[i]; + regs->lo = ctx->regs[32]; + regs->hi = ctx->regs[33]; + regs->cp0_epc = ctx->regs[35]; + } + + if(ctx->flags & 0x08) { + /* XXX fpu context, blah... */ + printk("Wheee, cannot restore FPU context yet...\n"); + } + current->tss.irix_oldctx = ctx->link; + return regs->regs[2]; +} + +struct irix_sigstack { unsigned long sp; int status; }; + +asmlinkage int irix_sigstack(struct irix_sigstack *new, struct irix_sigstack *old) +{ + int error; + +#ifdef DEBUG_SIG + printk("[%s:%d] irix_sigstack(%p,%p)\n", + current->comm, current->pid, new, old); +#endif + if(new) { + error = verify_area(VERIFY_READ, new, sizeof(*new)); + if(error) + return error; + } + + if(old) { + error = verify_area(VERIFY_WRITE, old, sizeof(*old)); + if(error) + return error; + } + return 0; +} + +struct irix_sigaltstack { unsigned long sp; int size; int status; }; + +asmlinkage int irix_sigaltstack(struct irix_sigaltstack *new, + struct irix_sigaltstack *old) +{ + int error; + +#ifdef DEBUG_SIG + printk("[%s:%d] irix_sigaltstack(%p,%p)\n", + current->comm, current->pid, new, old); +#endif + if(new) { + error = verify_area(VERIFY_READ, new, sizeof(*new)); + if(error) + return error; + } + + if(old) { + error = verify_area(VERIFY_WRITE, old, sizeof(*old)); + if(error) + return error; + } + return 0; +} + +struct irix_procset { + int cmd, ltype, lid, rtype, rid; +}; + +asmlinkage int irix_sigsendset(struct irix_procset *pset, int sig) +{ + int error; + + error = verify_area(VERIFY_READ, pset, sizeof(*pset)); +#ifdef DEBUG_SIG + printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n", + current->comm, current->pid, + pset->cmd, pset->ltype, pset->lid, pset->rtype, pset->rid, + sig); +#endif + + return -EINVAL; +} diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index a6f257c88..2192112e9 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -11,6 +11,7 @@ * * Mips support by Ralf Baechle and Andreas Busse */ +#include <linux/config.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/signal.h> @@ -30,6 +31,9 @@ #include <asm/mipsregs.h> #include <asm/system.h> #include <asm/vector.h> +#ifdef CONFIG_SGI +#include <asm/sgialib.h> +#endif unsigned char cache_21 = 0xff; unsigned char cache_A1 = 0xff; @@ -88,10 +92,9 @@ void enable_irq(unsigned int irq_nr) } /* - * Low-level interrupt handlers: first the timer interrupt, then the - * general, then the 
fast and finally the bad interrupt handler. + * Pointers to the low-level handlers: first the general ones, then the + * fast ones, then the bad ones. */ -extern void timer_interrupt(void); extern void interrupt(void); extern void fast_interrupt(void); extern void bad_interrupt(void); @@ -112,7 +115,7 @@ int get_irq_list(char *buf) action = irq_action[i]; if (!action) continue; - len += sprintf(buf+len, "%2d: %8u %c %s", + len += sprintf(buf+len, "%2d: %8d %c %s", i, kstat.interrupts[i], (action->flags & SA_INTERRUPT) ? '+' : ' ', action->name); @@ -136,16 +139,18 @@ int get_irq_list(char *buf) asmlinkage void do_IRQ(int irq, struct pt_regs * regs) { struct irqaction * action = *(irq + irq_action); - int do_random = 0; - kstat.interrupts[irq]++; +#ifdef CONFIG_SGI + prom_printf("Got irq %d, press a key.", irq); + prom_getchar(); + romvec->imode(); +#endif while (action) { - do_random |= action->flags; + if (action->flags & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); action->handler(irq, action->dev_id, regs); action = action->next; } - if (do_random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); } /* @@ -156,16 +161,14 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs) asmlinkage void do_fast_IRQ(int irq) { struct irqaction * action = *(irq + irq_action); - int do_random = 0; kstat.interrupts[irq]++; while (action) { - do_random |= action->flags; + if (action->flags & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); action->handler(irq, action->dev_id, NULL); action = action->next; } - if (do_random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); } /* @@ -209,10 +212,7 @@ int setup_x86_irq(int irq, struct irqaction * new) if (new->flags & SA_INTERRUPT) set_int_vector(irq,fast_interrupt); else - if (irq == 0) - set_int_vector(irq,timer_interrupt); - else - set_int_vector(irq,interrupt); + set_int_vector(irq,interrupt); unmask_irq(irq); } restore_flags(flags); @@ -307,7 +307,7 @@ int probe_irq_off (unsigned long irqs) irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21; #ifdef DEBUG - printk("probe_irq_off: irqs=0x%04lx irqmask=0x%04x\n", irqs, irqmask); + printk("probe_irq_off: irqs=0x%04x irqmask=0x%04x\n", irqs, irqmask); #endif irqs &= irqmask; if (!irqs) diff --git a/arch/mips/kernel/ksyms.c b/arch/mips/kernel/ksyms.c index c80bf588a..28650741e 100644 --- a/arch/mips/kernel/ksyms.c +++ b/arch/mips/kernel/ksyms.c @@ -8,8 +8,8 @@ * Copyright (C) 1996 by Ralf Baechle */ #include <linux/module.h> +#include <linux/string.h> #include <linux/mm.h> -#include <asm/cache.h> #include <asm/dma.h> #include <asm/floppy.h> #include <asm/io.h> @@ -20,12 +20,13 @@ static struct symbol_table arch_symbol_table = { /* * String functions */ - X(__generic_memset_b), - X(__generic_memset_dw), + X(memset), + X(memcpy), + X(memmove), + X(bcopy), /* * Functions to control caches. */ - X(cacheflush), X(fd_cacheflush), /* * Base address of ports for Intel style I/O. diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 2ce906ea4..99e3a3075 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -9,6 +9,7 @@ * though it does not yet currently fully support the DECStation, * or R3000 - PMA. 
*/ +#include <linux/config.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -23,42 +24,47 @@ #include <linux/a.out.h> #include <asm/bootinfo.h> -#include <asm/cache.h> -#include <asm/uaccess.h> +#include <asm/segment.h> #include <asm/pgtable.h> -#include <asm/sgidefs.h> #include <asm/system.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/stackframe.h> +#include <asm/uaccess.h> #include <asm/io.h> +#include <asm/elf.h> +#ifdef CONFIG_SGI +#include <asm/sgialib.h> +#endif + +int active_ds = USER_DS; asmlinkage void ret_from_sys_call(void); /* - * Free current thread data structures etc.. + * Do necessary setup to start up a newly executed thread. */ +void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) +{ + /* New thread looses kernel privileges. */ + regs->cp0_status = (regs->cp0_status & ~(ST0_CU0|ST0_KSU)) | KSU_USER; + regs->cp0_epc = pc; + regs->regs[29] = sp; + current->tss.current_ds = USER_DS; +} + void exit_thread(void) { - /* - * Nothing to do - */ } void flush_thread(void) { - /* - * Nothing to do - */ } void release_thread(struct task_struct *dead_task) { - /* - * Nothing to do - */ } - + void copy_thread(int nr, unsigned long clone_flags, unsigned long usp, struct task_struct * p, struct pt_regs * regs) { @@ -66,80 +72,66 @@ void copy_thread(int nr, unsigned long clone_flags, unsigned long usp, long childksp; childksp = p->kernel_stack_page + KERNEL_STACK_SIZE - 8; - /* - * set up new TSS - */ + + /* set up new TSS. */ childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1; *childregs = *regs; - childregs->regs[2] = (__register_t) 0; /* Child gets zero as return value */ - childregs->regs[7] = (__register_t) 0; /* Clear error flag */ - regs->regs[2] = (__register_t) p->pid; - if (childregs->cp0_status & ST0_CU0) - childregs->regs[29] = (__register_t) childksp; - else - childregs->regs[29] = (__register_t) usp; + childregs->regs[7] = 0; /* Clear error flag */ + if(current->personality == PER_LINUX) { + childregs->regs[2] = 0; /* Child gets zero as return value */ + regs->regs[2] = p->pid; + } else { + /* Under IRIX things are a little different. */ + childregs->regs[2] = 0; + childregs->regs[3] = 1; + regs->regs[2] = p->pid; + regs->regs[3] = 0; + } + if (childregs->cp0_status & ST0_CU0) { + childregs->regs[29] = childksp; + p->tss.current_ds = KERNEL_DS; + } else { + childregs->regs[29] = usp; + p->tss.current_ds = USER_DS; + } p->tss.ksp = childksp; - p->tss.reg29 = (__register_t)(long) childregs; /* new sp */ - p->tss.reg31 = (__register_t) ret_from_sys_call; - - /* - * Copy thread specific flags. - */ - p->tss.mflags = p->tss.mflags; + p->tss.reg29 = (unsigned long) childregs; + p->tss.reg31 = (unsigned long) ret_from_sys_call; /* * New tasks loose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) p->tss.cp0_status = read_32bit_cp0_register(CP0_STATUS) & - ~(ST0_CU3|ST0_CU2|ST0_CU1); -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) - p->tss.cp0_status = read_32bit_cp0_register(CP0_STATUS) & - ~(ST0_CU3|ST0_CU2|ST0_CU1|ST0_KSU|ST0_ERL|ST0_EXL); -#endif + ~(ST0_CU3|ST0_CU2|ST0_CU1|ST0_KSU|ST0_ERL|ST0_EXL); childregs->cp0_status &= ~(ST0_CU3|ST0_CU2|ST0_CU1); + p->mm->context = 0; } -/* - * Do necessary setup to start up a newly executed thread. 
- */ -extern void (*switch_to_user_mode)(struct pt_regs *regs); - -void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) -{ - set_fs(USER_DS); - regs->cp0_epc = (__register_t) pc; - /* - * New thread loses kernel privileges. - */ - switch_to_user_mode(regs); - regs->regs[29] = (__register_t) sp; - regs->regs[31] = 0; -} - -/* - * fill in the fpu structure for a core dump.. - * - * Actually this is "int dump_fpu (struct pt_regs * regs, struct user *fpu)" - */ -int dump_fpu (int shutup_the_gcc_warning_about_elf_fpregset_t) +/* Fill in the fpu structure for a core dump.. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) { - int fpvalid = 0; - /* - * To do... + /* We actually store the FPU info in the task->tss + * area. */ - - return fpvalid; + if(regs->cp0_status & ST0_CU1) { + memcpy(r, ¤t->tss.fpu, sizeof(current->tss.fpu)); + return 1; + } + return 0; /* Task didn't use the fpu at all. */ } -/* - * fill in the user structure for a core dump.. - */ -void dump_thread(struct pt_regs * regs, struct user * dump) +/* Fill in the user structure for a core dump.. */ +void dump_thread(struct pt_regs *regs, struct user *dump) { - /* - * To do... - */ + dump->magic = CMAGIC; + dump->start_code = current->mm->start_code; + dump->start_data = current->mm->start_data; + dump->start_stack = regs->regs[29] & ~(PAGE_SIZE - 1); + dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT; + dump->u_dsize = (current->mm->brk + (PAGE_SIZE - 1) - dump->start_data) >> PAGE_SHIFT; + dump->u_ssize = + (current->mm->start_stack - dump->start_stack + PAGE_SIZE - 1) >> PAGE_SHIFT; + memcpy(&dump->regs[0], regs, sizeof(struct pt_regs)); + memcpy(&dump->regs[EF_SIZE/4], ¤t->tss.fpu, sizeof(current->tss.fpu)); } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 93fae9961..018b68023 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -1,12 +1,8 @@ -/* - * Ptrace(2) syscall for MIPS. Based on arch/i386/kernel/ptrace.c. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1995, 1996 by Ralf Baechle. - */ +/* ptrace.c */ +/* By Ross Biro 1/23/92 */ +/* edited by Linus Torvalds */ +/* further hacked for MIPS by David S. Miller (dm@engr.sgi.com) */ + #include <linux/head.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -17,26 +13,11 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> +#include <asm/page.h> #include <asm/system.h> -/* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. - */ - -/* determines which flags the user has access to. */ -/* 1 = access 0 = no access */ -#define FLAG_MASK 0x00044dd5 - -/* - * this is the number to subtract from the top of the stack. To find - * the local frame. - */ -#define MAGICNUMBER 68 - /* change a pid into a task struct. */ -static inline struct task_struct * -get_task(int pid) +static inline struct task_struct * get_task(int pid) { int i; @@ -48,51 +29,18 @@ get_task(int pid) } /* - * This routine will get a word off of the processes privileged stack. - * The offset is how far from the base addr as stored in the TSS. - */ -static inline int -get_stack_long(struct task_struct *task, int offset) -{ - unsigned char *stack; - - stack = (unsigned char *)(unsigned long)task->tss.reg29; - stack += offset; - return (*((int *)stack)); -} - -/* - * this routine will put a word on the processes privileged stack. 
- * the offset is how far from the base addr as stored in the TSS. - * this routine assumes that all the privileged stacks are in our - * data space. - */ -static inline int -put_stack_long(struct task_struct *task, int offset, - unsigned long data) -{ - unsigned char * stack; - - stack = (unsigned char *)(unsigned long)task->tss.reg29; - stack += offset; - *(unsigned long *) stack = data; - return 0; -} - -/* * This routine gets a long from any process space by following the page * tables. NOTE! You should check that the long isn't on a page boundary, * and that it is in the task area before calling this: this routine does * no checking. */ -static unsigned long -get_long(struct task_struct * tsk, - struct vm_area_struct * vma, unsigned long addr) +static unsigned long get_long(struct task_struct * tsk, + struct vm_area_struct * vma, unsigned long addr) { - pgd_t * pgdir; - pmd_t * pgmiddle; - pte_t * pgtable; - unsigned long page; + pgd_t *pgdir; + pmd_t *pgmiddle; + pte_t *pgtable; + unsigned long page, retval; repeat: pgdir = pgd_offset(vma->vm_mm, addr); @@ -120,11 +68,14 @@ repeat: do_no_page(tsk, vma, addr, 0); goto repeat; } + page = pte_page(*pgtable); /* this is a hack for non-kernel-mapped video buffers and similar */ - if (MAP_NR(page) < max_mapnr) + if (MAP_NR(page) >= MAP_NR(high_memory)) return 0; page += addr & ~PAGE_MASK; - return *(unsigned long *) page; + retval = *(unsigned long *) page; + flush_page_to_ram(page); + return retval; } /* @@ -136,9 +87,9 @@ repeat: * Now keeps R/W state of page so that a text page stays readonly * even if a debugger scribbles breakpoints into it. -M.U- */ -static void -put_long(struct task_struct * tsk, - struct vm_area_struct * vma, unsigned long addr, unsigned long data) +static void put_long(struct task_struct *tsk, + struct vm_area_struct * vma, unsigned long addr, + unsigned long data) { pgd_t *pgdir; pmd_t *pgmiddle; @@ -176,22 +127,19 @@ repeat: do_wp_page(tsk, vma, addr, 1); goto repeat; } - /* - * This is a hack for non-kernel-mapped video buffers and similar - */ - if (page >= high_memory) +/* this is a hack for non-kernel-mapped video buffers and similar */ + flush_cache_page(vma, addr); + if (MAP_NR(page) < MAP_NR(high_memory)) { *(unsigned long *) (page + (addr & ~PAGE_MASK)) = data; - /* - * We're bypassing pagetables, so we have to set the dirty bit - * ourselves. This should also re-instate whatever read-only mode - * there was before - */ + flush_page_to_ram(page); + } +/* we're bypassing pagetables, so we have to set the dirty bit ourselves */ +/* this should also re-instate whatever read-only mode there was before */ set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); - flush_tlb(); + flush_tlb_page(vma, addr); } -static struct vm_area_struct * -find_extend_vma(struct task_struct * tsk, unsigned long addr) +static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr) { struct vm_area_struct * vma; @@ -214,9 +162,8 @@ find_extend_vma(struct task_struct * tsk, unsigned long addr) * This routine checks the page boundaries, and that the offset is * within the task area. It then calls get_long() to read a long. 
*/ -static int -read_long(struct task_struct * tsk, unsigned long addr, - unsigned long * result) +static int read_long(struct task_struct * tsk, unsigned long addr, + unsigned long * result) { struct vm_area_struct * vma = find_extend_vma(tsk, addr); @@ -257,9 +204,8 @@ read_long(struct task_struct * tsk, unsigned long addr, * This routine checks the page boundaries, and that the offset is * within the task area. It then calls put_long() to write a long. */ -static int -write_long(struct task_struct * tsk, unsigned long addr, - unsigned long data) +static int write_long(struct task_struct * tsk, unsigned long addr, + unsigned long data) { struct vm_area_struct * vma = find_extend_vma(tsk, addr); @@ -306,14 +252,15 @@ write_long(struct task_struct * tsk, unsigned long addr, return 0; } -asmlinkage int -sys_ptrace(long request, long pid, long addr, long data) +asmlinkage int sys_ptrace(long request, long pid, long addr, long data) { struct task_struct *child; - struct user * dummy; - - dummy = NULL; +#if 0 + printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n", + (int) request, (int) pid, (unsigned long) addr, + (unsigned long) data); +#endif if (request == PTRACE_TRACEME) { /* are we already being traced? */ if (current->flags & PF_PTRACED) @@ -334,7 +281,7 @@ sys_ptrace(long request, long pid, long addr, long data) (current->uid != child->suid) || (current->uid != child->uid) || (current->gid != child->egid) || - (current->gid != child->sgid) || + (current->gid != child->sgid) || (current->gid != child->gid)) && !suser()) return -EPERM; /* the same process cannot be attached many times */ @@ -359,6 +306,7 @@ sys_ptrace(long request, long pid, long addr, long data) return -ESRCH; switch (request) { + /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; @@ -369,83 +317,114 @@ sys_ptrace(long request, long pid, long addr, long data) return res; res = verify_area(VERIFY_WRITE, (void *) data, sizeof(long)); if (!res) - put_user(tmp, (unsigned long *)data); + put_user(tmp,(unsigned long *) data); return res; } -#if 0 - /* - * Read the word at location addr in the USER area. - */ + /* read the word at location addr in the USER area. */ +/* #define DEBUG_PEEKUSR */ case PTRACE_PEEKUSR: { + struct pt_regs *regs; unsigned long tmp; int res; - if (addr < 0 || addr > sizeof(struct user) - 3) - return -EIO; - - tmp = 0; /* Default return condition */ - if(addr < 17*sizeof(long)) { - addr = addr >> 2; /* temporary hack. */ - - tmp = get_stack_long(child, sizeof(long)*addr - MAGICNUMBER); - if (addr == DS || addr == ES || - addr == FS || addr == GS || - addr == CS || addr == SS) - tmp &= 0xffff; + regs = (struct pt_regs *) + (child->tss.ksp - sizeof(struct pt_regs)); + res = verify_area(VERIFY_WRITE, (void *) data, sizeof(long)); + if(res < 0) + return res; + res = tmp = 0; /* Default return value. */ + if(addr < 32 && addr >= 0) { + tmp = regs->regs[addr]; + } else if(addr >= 32 && addr < 64) { + unsigned long long *fregs; + + /* We don't want to do a FPU operation here. 
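	   (For orientation, the pseudo-register layout this PEEKUSR/POKEUSR
	    code implements appears to be: addr 0..31 the general purpose
	    registers from the saved pt_regs, addr 32..63 the 64-bit FP
	    registers taken from the thread's software-saved FPU area — hence
	    no FPU instruction is needed here — and from addr 64 on: epc,
	    cause, badvaddr, lo, hi, the FPU control/status word, then a slot
	    that always reads as zero.  Only epc, lo, hi and the FPU control
	    word of that last group are writable through POKEUSR.  An
	    editorial summary of the switch statements below, not a
	    documented ptrace ABI.)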
*/ + fregs = (unsigned long long *) + &child->tss.fpu.hard.fp_regs[0]; + tmp = (unsigned long) fregs[(addr - 32)]; + } else { + addr -= 64; + switch(addr) { + case 0: + tmp = regs->cp0_epc; + break; + case 1: + tmp = regs->cp0_cause; + break; + case 2: + tmp = regs->cp0_badvaddr; + break; + case 3: + tmp = regs->lo; + break; + case 4: + tmp = regs->hi; + break; + case 5: + tmp = child->tss.fpu.hard.control; + break; + case 6: + tmp = 0; + break; + default: + tmp = 0; + res = -EIO; + break; + }; } - put_user(tmp, (unsigned long *)data); - return 0; + if(!res) + put_user(tmp, (unsigned long *) data); + return res; } -#endif - /* - * Write the word at location addr. - */ - case PTRACE_POKETEXT: + + /* when I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: return write_long(child,addr,data); - /* - * Write the word at location addr in the user area. - */ - case PTRACE_POKEUSR: -#if 0 - if ((addr & 3) || addr < 0 || - addr > sizeof(struct user) - 3) - return -EIO; - - addr = addr >> 2; /* temporary hack. */ - - if (addr == ORIG_EAX) - return -EIO; - if (addr == DS || addr == ES || - addr == FS || addr == GS || - addr == CS || addr == SS) { - data &= 0xffff; - if (data && (data & 3) != 3) - return -EIO; - } - if (addr == EFL) { /* flags. */ - data &= FLAG_MASK; - data |= get_stack_long(child, EFL*sizeof(long)-MAGICNUMBER) & ~FLAG_MASK; + case PTRACE_POKEUSR: { + struct pt_regs *regs; + int res = 0; + + regs = (struct pt_regs *) + (child->tss.ksp - sizeof(struct pt_regs)); + if(addr < 32 && addr >= 0) { + regs->regs[addr] = data; + } else if(addr >= 32 && addr < 64) { + unsigned long long *fregs; + + /* We don't want to do a FPU operation here. */ + fregs = (unsigned long long *) + &child->tss.fpu.hard.fp_regs[0]; + fregs[(addr - 32)] = (unsigned long long) data; + } else { + addr -= 64; + switch(addr) { + case 0: + regs->cp0_epc = data; + break; + case 3: + regs->lo = data; + break; + case 4: + regs->hi = data; + break; + case 5: + child->tss.fpu.hard.control = data; + break; + default: + /* The rest are not allowed. */ + res = -EIO; + break; + }; } - /* Do not allow the user to set the debug register for kernel - address space */ - if(addr < 17) { - if (put_stack_long(child, sizeof(long)*addr-MAGICNUMBER, data)) - return -EIO; - return 0; - } - - return -EIO; -#endif + return res; + } - /* - * Continue and stop at next (return from) syscall. - */ - case PTRACE_SYSCALL: + case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ - if ((unsigned long) data > _NSIG) + if ((unsigned long) data > NSIG) return -EIO; if (request == PTRACE_SYSCALL) child->flags |= PF_TRACESYS; @@ -453,35 +432,19 @@ sys_ptrace(long request, long pid, long addr, long data) child->flags &= ~PF_TRACESYS; child->exit_code = data; wake_up_process(child); - return 0; + return data; } - /* - * Make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ +/* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ case PTRACE_KILL: { - if (child->state == TASK_ZOMBIE) /* already dead */ - return 0; - wake_up_process(child); - child->exit_code = SIGKILL; - return 0; - } - - case PTRACE_SINGLESTEP: { /* set the trap flag. */ - /* - * Not supported yet. 
- */ - return -ENOSYS; - - if ((unsigned long) data > NSIG) - return -EIO; - wake_up_process(child); - child->exit_code = data; - /* - * give it a chance to run. - */ + if (child->state != TASK_ZOMBIE) { + wake_up_process(child); + child->exit_code = SIGKILL; + } return 0; } @@ -502,8 +465,7 @@ sys_ptrace(long request, long pid, long addr, long data) } } -asmlinkage void -syscall_trace(void) +asmlinkage void syscall_trace(void) { if ((current->flags & (PF_PTRACED|PF_TRACESYS)) != (PF_PTRACED|PF_TRACESYS)) diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S new file mode 100644 index 000000000..666a9a180 --- /dev/null +++ b/arch/mips/kernel/r2300_fpu.S @@ -0,0 +1,139 @@ +/* $Id: r2300_fpu.S,v 1.1 1996/06/24 06:35:26 dm Exp $ + * r2300_fpu.S: Save/restore floating point context for signal handlers. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 by Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/sigcontext.h> + + .text + .set mips3 + .set noreorder + /* Save floating point context */ + .align 5 + LEAF(r2300_save_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + + bgez t0,1f + nop + + cfc1 t0,fcr31 + /* Store the 16 odd double precision registers */ + swc1 $f0,(SC_FPREGS+0)(a0) + swc1 $f1,(SC_FPREGS+8)(a0) + swc1 $f2,(SC_FPREGS+16)(a0) + swc1 $f3,(SC_FPREGS+24)(a0) + swc1 $f4,(SC_FPREGS+32)(a0) + swc1 $f5,(SC_FPREGS+40)(a0) + swc1 $f6,(SC_FPREGS+48)(a0) + swc1 $f7,(SC_FPREGS+56)(a0) + swc1 $f8,(SC_FPREGS+64)(a0) + swc1 $f9,(SC_FPREGS+72)(a0) + swc1 $f10,(SC_FPREGS+80)(a0) + swc1 $f11,(SC_FPREGS+88)(a0) + swc1 $f12,(SC_FPREGS+96)(a0) + swc1 $f13,(SC_FPREGS+104)(a0) + swc1 $f14,(SC_FPREGS+112)(a0) + swc1 $f15,(SC_FPREGS+120)(a0) + swc1 $f16,(SC_FPREGS+128)(a0) + swc1 $f17,(SC_FPREGS+136)(a0) + swc1 $f18,(SC_FPREGS+144)(a0) + swc1 $f19,(SC_FPREGS+152)(a0) + swc1 $f20,(SC_FPREGS+160)(a0) + swc1 $f21,(SC_FPREGS+168)(a0) + swc1 $f22,(SC_FPREGS+176)(a0) + swc1 $f23,(SC_FPREGS+184)(a0) + swc1 $f24,(SC_FPREGS+192)(a0) + swc1 $f25,(SC_FPREGS+200)(a0) + swc1 $f26,(SC_FPREGS+208)(a0) + swc1 $f27,(SC_FPREGS+216)(a0) + swc1 $f28,(SC_FPREGS+224)(a0) + swc1 $f29,(SC_FPREGS+232)(a0) + swc1 $f30,(SC_FPREGS+240)(a0) + swc1 $f31,(SC_FPREGS+248)(a0) + sw t0,SC_FPC_CSR(a0) + cfc1 t0,$0 # implementation/version + jr ra + .set nomacro + sw t0,SC_FPC_EIR(a0) + .set macro +1: + jr ra + .set nomacro + nop + .set macro + END(r2300_save_fp_context) + +/* Restore fpu state: + * - fp gp registers + * - cp1 status/control register + * + * We base the decission which registers to restore from the signal stack + * frame on the current content of c0_status, not on the content of the + * stack frame which might have been changed by the user. + */ + LEAF(r2300_restore_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + + bgez t0,1f + nop + + bgez t0,1f + lw t0,SC_FPC_CSR(a0) + /* Restore the 16 odd double precision registers only + * when enabled in the cp0 status register. 
+ */ + ldc1 $f0,(SC_FPREGS+0)(a0) + ldc1 $f1,(SC_FPREGS+8)(a0) + ldc1 $f2,(SC_FPREGS+16)(a0) + ldc1 $f3,(SC_FPREGS+24)(a0) + ldc1 $f4,(SC_FPREGS+32)(a0) + ldc1 $f5,(SC_FPREGS+40)(a0) + ldc1 $f6,(SC_FPREGS+48)(a0) + ldc1 $f7,(SC_FPREGS+56)(a0) + ldc1 $f8,(SC_FPREGS+64)(a0) + ldc1 $f9,(SC_FPREGS+72)(a0) + ldc1 $f10,(SC_FPREGS+80)(a0) + ldc1 $f11,(SC_FPREGS+88)(a0) + ldc1 $f12,(SC_FPREGS+96)(a0) + ldc1 $f13,(SC_FPREGS+104)(a0) + ldc1 $f14,(SC_FPREGS+112)(a0) + ldc1 $f15,(SC_FPREGS+120)(a0) + ldc1 $f16,(SC_FPREGS+128)(a0) + ldc1 $f17,(SC_FPREGS+136)(a0) + ldc1 $f18,(SC_FPREGS+144)(a0) + ldc1 $f19,(SC_FPREGS+152)(a0) + ldc1 $f20,(SC_FPREGS+160)(a0) + ldc1 $f21,(SC_FPREGS+168)(a0) + ldc1 $f22,(SC_FPREGS+176)(a0) + ldc1 $f23,(SC_FPREGS+184)(a0) + ldc1 $f24,(SC_FPREGS+192)(a0) + ldc1 $f25,(SC_FPREGS+200)(a0) + ldc1 $f26,(SC_FPREGS+208)(a0) + ldc1 $f27,(SC_FPREGS+216)(a0) + ldc1 $f28,(SC_FPREGS+224)(a0) + ldc1 $f29,(SC_FPREGS+232)(a0) + ldc1 $f30,(SC_FPREGS+240)(a0) + ldc1 $f31,(SC_FPREGS+248)(a0) + jr ra + .set nomacro + ctc1 t0,fcr31 + .set macro +1: + jr ra + .set nomacro + nop + .set macro + END(r2300_restore_fp_context) diff --git a/arch/mips/kernel/r2300_misc.S b/arch/mips/kernel/r2300_misc.S new file mode 100644 index 000000000..0afdab4c9 --- /dev/null +++ b/arch/mips/kernel/r2300_misc.S @@ -0,0 +1,398 @@ +/* $Id: r2300_misc.S,v 1.2 1996/06/29 12:41:08 dm Exp $ + * r2300_misc.S: Misc. exception handling code for R3000/R2000. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-cpu abstraction reworking: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <linux/config.h> + +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + + .text + .set mips1 + .set noreorder + + .align 5 + NESTED(r2300_handle_tlbl, PT_SIZE, sp) + .set noat + /* Check whether this is a refill or an invalid exception */ + mfc0 k0,CP0_BADVADDR + mfc0 k1,CP0_ENTRYHI + ori k0,0xfff # clear ASID... + xori k0,0xfff # in BadVAddr + andi k1,0xfc0 # get current ASID + or k0,k1 # make new entryhi + mfc0 k1,CP0_ENTRYHI + mtc0 k0,CP0_ENTRYHI + nop # for pipeline + nop + nop + tlbp + nop # for pipeline + nop + mfc0 k0,CP0_INDEX + + bgez k0,invalid_tlbl # bad addr in c0_badvaddr + mtc0 k1,CP0_ENTRYHI + + /* Damn... The next nop is required on the R4400PC V5.0, but + * I don't know why - at least there is no documented + * reason as for the others :-( + * And I haven't tested it as being necessary on R3000 - PMA. + * (The R3000 pipeline has only 5 stages, so it's probably not + * required -- Ralf) + */ + nop + +#ifdef CONF_DEBUG_TLB + /* OK, this is a double fault. Let's see whether this is + * due to an invalid entry in the page_table. + */ + /* used to be dmfc0 */ + mfc0 k0,CP0_BADVADDR + /* FIXME: This srl/sll sequence is as it is for the R4xx0, + * and I suspect that it should be different for + * the R[23]000. PMA + * (No, it's the assembler way to do + * k0 = k0 / PAGE_SIZE; + * k0 = k0 * sizeof(pte_t) + * Acutally the R4xx0 code will have to change when + * switching to 64 bit ... -- Ralf) + */ + srl k0,12 # get PFN? 
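	/* (What the srl/sll pair at this point computes, written as C — an
	 * illustrative equivalent, not code from this patch:
	 *
	 *	offset = (badvaddr >> 12) << 2;
	 *		 i.e.  badvaddr / PAGE_SIZE * sizeof(pte_t)
	 *	pte    = *(unsigned long *)(TLBMAP + offset);
	 *
	 * so k0 ends up as a byte offset into the flat page-table window at
	 * TLBMAP, assuming 4kB pages and 4-byte page-table entries on the
	 * R2000/R3000.)
	 */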
+ sll k0,2 + lui k1,%HI(TLBMAP) + addu k0,k1 + lw k1,(k0) + andi k1,(_PAGE_PRESENT|_PAGE_ACCESSED) + bnez k1,reload_pgd_entries + nop + + .set noat + SAVE_ALL + .set at + PRINT("Double fault caused by invalid entries in pgd:\n") + mfc0 a1,CP0_BADVADDR + PRINT("Double fault address : %08lx\n") + mfc0 a1,CP0_EPC + PRINT("c0_epc : %08lx\n") + + jal show_regs + move a0,sp + + jal dump_tlb_nonwired + nop + + mfc0 a0,CP0_BADVADDR + + jal dump_list_current + nop + + .set noat + STI + .set at + PANIC("Corrupted pagedir") + .set noat + +reload_pgd_entries: +#endif /* CONF_DEBUG_TLB */ + + /* Load missing pair of entries from the pgd and return. */ + mfc0 k1,CP0_CONTEXT + lw k0,(k1) # Never causes nested exception + mfc0 k1,CP0_EPC # get the return PC + srl k0,12 # Convert to EntryLo format + mtc0 k0,CP0_ENTRYLO0 + nop # for pipeline + tlbwr + nop # for pipeline + nop + nop + /* We don't know whether the original access was read or + * write, so return and see what happens... + */ + jr k1 + rfe + + /* Handle invalid exception + * + * There are two possible causes for an invalid (tlbl) + * exception: + * 1) pages with present bit set but the valid bit clear + * 2) nonexistant pages + * Case one needs fast handling, therefore don't save + * registers yet. + * + * k0 contains c0_index. + */ +invalid_tlbl: +#ifdef CONFIG_TLB_SHUTDOWN + /* Remove entry so we don't need to care later + * For sake of the pipeline the tlbwi insn has been moved down. + * Moving it around is juggling with explosives... + */ + /* FIXME: Why is Ralf setting bit 3 of k1? This may need to + * be changed for R[236]000! PMA + * (The new ENTRYHI value will then point represent a + * inique virtual address outside the 32 bit address + * limit. This is just paranoia to avoid a tlb + * shutdown. This whole part of the routine is probably + * no longer required and can be removed -- Ralf) + */ + lui k1,0x0008 + or k0,k1 + sll k0,12 # make it EntryHi format + mtc0 k0,CP0_ENTRYHI + mtc0 zero,CP0_ENTRYLO0 +#endif + /* Test present bit in entry */ + mfc0 k0,CP0_BADVADDR + /* FIXME: This srl/sll sequence is as it is for the R4xx0, + * and I suspect that it should be different for + * the R[23]000. PMA + * (No, it's the assembler way to do + * k0 = k0 / PAGE_SIZE; + * k0 = k0 * sizeof(pte_t) + * Acutally the R4xx0 code will have to change when + * switching to 64 bit ... -- Ralf) + */ + srl k0,12 + sll k0,2 +#ifdef CONFIG_TLB_SHUTDOWN + tlbwi # do not move! +#endif + lui k1,%HI(TLBMAP) + addu k0,k1 + lw k1,(k0) + andi k1,(_PAGE_PRESENT|_PAGE_READ) + xori k1,(_PAGE_PRESENT|_PAGE_READ) + + bnez k1,nopage_tlbl + lw k1,(k0) + + /* Present and read bits are set -> set valid and accessed bits */ + ori k1,(_PAGE_VALID|_PAGE_ACCESSED) + sw k1,(k0) + mfc0 k1,CP0_EPC + nop + + jr k1 + rfe + + /* Page doesn't exist. Lots of work which is less important + * for speed needs to be done, so hand it all over to the + * kernel memory management routines. + */ +nopage_tlbl: + SAVE_ALL + mfc0 a2,CP0_BADVADDR + STI + .set at + /* a0 (struct pt_regs *) regs + * a1 (unsigned long) 0 for read access + * a2 (unsigned long) faulting virtual address + */ + move a0,sp + jal do_page_fault + li a1,0 + + j ret_from_sys_call + nop + END(r2300_handle_tlbl) + + + .text + .align 5 + NESTED(r2300_handle_tlbs, PT_SIZE, sp) + .set noat + /* It is impossible that is a nested reload exception. + * Therefore this must be a invalid exception. + * Two possible cases: + * 1) Page exists but not dirty. + * 2) Page doesn't exist yet. Hand over to the kernel. 
+ * + * Test whether present bit in entry is set + */ + /* used to be dmfc0 */ + mfc0 k0,CP0_BADVADDR + /* FIXME: This srl/sll sequence is as it is for the R4xx0, + * and I suspect that it should be different for + * the R[23]000. PMA + */ + srl k0,12 + sll k0,2 + lui k1,%HI(TLBMAP) + addu k0,k1 + lw k1,(k0) + tlbp # find faulting entry + andi k1,(_PAGE_PRESENT|_PAGE_WRITE) + xori k1,(_PAGE_PRESENT|_PAGE_WRITE) + + bnez k1,nopage_tlbs + lw k1,(k0) + + /* Present and writable bits set: set accessed and dirty bits. */ + ori k1,k1,(_PAGE_ACCESSED|_PAGE_MODIFIED| \ + _PAGE_VALID|_PAGE_DIRTY) + sw k1,(k0) + /* Now reload the entry into the TLB */ + /* FIXME: Why has Ralf set bit 2? Should it be different for + * R[23]000? PMA + * (The ori/xori combination actually _clears_ bit 2. + * This is required for the R4xx0 these CPUs always + * map page pairs; a page pair of 4k pages therfore + * has always an address with bit 2 set to zero. -- Ralf) + */ + ori k0,0x0004 + xori k0,0x0004 + lw k0,(k0) + srl k0,12 + mtc0 k0,CP0_ENTRYLO0 + mfc0 k1,CP0_EPC + nop # for pipeline + tlbwi + nop # for pipeline + nop + nop + + jr k1 + rfe + + /* Page doesn't exist. Lots of work which is less important + * for speed needs to be done, so hand it all over to the + * kernel memory management routines. + */ +nopage_tlbs: +nowrite_mod: +#ifdef CONFIG_TLB_SHUTDOWN + /* Remove entry so we don't need to care later */ + mfc0 k0,CP0_INDEX +#ifdef CONF_DEBUG_TLB + bgez k0,2f + nop + /* We got a tlbs exception but found no matching entry in + * the tlb. This should never happen. Paranoia makes us + * check it, though. + */ + SAVE_ALL + jal show_regs + move a0,sp + .set at + mfc0 a1,CP0_BADVADDR + PRINT("c0_badvaddr == %08lx\n") + mfc0 a1,CP0_INDEX + PRINT("c0_index == %08x\n") + mfc0 a1,CP0_ENTRYHI + PRINT("c0_entryhi == %08x\n") + .set noat + STI + .set at + PANIC("Tlbs or tlbm exception with no matching entry in tlb") +1: + j 1b + nop +2: +#endif /* CONF_DEBUG_TLB */ + /* FIXME: Why is Ralf setting bit 3 of k1? This may need to + * be changed for R[236]000! PMA + * (The new ENTRYHI value will then point represent a + * inique virtual address outside the 32 bit address + * limit. This is just paranoia to avoid a tlb + * shutdown. This whole part of the routine is probably + * no longer required and can be removed -- Ralf) + */ + lui k1,0x0008 + or k0,k1 + sll k0,12 + mtc0 k0,CP0_ENTRYHI + mtc0 zero,CP0_ENTRYLO0 + nop # for pipeline + nop # R4000 V2.2 requires 4 NOPs + nop + nop + tlbwi +#endif /* CONFIG_TLB_SHUTDOWN */ + .set noat + SAVE_ALL + mfc0 a2,CP0_BADVADDR + STI + .set at + /* a0 (struct pt_regs *) regs + * a1 (unsigned long) 1 for write access + * a2 (unsigned long) faulting virtual address + */ + move a0,sp + jal do_page_fault + li a1,1 + + j ret_from_sys_call + nop + END(r2300_handle_tlbs) + + + .align 5 + NESTED(r2300_handle_mod, PT_SIZE, sp) + .set noat + /* Two possible cases: + * 1) Page is writable but not dirty -> set dirty and return + * 2) Page is not writable -> call C handler + */ + /* used to be dmfc0 */ + mfc0 k0,CP0_BADVADDR + /* FIXME: This srl/sll sequence is as it is for the R4xx0, + * and I suspect that it should be different for + * the R[23]000. PMA + */ + srl k0,12 + sll k0,2 + lui k1,%HI(TLBMAP) + addu k0,k1 + lw k1,(k0) + tlbp # find faulting entry + andi k1,_PAGE_WRITE + + beqz k1,nowrite_mod + lw k1,(k0) + + /* Present and writable bits set: set accessed and dirty bits. 
*/ + ori k1,(_PAGE_ACCESSED|_PAGE_DIRTY) + sw k1,(k0) + /* Now reload the entry into the tlb */ + /* FIXME: Why has Ralf set bit 2? Should it be different for + * R[23]000? PMA + * (The ori/xori combination actually _clears_ bit 2. + * This is required for the R4xx0 these CPUs always + * map page pairs; a page pair of 4k pages therfore + * has always an address with bit 2 set to zero. -- Ralf) + */ + ori k0,0x0004 + xori k0,0x0004 + lw k0,(k0) + srl k0,12 + mtc0 k0,CP0_ENTRYLO0 + mfc0 k1,CP0_EPC + nop # for pipeline + nop + nop + tlbwi + nop # for pipeline + nop + nop + + jr k1 + rfe + END(r2300_handle_mod) + .set at diff --git a/arch/mips/kernel/r2300_scall.S b/arch/mips/kernel/r2300_scall.S new file mode 100644 index 000000000..358f46664 --- /dev/null +++ b/arch/mips/kernel/r2300_scall.S @@ -0,0 +1,86 @@ +/* $Id: r2300_scall.S,v 1.3 1996/06/29 12:41:08 dm Exp $ + * r2300_scall.S: R2000/R3000 specific code to handle system calls. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-arch abstraction and beautification: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + +/* + * do_syscall calls the function in a1 with upto 7 arguments. If over + * four arguments are being requested, the additional arguments will + * be copied from the user stack pointed to by a0->reg29. + * + * a0 (struct pt_regs *) pointer to user registers + * a1 (syscall_t) pointer to syscall to do + * a2 (int) number of arguments to syscall + */ + .set noreorder + .text +NESTED(r2300_do_syscalls, 32, sp) + subu sp,32 + sw ra,28(sp) + sll a2,a2,PTRLOG + lw t0,dst(a2) + move t2,a1 + jalr t0 + lw t0,PT_R29(a0) # get old user stack pointer + +7: + lw t1,24(t0) # parameter #7 from usp + nop # delay slot + sw t1,24(sp) +6: + lw t1,20(t0) # parameter #6 from usp + nop # delay slot + sw t1,20(sp) +5: + lw t1,16(t0) # parameter #5 from usp + nop # delay slot + sw t1,16(sp) +4: + lw a3,PT_R7(a0) # 4 args +3: + lw a2,PT_R6(a0) # 3 args +2: + lw a1,PT_R5(a0) # 2 args +1: + jalr t2 # 1 args + lw a0,PT_R4(a0) + + lw ra,28(sp) + addiu sp,32 + jr ra + nop + +0: + jalr t2 # 0 args, just pass a0 + lw ra,28(sp) + addiu sp,32 + + jr ra + nop # delay slot + END(r2300_do_syscalls) + + .rdata + .align PTRLOG +dst: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b + + .section __ex_table,"a" + PTR 7b,bad_stack + PTR 6b,bad_stack + PTR 5b,bad_stack + .text diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S new file mode 100644 index 000000000..db03636d1 --- /dev/null +++ b/arch/mips/kernel/r2300_switch.S @@ -0,0 +1,102 @@ +/* $Id: r2300_switch.S,v 1.3 1996/06/29 07:06:42 dm Exp $ + * r2300_switch.S: R3000/R2000 specific task switching code. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-cpu abstraction and macros for easier reading: + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + +#include <asm/asmmacro.h> + +/* XXX The following is fucking losing... find a better way dave. */ +MODE_ALIAS = 0x00e0 # uncachable, dirty, valid + + .text + .set mips3 +/* + * Code necessary to switch tasks on an Linux/MIPS machine. + * FIXME: We don't need to disable interrupts anymore. + */ + .align 5 + LEAF(r2300_resume) + lui t5, %hi(current_set) + lw t0, %lo(current_set)(t5) + mfc0 t1,CP0_STATUS # Save status register + addu t0,a1 # Add tss offset + sw t1,THREAD_STATUS(t0) + ori t2,t1,0x1f # Disable interrupts + xori t2,0x1e + mtc0 t2,CP0_STATUS + CPU_SAVE_NONSCRATCH(t0) + sll t2,t1,2 # Save floating point state + bgez t2,2f + sw ra,THREAD_REG31(t0) + sll t2,t1,5 + bgez t2,1f + swc1 $f0, (THREAD_FPU + 0x00)(t0) + FPU_SAVE_16ODD(t0) +1: + FPU_SAVE_16EVEN(t0, t1) +2: + sw a0,%lo(current_set)(t5) # Switch current task + addu a0,a1 # Add tss offset + lw t0,THREAD_PGDIR(a0) # Switch the root pointer + li t1,TLB_ROOT # get PFN + mtc0 t1,CP0_ENTRYHI + mtc0 zero,CP0_INDEX + srl t0,12 # PFN is 12 bits west + ori t0,MODE_ALIAS # want uncachable, dirty, valid + mtc0 t0,CP0_ENTRYLO0 + lw a2,THREAD_STATUS(a0) + tlbwi + + /* Flush TLB. */ + mfc0 t3,CP0_STATUS # disable interrupts... + ori t4,t3,1 + xori t4,1 + mtc0 t4,CP0_STATUS + lw t0,mips_tlb_entries + mtc0 zero,CP0_ENTRYLO0 +1: + subu t0,1 + mtc0 t0,CP0_INDEX + lui t1,0x0008 + or t1,t0,t1 + sll t1,12 + mtc0 t1,CP0_ENTRYHI + bne t2,t0,1b + tlbwi + + ori t1,a2,1 # Restore FPU, pipeline magic + xori t1,1 + mtc0 t1,CP0_STATUS + sll t0,a2,2 + bgez t0,2f + sll t0,a2,5 + bgez t0,1f + lwc1 $f0, (THREAD_FPU + 0x00)(a0) + FPU_RESTORE_16ODD(a0) +1: + FPU_RESTORE_16EVEN(a0, t0) +2: + CPU_RESTORE_NONSCRATCH(a0) + lw t0,THREAD_KSP(a0) # Restore status register + sw t0,kernelsp + jr ra + mtc0 a2,CP0_STATUS + END(r2300_resume) + + diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S new file mode 100644 index 000000000..6163b06a1 --- /dev/null +++ b/arch/mips/kernel/r4k_fpu.S @@ -0,0 +1,148 @@ +/* $Id: r4k_fpu.S,v 1.3 1996/07/27 09:41:18 dm Exp $ + * r4k_fpu.S: Save/restore floating point context for signal handlers. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 by Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/sigcontext.h> + + .set noreorder + .set mips3 + /* Save floating point context */ + LEAF(r4k_save_fp_context) + mfc0 t1,CP0_STATUS + sll t2,t1,2 + bgez t2,2f + sll t2,t1,5 + + cfc1 t1,fcr31 + bgez t2,1f + nop + /* Store the 16 odd double precision registers */ + swc1 $f1,(SC_FPREGS+8)(a0) + swc1 $f3,(SC_FPREGS+24)(a0) + swc1 $f5,(SC_FPREGS+40)(a0) + swc1 $f7,(SC_FPREGS+56)(a0) + swc1 $f9,(SC_FPREGS+72)(a0) + swc1 $f11,(SC_FPREGS+88)(a0) + swc1 $f13,(SC_FPREGS+104)(a0) + swc1 $f15,(SC_FPREGS+120)(a0) + swc1 $f17,(SC_FPREGS+136)(a0) + swc1 $f19,(SC_FPREGS+152)(a0) + swc1 $f21,(SC_FPREGS+168)(a0) + swc1 $f23,(SC_FPREGS+184)(a0) + swc1 $f25,(SC_FPREGS+200)(a0) + swc1 $f27,(SC_FPREGS+216)(a0) + swc1 $f29,(SC_FPREGS+232)(a0) + swc1 $f31,(SC_FPREGS+248)(a0) + + /* Store the 16 even double precision registers */ +1: + swc1 $f0,(SC_FPREGS+0)(a0) + swc1 $f2,(SC_FPREGS+16)(a0) + swc1 $f4,(SC_FPREGS+32)(a0) + swc1 $f6,(SC_FPREGS+48)(a0) + swc1 $f8,(SC_FPREGS+64)(a0) + swc1 $f10,(SC_FPREGS+80)(a0) + swc1 $f12,(SC_FPREGS+96)(a0) + swc1 $f14,(SC_FPREGS+112)(a0) + swc1 $f16,(SC_FPREGS+128)(a0) + swc1 $f18,(SC_FPREGS+144)(a0) + swc1 $f20,(SC_FPREGS+160)(a0) + swc1 $f22,(SC_FPREGS+176)(a0) + swc1 $f24,(SC_FPREGS+192)(a0) + swc1 $f26,(SC_FPREGS+208)(a0) + swc1 $f28,(SC_FPREGS+224)(a0) + swc1 $f30,(SC_FPREGS+240)(a0) + sw t1,SC_FPC_CSR(a0) + cfc1 t0,$0 # implementation/version + + jr ra + .set nomacro + sw t0,SC_FPC_EIR(a0) + .set macro +2: + jr ra + .set nomacro + nop + .set macro + END(r4k_save_fp_context) + +/* Restore fpu state: + * - fp gp registers + * - cp1 status/control register + * + * We base the decission which registers to restore from the signal stack + * frame on the current content of c0_status, not on the content of the + * stack frame which might have been changed by the user. + */ + LEAF(r4k_restore_fp_context) + mfc0 t1,CP0_STATUS + sll t0,t1,2 + bgez t0,2f + sll t0,t1,5 + + bgez t0,1f + lw t0,SC_FPC_CSR(a0) + /* Restore the 16 odd double precision registers only + * when enabled in the cp0 status register. + */ + lwc1 $f1,(SC_FPREGS+8)(a0) + lwc1 $f3,(SC_FPREGS+24)(a0) + lwc1 $f5,(SC_FPREGS+40)(a0) + lwc1 $f7,(SC_FPREGS+56)(a0) + lwc1 $f9,(SC_FPREGS+72)(a0) + lwc1 $f11,(SC_FPREGS+88)(a0) + lwc1 $f13,(SC_FPREGS+104)(a0) + lwc1 $f15,(SC_FPREGS+120)(a0) + lwc1 $f17,(SC_FPREGS+136)(a0) + lwc1 $f19,(SC_FPREGS+152)(a0) + lwc1 $f21,(SC_FPREGS+168)(a0) + lwc1 $f23,(SC_FPREGS+184)(a0) + lwc1 $f25,(SC_FPREGS+200)(a0) + lwc1 $f27,(SC_FPREGS+216)(a0) + lwc1 $f29,(SC_FPREGS+232)(a0) + lwc1 $f31,(SC_FPREGS+248)(a0) + + /* Restore the 16 even double precision registers + * when cp1 was enabled in the cp0 status register. 
+ */ +1: + lwc1 $f0,(SC_FPREGS+0)(a0) + lwc1 $f2,(SC_FPREGS+16)(a0) + lwc1 $f4,(SC_FPREGS+32)(a0) + lwc1 $f6,(SC_FPREGS+48)(a0) + lwc1 $f8,(SC_FPREGS+64)(a0) + lwc1 $f10,(SC_FPREGS+80)(a0) + lwc1 $f12,(SC_FPREGS+96)(a0) + lwc1 $f14,(SC_FPREGS+112)(a0) + lwc1 $f16,(SC_FPREGS+128)(a0) + lwc1 $f18,(SC_FPREGS+144)(a0) + lwc1 $f20,(SC_FPREGS+160)(a0) + lwc1 $f22,(SC_FPREGS+176)(a0) + lwc1 $f24,(SC_FPREGS+192)(a0) + lwc1 $f26,(SC_FPREGS+208)(a0) + lwc1 $f28,(SC_FPREGS+224)(a0) + lwc1 $f30,(SC_FPREGS+240)(a0) + ctc1 t0,fcr31 + + jr ra + .set nomacro + nop + .set macro +2: + jr ra + .set nomacro + nop + .set macro + END(r4k_restore_fp_context) diff --git a/arch/mips/kernel/r4k_misc.S b/arch/mips/kernel/r4k_misc.S new file mode 100644 index 000000000..510f513aa --- /dev/null +++ b/arch/mips/kernel/r4k_misc.S @@ -0,0 +1,190 @@ +/* $Id: r4k_misc.S,v 1.8 1996/07/22 22:32:52 dm Exp $ + * r4k_misc.S: Misc. exception handling code for r4k. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-cpu abstraction and reworking: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/offset.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + +#define NOTLB_OPTIMIZE /* If you are paranoid, define this. */ + + /* ABUSE of CPP macros 101. */ + + /* After this macro runs, the pte faulted on is + * in register PTE, a ptr into the table in which + * the pte belongs is in PTR. + */ +#define LOAD_PTE(pte, ptr) \ + mfc0 pte, CP0_BADVADDR; \ + lui ptr, %hi(current_set); \ + srl pte, pte, 22; \ + lw ptr, %lo(current_set)(ptr); \ + sll pte, pte, 2; \ + lw ptr, THREAD_PGDIR(ptr); \ + addu ptr, pte, ptr; \ + mfc0 pte, CP0_BADVADDR; \ + lw ptr, (ptr); \ + srl pte, pte, 10; \ + and pte, pte, 0xffc; \ + addu ptr, ptr, pte; \ + lw pte, (ptr); + + /* This places the even/odd pte pair in the page + * table at PTR into ENTRYLO0 and ENTRYLO1 using + * TMP as a scratch register. + */ +#define PTE_RELOAD(ptr, tmp) \ + ori ptr, ptr, 0x4; \ + xori ptr, ptr, 0x4; \ + lw tmp, 4(ptr); \ + lw ptr, 0(ptr); \ + srl tmp, tmp, 6; \ + mtc0 tmp, CP0_ENTRYLO1; \ + srl ptr, ptr, 6; \ + mtc0 ptr, CP0_ENTRYLO0; + +#define DO_FAULT(write) \ + .set noat; \ + .set macro; \ + SAVE_ALL; \ + mfc0 a2, CP0_BADVADDR; \ + STI; \ + .set at; \ + move a0, sp; \ + jal do_page_fault; \ + li a1, write; \ + j ret_from_sys_call; \ + nop; \ + .set noat; \ + .set nomacro; + + /* Check is PTE is present, if not then jump to LABEL. + * PTR points to the page table where this PTE is located, + * when the macro is done executing PTE will be restored + * with it's original value. + */ +#define PTE_PRESENT(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + bnez pte, label; \ + lw pte, (ptr); + + /* Make PTE valid, store result in PTR. */ +#define PTE_MAKEVALID(pte, ptr) \ + ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \ + sw pte, (ptr); + + /* Check if PTE can be written to, if not branch to LABEL. + * Regardless restore PTE with value from PTR when done. 
+ */ +#define PTE_WRITABLE(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + bnez pte, label; \ + lw pte, (ptr); + + /* Make PTE writable, update software status bits as well, + * then store at PTR. + */ +#define PTE_MAKEWRITE(pte, ptr) \ + ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PAGE_VALID | _PAGE_DIRTY); \ + sw pte, (ptr); + + .text + .set noreorder + .set mips3 + + /* Note for many R4k variants tlb probes cannot be executed out + * of the instruction cache else you get bogus results. + */ + + .align 5 + NESTED(r4k_handle_tlbl, PT_SIZE, sp) + .set noat + .set nomacro +invalid_tlbl: +#ifndef NOTLB_OPTIMIZE + /* Test present bit in entry. */ + LOAD_PTE(k0, k1) + tlbp + PTE_PRESENT(k0, k1, nopage_tlbl) + PTE_MAKEVALID(k0, k1) + PTE_RELOAD(k1, k0) + nop + b 1f + tlbwi +1: + nop + eret +#endif + +nopage_tlbl: + DO_FAULT(0) + END(r4k_handle_tlbl) + + .align 5 + NESTED(r4k_handle_tlbs, PT_SIZE, sp) + .set noat +#ifndef NOTLB_OPTIMIZE + LOAD_PTE(k0, k1) + tlbp # find faulting entry + PTE_WRITABLE(k0, k1, nopage_tlbs) + PTE_MAKEWRITE(k0, k1) + PTE_RELOAD(k1, k0) + nop + b 1f + tlbwi +1: + nop + eret +#endif + +nopage_tlbs: + DO_FAULT(1) + END(r4k_handle_tlbs) + + .align 5 + NESTED(r4k_handle_mod, PT_SIZE, sp) + .set noat +#ifndef NOTLB_OPTIMIZE + LOAD_PTE(k0, k1) + tlbp # find faulting entry + andi k0, k0, _PAGE_WRITE + beqz k0, nowrite_mod + lw k0, (k1) + + /* Present and writable bits set, set accessed and dirty bits. */ + PTE_MAKEWRITE(k0, k1) +#if 0 + ori k0, k0, (_PAGE_ACCESSED | _PAGE_DIRTY) + sw k0, (k1) +#endif + + /* Now reload the entry into the tlb. */ + PTE_RELOAD(k1, k0) + nop + b 1f + tlbwi +1: + nop + eret +#endif + +nowrite_mod: + DO_FAULT(1) + END(r4k_handle_mod) diff --git a/arch/mips/kernel/r4k_scall.S b/arch/mips/kernel/r4k_scall.S new file mode 100644 index 000000000..d4dd88732 --- /dev/null +++ b/arch/mips/kernel/r4k_scall.S @@ -0,0 +1,74 @@ +/* $Id: r4k_scall.S,v 1.4 1996/06/29 12:41:09 dm Exp $ + * r4k_scall.S: R4xx0 specific code to handle system calls. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-arch abstraction and beautification: + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + + .text + .set noreorder + .set mips3 + .align 5 +NESTED(r4k_do_syscalls, 64, sp) + subu sp, 64 + sw ra, 56(sp) + sll a2, a2, PTRLOG + lw t0, dst(a2) + move t2, a1 + jalr t0 + lw t0, PT_R29(a0) # get old user stack pointer +7: + lw t1, 24(t0) # parameter #7 from usp + sw t1, 24(sp) +6: + lw t1, 20(t0) # parameter #6 from usp + sw t1, 20(sp) +5: + lw t1, 16(t0) # parameter #5 from usp + sw t1, 16(sp) +4: + lw a3, PT_R7(a0) # 4 args +3: + lw a2, PT_R6(a0) # 3 args +2: + lw a1, PT_R5(a0) # 2 args +1: + jalr t2 # 1 args + lw a0, PT_R4(a0) + + .set reorder + lw ra, 56(sp) + addiu sp, 64 + jr ra +0: + jalr t2 # 0 args, just pass a0 + nop + lw ra, 56(sp) + addiu sp, 64 + jr ra + nop + END(r4k_do_syscalls) + + .rdata + .align PTRLOG +dst: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b + + .section __ex_table,"a" + PTR 7b,bad_stack + PTR 6b,bad_stack + PTR 5b,bad_stack + .text diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S new file mode 100644 index 000000000..22cd96fce --- /dev/null +++ b/arch/mips/kernel/r4k_switch.S @@ -0,0 +1,71 @@ +/* $Id: r4k_switch.S,v 1.8 1996/07/10 01:24:20 dm Exp $ + * r4k_switch.S: R4xx0 specific task switching code. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-cpu abstraction and macros for easier reading: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/fpregdef.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/regdef.h> +#include <asm/segment.h> +#include <asm/stackframe.h> + +#include <asm/asmmacro.h> + + .text + .set noreorder + .set mips3 + .align 5 + LEAF(r4xx0_resume) + lui t5, %hi(current_set) + lw t0, %lo(current_set)(t5) + mfc0 t1, CP0_STATUS + nop + sw t1, THREAD_STATUS(t0) + ori t2, t1, 0x1f + xori t2, t2, 0x1e + mtc0 t2, CP0_STATUS + CPU_SAVE_NONSCRATCH(t0) + sll t2, t1, 2 # Save floating point state + bgez t2, 2f + sw ra, THREAD_REG31(t0) + sll t2, t1, 5 + bgez t2, 1f + swc1 $f0, (THREAD_FPU + 0x00)(t0) + FPU_SAVE_16ODD(t0) +1: + FPU_SAVE_16EVEN(t0, t1) # clobbers t1 +2: + sw a0, %lo(current_set)(t5) + lw a3, TASK_MM(a0) + lw a2, THREAD_STATUS(a0) + lw a3, MM_CONTEXT(a3) + ori t1, a2, 1 # restore fpu, pipeline magic + andi a3, a3, 0xff + xori t1, t1, 1 + mtc0 a3, CP0_ENTRYHI + mtc0 t1, CP0_STATUS + sll t0, a2, 2 + bgez t0, 2f + sll t0, a2, 5 + bgez t0, 1f + lwc1 $f0, (THREAD_FPU + 0x00)(a0) + FPU_RESTORE_16ODD(a0) +1: + FPU_RESTORE_16EVEN(a0, t0) # clobbers t0 +2: + CPU_RESTORE_NONSCRATCH(a0) + lw t0, THREAD_KSP(a0) + sw t0, kernelsp + jr ra + mtc0 a2, CP0_STATUS + END(r4xx0_resume) diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S new file mode 100644 index 000000000..2d06f235f --- /dev/null +++ b/arch/mips/kernel/r6000_fpu.S @@ -0,0 +1,100 @@ +/* $Id: r6000_fpu.S,v 1.1 1996/06/24 06:35:28 dm Exp $ + * r6000_fpu.S: Save/restore floating point context for signal handlers. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 by Ralf Baechle + * + * Multi-arch abstraction and asm macros for easier reading: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <asm/asm.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/sigcontext.h> + + .set noreorder + /* Save floating point context */ + LEAF(r6000_save_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + bgez t0,1f + nop + + cfc1 t1,fcr31 + /* Store the 16 double precision registers */ + sdc1 $f0,(SC_FPREGS+0)(a0) + sdc1 $f2,(SC_FPREGS+16)(a0) + sdc1 $f4,(SC_FPREGS+32)(a0) + sdc1 $f6,(SC_FPREGS+48)(a0) + sdc1 $f8,(SC_FPREGS+64)(a0) + sdc1 $f10,(SC_FPREGS+80)(a0) + sdc1 $f12,(SC_FPREGS+96)(a0) + sdc1 $f14,(SC_FPREGS+112)(a0) + sdc1 $f16,(SC_FPREGS+128)(a0) + sdc1 $f18,(SC_FPREGS+144)(a0) + sdc1 $f20,(SC_FPREGS+160)(a0) + sdc1 $f22,(SC_FPREGS+176)(a0) + sdc1 $f24,(SC_FPREGS+192)(a0) + sdc1 $f26,(SC_FPREGS+208)(a0) + sdc1 $f28,(SC_FPREGS+224)(a0) + sdc1 $f30,(SC_FPREGS+240)(a0) + sw t0,SC_FPC_CSR(a0) + cfc1 t0,$0 # implementation/version + + jr ra + .set nomacro + sw t0,SC_FPC_EIR(a0) + .set macro +1: + jr ra + .set nomacro + nop + .set macro + END(r6000_save_fp_context) + +/* Restore fpu state: + * - fp gp registers + * - cp1 status/control register + * + * We base the decission which registers to restore from the signal stack + * frame on the current content of c0_status, not on the content of the + * stack frame which might have been changed by the user. + */ + LEAF(r6000_restore_fp_context) + mfc0 t0,CP0_STATUS + sll t0,t0,2 + + bgez t0,1f + lw t0,SC_FPC_CSR(a0) + /* Restore the 16 double precision registers */ + ldc1 $f0,(SC_FPREGS+0)(a0) + ldc1 $f2,(SC_FPREGS+16)(a0) + ldc1 $f4,(SC_FPREGS+32)(a0) + ldc1 $f6,(SC_FPREGS+48)(a0) + ldc1 $f8,(SC_FPREGS+64)(a0) + ldc1 $f10,(SC_FPREGS+80)(a0) + ldc1 $f12,(SC_FPREGS+96)(a0) + ldc1 $f14,(SC_FPREGS+112)(a0) + ldc1 $f16,(SC_FPREGS+128)(a0) + ldc1 $f18,(SC_FPREGS+144)(a0) + ldc1 $f20,(SC_FPREGS+160)(a0) + ldc1 $f22,(SC_FPREGS+176)(a0) + ldc1 $f24,(SC_FPREGS+192)(a0) + ldc1 $f26,(SC_FPREGS+208)(a0) + ldc1 $f28,(SC_FPREGS+224)(a0) + ldc1 $f30,(SC_FPREGS+240)(a0) + + jr ra + .set nomacro + ctc1 t0,fcr31 + .set macro +1: + jr ra + .set nomacro + nop + .set macro + END(r6000_restore_fp_context) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 30304abda..48de5d21a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -26,12 +26,15 @@ #include <asm/asm.h> #include <asm/bootinfo.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/io.h> #include <asm/vector.h> -#include <asm/uaccess.h> +#include <asm/segment.h> #include <asm/stackframe.h> #include <asm/system.h> +#ifdef CONFIG_SGI +#include <asm/sgialib.h> +#endif /* * How to handle the machine's features @@ -83,11 +86,12 @@ struct screen_info screen_info = DEFAULT_SCREEN_INFO; */ unsigned long mips_memory_upper = KSEG0; /* this is set by kernel_entry() */ unsigned long mips_cputype = CPU_UNKNOWN; -unsigned long mips_machtype = MACH_UNKNOWN; /* this is set by bi_EarlySnarf() */ -unsigned long mips_machgroup = MACH_GROUP_UNKNOWN; /* this is set by bi_EarlySnarf() */ -unsigned long mips_tlb_entries = 48; /* this is set by bi_EarlySnarf() */ +unsigned long mips_machtype = MACH_UNKNOWN; +unsigned long mips_machgroup = MACH_GROUP_UNKNOWN; +unsigned long mips_tlb_entries = 48; /* Guess which CPU I've got :) */ unsigned long mips_vram_base = KSEG0; +unsigned char 
aux_device_present; extern int root_mountflags; extern int _end; @@ -97,7 +101,12 @@ extern char empty_zero_page[PAGE_SIZE]; * This is set up by the setup-routine at boot-time */ #define PARAM empty_zero_page +#if 0 +#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) +#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) +#endif #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) +#define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) @@ -110,6 +119,12 @@ static char command_line[CL_SIZE] = { 0, }; */ void (*irq_setup)(void); +/* + * isa_slot_offset is the address where E(ISA) busaddress 0 is is mapped + * for the processor. + */ +unsigned long isa_slot_offset; + static void default_irq_setup(void) { panic("Unknown machtype in init_IRQ"); @@ -119,69 +134,6 @@ static void default_fd_cacheflush(const void *addr, size_t size) { } -static asmlinkage void -default_cacheflush(unsigned long addr, unsigned long nbytes, unsigned int flags) -{ - /* - * Someone didn't set his cacheflush() handler ... - */ - panic("default_cacheflush() called.\n"); -} -asmlinkage void (*cacheflush)(unsigned long addr, unsigned long nbytes, unsigned int flags) = default_cacheflush; - -static __inline__ void -cpu_init(void) -{ - asmlinkage void handle_reserved(void); - void mips1_cpu_init(void); - void mips2_cpu_init(void); - void mips3_cpu_init(void); - void mips4_cpu_init(void); - int i; - - /* - * Setup default vectors - */ - for (i=0;i<=31;i++) - set_except_vector(i, handle_reserved); - - switch(mips_cputype) { -#ifdef CONFIG_CPU_R3000 - case CPU_R2000: case CPU_R3000: case CPU_R3000A: case CPU_R3041: - case CPU_R3051: case CPU_R3052: case CPU_R3081: case CPU_R3081E: - mips1_cpu_init(); - break; -#endif -#ifdef CONFIG_CPU_R6000 - case CPU_R6000: case CPU_R6000A: - mips2_cpu_init(); - break; -#endif -#ifdef CONFIG_CPU_R4X00 - case CPU_R4000MC: case CPU_R4400MC: case CPU_R4000SC: - case CPU_R4400SC: case CPU_R4000PC: case CPU_R4400PC: - case CPU_R4200: case CPU_R4300: /* case CPU_R4640: */ - case CPU_R4600: case CPU_R4700: - mips3_cpu_init(); - break; -#endif -#ifdef CONFIG_CPU_R8000 - case CPU_R8000: case CPU_R5000: - printk("Detected unsupported CPU type %s.\n", - cpu_names[mips_cputype]); - panic("Can't handle CPU"); - break; -#endif -#ifdef CONFIG_CPU_R10000 - case CPU_R10000: - mips4_cpu_init(); -#endif - case CPU_UNKNOWN: - default: - panic("Unknown or unsupported CPU type"); - } -} - void setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigned long * memory_end_p) { @@ -191,14 +143,16 @@ void setup_arch(char **cmdline_p, void deskstation_setup(void); void jazz_setup(void); void sni_rm200_pci_setup(void); + void sgi_setup(void); /* Perhaps a lot of tags are not getting 'snarfed' - */ /* please help yourself */ - atag = bi_TagFind(tag_cputype); - memcpy(&mips_cputype, TAGVALPTR(atag), atag->size); + atag = bi_TagFind(tag_machtype); + memcpy(&mips_machtype, TAGVALPTR(atag), atag->size); - cpu_init(); + atag = bi_TagFind(tag_machgroup); + memcpy(&mips_machgroup, TAGVALPTR(atag), atag->size); atag = bi_TagFind(tag_vram_base); memcpy(&mips_vram_base, TAGVALPTR(atag), atag->size); @@ -226,6 +180,11 @@ void setup_arch(char **cmdline_p, jazz_setup(); break; #endif +#ifdef CONFIG_SGI + case MACH_GROUP_SGI: + sgi_setup(); + break; +#endif #ifdef CONFIG_SNI_RM200_PCI case MACH_GROUP_SNI_RM: sni_rm200_pci_setup(); @@ -237,6 +196,9 @@ void setup_arch(char **cmdline_p, atag = 
bi_TagFind(tag_drive_info); memcpy(&drive_info, TAGVALPTR(atag), atag->size); +#if 0 + aux_device_present = AUX_DEVICE_INFO; +#endif memory_end = mips_memory_upper; /* @@ -245,7 +207,7 @@ void setup_arch(char **cmdline_p, * of one cache line at the end of memory unused to make shure we * don't catch this type of bus errors. */ - memory_end -= 32; + memory_end -= 128; memory_end &= PAGE_MASK; #ifdef CONFIG_BLK_DEV_RAM @@ -253,23 +215,14 @@ void setup_arch(char **cmdline_p, rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif -#ifdef CONFIG_MAX_16M - /* - * There is a quite large number of different PC chipset based boards - * available and so I include this option here just in case ... - */ - if (memory_end > PAGE_OFFSET + 16*1024*1024) - memory_end = PAGE_OFFSET + 16*1024*1024; -#endif - atag= bi_TagFind(tag_screen_info); + atag = bi_TagFind(tag_mount_root_rdonly); if (atag) - memcpy(&screen_info, TAGVALPTR(atag), atag->size); + root_mountflags |= MS_RDONLY; atag = bi_TagFind(tag_command_line); if (atag) - memcpy(&command_line, TAGVALPTR(atag), atag->size); - printk("Command line: '%s'\n", command_line); + memcpy(&command_line, TAGVALPTR(atag), atag->size); memcpy(saved_command_line, command_line, CL_SIZE); saved_command_line[CL_SIZE-1] = '\0'; @@ -280,8 +233,8 @@ void setup_arch(char **cmdline_p, #ifdef CONFIG_BLK_DEV_INITRD if (LOADER_TYPE) { - initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0; - initrd_end = initrd_start+INITRD_SIZE; + initrd_start = INITRD_START; + initrd_end = INITRD_START+INITRD_SIZE; if (initrd_end > memory_end) { printk("initrd extends beyond end of memory " "(0x%08lx > 0x%08lx)\ndisabling initrd\n", diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b744823b1..fdbc86558 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1994, 1995, 1996 Ralf Baechle */ +#include <linux/config.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> @@ -15,20 +16,8 @@ #include <asm/asm.h> #include <asm/bitops.h> +#include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/cache.h> -#include <asm/mipsconfig.h> -#include <asm/sgidefs.h> - -/* - * Linux/MIPS misstreats the SA_NOMASK flag for signal handlers. - * Actually this is a bug in libc that was made visible by the POSIX.1 - * changes in Linux/MIPS 2.0.1. To keep old binaries alive enable - * this define but note that this is just a hack with sideeffects, not a - * perfect compatibility mode. This will go away, so rebuild your - * executables with libc 960709 or newer. 
- */ -#define CONF_NOMASK_BUG_COMPAT #define _S(nr) (1<<((nr)-1)) @@ -36,21 +25,19 @@ asmlinkage int sys_waitpid(pid_t pid,unsigned long * stat_addr, int options); asmlinkage int do_signal(unsigned long oldmask, struct pt_regs *regs); - -asmlinkage void (*save_fp_context)(struct sigcontext *sc); +extern asmlinkage void (*save_fp_context)(struct sigcontext *sc); extern asmlinkage void (*restore_fp_context)(struct sigcontext *sc); asmlinkage int sys_sigprocmask(int how, sigset_t *set, sigset_t *oset) { k_sigset_t new_set, old_set = current->blocked; + int error; if (set) { - if (!access_ok(VERIFY_READ, set, sizeof(sigset_t))) - return -EFAULT; - - __get_user(new_set, to_k_sigset_t(set)); - new_set &= _BLOCKABLE; - + error = verify_area(VERIFY_READ, set, sizeof(sigset_t)); + if (error) + return error; + new_set = *to_k_sigset_t(set) & _BLOCKABLE; switch (how) { case SIG_BLOCK: current->blocked |= new_set; @@ -73,14 +60,14 @@ asmlinkage int sys_sigprocmask(int how, sigset_t *set, sigset_t *oset) } } if (oset) { - if(!access_ok(VERIFY_WRITE, oset, sizeof(sigset_t))) - return -EFAULT; - __put_user(old_set, &oset->__sigbits[0]); - __put_user(0, &oset->__sigbits[1]); - __put_user(0, &oset->__sigbits[2]); - __put_user(0, &oset->__sigbits[3]); + error = verify_area(VERIFY_WRITE, oset, sizeof(sigset_t)); + if (error) + return error; + put_user(old_set, &oset->__sigbits[0]); + put_user(0, &oset->__sigbits[1]); + put_user(0, &oset->__sigbits[2]); + put_user(0, &oset->__sigbits[3]); } - return 0; } @@ -95,17 +82,15 @@ asmlinkage int sys_sigsuspend(struct pt_regs *regs) mask = current->blocked; uset = (sigset_t *)(long) regs->regs[4]; - if (!access_ok(VERIFY_READ, uset, sizeof(sigset_t))) + if (verify_area(VERIFY_READ, uset, sizeof(sigset_t))) return -EFAULT; - - __get_user(kset, to_k_sigset_t(uset)); - + kset = *to_k_sigset_t(uset); current->blocked = kset & _BLOCKABLE; regs->regs[2] = -EINTR; while (1) { current->state = TASK_INTERRUPTIBLE; schedule(); - if (do_signal(mask, regs)) + if (do_signal(mask,regs)) return -EINTR; } @@ -129,11 +114,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) current->blocked = context->sc_sigset.__sigbits[0] & _BLOCKABLE; regs->cp0_epc = context->sc_pc; -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) - for(i = 31;i >= 0;i--) - __get_user(regs->regs[i], &context->sc_regs[i]); -#endif -#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) + /* * We only allow user processes in 64bit mode (n32, 64 bit ABI) to * restore the upper half of registers. @@ -146,7 +127,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) __get_user(regs->regs[i], &context->sc_regs[i]); regs->regs[i] = (int) regs->regs[i]; } -#endif + __get_user(regs->hi, &context->sc_mdhi); __get_user(regs->lo, &context->sc_mdlo); restore_fp_context(context); @@ -159,7 +140,8 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) /* * Don't let your children do this ... */ - asm( "move\t$29,%0\n\t" + asm __volatile__( + "move\t$29,%0\n\t" "j\tret_from_sys_call" :/* no outputs */ :"r" (regs)); @@ -184,7 +166,7 @@ badframe: * when the signal handler is returned. * * The signal handler will be called with ra pointing to code1 (see below) and - * signal number and pointer to the saved sigcontext as the two parameters. + * one parameters in a0 (signum). * * usp -> [unused] ; first free word on stack * arg save space ; 16/32 bytes arg. 
save space @@ -217,17 +199,11 @@ static void setup_frame(struct sigaction * sa, struct pt_regs *regs, frame = (struct sc *) (long) regs->regs[29]; frame--; - /* - * We realign the stack to an adequate boundary for the architecture. - * The assignment to sc had to be moved over the if to prevent - * GCC from throwing warnings. - */ - frame = (struct sc *)((unsigned long)frame & ALMASK); - sc = &frame->scc; - if (!access_ok(VERIFY_WRITE, frame, sizeof (struct sc))) { + /* We realign the stack to an adequate boundary for the architecture. */ + if (verify_area(VERIFY_WRITE, frame, sizeof (struct sc))) do_exit(SIGSEGV); - return; - } + frame = (struct sc *)((unsigned long)frame & ALMASK); + sc = &frame->scc; /* * Set up the return code ... @@ -238,82 +214,75 @@ static void setup_frame(struct sigaction * sa, struct pt_regs *regs, * syscall * .set reorder */ - __put_user(0x27bd0000 + scc_offset, &frame->code[0]); + __put_user(0x27bd0000 + scc_offset, &frame->code[0]); __put_user(0x24020000 + __NR_sigreturn, &frame->code[1]); - __put_user(0x0000000c, &frame->code[2]); + __put_user(0x0000000c, &frame->code[2]); /* * Flush caches so that the instructions will be correctly executed. */ - cacheflush((unsigned long)frame->code, sizeof (frame->code), - CF_BCACHE|CF_ALL); + flush_cache_sigtramp((unsigned long) frame->code); /* * Set up the "normal" sigcontext */ - sc->sc_pc = regs->cp0_epc; /* Program counter */ - sc->sc_status = regs->cp0_status; /* Status register */ + __put_user(regs->cp0_epc, &sc->sc_pc); + __put_user(regs->cp0_status, &sc->sc_status); /* Status register */ for(i = 31;i >= 0;i--) __put_user(regs->regs[i], &sc->sc_regs[i]); save_fp_context(sc); __put_user(regs->hi, &sc->sc_mdhi); __put_user(regs->lo, &sc->sc_mdlo); __put_user(regs->cp0_cause, &sc->sc_cause); - __put_user((regs->cp0_status & ST0_CU0) != 0, &sc->sc_ownedfp); + __put_user((regs->cp0_status & ST0_CU1) != 0, &sc->sc_ownedfp); __put_user(oldmask, &sc->sc_sigset.__sigbits[0]); __put_user(0, &sc->sc_sigset.__sigbits[1]); __put_user(0, &sc->sc_sigset.__sigbits[2]); __put_user(0, &sc->sc_sigset.__sigbits[3]); - regs->regs[4] = signr; /* Args for handler */ - regs->regs[5] = (long) frame; /* Ptr to sigcontext */ + regs->regs[4] = signr; /* Arguments for handler */ + regs->regs[5] = 0; /* For now. */ + regs->regs[6] = (long) frame; /* Pointer to sigcontext */ regs->regs[29] = (unsigned long) frame; /* Stack pointer */ regs->regs[31] = (unsigned long) frame->code; /* Return address */ - regs->cp0_epc = regs->regs[25] /* "return" to the first handler */ - = (unsigned long) sa->sa_handler; + regs->cp0_epc = (unsigned long) sa->sa_handler; /* "return" to the first handler */ + regs->regs[25] = regs->cp0_epc; /* PIC shit... */ } -/* - * OK, we're invoking a handler - */ -static inline void -handle_signal(unsigned long signr, struct sigaction *sa, - unsigned long oldmask, struct pt_regs * regs) +static inline void handle_signal(unsigned long signr, struct sigaction *sa, + unsigned long oldmask, struct pt_regs * regs) { - /* are we from a failed system call? */ - if (regs->orig_reg2 >= 0 && regs->regs[7]) { - /* If so, check system call restarting.. 
*/ - switch (regs->regs[2]) { - case ERESTARTNOHAND: - regs->regs[2] = EINTR; - break; - - case ERESTARTSYS: - if (!(sa->sa_flags & SA_RESTART)) { - regs->regs[2] = EINTR; - break; - } - /* fallthrough */ - case ERESTARTNOINTR: - regs->regs[7] = regs->orig_reg7; - regs->cp0_epc -= 8; - } - } - - /* set up the stack frame */ setup_frame(sa, regs, signr, oldmask); if (sa->sa_flags & SA_ONESHOT) sa->sa_handler = NULL; -#ifdef CONF_NOMASK_BUG_COMPAT - current->blocked |= *to_k_sigset_t(&sa->sa_mask); -#else if (!(sa->sa_flags & SA_NOMASK)) - current->blocked |= (*to_k_sigset_t(&sa->sa_mask) | - _S(signr)) & _BLOCKABLE; -#endif + current->blocked |= + ((*to_k_sigset_t(&sa->sa_mask) | _S(signr)) & _BLOCKABLE); +} + +static inline void syscall_restart(unsigned long r0, unsigned long or2, + unsigned long or7, struct pt_regs *regs, + struct sigaction *sa) +{ + switch(r0) { + case ERESTARTNOHAND: + no_system_call_restart: + regs->regs[0] = regs->regs[2] = EINTR; + break; + case ERESTARTSYS: + if(!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + /* fallthrough */ + case ERESTARTNOINTR: + regs->regs[0] = regs->regs[2] = or2; + regs->regs[7] = or7; + regs->cp0_epc -= 8; + } } +extern int do_irix_signal(unsigned long oldmask, struct pt_regs *regs); + /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by @@ -326,9 +295,14 @@ handle_signal(unsigned long signr, struct sigaction *sa, asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs) { unsigned long mask = ~current->blocked; - unsigned long signr; + unsigned long signr, r0 = regs->regs[0]; + unsigned long r7 = regs->orig_reg7; struct sigaction * sa; +#ifdef CONFIG_BINFMT_IRIX + if(current->personality != PER_LINUX) + return do_irix_signal(oldmask, regs); +#endif while ((signr = current->signal & mask)) { signr = ffz(~signr); clear_bit(signr, ¤t->signal); @@ -365,10 +339,7 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs) case SIGCONT: case SIGCHLD: case SIGWINCH: continue; - case SIGTSTP: case SIGTTIN: case SIGTTOU: - if (is_orphaned_pgrp(current->pgrp)) - continue; - case SIGSTOP: + case SIGSTOP: case SIGTSTP: case SIGTTIN: case SIGTTOU: if (current->flags & PF_PTRACED) continue; current->state = TASK_STOPPED; @@ -380,8 +351,7 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs) continue; case SIGQUIT: case SIGILL: case SIGTRAP: - case SIGABRT: case SIGFPE: case SIGSEGV: - case SIGBUS: + case SIGIOT: case SIGFPE: case SIGSEGV: case SIGBUS: if (current->binfmt && current->binfmt->core_dump) { if (current->binfmt->core_dump(signr, regs)) signr |= 0x80; @@ -393,19 +363,39 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs) do_exit(signr); } } + /* + * OK, we're invoking a handler + */ +#if 0 + printk("[%s:%d] send sig1: r0[%08lx] r7[%08lx] reg[2]=%08lx\n", + current->comm, current->pid, r0, r7, regs->regs[2]); +#endif + if(r0) + syscall_restart(r0, regs->orig_reg2, + r7, regs, sa); +#if 0 + printk("send sig2: r0[%08lx] r7[%08lx] reg[2]=%08lx\n", + r0, r7, regs->regs[2]); +#endif handle_signal(signr, sa, oldmask, regs); return 1; } - - /* Did we come from a system call? 
*/ - if (regs->orig_reg2 >= 0) { - /* Restart the system call - no handlers present */ - if (regs->regs[2] == -ERESTARTNOHAND || - regs->regs[2] == -ERESTARTSYS || - regs->regs[2] == -ERESTARTNOINTR) { - regs->regs[2] = regs->orig_reg2; - regs->cp0_epc -= 8; - } + /* + * Who's code doesn't conform to the restartable syscall convention + * dies here!!! The li instruction, a single machine instruction, + * must directly be followed by the syscall instruction. + */ +#if 0 + printk("[%s:%d] send sig3: r0[%08lx] r7[%08lx] reg[2]=%08lx\n", + current->comm, current->pid, r0, r7, regs->regs[2]); +#endif + if (r0 && + (regs->regs[2] == ERESTARTNOHAND || + regs->regs[2] == ERESTARTSYS || + regs->regs[2] == ERESTARTNOINTR)) { + regs->regs[0] = regs->regs[2] = regs->orig_reg2; + regs->regs[7] = r7; + regs->cp0_epc -= 8; } return 0; } @@ -417,5 +407,5 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs) */ asmlinkage unsigned long sys_signal(int signum, __sighandler_t handler) { - return -ENOSYS; + return -ENOSYS; } diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 144a3c905..3b595e05d 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -12,7 +12,9 @@ * table. */ #undef CONF_PRINT_SYSCALLS +#undef CONF_DEBUG_IRIX +#include <linux/config.h> #include <linux/linkage.h> #include <linux/mm.h> #include <linux/mman.h> @@ -20,25 +22,16 @@ #include <linux/unistd.h> #include <asm/branch.h> #include <asm/ptrace.h> -#include <asm/uaccess.h> #include <asm/signal.h> +#include <asm/uaccess.h> extern asmlinkage void syscall_trace(void); typedef asmlinkage int (*syscall_t)(void *a0,...); -extern asmlinkage int do_syscalls(struct pt_regs *regs, syscall_t fun, - int narg); +extern asmlinkage int (*do_syscalls)(struct pt_regs *regs, syscall_t fun, + int narg); extern syscall_t sys_call_table[]; extern unsigned char sys_narg_table[]; -/* - * The pipe syscall has a unusual calling convention. We return the two - * filedescriptors in the result registers v0/v1. The syscall wrapper - * from libc places these results in the array to which the argument of - * pipe points to. This is like other MIPS operating systems and unlike - * Linux/i386 where the kernel itself places the results in the file - * descriptor array itself. This calling convention also has the advantage - * of lower overhead because we don't need to call verify_area. - */ asmlinkage int sys_pipe(struct pt_regs *regs) { int fd[2]; @@ -74,9 +67,10 @@ asmlinkage int sys_idle(void) current->counter = -100; for (;;) { /* - * Not all MIPS R-series CPUs have the wait instruction. + * R4[236]00 have wait, R4[04]00 don't. * FIXME: We should save power by reducing the clock where - * possible. + * possible. Should help alot for battery powered + * R4200/4300i systems. */ if (wait_available && !need_resched) __asm__(".set\tmips3\n\t" @@ -86,33 +80,6 @@ asmlinkage int sys_idle(void) } } -#if 0 -/* - * RISC/os compatible SysV flavoured fork(2) syscall. - * - * This call has a different calling sequence: - * child return value: pid of parent, secondary result = 1. - * parent return value: pid of child, secondary result value = 0. - * error: errno, secondary result = 0. 
- */ -asmlinkage int sys_sysv_fork(struct pt_regs *regs) -{ - int pid; - - pid = do_fork(SIGCHLD, regs->regs[29], regs); - if (pid == 0) { /* child */ - regs->regs[3] = 1; - return current->p_pptr->pid; - } /* parent or error */ - - regs->regs[3] = 0; - return pid; -} -#endif - -/* - * Normal Linux fork(2) syscall - */ asmlinkage int sys_fork(struct pt_regs *regs) { return do_fork(SIGCHLD, regs->regs[29], regs); @@ -144,33 +111,73 @@ asmlinkage int sys_execve(struct pt_regs *regs) error = do_execve(filename, (char **) (long)regs->regs[5], (char **) (long)regs->regs[6], regs); putname(filename); - return error; } /* * Do the indirect syscall syscall. */ -asmlinkage int sys_syscall(unsigned long a0, unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, unsigned long a5, - unsigned long a6) +asmlinkage int sys_syscall(struct pt_regs *regs) { syscall_t syscall; + unsigned long syscallnr = regs->regs[4]; + unsigned long a0, a1, a2, a3, a4, a5, a6; + int nargs, errno; - if (a0 > __NR_Linux + __NR_Linux_syscalls) + if (syscallnr > __NR_Linux + __NR_Linux_syscalls) return -ENOSYS; - syscall = sys_call_table[a0]; + syscall = sys_call_table[syscallnr]; + nargs = sys_narg_table[syscallnr]; /* * Prevent stack overflow by recursive * syscall(__NR_syscall, __NR_syscall,...); */ - if (syscall == (syscall_t) sys_syscall) + if (syscall == (syscall_t) sys_syscall) { return -EINVAL; + } - if (syscall == NULL) + if (syscall == NULL) { return -ENOSYS; + } + if(nargs > 3) { + unsigned long usp = regs->regs[29]; + unsigned long *sp = (unsigned long *) usp; + if(usp & 3) { + printk("unaligned usp -EFAULT\n"); + force_sig(SIGSEGV, current); + return -EFAULT; + } + errno = verify_area(VERIFY_READ, (void *) (usp + 16), + (nargs - 3) * sizeof(unsigned long)); + if(errno) { + return -EFAULT; + } + switch(nargs) { + case 7: + a3 = sp[4]; a4 = sp[5]; a5 = sp[6]; a6 = sp[7]; + break; + case 6: + a3 = sp[4]; a4 = sp[5]; a5 = sp[6]; a6 = 0; + break; + case 5: + a3 = sp[4]; a4 = sp[5]; a5 = a6 = 0; + break; + case 4: + a3 = sp[4]; a4 = a5 = a6 = 0; + break; + + default: + a3 = a4 = a5 = a6 = 0; + break; + } + } else { + a3 = a4 = a5 = a6 = 0; + } + a0 = regs->regs[5]; a1 = regs->regs[6]; a2 = regs->regs[7]; + if(nargs == 0) + a0 = (unsigned long) regs; return syscall((void *)a0, a1, a2, a3, a4, a5, a6); } @@ -190,13 +197,20 @@ static char *sfnames[] = { }; #endif -asmlinkage void do_sys(struct pt_regs *regs) +#if defined(CONFIG_BINFMT_IRIX) && defined(CONF_DEBUG_IRIX) +#define SYS(fun, narg) #fun, +static char *irix_sys_names[] = { +#include "irix5sys.h" +}; +#endif + +void do_sys(struct pt_regs *regs) { unsigned long syscallnr, usp; syscall_t syscall; int errno, narg; - /* Skip syscall instruction */ + /* Skip syscall instruction */ if (delay_slot(regs)) { /* * By convention "li v0,<syscallno>" is always preceeding @@ -211,70 +225,121 @@ asmlinkage void do_sys(struct pt_regs *regs) syscallnr = regs->regs[2]; if (syscallnr > (__NR_Linux + __NR_Linux_syscalls)) goto illegal_syscall; + syscall = sys_call_table[syscallnr]; + if (syscall == NULL) + goto illegal_syscall; + narg = sys_narg_table[syscallnr]; #ifdef CONF_PRINT_SYSCALLS - printk("do_sys(): %s()", sfnames[syscallnr - __NR_Linux]); + if(syscallnr >= 4000) + printk("do_sys(%s:%d): %s(%08lx,%08lx,%08lx,%08lx)<pc=%08lx>", + current->comm, current->pid, sfnames[syscallnr - __NR_Linux], + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7], + regs->cp0_epc); +#endif +#if defined(CONFIG_BINFMT_IRIX) && defined(CONF_DEBUG_IRIX) + if(syscallnr < 
2000 && syscallnr >= 1000) { + printk("irix_sys(%s:%d): %s(", current->comm, + current->pid, irix_sys_names[syscallnr - 1000]); + if((narg < 4) && (narg != 0)) { + int i = 0; + + while(i < (narg - 1)) { + printk("%08lx, ", regs->regs[i + 4]); + i++; + } + printk("%08lx) ", regs->regs[i + 4]); + } else if(narg == 0) { + printk("%08lx, %08lx, %08lx, %08lx) ", + regs->regs[4], regs->regs[5], regs->regs[6], + regs->regs[7]); + } else + printk("narg=%d) ", narg); + } #endif - narg = sys_narg_table[syscallnr]; if (narg > 4) { /* * Verify that we can safely get the additional parameters - * from the user stack. + * from the user stack. Of course I could read the params + * from unaligned addresses ... Consider this a programming + * course caliber .45. */ usp = regs->regs[29]; if (usp & 3) { printk("unaligned usp\n"); - do_exit(SIGBUS); + force_sig(SIGSEGV, current); + regs->regs[2] = EFAULT; + regs->regs[7] = 1; return; } - if (!access_ok(VERIFY_READ, (void *) (usp + 16), - (narg - 4) * sizeof(unsigned long))) { - errno = -EFAULT; - goto syscall_error; + (narg - 4) * sizeof(unsigned long))) { + regs->regs[2] = EFAULT; + regs->regs[7] = 1; + return; } } - if ((current->flags & PF_TRACESYS) == 0) { + if ((current->flags & PF_TRACESYS) == 0) + { errno = do_syscalls(regs, syscall, narg); - if (errno < 0) - goto syscall_error; - + if ((errno < 0 && errno > (-ENOIOCTLCMD - 1)) || current->errno) { + goto bad_syscall; + } regs->regs[2] = errno; regs->regs[7] = 0; - } else { + } + else + { syscall_trace(); errno = do_syscalls(regs, syscall, narg); - if (errno < 0) { + if (errno < 0 || current->errno) + { regs->regs[2] = -errno; regs->regs[7] = 1; - } else { + } + else + { regs->regs[2] = errno; regs->regs[7] = 0; } syscall_trace(); } -#ifdef CONF_PRINT_SYSCALLS +#if defined(CONF_PRINT_SYSCALLS) || \ + (defined(CONFIG_BINFMT_IRIX) && defined(CONF_DEBUG_IRIX)) +#if 0 printk(" returning: normal\n"); +#else + if(syscallnr >= 4000 && syscallnr < 5000) + printk(" returning: %08lx\n", (unsigned long) errno); +#endif #endif return; -syscall_error: - regs->regs[2] = -errno; +bad_syscall: + regs->regs[0] = regs->regs[2] = -errno; regs->regs[7] = 1; -#ifdef CONF_PRINT_SYSCALLS - printk(" returning: syscall_error, errno=%d\n", -errno); +#if defined(CONF_PRINT_SYSCALLS) || \ + (defined(CONFIG_BINFMT_IRIX) && defined(CONF_DEBUG_IRIX)) +#if 0 + printk(" returning: bad_syscall\n"); +#else + if(syscallnr >= 4000 && syscallnr < 5000) + printk(" returning error: %d\n", errno); +#endif #endif return; - illegal_syscall: + regs->regs[2] = ENOSYS; regs->regs[7] = 1; -#ifdef CONF_PRINT_SYSCALLS - printk(" returning: illegal_syscall\n"); +#if defined(CONF_PRINT_SYSCALLS) || \ + (defined(CONFIG_BINFMT_IRIX) && defined(CONF_DEBUG_IRIX)) + if(syscallnr >= 1000 && syscallnr < 2000) + printk(" returning: illegal_syscall\n"); #endif return; } diff --git a/arch/mips/kernel/syscalls.h b/arch/mips/kernel/syscalls.h index 6a398c92d..723cb5e34 100644 --- a/arch/mips/kernel/syscalls.h +++ b/arch/mips/kernel/syscalls.h @@ -16,7 +16,7 @@ * * The binary compatibility calls are still missing in this list. 
*/ -SYS(sys_syscall, 7) /* 4000 */ +SYS(sys_syscall, 0) /* 4000 */ SYS(sys_exit, 1) SYS(sys_fork, 0) SYS(sys_read, 3) @@ -201,5 +201,3 @@ SYS(sys_setsockopt, 5) SYS(sys_shutdown, 2) SYS(sys_socket, 3) SYS(sys_socketpair, 4) -SYS(sys_setresuid, 3) /* 4185 */ -SYS(sys_getresuid, 3) diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c new file mode 100644 index 000000000..c54f83595 --- /dev/null +++ b/arch/mips/kernel/sysirix.c @@ -0,0 +1,2100 @@ +/* $Id: sysirix.c,v 1.14 1996/07/14 01:59:51 dm Exp $ + * sysirix.c: IRIX system call emulation. + * + * Copyright (C) 1996 David S. Miller + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/pagemap.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/malloc.h> +#include <linux/swap.h> +#include <linux/errno.h> +#include <linux/timex.h> +#include <linux/times.h> +#include <linux/elf.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/utsname.h> + +#include <asm/ptrace.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/uaccess.h> + +/* 2,000 lines of complete and utter shit coming up... */ + +/* Utility routines. */ +static inline struct task_struct *find_process_by_pid(pid_t pid) +{ + struct task_struct *p, *q; + + if (pid == 0) + p = current; + else { + p = 0; + for_each_task(q) { + if (q && q->pid == pid) { + p = q; + break; + } + } + } + return p; +} + +/* The sysmp commands supported thus far. */ +#define MP_PGSIZE 14 /* Return system page size in v1. */ + +asmlinkage int irix_sysmp(struct pt_regs *regs) +{ + unsigned long cmd; + int base = 0; + int error = 0; + + if(regs->regs[2] == 1000) + base = 1; + cmd = regs->regs[base + 4]; + switch(cmd) { + case MP_PGSIZE: + return PAGE_SIZE; + break; + + default: + printk("SYSMP[%s:%d]: Unsupported opcode %d\n", + current->comm, current->pid, (int)cmd); + error = -EINVAL; + break; + } + + return error; +} + +/* The prctl commands. */ +#define PR_MAXPROCS 1 /* Tasks/user. */ +#define PR_ISBLOCKED 2 /* If blocked, return 1. */ +#define PR_SETSTACKSIZE 3 /* Set largest task stack size. */ +#define PR_GETSTACKSIZE 4 /* Get largest task stack size. */ +#define PR_MAXPPROCS 5 /* Num parallel tasks. */ +#define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */ +#define PR_SETEXITSIG 8 /* When task exit's, set signal. */ +#define PR_RESIDENT 9 /* Make task unswappable. */ +#define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */ +#define PR_DETACHADDR 11 /* Disconnect a vma from a task. */ +#define PR_TERMCHILD 12 /* When parent sleeps with fishes, kill child. */ +#define PR_GETSHMASK 13 /* Get the sproc() share mask. */ +#define PR_GETNSHARE 14 /* Number of share group members. */ +#define PR_COREPID 15 /* Add task pid to name when it core. */ +#define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */ +#define PR_PTHREADEXIT 17 /* Kill a pthread without prejudice. */ + +asmlinkage int irix_prctl(struct pt_regs *regs) +{ + unsigned long cmd; + int error = 0, base = 0; + + if(regs->regs[2] == 1000) + base = 1; + cmd = regs->regs[base + 4]; + switch(cmd) { + case PR_MAXPROCS: + printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n", + current->comm, current->pid); + return NR_TASKS; + + case PR_ISBLOCKED: { + struct task_struct *task; + + printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n", + current->comm, current->pid); + task = find_process_by_pid(regs->regs[base + 5]); + if(!task) + return -ESRCH; + return (task->next_run ? 0 : 1); + /* Can _your_ OS find this out that fast? 
*/ + } + + case PR_SETSTACKSIZE: { + long value = regs->regs[base + 5]; + + printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n", + current->comm, current->pid, (unsigned long) value); + if(value > RLIM_INFINITY) + value = RLIM_INFINITY; + if(suser()) { + current->rlim[RLIMIT_STACK].rlim_max = + current->rlim[RLIMIT_STACK].rlim_cur = value; + return value; + } + if(value > current->rlim[RLIMIT_STACK].rlim_max) + return -EINVAL; + current->rlim[RLIMIT_STACK].rlim_cur = value; + return value; + } + + case PR_GETSTACKSIZE: + printk("irix_prctl[%s:%d]: Wants PR_GETSTACKSIZE\n", + current->comm, current->pid); + return current->rlim[RLIMIT_STACK].rlim_cur; + + case PR_MAXPPROCS: + printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n", + current->comm, current->pid); + return 1; + + case PR_UNBLKONEXEC: + printk("irix_prctl[%s:%d]: Wants PR_UNBLKONEXEC\n", + current->comm, current->pid); + return -EINVAL; + + case PR_SETEXITSIG: + printk("irix_prctl[%s:%d]: Wants PR_SETEXITSIG\n", + current->comm, current->pid); + + /* We can probably play some game where we set the task + * exit_code to some non-zero value when this is requested, + * and check whether exit_code is already set in do_exit(). + */ + return -EINVAL; + + case PR_RESIDENT: + printk("irix_prctl[%s:%d]: Wants PR_RESIDENT\n", + current->comm, current->pid); + return 0; /* Compatability indeed. */ + + case PR_ATTACHADDR: + printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDR\n", + current->comm, current->pid); + return -EINVAL; + + case PR_DETACHADDR: + printk("irix_prctl[%s:%d]: Wants PR_DETACHADDR\n", + current->comm, current->pid); + return -EINVAL; + + case PR_TERMCHILD: + printk("irix_prctl[%s:%d]: Wants PR_TERMCHILD\n", + current->comm, current->pid); + return -EINVAL; + + case PR_GETSHMASK: + printk("irix_prctl[%s:%d]: Wants PR_GETSHMASK\n", + current->comm, current->pid); + return -EINVAL; /* Until I have the sproc() stuff in. */ + + case PR_GETNSHARE: + return 0; /* Until I have the sproc() stuff in. */ + + case PR_COREPID: + printk("irix_prctl[%s:%d]: Wants PR_COREPID\n", + current->comm, current->pid); + return -EINVAL; + + case PR_ATTACHADDRPERM: + printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDRPERM\n", + current->comm, current->pid); + return -EINVAL; + + case PR_PTHREADEXIT: + printk("irix_prctl[%s:%d]: Wants PR_PTHREADEXIT\n", + current->comm, current->pid); + do_exit(regs->regs[base + 5]); + + default: + printk("irix_prctl[%s:%d]: Non-existant opcode %d\n", + current->comm, current->pid, (int)cmd); + error = -EINVAL; + break; + } + + return error; +} + +#undef DEBUG_PROCGRPS + +extern unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt); +extern asmlinkage int sys_setpgid(pid_t pid, pid_t pgid); +extern void sys_sync(void); +extern asmlinkage int sys_getsid(pid_t pid); +extern asmlinkage int sys_getgroups(int gidsetsize, gid_t *grouplist); +extern asmlinkage int sys_setgroups(int gidsetsize, gid_t *grouplist); +extern int getrusage(struct task_struct *p, int who, struct rusage *ru); + +/* The syssgi commands supported thus far. */ +#define SGI_SYSID 1 /* Return unique per-machine identifier. */ +#define SGI_RDNAME 6 /* Return string name of a process. */ +#define SGI_SETPGID 21 /* Set process group id. */ +#define SGI_SYSCONF 22 /* POSIX sysconf garbage. */ +#define SGI_SETGROUPS 40 /* POSIX sysconf garbage. */ +#define SGI_GETGROUPS 41 /* POSIX sysconf garbage. */ +#define SGI_RUSAGE 56 /* BSD style rusage(). */ +#define SGI_SSYNC 62 /* Synchronous fs sync. */ +#define SGI_GETSID 65 /* SysVr4 get session id. 
*/ +#define SGI_ELFMAP 68 /* Map an elf image. */ +#define SGI_TOSSTSAVE 108 /* Toss saved vma's. */ +#define SGI_FP_BCOPY 129 /* Should FPU bcopy be used on this machine? */ +#define SGI_PHYSP 1011 /* Translate virtual into physical page. */ + +asmlinkage int irix_syssgi(struct pt_regs *regs) +{ + unsigned long cmd; + int retval, base = 0; + + if(regs->regs[2] == 1000) + base = 1; + + cmd = regs->regs[base + 4]; + switch(cmd) { + case SGI_SYSID: { + char *buf = (char *) regs->regs[base + 5]; + + /* XXX Use ethernet addr.... */ + return clear_user(buf, 64); + } + + case SGI_RDNAME: { + int pid = (int) regs->regs[base + 5]; + char *buf = (char *) regs->regs[base + 6]; + struct task_struct *p; + + retval = verify_area(VERIFY_WRITE, buf, 16); + if(retval) + return retval; + for_each_task(p) { + if(p->pid == pid) + goto found0; + } + return -ESRCH; + + found0: + copy_to_user(buf, p->comm, 16); + return 0; + } + + case SGI_SETPGID: { + int error; + +#ifdef DEBUG_PROCGRPS + printk("[%s:%d] setpgid(%d, %d) ", + current->comm, current->pid, + (int) regs->regs[base + 5], (int)regs->regs[base + 6]); +#endif + error = sys_setpgid(regs->regs[base + 5], regs->regs[base + 6]); + +#ifdef DEBUG_PROCGRPS + printk("error=%d\n", error); +#endif + return error; + } + + case SGI_SYSCONF: { + switch(regs->regs[base + 5]) { + case 1: + return (MAX_ARG_PAGES >> 4); /* XXX estimate... */ + case 2: + return NR_TASKS; + case 3: + return HZ; + case 4: + return NGROUPS; + case 5: + return NR_OPEN; + case 6: + return 1; + case 7: + return 1; + case 8: + return 199009; + case 11: + return PAGE_SIZE; + case 12: + return 4; + case 25: + case 26: + case 27: + case 28: + case 29: + case 30: + return 0; + case 31: + return 32; + default: + return -EINVAL; + }; + } + + case SGI_SETGROUPS: + return sys_setgroups((int) regs->regs[base + 5], + (gid_t *) regs->regs[base + 6]); + + case SGI_GETGROUPS: + return sys_getgroups((int) regs->regs[base + 5], + (gid_t *) regs->regs[base + 6]); + + case SGI_RUSAGE: { + struct rusage *ru = (struct rusage *) regs->regs[base + 6]; + + switch((int) regs->regs[base + 5]) { + case 0: + /* rusage self */ + return getrusage(current, RUSAGE_SELF, ru); + + case -1: + /* rusage children */ + return getrusage(current, RUSAGE_CHILDREN, ru); + + default: + return -EINVAL; + }; + } + + case SGI_SSYNC: + sys_sync(); + return 0; + + case SGI_GETSID: { + int error; + +#ifdef DEBUG_PROCGRPS + printk("[%s:%d] getsid(%d) ", current->comm, current->pid, + (int) regs->regs[base + 5]); +#endif + error = sys_getsid(regs->regs[base + 5]); +#ifdef DEBUG_PROCGRPS + printk("error=%d\n", error); +#endif + return error; + } + + case SGI_ELFMAP: + retval = irix_mapelf((int) regs->regs[base + 5], + (struct elf_phdr *) regs->regs[base + 6], + (int) regs->regs[base + 7]); + return retval; + + case SGI_TOSSTSAVE: + /* XXX We don't need to do anything? 
*/ + return 0; + + case SGI_FP_BCOPY: + return 0; + + case SGI_PHYSP: { + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long addr = regs->regs[base + 5]; + int *pageno = (int *) (regs->regs[base + 6]); + + retval = verify_area(VERIFY_WRITE, pageno, sizeof(int)); + if(retval) + return retval; + pgdp = pgd_offset(current->mm, addr); + pmdp = pmd_offset(pgdp, addr); + ptep = pte_offset(pmdp, addr); + if(ptep) { + if(pte_val(*ptep) & (_PAGE_VALID | _PAGE_PRESENT)) { + return put_user((pte_val(*ptep) & PAGE_MASK)>>PAGE_SHIFT, pageno); + return 0; + } + } + return -EINVAL; + } + + default: + printk("irix_syssgi: Unsupported command %d\n", (int)cmd); + return -EINVAL; + }; +} + +asmlinkage int irix_gtime(struct pt_regs *regs) +{ + return CURRENT_TIME; +} + +int vm_enough_memory(long pages); + +/* + * IRIX is completely broken... it returns 0 on success, otherwise + * ENOMEM. + */ +asmlinkage int irix_brk(unsigned long brk) +{ + unsigned long rlim; + unsigned long newbrk, oldbrk; + struct mm_struct *mm = current->mm; + + if (brk < current->mm->end_code) + return -ENOMEM; + + newbrk = PAGE_ALIGN(brk); + oldbrk = PAGE_ALIGN(mm->brk); + if (oldbrk == newbrk) { + mm->brk = brk; + return 0; + } + + /* + * Always allow shrinking brk + */ + if (brk <= current->mm->brk) { + mm->brk = brk; + do_munmap(newbrk, oldbrk-newbrk); + return 0; + } + /* + * Check against rlimit and stack.. + */ + rlim = current->rlim[RLIMIT_DATA].rlim_cur; + if (rlim >= RLIM_INFINITY) + rlim = ~0; + if (brk - mm->end_code > rlim) + return -ENOMEM; + + /* + * Check against existing mmap mappings. + */ + if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + return -ENOMEM; + + /* + * Check if we have enough memory.. + */ + if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) + return -ENOMEM; + + /* + * Ok, looks good - let it rip. + */ + mm->brk = brk; + do_mmap(NULL, oldbrk, newbrk-oldbrk, + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_FIXED|MAP_PRIVATE, 0); + + return 0; +} + +asmlinkage int irix_getpid(struct pt_regs *regs) +{ + regs->regs[3] = current->p_opptr->pid; + return current->pid; +} + +asmlinkage int irix_getuid(struct pt_regs *regs) +{ + regs->regs[3] = current->euid; + return current->uid; +} + +asmlinkage int irix_getgid(struct pt_regs *regs) +{ + regs->regs[3] = current->egid; + return current->gid; +} + +asmlinkage int irix_stime(int value) +{ + if(!suser()) + return -EPERM; + cli(); + xtime.tv_sec = value; + xtime.tv_usec = 0; + time_state = TIME_ERROR; + time_maxerror = MAXPHASE; + time_esterror = MAXPHASE; + sti(); + return 0; +} + +extern int _setitimer(int which, struct itimerval *value, struct itimerval *ovalue); + +static inline void jiffiestotv(unsigned long jiffies, struct timeval *value) +{ + value->tv_usec = (jiffies % HZ) * (1000000 / HZ); + value->tv_sec = jiffies / HZ; + return; +} + +static inline void getitimer_real(struct itimerval *value) +{ + register unsigned long val, interval; + + interval = current->it_real_incr; + val = 0; + if (del_timer(¤t->real_timer)) { + unsigned long now = jiffies; + val = current->real_timer.expires; + add_timer(¤t->real_timer); + /* look out for negative/zero itimer.. 
*/ + if (val <= now) + val = now+1; + val -= now; + } + jiffiestotv(val, &value->it_value); + jiffiestotv(interval, &value->it_interval); +} + +asmlinkage unsigned int irix_alarm(unsigned int seconds) +{ + struct itimerval it_new, it_old; + unsigned int oldalarm; + + if(!seconds) { + getitimer_real(&it_old); + del_timer(¤t->real_timer); + } else { + it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0; + it_new.it_value.tv_sec = seconds; + it_new.it_value.tv_usec = 0; + _setitimer(ITIMER_REAL, &it_new, &it_old); + } + oldalarm = it_old.it_value.tv_sec; + /* ehhh.. We can't return 0 if we have an alarm pending.. */ + /* And we'd better return too much than too little anyway */ + if (it_old.it_value.tv_usec) + oldalarm++; + return oldalarm; +} + +asmlinkage int irix_pause(void) +{ + current->state = TASK_INTERRUPTIBLE; + schedule(); + return -EINTR; +} + +extern asmlinkage int sys_mount(char * dev_name, char * dir_name, char * type, + unsigned long new_flags, void * data); + +/* XXX need more than this... */ +asmlinkage int irix_mount(char *dev_name, char *dir_name, unsigned long flags, + char *type, void *data, int datalen) +{ + printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n", + current->comm, current->pid, + dev_name, dir_name, flags, type, data, datalen); + return sys_mount(dev_name, dir_name, type, flags, data); + /* return -EINVAL; */ +} + +struct irix_statfs { + short f_type; + long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; + char f_fname[6], f_fpack[6]; +}; + +asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf, + int len, int fs_type) +{ + struct inode *inode; + struct statfs kbuf; + int error, old_fs, i; + + /* We don't support this feature yet. */ + if(fs_type) + return -EINVAL; + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statfs)); + if (error) + return error; + error = namei(path,&inode); + if (error) + return error; + if (!inode->i_sb->s_op->statfs) { + iput(inode); + return -ENOSYS; + } + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + iput(inode); + __put_user(kbuf.f_type, &buf->f_type); + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + for(i = 0; i < 6; i++) { + __put_user(0, &buf->f_fname[i]); + __put_user(0, &buf->f_fpack[i]); + } + + return 0; +} + +asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf) +{ + struct inode * inode; + struct statfs kbuf; + struct file *file; + int error, old_fs, i; + + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statfs)); + if (error) + return error; + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!(inode = file->f_inode)) + return -ENOENT; + if (!inode->i_sb->s_op->statfs) + return -ENOSYS; + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + __put_user(kbuf.f_type, &buf->f_type); + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + for(i = 0; i < 6; i++) { + __put_user(0, &buf->f_fname[i]); + __put_user(0, &buf->f_fpack[i]); + } + + return 0; +} 
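The IRIX statfs wrappers above, like several emulation entry points further down (irix_xstat, irix_lseek64, irix_statvfs), all rely on the same address-space trick: the native Linux helper expects a userspace pointer, so the wrapper first checks the real user buffer with verify_area, then widens the current segment with set_fs(get_ds()) so the helper can fill a temporary that actually lives on the kernel stack, restores the old segment, and only afterwards translates the result into the IRIX layout with __put_user. A minimal sketch of that pattern, assuming only the get_fs/set_fs/get_ds, verify_area and __put_user interfaces used throughout this patch; native_op() and the two structures are placeholders, not symbols from the patch:

	/* Sketch only: native_op() and the structs are placeholders. */
	static int irix_wrapper_sketch(struct irix_result *ubuf)
	{
		struct native_result kbuf;	/* kernel-side temporary */
		int error, old_fs;

		/* fail early if we may not write the user buffer */
		error = verify_area(VERIFY_WRITE, ubuf, sizeof(*ubuf));
		if (error)
			return error;

		/* let the native helper treat the kernel pointer as "user" memory */
		old_fs = get_fs(); set_fs(get_ds());
		error = native_op(&kbuf);
		set_fs(old_fs);
		if (error)
			return error;

		/* translate field by field into the IRIX layout */
		__put_user(kbuf.value, &ubuf->value);
		return 0;
	}

The restore of old_fs before the copy-out is the important step: once the segment limit has been widened, __put_user would no longer reject a bogus user pointer.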
+ +extern asmlinkage int sys_setpgid(pid_t pid, pid_t pgid); +extern asmlinkage int sys_setsid(void); + +asmlinkage int irix_setpgrp(int flags) +{ + int error; + +#ifdef DEBUG_PROCGRPS + printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags); +#endif + if(!flags) + error = current->pgrp; + else + error = sys_setsid(); +#ifdef DEBUG_PROCGRPS + printk("returning %d\n", current->pgrp); +#endif + return error; +} + +asmlinkage int irix_times(struct tms * tbuf) +{ + if (tbuf) { + int error = verify_area(VERIFY_WRITE,tbuf,sizeof *tbuf); + if (error) + return error; + __put_user(current->utime,&tbuf->tms_utime); + __put_user(current->stime,&tbuf->tms_stime); + __put_user(current->cutime,&tbuf->tms_cutime); + __put_user(current->cstime,&tbuf->tms_cstime); + } + + return 0; +} + +asmlinkage int irix_exec(struct pt_regs *regs) +{ + int error, base = 0; + char * filename; + + if(regs->regs[2] == 1000) + base = 1; + error = getname((char *) (long)regs->regs[base + 4], &filename); + if (error) + return error; + error = do_execve(filename, (char **) (long)regs->regs[base + 5], + (char **) 0, regs); + putname(filename); + return error; +} + +asmlinkage int irix_exece(struct pt_regs *regs) +{ + int error, base = 0; + char * filename; + + if(regs->regs[2] == 1000) + base = 1; + error = getname((char *) (long)regs->regs[base + 4], &filename); + if (error) + return error; + error = do_execve(filename, (char **) (long)regs->regs[base + 5], + (char **) (long)regs->regs[base + 6], regs); + putname(filename); + return error; +} + +/* sys_poll() support... */ +#define POLL_ROUND_UP(x,y) (((x)+(y)-1)/(y)) + +#define POLLIN 1 +#define POLLPRI 2 +#define POLLOUT 4 +#define POLLERR 8 +#define POLLHUP 16 +#define POLLNVAL 32 +#define POLLRDNORM 64 +#define POLLWRNORM POLLOUT +#define POLLRDBAND 128 +#define POLLWRBAND 256 + +#define LINUX_POLLIN (POLLRDNORM | POLLRDBAND | POLLIN) +#define LINUX_POLLOUT (POLLWRBAND | POLLWRNORM | POLLOUT) +#define LINUX_POLLERR (POLLERR) + +static inline void free_wait(select_table * p) +{ + struct select_table_entry * entry = p->entry + p->nr; + + while (p->nr > 0) { + p->nr--; + entry--; + remove_wait_queue(entry->wait_address,&entry->wait); + } +} + + +/* Copied directly from fs/select.c */ +static int check(int flag, select_table * wait, struct file * file) +{ + struct inode * inode; + struct file_operations *fops; + int (*select) (struct inode *, struct file *, int, select_table *); + + inode = file->f_inode; + if ((fops = file->f_op) && (select = fops->select)) + return select(inode, file, flag, wait) + || (wait && select(inode, file, flag, NULL)); + if (S_ISREG(inode->i_mode)) + return 1; + return 0; +} + +struct poll { + int fd; + short events; + short revents; +}; + +int irix_poll(struct poll * ufds, size_t nfds, int timeout) +{ + int i,j, count, fdcount, error, retflag; + struct poll * fdpnt; + struct poll * fds, *fds1; + select_table wait_table, *wait; + struct select_table_entry *entry; + + if ((error = verify_area(VERIFY_READ, ufds, nfds*sizeof(struct poll)))) + return error; + + if (nfds > NR_OPEN) + return -EINVAL; + + if (!(entry = (struct select_table_entry*)__get_free_page(GFP_KERNEL)) + || !(fds = (struct poll *)kmalloc(nfds*sizeof(struct poll), GFP_KERNEL))) + return -ENOMEM; + + copy_from_user(fds, ufds, nfds*sizeof(struct poll)); + + if (timeout < 0) + current->timeout = 0x7fffffff; + else { + current->timeout = jiffies + POLL_ROUND_UP(timeout, (1000/HZ)); + if (current->timeout <= jiffies) + current->timeout = 0; + } + + count = 0; + wait_table.nr = 
0; + wait_table.entry = entry; + wait = &wait_table; + + for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) { + i = fdpnt->fd; + fdpnt->revents = 0; + if (!current->files->fd[i] || !current->files->fd[i]->f_inode) + fdpnt->revents = POLLNVAL; + } +repeat: + current->state = TASK_INTERRUPTIBLE; + for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) { + i = fdpnt->fd; + + if(i < 0) continue; + if (!current->files->fd[i] || !current->files->fd[i]->f_inode) continue; + + if ((fdpnt->events & LINUX_POLLIN) + && check(SEL_IN, wait, current->files->fd[i])) { + retflag = 0; + if (fdpnt->events & POLLIN) + retflag = POLLIN; + if (fdpnt->events & POLLRDNORM) + retflag = POLLRDNORM; + fdpnt->revents |= retflag; + count++; + wait = NULL; + } + + if ((fdpnt->events & LINUX_POLLOUT) && + check(SEL_OUT, wait, current->files->fd[i])) { + fdpnt->revents |= (LINUX_POLLOUT & fdpnt->events); + count++; + wait = NULL; + } + + if (check(SEL_EX, wait, current->files->fd[i])) { + fdpnt->revents |= POLLHUP; + count++; + wait = NULL; + } + } + + if ((current->signal & (~current->blocked))) + return -EINTR; + + wait = NULL; + if (!count && current->timeout > jiffies) { + schedule(); + goto repeat; + } + + free_wait(&wait_table); + free_page((unsigned long) entry); + + /* OK, now copy the revents fields back to user space. */ + fds1 = fds; + fdcount = 0; + for(i=0; i < (int)nfds; i++, ufds++, fds++) { + if (fds->revents) { + fdcount++; + } + put_user(fds->revents, &ufds->revents); + } + kfree(fds1); + current->timeout = 0; + current->state = TASK_RUNNING; + return fdcount; +} + +asmlinkage unsigned long irix_gethostid(void) +{ + printk("[%s:%d]: irix_gethostid() called...\n", + current->comm, current->pid); + return -EINVAL; +} + +asmlinkage unsigned long irix_sethostid(unsigned long val) +{ + printk("[%s:%d]: irix_sethostid(%08lx) called...\n", + current->comm, current->pid, val); + return -EINVAL; +} + +extern asmlinkage int sys_socket(int family, int type, int protocol); + +asmlinkage int irix_socket(int family, int type, int protocol) +{ + switch(type) { + case 1: + type = SOCK_DGRAM; + break; + + case 2: + type = SOCK_STREAM; + break; + + case 3: + type = 9; /* Invalid... 
*/ + break; + + case 4: + type = SOCK_RAW; + break; + + case 5: + type = SOCK_RDM; + break; + + case 6: + type = SOCK_SEQPACKET; + break; + + default: + break; + } + + return sys_socket(family, type, protocol); +} + +asmlinkage int irix_getdomainname(char *name, int len) +{ + int error; + + if(len > (__NEW_UTS_LEN - 1)) + len = __NEW_UTS_LEN - 1; + error = verify_area(VERIFY_WRITE, name, len); + if(error) + return -EFAULT; + if(copy_to_user(name, system_utsname.domainname, len)) + return -EFAULT; + + return 0; +} + +asmlinkage unsigned long irix_getpagesize(void) +{ + return PAGE_SIZE; +} + +asmlinkage int irix_msgsys(int opcode, unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + switch(opcode) { + case 0: + return sys_msgget((key_t) arg0, (int) arg1); + case 1: + return sys_msgctl((int) arg0, (int) arg1, (struct msqid_ds *)arg2); + case 2: + return sys_msgrcv((int) arg0, (struct msgbuf *) arg1, + (size_t) arg2, (long) arg3, (int) arg4); + case 3: + return sys_msgsnd((int) arg0, (struct msgbuf *) arg1, + (size_t) arg2, (int) arg3); + default: + return -EINVAL; + } +} + +asmlinkage int irix_shmsys(int opcode, unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + switch(opcode) { + case 0: + return sys_shmat((int) arg0, (char *)arg1, (int) arg2, + (unsigned long *) arg3); + case 1: + return sys_shmctl((int)arg0, (int)arg1, (struct shmid_ds *)arg2); + case 2: + return sys_shmdt((char *)arg0); + case 3: + return sys_shmget((key_t) arg0, (int) arg1, (int) arg2); + default: + return -EINVAL; + } +} + +asmlinkage int irix_semsys(int opcode, unsigned long arg0, unsigned long arg1, + unsigned long arg2, int arg3) +{ + switch(opcode) { + case 0: + return sys_semctl((int) arg0, (int) arg1, (int) arg2, + (union semun) arg3); + case 1: + return sys_semget((key_t) arg0, (int) arg1, (int) arg2); + case 2: + return sys_semop((int) arg0, (struct sembuf *)arg1, + (unsigned int) arg2); + default: + return -EINVAL; + } +} + +extern asmlinkage int sys_llseek(unsigned int fd, unsigned long offset_high, + unsigned long offset_low, loff_t * result, + unsigned int origin); + +asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow, int base) +{ + loff_t junk; + int old_fs, error; + + old_fs = get_fs(); set_fs(get_ds()); + error = sys_llseek(fd, offhi, offlow, &junk, base); + set_fs(old_fs); + + if(error) + return error; + return (int) junk; +} + +asmlinkage int irix_sginap(int ticks) +{ + if(ticks) { + current->timeout = ticks + jiffies; + current->state = TASK_INTERRUPTIBLE; + } + schedule(); + return 0; +} + +asmlinkage int irix_sgikopt(char *istring, char *ostring, int len) +{ + return -EINVAL; +} + +asmlinkage int irix_gettimeofday(struct timeval *tv) +{ + return copy_to_user(tv, &xtime, sizeof(*tv)) ? -EFAULT : 0; +} + +asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot, + int flags, int fd, off_t offset) +{ + struct file *file = NULL; + unsigned long retval; + + if(!(flags & MAP_ANONYMOUS)) { + if(fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + + retval = do_mmap(file, addr, len, prot, flags, offset); + return retval; +} + +asmlinkage int irix_madvise(unsigned long addr, int len, int behavior) +{ + printk("[%s:%d] Wheee.. 
irix_madvise(%08lx,%d,%d)\n", + current->comm, current->pid, addr, len, behavior); + return -EINVAL; +} + +asmlinkage int irix_pagelock(char *addr, int len, int op) +{ + printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n", + current->comm, current->pid, addr, len, op); + return -EINVAL; +} + +asmlinkage int irix_quotactl(struct pt_regs *regs) +{ + printk("[%s:%d] Wheee.. irix_quotactl()\n", + current->comm, current->pid); + return -EINVAL; +} + +asmlinkage int irix_BSDsetpgrp(int pid, int pgrp) +{ + int error; + +#ifdef DEBUG_PROCGRPS + printk("[%s:%d] BSDsetpgrp(%d, %d) ", current->comm, current->pid, + pid, pgrp); +#endif + if(!pid) + pid = current->pid; + + /* Wheee, weird sysv thing... */ + if((pgrp == 0) && (pid == current->pid)) + error = sys_setsid(); + else + error = sys_setpgid(pid, pgrp); + +#ifdef DEBUG_PROCGRPS + printk("error = %d\n", error); +#endif + return error; +} + +asmlinkage int irix_systeminfo(int cmd, char *buf, int cnt) +{ + printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n", + current->comm, current->pid, cmd, buf, cnt); + return -EINVAL; +} + +struct iuname { + char sysname[257], nodename[257], release[257]; + char version[257], machine[257]; + char m_type[257], base_rel[257]; + char _unused0[257], _unused1[257], _unused2[257]; + char _unused3[257], _unused4[257], _unused5[257]; +}; + +asmlinkage int irix_uname(struct iuname *buf) +{ + if(copy_to_user(system_utsname.sysname, buf->sysname, 65)) + return -EFAULT; + if(copy_to_user(system_utsname.nodename, buf->nodename, 65)) + return -EFAULT; + if(copy_to_user(system_utsname.release, buf->release, 65)) + return -EFAULT; + if(copy_to_user(system_utsname.version, buf->version, 65)) + return -EFAULT; + if(copy_to_user(system_utsname.machine, buf->machine, 65)) + return -EFAULT; + + return 1; +} + +#undef DEBUG_XSTAT + +static inline int irix_xstat32_xlate(struct stat *kb, struct stat *ubuf) +{ + struct xstat32 { + u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid; + u32 st_rdev, st_pad2[2], st_size, st_pad3; + u32 st_atime0, st_atime1; + u32 st_mtime0, st_mtime1; + u32 st_ctime0, st_ctime1; + u32 st_blksize, st_blocks; + char st_fstype[16]; + u32 st_pad4[8]; + } *ub = (struct xstat32 *) ubuf; + + return copy_to_user(ub, kb, sizeof(*ub)) ? -EFAULT : 0; +} + +static inline void irix_xstat64_xlate(struct stat *sb) +{ + struct xstat64 { + u32 st_dev; s32 st_pad1[3]; + unsigned long long st_ino; + u32 st_mode; + u32 st_nlink; s32 st_uid; s32 st_gid; u32 st_rdev; + s32 st_pad2[2]; + long long st_size; + s32 st_pad3; + struct { s32 tv_sec, tv_nsec; } st_atime, st_mtime, st_ctime; + s32 st_blksize; + long long st_blocks; + char st_fstype[16]; + s32 st_pad4[8]; + } ks; + + ks.st_dev = (u32) sb->st_dev; + ks.st_pad1[0] = ks.st_pad1[1] = ks.st_pad1[2] = 0; + ks.st_ino = (unsigned long long) sb->st_ino; + ks.st_mode = (u32) sb->st_mode; + ks.st_nlink = (u32) sb->st_nlink; + ks.st_uid = (s32) sb->st_uid; + ks.st_gid = (s32) sb->st_gid; + ks.st_rdev = (u32) sb->st_rdev; + ks.st_pad2[0] = ks.st_pad2[1] = 0; + ks.st_size = (long long) sb->st_size; + ks.st_pad3 = 0; + + /* XXX hackety hack... 
*/ + ks.st_atime.tv_sec = (s32) sb->st_atime; ks.st_atime.tv_nsec = 0; + ks.st_mtime.tv_sec = (s32) sb->st_atime; ks.st_mtime.tv_nsec = 0; + ks.st_ctime.tv_sec = (s32) sb->st_atime; ks.st_ctime.tv_nsec = 0; + + ks.st_blksize = (s32) sb->st_blksize; + ks.st_blocks = (long long) sb->st_blocks; + memcpy(&ks.st_fstype[0], &sb->st_fstype[0], 16); + ks.st_pad4[0] = ks.st_pad4[1] = ks.st_pad4[2] = ks.st_pad4[3] = + ks.st_pad4[4] = ks.st_pad4[5] = ks.st_pad4[6] = ks.st_pad4[7] = 0; + + /* Now write it all back. */ + copy_to_user(sb, &ks, sizeof(struct xstat64)); +} + +extern asmlinkage int sys_newstat(char * filename, struct stat * statbuf); + +asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf) +{ +#ifdef DEBUG_XSTAT + printk("[%s:%d] Wheee.. irix_xstat(%d,%s,%p) ", + current->comm, current->pid, version, filename, statbuf); +#endif + switch(version) { + case 2: { + struct stat kb; + int errno, old_fs; + + old_fs = get_fs(); set_fs(get_ds()); + errno = sys_newstat(filename, &kb); + set_fs(old_fs); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + errno = irix_xstat32_xlate(&kb, statbuf); + return errno; + } + + case 3: { + int errno = sys_newstat(filename, statbuf); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + + irix_xstat64_xlate(statbuf); + return 0; + } + + default: + return -EINVAL; + } +} + +extern asmlinkage int sys_newlstat(char * filename, struct stat * statbuf); + +asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf) +{ +#ifdef DEBUG_XSTAT + printk("[%s:%d] Wheee.. irix_lxstat(%d,%s,%p) ", + current->comm, current->pid, version, filename, statbuf); +#endif + switch(version) { + case 2: { + struct stat kb; + int errno, old_fs; + + old_fs = get_fs(); set_fs(get_ds()); + errno = sys_newlstat(filename, &kb); + set_fs(old_fs); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + errno = irix_xstat32_xlate(&kb, statbuf); + return errno; + } + + case 3: { + int errno = sys_newlstat(filename, statbuf); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + + irix_xstat64_xlate(statbuf); + return 0; + } + + default: + return -EINVAL; + } +} + +extern asmlinkage int sys_newfstat(unsigned int fd, struct stat * statbuf); + +asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf) +{ +#ifdef DEBUG_XSTAT + printk("[%s:%d] Wheee.. irix_fxstat(%d,%d,%p) ", + current->comm, current->pid, version, fd, statbuf); +#endif + switch(version) { + case 2: { + struct stat kb; + int errno, old_fs; + + old_fs = get_fs(); set_fs(get_ds()); + errno = sys_newfstat(fd, &kb); + set_fs(old_fs); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + errno = irix_xstat32_xlate(&kb, statbuf); + return errno; + } + + case 3: { + int errno = sys_newfstat(fd, statbuf); +#ifdef DEBUG_XSTAT + printk("errno[%d]\n", errno); +#endif + if(errno) + return errno; + + irix_xstat64_xlate(statbuf); + return 0; + } + + default: + return -EINVAL; + } +} + +extern asmlinkage int sys_mknod(const char * filename, int mode, dev_t dev); + +asmlinkage int irix_xmknod(int ver, char *filename, int mode, dev_t dev) +{ + printk("[%s:%d] Wheee.. 
irix_xmknod(%d,%s,%x,%x)\n", + current->comm, current->pid, ver, filename, mode, (int) dev); + switch(ver) { + case 2: + return sys_mknod(filename, mode, dev); + + default: + return -EINVAL; + }; +} + +asmlinkage int irix_swapctl(int cmd, char *arg) +{ + printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n", + current->comm, current->pid, cmd, arg); + return -EINVAL; +} + +struct irix_statvfs { + u32 f_bsize; u32 f_frsize; u32 f_blocks; + u32 f_bfree; u32 f_bavail; u32 f_files; u32 f_ffree; u32 f_favail; + u32 f_fsid; char f_basetype[16]; + u32 f_flag; u32 f_namemax; + char f_fstr[32]; u32 f_filler[16]; +}; + +asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf) +{ + struct inode *inode; + struct statfs kbuf; + int error, old_fs, i; + + printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n", + current->comm, current->pid, fname, buf); + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)); + if(error) + return error; + error = namei(fname, &inode); + if(error) + return error; + if(!inode->i_sb->s_op->statfs) { + iput(inode); + return -ENOSYS; + } + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + iput(inode); + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ +#ifdef __MIPSEB__ + __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); +#else + __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); +#endif + for(i = 0; i < 16; i++) + __put_user(0, &buf->f_basetype[i]); + __put_user(0, &buf->f_flag); + __put_user(kbuf.f_namelen, &buf->f_namemax); + for(i = 0; i < 32; i++) + __put_user(0, &buf->f_fstr[i]); + + return 0; +} + +asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf) +{ + struct inode * inode; + struct statfs kbuf; + struct file *file; + int error, old_fs, i; + + printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n", + current->comm, current->pid, fd, buf); + + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)); + if (error) + return error; + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!(inode = file->f_inode)) + return -ENOENT; + if (!inode->i_sb->s_op->statfs) + return -ENOSYS; + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... 
*/ +#ifdef __MIPSEB__ + __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); +#else + __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); +#endif + for(i = 0; i < 16; i++) + __put_user(0, &buf->f_basetype[i]); + __put_user(0, &buf->f_flag); + __put_user(kbuf.f_namelen, &buf->f_namemax); + for(i = 0; i < 32; i++) + __put_user(0, &buf->f_fstr[i]); + + return 0; +} + +#define NOFOLLOW_LINKS 0 +#define FOLLOW_LINKS 1 + +static inline int chown_common(char *filename, uid_t user, gid_t group, int follow) +{ + struct inode * inode; + int error; + struct iattr newattrs; + + if(follow == NOFOLLOW_LINKS) + error = lnamei(filename,&inode); + else + error = namei(filename,&inode); + if (error) + return error; + if (IS_RDONLY(inode)) { + iput(inode); + return -EROFS; + } + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { + iput(inode); + return -EPERM; + } + if (user == (uid_t) -1) + user = inode->i_uid; + if (group == (gid_t) -1) + group = inode->i_gid; + newattrs.ia_mode = inode->i_mode; + newattrs.ia_uid = user; + newattrs.ia_gid = group; + newattrs.ia_valid = ATTR_UID | ATTR_GID | ATTR_CTIME; + /* + * If the owner has been changed, remove the setuid bit + */ + if (inode->i_mode & S_ISUID) { + newattrs.ia_mode &= ~S_ISUID; + newattrs.ia_valid |= ATTR_MODE; + } + /* + * If the group has been changed, remove the setgid bit + * + * Don't remove the setgid bit if no group execute bit. + * This is a file marked for mandatory locking. + */ + if (((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) { + newattrs.ia_mode &= ~S_ISGID; + newattrs.ia_valid |= ATTR_MODE; + } + inode->i_dirt = 1; + if (inode->i_sb->dq_op) { + inode->i_sb->dq_op->initialize(inode, -1); + if (inode->i_sb->dq_op->transfer(inode, &newattrs, 0)) + return -EDQUOT; + error = notify_change(inode, &newattrs); + if (error) + inode->i_sb->dq_op->transfer(inode, &newattrs, 1); + } else + error = notify_change(inode, &newattrs); + iput(inode); + return(error); +} + +asmlinkage int irix_chown(char *fname, int uid, int gid) +{ + /* Do follow any and all links... */ + return chown_common(fname, uid, gid, FOLLOW_LINKS); +} + +asmlinkage int irix_lchown(char *fname, int uid, int gid) +{ + /* Do _not_ follow any links... */ + return chown_common(fname, uid, gid, NOFOLLOW_LINKS); +} + +asmlinkage int irix_priocntl(struct pt_regs *regs) +{ + printk("[%s:%d] Wheee.. irix_priocntl()\n", + current->comm, current->pid); + return -EINVAL; +} + +asmlinkage int irix_sigqueue(int pid, int sig, int code, int val) +{ + printk("[%s:%d] Wheee.. 
irix_sigqueue(%d,%d,%d,%d)\n", + current->comm, current->pid, pid, sig, code, val); + return -EINVAL; +} + +extern asmlinkage int sys_truncate(const char * path, unsigned long length); +extern asmlinkage int sys_ftruncate(unsigned int fd, unsigned long length); + +asmlinkage int irix_truncate64(char *name, int pad, int size1, int size2) +{ + if(size1) + return -EINVAL; + return sys_truncate(name, size2); +} + +asmlinkage int irix_ftruncate64(int fd, int pad, int size1, int size2) +{ + if(size1) + return -EINVAL; + return sys_ftruncate(fd, size2); +} + +extern asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len, int prot, + int flags, int fd, off_t offset); + +asmlinkage int irix_mmap64(struct pt_regs *regs) +{ + unsigned long addr, *sp; + int len, prot, flags, fd, off1, off2, base = 0; + int error; + + if(regs->regs[2] == 1000) + base = 1; + sp = (unsigned long *) (regs->regs[29] + 16); + addr = regs->regs[base + 4]; + len = regs->regs[base + 5]; + prot = regs->regs[base + 6]; + if(!base) { + flags = regs->regs[base + 7]; + error = verify_area(VERIFY_READ, sp, (4 * sizeof(unsigned long))); + if(error) + return error; + fd = sp[0]; + __get_user(off1, &sp[1]); + __get_user(off2, &sp[2]); + } else { + error = verify_area(VERIFY_READ, sp, (5 * sizeof(unsigned long))); + if(error) + return error; + __get_user(flags, &sp[0]); + __get_user(fd, &sp[1]); + __get_user(off1, &sp[2]); + __get_user(off2, &sp[3]); + } + if(off1) + return -EINVAL; + return sys_mmap(addr, (size_t) len, prot, flags, fd, off2); +} + +asmlinkage int irix_dmi(struct pt_regs *regs) +{ + printk("[%s:%d] Wheee.. irix_dmi()\n", + current->comm, current->pid); + return -EINVAL; +} + +asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64, + int off1, int off2) +{ + printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n", + current->comm, current->pid, fd, buf, cnt, off64, off1, off2); + return -EINVAL; +} + +asmlinkage int irix_pwrite(int fd, char *buf, int cnt, int off64, + int off1, int off2) +{ + printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n", + current->comm, current->pid, fd, buf, cnt, off64, off1, off2); + return -EINVAL; +} + +asmlinkage int irix_sgifastpath(int cmd, unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5) +{ + printk("[%s:%d] Wheee.. irix_fastpath(%d,%08lx,%08lx,%08lx,%08lx," + "%08lx,%08lx)\n", + current->comm, current->pid, cmd, arg0, arg1, arg2, + arg3, arg4, arg5); + return -EINVAL; +} + +struct irix_statvfs64 { + u32 f_bsize; u32 f_frsize; + u64 f_blocks; u64 f_bfree; u64 f_bavail; + u64 f_files; u64 f_ffree; u64 f_favail; + u32 f_fsid; + char f_basetype[16]; + u32 f_flag; u32 f_namemax; + char f_fstr[32]; + u32 f_filler[16]; +}; + +asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf) +{ + struct inode *inode; + struct statfs kbuf; + int error, old_fs, i; + + printk("[%s:%d] Wheee.. 
irix_statvfs(%s,%p)\n", + current->comm, current->pid, fname, buf); + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)); + if(error) + return error; + error = namei(fname, &inode); + if(error) + return error; + if(!inode->i_sb->s_op->statfs) { + iput(inode); + return -ENOSYS; + } + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + iput(inode); + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ +#ifdef __MIPSEB__ + __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); +#else + __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); +#endif + for(i = 0; i < 16; i++) + __put_user(0, &buf->f_basetype[i]); + __put_user(0, &buf->f_flag); + __put_user(kbuf.f_namelen, &buf->f_namemax); + for(i = 0; i < 32; i++) + __put_user(0, &buf->f_fstr[i]); + + return 0; +} + +asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf) +{ + struct inode * inode; + struct statfs kbuf; + struct file *file; + int error, old_fs, i; + + printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n", + current->comm, current->pid, fd, buf); + + error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)); + if (error) + return error; + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!(inode = file->f_inode)) + return -ENOENT; + if (!inode->i_sb->s_op->statfs) + return -ENOSYS; + + old_fs = get_fs(); set_fs(get_ds()); + inode->i_sb->s_op->statfs(inode->i_sb, &kbuf, sizeof(struct statfs)); + set_fs(old_fs); + + __put_user(kbuf.f_bsize, &buf->f_bsize); + __put_user(kbuf.f_frsize, &buf->f_frsize); + __put_user(kbuf.f_blocks, &buf->f_blocks); + __put_user(kbuf.f_bfree, &buf->f_bfree); + __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ + __put_user(kbuf.f_files, &buf->f_files); + __put_user(kbuf.f_ffree, &buf->f_ffree); + __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ +#ifdef __MIPSEB__ + __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); +#else + __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); +#endif + for(i = 0; i < 16; i++) + __put_user(0, &buf->f_basetype[i]); + __put_user(0, &buf->f_flag); + __put_user(kbuf.f_namelen, &buf->f_namemax); + for(i = 0; i < 32; i++) + __put_user(0, &buf->f_fstr[i]); + + return 0; +} + +asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf) +{ + int errno; + + printk("[%s:%d] irix_getmountid(%s, %p)\n", + current->comm, current->pid, fname, midbuf); + errno = verify_area(VERIFY_WRITE, midbuf, (sizeof(unsigned long) * 4)); + if(errno) + return errno; + + /* + * The idea with this system call is that when trying to determine + * 'pwd' and it's a toss-up for some reason, userland can use the + * fsid of the filesystem to try and make the right decision, but + * we don't have this so for now. XXX + */ + __put_user(0, &midbuf[0]); + __put_user(0, &midbuf[1]); + __put_user(0, &midbuf[2]); + __put_user(0, &midbuf[3]); + + return 0; +} + +asmlinkage int irix_nsproc(unsigned long entry, unsigned long mask, + unsigned long arg, unsigned long sp, int slen) +{ + printk("[%s:%d] Wheee.. 
irix_nsproc(%08lx,%08lx,%08lx,%08lx,%d)\n", + current->comm, current->pid, entry, mask, arg, sp, slen); + return -EINVAL; +} + +#undef DEBUG_GETDENTS + +struct irix_dirent32 { + u32 d_ino; + u32 d_off; + unsigned short d_reclen; + char d_name[1]; +}; + +struct irix_dirent32_callback { + struct irix_dirent32 *current_dir; + struct irix_dirent32 *previous; + int count; + int error; +}; + +#define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de))) +#define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1)) + +static int irix_filldir32(void *__buf, const char *name, int namlen, off_t offset, ino_t ino) +{ + struct irix_dirent32 *dirent; + struct irix_dirent32_callback *buf = (struct irix_dirent32_callback *)__buf; + unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1); + +#ifdef DEBUG_GETDENTS + printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]", + reclen, namlen, buf->count); +#endif + buf->error = -EINVAL; /* only used if we fail.. */ + if (reclen > buf->count) + return -EINVAL; + dirent = buf->previous; + if (dirent) + __put_user(offset, &dirent->d_off); + dirent = buf->current_dir; + buf->previous = dirent; + __put_user(ino, &dirent->d_ino); + __put_user(reclen, &dirent->d_reclen); + copy_to_user(dirent->d_name, name, namlen); + __put_user(0, &dirent->d_name[namlen]); + ((char *) dirent) += reclen; + buf->current_dir = dirent; + buf->count -= reclen; + + return 0; +} + +asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, unsigned int count, int *eob) +{ + struct file *file; + struct irix_dirent32 *lastdirent; + struct irix_dirent32_callback buf; + int error; + +#ifdef DEBUG_GETDENTS + printk("[%s:%d] ngetdents(%d, %p, %d, %p) ", current->comm, + current->pid, fd, dirent, count, eob); +#endif + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!file->f_op || !file->f_op->readdir) + return -ENOTDIR; + if(verify_area(VERIFY_WRITE, dirent, count) || + verify_area(VERIFY_WRITE, eob, sizeof(*eob))) + return -EFAULT; + __put_user(0, eob); + buf.current_dir = (struct irix_dirent32 *) dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + error = file->f_op->readdir(file->f_inode, file, &buf, irix_filldir32); + if (error < 0) + return error; + lastdirent = buf.previous; + if (!lastdirent) + return buf.error; + lastdirent->d_off = (u32) file->f_pos; +#ifdef DEBUG_GETDENTS + printk("eob=%d returning %d\n", *eob, count - buf.count); +#endif + return count - buf.count; +} + +struct irix_dirent64 { + u64 d_ino; + u64 d_off; + unsigned short d_reclen; + char d_name[1]; +}; + +struct irix_dirent64_callback { + struct irix_dirent64 *curr; + struct irix_dirent64 *previous; + int count; + int error; +}; + +#define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de))) +#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1)) + +static int irix_filldir64(void * __buf, const char * name, int namlen, + off_t offset, ino_t ino) +{ + struct irix_dirent64 *dirent; + struct irix_dirent64_callback * buf = + (struct irix_dirent64_callback *) __buf; + unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1); + + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; + dirent = buf->previous; + if (dirent) + __put_user(offset, &dirent->d_off); + dirent = buf->curr; + buf->previous = dirent; + __put_user(ino, &dirent->d_ino); + __put_user(reclen, &dirent->d_reclen); + copy_to_user(dirent->d_name, name, namlen); + __put_user(0, &dirent->d_name[namlen]); + ((char *) dirent) += reclen; + buf->curr = dirent; + buf->count -= reclen; + + return 0; +} + +asmlinkage int irix_getdents64(int fd, void *dirent, int cnt) +{ + struct file *file; + struct irix_dirent64 *lastdirent; + struct irix_dirent64_callback buf; + int error; + +#ifdef DEBUG_GETDENTS + printk("[%s:%d] getdents64(%d, %p, %d) ", current->comm, + current->pid, fd, dirent, cnt); +#endif + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!file->f_op || !file->f_op->readdir) + return -ENOTDIR; + if(verify_area(VERIFY_WRITE, dirent, cnt)) + return -EFAULT; + if(cnt < (sizeof(struct irix_dirent64) + 255)) + return -EINVAL; + + buf.curr = (struct irix_dirent64 *) dirent; + buf.previous = NULL; + buf.count = cnt; + buf.error = 0; + error = file->f_op->readdir(file->f_inode, file, &buf, irix_filldir64); + if (error < 0) + return error; + lastdirent = buf.previous; + if (!lastdirent) + return buf.error; + lastdirent->d_off = (u64) file->f_pos; +#ifdef DEBUG_GETDENTS + printk("returning %d\n", cnt - buf.count); +#endif + return cnt - buf.count; +} + +asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob) +{ + struct file *file; + struct irix_dirent64 *lastdirent; + struct irix_dirent64_callback buf; + int error; + +#ifdef DEBUG_GETDENTS + printk("[%s:%d] ngetdents64(%d, %p, %d) ", current->comm, + current->pid, fd, dirent, cnt); +#endif + if (fd >= NR_OPEN || !(file = current->files->fd[fd])) + return -EBADF; + if (!file->f_op || !file->f_op->readdir) + return -ENOTDIR; + if(verify_area(VERIFY_WRITE, dirent, cnt) || + verify_area(VERIFY_WRITE, eob, sizeof(*eob))) + return -EFAULT; + if(cnt < (sizeof(struct irix_dirent64) + 255)) + return -EINVAL; + + *eob = 0; + buf.curr = (struct irix_dirent64 *) dirent; + buf.previous = NULL; + buf.count = cnt; + buf.error = 0; + error = file->f_op->readdir(file->f_inode, file, &buf, irix_filldir64); + if (error < 0) + return error; + lastdirent = buf.previous; + if (!lastdirent) + return buf.error; + lastdirent->d_off = (u64) file->f_pos; +#ifdef DEBUG_GETDENTS + printk("eob=%d returning %d\n", *eob, cnt - buf.count); +#endif + return cnt - buf.count; +} + +asmlinkage int irix_uadmin(unsigned long op, unsigned long func, unsigned long arg) +{ + switch(op) { + case 1: + /* Reboot */ + printk("[%s:%d] irix_uadmin: Wants to reboot...\n", + current->comm, current->pid); + return -EINVAL; + + case 2: + /* Shutdown */ + printk("[%s:%d] irix_uadmin: Wants to shutdown...\n", + current->comm, current->pid); + return -EINVAL; + + case 4: + /* Remount-root */ + printk("[%s:%d] irix_uadmin: Wants to remount root...\n", + current->comm, current->pid); + return -EINVAL; + + case 8: + /* Kill all tasks. */ + printk("[%s:%d] irix_uadmin: Wants to kill all tasks...\n", + current->comm, current->pid); + return -EINVAL; + + case 256: + /* Set magic mushrooms... 
*/ + printk("[%s:%d] irix_uadmin: Wants to set magic mushroom[%d]...\n", + current->comm, current->pid, (int) func); + return -EINVAL; + + default: + printk("[%s:%d] irix_uadmin: Unknown operation [%d]...\n", + current->comm, current->pid, (int) op); + return -EINVAL; + }; +} + +asmlinkage int irix_utssys(char *inbuf, int arg, int type, char *outbuf) +{ + switch(type) { + case 0: + /* uname() */ + return irix_uname((struct iuname *)inbuf); + + case 2: + /* ustat() */ + printk("[%s:%d] irix_utssys: Wants to do ustat()\n", + current->comm, current->pid); + return -EINVAL; + + case 3: + /* fusers() */ + printk("[%s:%d] irix_utssys: Wants to do fusers()\n", + current->comm, current->pid); + return -EINVAL; + + default: + printk("[%s:%d] irix_utssys: Wants to do unknown type[%d]\n", + current->comm, current->pid, (int) type); + return -EINVAL; + } +} + +#undef DEBUG_FCNTL + +extern asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, + unsigned long arg); + +asmlinkage int irix_fcntl(int fd, int cmd, int arg) +{ + int retval; + +#ifdef DEBUG_FCNTL + printk("[%s:%d] irix_fcntl(%d, %d, %d) ", current->comm, + current->pid, fd, cmd, arg); +#endif + + retval = sys_fcntl(fd, cmd, arg); +#ifdef DEBUG_FCNTL + printk("%d\n", retval); +#endif + return retval; +} + +asmlinkage int irix_ulimit(int cmd, int arg) +{ + switch(cmd) { + case 1: + printk("[%s:%d] irix_ulimit: Wants to get file size limit.\n", + current->comm, current->pid); + return -EINVAL; + + case 2: + printk("[%s:%d] irix_ulimit: Wants to set file size limit.\n", + current->comm, current->pid); + return -EINVAL; + + case 3: + printk("[%s:%d] irix_ulimit: Wants to get brk limit.\n", + current->comm, current->pid); + return -EINVAL; + + case 4: +#if 0 + printk("[%s:%d] irix_ulimit: Wants to get fd limit.\n", + current->comm, current->pid); + return -EINVAL; +#endif + return current->rlim[RLIMIT_NOFILE].rlim_cur; + + case 5: + printk("[%s:%d] irix_ulimit: Wants to get txt offset.\n", + current->comm, current->pid); + return -EINVAL; + + default: + printk("[%s:%d] irix_ulimit: Unknown command [%d].\n", + current->comm, current->pid, cmd); + return -EINVAL; + } +} + +asmlinkage int irix_unimp(struct pt_regs *regs) +{ + printk("irix_unimp [%s:%d] v0=%d v1=%d a0=%08lx a1=%08lx a2=%08lx " + "a3=%08lx\n", current->comm, current->pid, + (int) regs->regs[2], (int) regs->regs[3], + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + + return -ENOSYS; +} diff --git a/arch/mips/kernel/sysmips.c b/arch/mips/kernel/sysmips.c index d15aaa4dd..4aff0bc61 100644 --- a/arch/mips/kernel/sysmips.c +++ b/arch/mips/kernel/sysmips.c @@ -15,10 +15,9 @@ #include <linux/utsname.h> #include <asm/cachectl.h> -#include <asm/cache.h> -#include <asm/ipc.h> -#include <asm/uaccess.h> +#include <asm/pgtable.h> #include <asm/sysmips.h> +#include <asm/uaccess.h> static inline size_t strnlen_user(const char *s, size_t count) @@ -26,12 +25,35 @@ strnlen_user(const char *s, size_t count) return strnlen(s, count); } +/* + * How long a hostname can we get from user space? 
+ * -EFAULT if invalid area or too long + * 0 if ok + * >0 EFAULT after xx bytes + */ +static inline int +get_max_hostname(unsigned long address) +{ + struct vm_area_struct * vma; + + vma = find_vma(current->mm, address); + if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ)) + return -EFAULT; + address = vma->vm_end - address; + if (address > PAGE_SIZE) + return 0; + if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end && + (vma->vm_next->vm_flags & VM_READ)) + return 0; + return address; +} + asmlinkage int sys_sysmips(int cmd, int arg1, int arg2, int arg3) { int *p; char *name; - int flags, tmp, len, retval; + int flags, tmp, len, retval = -EINVAL; switch(cmd) { @@ -39,70 +61,39 @@ sys_sysmips(int cmd, int arg1, int arg2, int arg3) if (!suser()) return -EPERM; name = (char *) arg1; + len = get_max_hostname((unsigned long)name); + if (retval < 0) + return len; len = strnlen_user(name, retval); - if (len < 0) - retval = len; - break; if (len == 0 || len > __NEW_UTS_LEN) - retval = -EINVAL; - break; + return -EINVAL; copy_from_user(system_utsname.nodename, name, len); system_utsname.nodename[len] = '\0'; return 0; - case MIPS_ATOMIC_SET: - /* This is broken in case of page faults and SMP ... - Risc/OS fauls after maximum 20 tries with EAGAIN. */ p = (int *) arg1; retval = verify_area(VERIFY_WRITE, p, sizeof(*p)); - if (retval) - return retval; + if(retval) + return -EINVAL; save_flags(flags); cli(); retval = *p; *p = arg2; restore_flags(flags); - break; - + return retval; case MIPS_FIXADE: tmp = current->tss.mflags & ~3; current->tss.mflags = tmp | (arg1 & 3); retval = 0; break; - case FLUSH_CACHE: - cacheflush(0, ~0, CF_BCACHE|CF_ALL); - break; - - case MIPS_RDNVRAM: - retval = -EIO; - break; - - default: - retval = -EINVAL; + flush_cache_all(); break; } return retval; } -asmlinkage int -sys_cacheflush(void *addr, int nbytes, int cache) -{ - unsigned int rw; - int ok; - - if ((cache & ~(DCACHE | ICACHE)) != 0) - return -EINVAL; - rw = (cache & DCACHE) ? VERIFY_WRITE : VERIFY_READ; - if (!access_ok(rw, addr, nbytes)) - return -EFAULT; - - cacheflush((unsigned long)addr, (unsigned long)nbytes, cache|CF_ALL); - - return 0; -} - /* * No implemented yet ... */ @@ -111,3 +102,36 @@ sys_cachectl(char *addr, int nbytes, int op) { return -ENOSYS; } + +/* For emulation of various binary types, and their shared libs, + * we need this. + */ + +extern int do_open_namei(const char *pathname, int flag, int mode, + struct inode **res_inode, struct inode *base); + +/* Only one at this time. 
*/ +#define IRIX32_EMUL "/usr/gnemul/irix" + +int open_namei(const char *pathname, int flag, int mode, + struct inode **res_inode, struct inode *base) +{ + if(!base && (current->personality == PER_IRIX32) && + *pathname == '/') { + struct inode *emul_ino; + const char *p = pathname; + char *emul_path = IRIX32_EMUL; + int v; + + while (*p == '/') + p++; + + if(do_open_namei (emul_path, flag, mode, &emul_ino, NULL) >= 0 && + emul_ino) { + v = do_open_namei (p, flag, mode, res_inode, emul_ino); + if(v >= 0) + return v; + } + } + return do_open_namei (pathname, flag, mode, res_inode, base); +} diff --git a/arch/mips/kernel/tags.c b/arch/mips/kernel/tags.c deleted file mode 100644 index 4bf480c54..000000000 --- a/arch/mips/kernel/tags.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * linux/arch/mips/kernel/tags.c - * - * Copyright (C) 1996 Stoned Elipot - */ -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <asm/bootinfo.h> - -/* - * Parse the tags present in upper memory to find out - * a pecular one. - * - * Parameter: type - tag type to find - * - * returns : NULL - failure - * !NULL - pointer on the tag structure found - */ -tag * -bi_TagFind(enum bi_tag type) -{ - tag* t = (tag*)(mips_memory_upper - sizeof(tag)); - - while((t->tag != tag_dummy) && (t->tag != type)) - t = (tag*)(NEXTTAGPTR(t)); - - if (t->tag == tag_dummy) /* tag not found */ - return (tag*)NULL; - - return t; -} - -/* - * Snarf from the tag list in memory end some tags needed - * before the kernel reachs setup_arch() - * - * add yours here if you want to, but *beware*: the kernel var - * that will hold the values you want to snarf have to be - * in .data section of the kernel, so initialized in to whatever - * value in the kernel's sources. - */ -void bi_EarlySnarf(void) -{ - tag* atag; - - /* for wire_mappings() */ - atag = bi_TagFind(tag_machgroup); - if (atag) - memcpy(&mips_machgroup, TAGVALPTR(atag), atag->size); - else - /* useless for boxes without text video mode but....*/ - panic("machine group not specified by bootloader"); - - atag = bi_TagFind(tag_machtype); - if (atag) - memcpy(&mips_machtype, TAGVALPTR(atag), atag->size); - else - /* useless for boxes without text video mode but....*/ - panic("machine type not specified by bootloader"); - - /* for tlbflush() */ - atag = bi_TagFind(tag_tlb_entries); - if (atag) - memcpy(&mips_tlb_entries, TAGVALPTR(atag), atag->size); - else - /* useless for boxes without text video mode but....*/ - panic("number of TLB entries not specified by bootloader"); - return; -} diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0fb16f1a5..20cd6fe06 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -4,22 +4,35 @@ * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. + */ + +/* + * 'traps.c' handles hardware traps and faults after we have saved some + * state in 'asm.s'. Currently mostly a debugging-aid, will be extended + * to mainly kill the offending process (probably by giving it a signal, + * but possibly by killing it outright if necessary). + * + * FIXME: This is the place for a fpu emulator. * - * Copyright (C) 1994, 1995, 1996 by Ralf Baechle - * Copyright (C) 1994, 1995, 1996 by Paul M. Antoine + * Modified for R3000 by Paul M. 
Antoine, 1995, 1996 */ +#include <linux/config.h> #include <linux/mm.h> #include <asm/branch.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/jazz.h> #include <asm/vector.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/bootinfo.h> -#include <asm/sgidefs.h> -#include <asm/uaccess.h> #include <asm/watch.h> +#include <asm/system.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_SGI +#include <asm/sgialib.h> +#endif #undef CONF_DEBUG_EXCEPTIONS @@ -38,9 +51,12 @@ extern asmlinkage void deskstation_rpc44_handle_int(void); extern asmlinkage void deskstation_tyne_handle_int(void); extern asmlinkage void mips_magnum_4000_handle_int(void); -extern asmlinkage void handle_mod(void); -extern asmlinkage void handle_tlbl(void); -extern asmlinkage void handle_tlbs(void); +extern asmlinkage void r4k_handle_mod(void); +extern asmlinkage void r2300_handle_mod(void); +extern asmlinkage void r4k_handle_tlbl(void); +extern asmlinkage void r2300_handle_tlbl(void); +extern asmlinkage void r4k_handle_tlbs(void); +extern asmlinkage void r2300_handle_tlbs(void); extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); extern asmlinkage void handle_ibe(void); @@ -51,11 +67,15 @@ extern asmlinkage void handle_ri(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); +extern asmlinkage void handle_vcei(void); extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_vced(void); extern asmlinkage void handle_watch(void); +extern asmlinkage void handle_reserved(void); -char *cpu_names[] = CPU_NAMES; +static char *cpu_names[] = CPU_NAMES; +unsigned long page_colour_mask; unsigned int watch_available = 0; void (*ibe_board_handler)(struct pt_regs *regs); @@ -82,6 +102,7 @@ void die_if_kernel(char * str, struct pt_regs * regs, long err) /* * Just return if in user mode. + * XXX */ #if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) if (!((regs)->cp0_status & 0x4)) @@ -208,25 +229,26 @@ void do_fpe(struct pt_regs *regs, unsigned int fcr31) static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) { - unsigned int *addr; + unsigned int *epc; - addr = (unsigned int *) (unsigned long) regs->cp0_epc; + epc = (unsigned int *) (unsigned long) regs->cp0_epc; if (regs->cp0_cause & CAUSEF_BD) - addr += 4; + epc += 4; - if (get_user(*opcode, addr)) { + if (verify_area(VERIFY_READ, epc, 4)) { force_sig(SIGSEGV, current); - return -EFAULT; + return 1; } + *opcode = *epc; return 0; } -static __inline__ void +static inline void do_bp_and_tr(struct pt_regs *regs, char *exc, unsigned int trapcode) { /* - * (A quick test says that IRIX 5.3 sends SIGTRAP for all break + * (A short test says that IRIX 5.3 sends SIGTRAP for all break * insns, even for break codes that indicate arithmetic failures. * Wiered ...) */ @@ -234,8 +256,6 @@ do_bp_and_tr(struct pt_regs *regs, char *exc, unsigned int trapcode) #ifdef CONF_DEBUG_EXCEPTIONS show_regs(regs); #endif - if (compute_return_epc(regs)) - return; } void do_bp(struct pt_regs *regs) @@ -247,11 +267,17 @@ void do_bp(struct pt_regs *regs) * code starts left to bit 16 instead to bit 6 in the opcode. * Gas is bug-compatible ... 
*/ +#ifdef CONF_DEBUG_EXCEPTIONS + printk("BREAKPOINT at %08lx\n", regs->cp0_epc); +#endif if (get_insn_opcode(regs, &opcode)) return; bcode = ((opcode >> 16) & ((1 << 20) - 1)); do_bp_and_tr(regs, "bp", bcode); + + if (compute_return_epc(regs)) + return; } void do_tr(struct pt_regs *regs) @@ -265,16 +291,13 @@ void do_tr(struct pt_regs *regs) do_bp_and_tr(regs, "tr", bcode); } -/* - * TODO: add emulation of higher ISAs' instruction. In particular - * interest in MUL, MAD MADU has been expressed such that R4640/R4650 - * code can be run on other MIPS CPUs. - */ void do_ri(struct pt_regs *regs) { #ifdef CONF_DEBUG_EXCEPTIONS show_regs(regs); #endif + printk("[%s:%d] Illegal instruction at %08lx ra=%08lx\n", + current->comm, current->pid, regs->cp0_epc, regs->regs[31]); if (compute_return_epc(regs)) return; force_sig(SIGILL, current); @@ -285,11 +308,11 @@ void do_cpu(struct pt_regs *regs) unsigned int cpid; cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; - if (cpid == 1) { + if (cpid == 1) + { regs->cp0_status |= ST0_CU1; return; } - force_sig(SIGILL, current); } @@ -349,8 +372,49 @@ static void watch_init(unsigned long cputype) } } +typedef asmlinkage int (*syscall_t)(void *a0,...); +asmlinkage int (*do_syscalls)(struct pt_regs *regs, syscall_t fun, int narg); +extern asmlinkage int r4k_do_syscalls(struct pt_regs *regs, + syscall_t fun, int narg); +extern asmlinkage int r2300_do_syscalls(struct pt_regs *regs, + syscall_t fun, int narg); + +asmlinkage void (*save_fp_context)(struct sigcontext *sc); +extern asmlinkage void r4k_save_fp_context(struct sigcontext *sc); +extern asmlinkage void r2300_save_fp_context(struct sigcontext *sc); +extern asmlinkage void r6000_save_fp_context(struct sigcontext *sc); + +asmlinkage void (*restore_fp_context)(struct sigcontext *sc); +extern asmlinkage void r4k_restore_fp_context(struct sigcontext *sc); +extern asmlinkage void r2300_restore_fp_context(struct sigcontext *sc); +extern asmlinkage void r6000_restore_fp_context(struct sigcontext *sc); + +extern asmlinkage void r4xx0_resume(void *tsk); +extern asmlinkage void r2300_resume(void *tsk); + void trap_init(void) { + extern char except_vec0_r4000, except_vec0_r4600, except_vec0_r2300; + extern char except_vec1_generic, except_vec2_generic; + extern char except_vec3_generic, except_vec3_r4000; + unsigned long i; + + if(mips_machtype == MACH_MIPS_MAGNUM_4000 || + mips_machtype == MACH_DESKSTATION_RPC44 || + mips_machtype == MACH_SNI_RM200_PCI) + EISA_bus = 1; + + /* Copy the generic exception handler code to it's final destination. */ + memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80); + memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80); + memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80); + + /* + * Setup default vectors + */ + for(i = 0; i <= 31; i++) + set_except_vector(i, handle_reserved); + /* * Only some CPUs have the watch exception. */ @@ -368,21 +432,55 @@ void trap_init(void) write_32bit_cp0_register(CP0_FRAMEMASK, 0); set_cp0_status(ST0_XX, ST0_XX); /* + * Actually this mask stands for only 16k cache. This is + * correct since the R10000 has multiple ways in it's cache. 
+ */ + page_colour_mask = 0x3000; + /* * The R10k might even work for Linux/MIPS - but we're paranoid * and refuse to run until this is tested on real silicon */ panic("CPU too expensive - making holiday in the ANDES!"); break; + case CPU_R4000MC: + case CPU_R4400MC: + case CPU_R4000SC: + case CPU_R4400SC: + /* XXX The following won't work because we _cannot_ + * XXX perform any load/store before the VCE handler. + */ + set_except_vector(14, handle_vcei); + set_except_vector(31, handle_vced); + case CPU_R4000PC: + case CPU_R4400PC: + case CPU_R4200: + /* case CPU_R4300: */ + /* case CPU_R4640: */ + case CPU_R4600: + case CPU_R5000: + if(mips_cputype != CPU_R4600) + memcpy((void *)KSEG0, &except_vec0_r4000, 0x80); + else + memcpy((void *)KSEG0, &except_vec0_r4600, 0x80); - case CPU_R4000MC: case CPU_R4400MC: case CPU_R4000SC: - case CPU_R4400SC: case CPU_R4000PC: case CPU_R4400PC: - case CPU_R4200: /*case CPU_R4300: case CPU_R4640: */ - case CPU_R4600: case CPU_R4700: - set_except_vector(1, handle_mod); - set_except_vector(2, handle_tlbl); - set_except_vector(3, handle_tlbs); + /* + * The idea is that this special r4000 general exception + * vector will check for VCE exceptions before calling + * out of the exception array. XXX TODO + */ + memcpy((void *)(KSEG0 + 0x100), (void *) KSEG0, 0x80); + memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x80); + + do_syscalls = r4k_do_syscalls; + save_fp_context = r4k_save_fp_context; + restore_fp_context = r4k_restore_fp_context; + resume = r4xx0_resume; + set_except_vector(1, r4k_handle_mod); + set_except_vector(2, r4k_handle_tlbl); + set_except_vector(3, r4k_handle_tlbs); set_except_vector(4, handle_adel); set_except_vector(5, handle_ades); + /* * The following two are signaled by onboard hardware and * should get board specific handlers to get maximum @@ -398,11 +496,22 @@ void trap_init(void) set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); set_except_vector(15, handle_fpe); - break; - case CPU_R6000: case CPU_R6000A: + /* + * Compute mask for page_colour(). This is based on the + * size of the data cache. + */ + i = read_32bit_cp0_register(CP0_CONFIG); + i = (i >> 26) & 7; + page_colour_mask = 1 << (12 + i); + break; + case CPU_R6000: + case CPU_R6000A: + save_fp_context = r6000_save_fp_context; + restore_fp_context = r6000_restore_fp_context; #if 0 - /* The R6000 is the only R-series CPU that features a machine + /* + * The R6000 is the only R-series CPU that features a machine * check exception (similar to the R4000 cache error) and * unaligned ldc1/sdc1 exception. The handlers have not been * written yet. Well, anyway there is no R6000 machine on the @@ -411,22 +520,20 @@ void trap_init(void) set_except_vector(14, handle_mc); set_except_vector(15, handle_ndc); #endif - case CPU_R2000: case CPU_R3000: case CPU_R3000A: case CPU_R3041: - case CPU_R3051: case CPU_R3052: case CPU_R3081: case CPU_R3081E: - /* - * Clear BEV, we are ready to handle exceptions using - * the in-RAM dispatchers. This will not be useful on all - * machines, but it can't hurt (the worst that can happen is - * that BEV is already 0). 
- */ - set_cp0_status(ST0_BEV,0); - + case CPU_R2000: + case CPU_R3000: + case CPU_R3000A: /* * Actually don't know about these, but let's guess - PMA */ - set_except_vector(1, handle_mod); - set_except_vector(2, handle_tlbl); - set_except_vector(3, handle_tlbs); + memcpy((void *)KSEG0, &except_vec0_r2300, 0x80); + do_syscalls = r2300_do_syscalls; + save_fp_context = r2300_save_fp_context; + restore_fp_context = r2300_restore_fp_context; + resume = r2300_resume; + set_except_vector(1, r2300_handle_mod); + set_except_vector(2, r2300_handle_tlbl); + set_except_vector(3, r2300_handle_tlbs); set_except_vector(4, handle_adel); set_except_vector(5, handle_ades); /* @@ -446,9 +553,27 @@ void trap_init(void) set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); set_except_vector(15, handle_fpe); - break; - case CPU_R8000: case CPU_R5000: + /* + * Compute mask for page_colour(). This is based on the + * size of the data cache. Does the size of the icache + * need to be accounted for? + * + * FIXME: is any of this necessary for the R3000, which + * doesn't have a config register? + * (No, the R2000, R3000 family has a physical indexed + * cache and doesn't need this braindamage.) + i = read_32bit_cp0_register(CP0_CONFIG); + i = (i >> 26) & 7; + page_colour_mask = 1 << (12 + i); + */ + break; + case CPU_R3041: + case CPU_R3051: + case CPU_R3052: + case CPU_R3081: + case CPU_R3081E: + case CPU_R8000: printk("Detected unsupported CPU type %s.\n", cpu_names[mips_cputype]); panic("Can't handle CPU"); @@ -456,6 +581,7 @@ void trap_init(void) case CPU_UNKNOWN: default: - panic("Unsupported CPU type"); + panic("Unknown CPU type"); } + flush_cache_all(); } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 8bf2ad9b7..ba3a612fa 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -96,6 +96,8 @@ #define STR(x) __STR(x) #define __STR(x) #x +typedef unsigned long register_t; + /* * User code may only access USEG; kernel code may access the * entire address space. 
@@ -104,11 +106,11 @@ if ((long)(~(pc) & ((a) | ((a)+(s)))) < 0) \ goto sigbus; -static __inline__ void +static inline void emulate_load_store_insn(struct pt_regs *regs, unsigned long addr, unsigned long pc) { union mips_instruction insn; - __register_t value; + register_t value; regs->regs[0] = 0; /* @@ -369,7 +371,7 @@ sigbus: unsigned long unaligned_instructions; -static __inline__ void +static inline void fix_ade(struct pt_regs *regs, unsigned long pc) { /* @@ -400,8 +402,8 @@ fix_ade(struct pt_regs *regs, unsigned long pc) asmlinkage void do_ade(struct pt_regs *regs) { - __register_t pc = regs->cp0_epc; - __register_t badvaddr __attribute__ ((unused)) = regs->cp0_badvaddr; + register_t pc = regs->cp0_epc; + register_t badvaddr __attribute__ ((unused)) = regs->cp0_badvaddr; char *adels; adels = (((regs->cp0_cause & CAUSEF_EXCCODE) >> @@ -426,7 +428,7 @@ do_ade(struct pt_regs *regs) #ifdef CONF_LOG_UNALIGNED_ACCESSES if (current->tss.mflags & MF_LOGADE) { - __register_t logpc = pc; + register_t logpc = pc; if (regs->cp0_cause & CAUSEF_BD) logpc += 4; #ifdef __mips64 diff --git a/arch/mips/kernel/vm86.c b/arch/mips/kernel/vm86.c index 53627201a..c0c775fba 100644 --- a/arch/mips/kernel/vm86.c +++ b/arch/mips/kernel/vm86.c @@ -1,15 +1,13 @@ /* - * arch/mips/kernel/vm86.c + * arch/mips/vm86.c * - * Copyright (C) 1994, 1996 Waldorf GMBH, + * Copyright (C) 1994 Waldorf GMBH, * written by Ralf Baechle */ #include <linux/linkage.h> #include <linux/errno.h> -struct vm86_struct; - -asmlinkage int sys_vm86(struct vm86_struct * v86) +asmlinkage int sys_vm86(void *v86) { return -ENOSYS; } diff --git a/arch/mips/ld.script.big b/arch/mips/ld.script.big new file mode 100644 index 000000000..88da74972 --- /dev/null +++ b/arch/mips/ld.script.big @@ -0,0 +1,106 @@ +OUTPUT_FORMAT("elf32-bigmips") +OUTPUT_ARCH(mips) +ENTRY(kernel_entry) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . = 0x80000000; + .rel.text : { *(.rel.text) } + .rela.text : { *(.rela.text) } + .rel.data : { *(.rel.data) } + .rela.data : { *(.rela.data) } + .rel.rodata : { *(.rel.rodata) } + .rela.rodata : { *(.rela.rodata) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.bss : { *(.rel.bss) } + .rela.bss : { *(.rela.bss) } + .rel.plt : { *(.rel.plt) } + .rela.plt : { *(.rela.plt) } + .init : { *(.init) } =0 + .text : + { + _ftext = . ; + *(.text) + *(.rodata) + *(.rodata1) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } =0 + _etext = .; + PROVIDE (etext = .); + .fini : { *(.fini) } =0 + .reginfo : { *(.reginfo) } + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. It would + be more correct to do this: + . = .; + The current expression does not correctly handle the case of a + text segment ending precisely at the end of a page; it causes the + data segment to skip a page. The above expression does not have + this problem, but it will currently (2/95) cause BFD to allocate + a single segment, combining both text and data, for this case. 
+ This will prevent the text segment from being shared among + multiple executions of the program; I think that is more + important than losing a page of the virtual address space (note + that no actual memory is lost; the page which is skipped can not + be referenced). */ + . = .; + .data : + { + _fdata = . ; + *(.data) + CONSTRUCTORS + } + .data1 : { *(.data1) } + _gp = . + 0x8000; + .lit8 : { *(.lit8) } + .lit4 : { *(.lit4) } + .ctors : { *(.ctors) } + .dtors : { *(.dtors) } + .got : { *(.got.plt) *(.got) } + .dynamic : { *(.dynamic) } + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + .sdata : { *(.sdata) } + _edata = .; + PROVIDE (edata = .); + __bss_start = .; + _fbss = .; + .bss : + { + *(.dynbss) + *(.bss) + *(COMMON) + _end = . ; + PROVIDE (end = .); + *(.sbss) + *(.scommon) + } + /* These are needed for ELF backends which have not yet been + converted to the new style linker. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + /* DWARF debug sections. + Symbols in the .debug DWARF section are relative to the beginning of the + section so we begin .debug at 0. It's not clear yet what needs to happen + for the others. */ + .debug 0 : { *(.debug) } + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + .debug_sfnames 0 : { *(.debug_sfnames) } + .line 0 : { *(.line) } + /* These must appear regardless of . */ + .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } + .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } +} diff --git a/arch/mips/ld.script b/arch/mips/ld.script.little index debe96282..26464d9f7 100644 --- a/arch/mips/ld.script +++ b/arch/mips/ld.script.little @@ -1,5 +1,4 @@ OUTPUT_FORMAT("elf32-littlemips") -/* OUTPUT_FORMAT("a.out-mips-little-linux") */ OUTPUT_ARCH(mips) ENTRY(kernel_entry) SECTIONS @@ -82,11 +81,11 @@ SECTIONS *(.dynbss) *(.bss) *(COMMON) + _end = . ; + PROVIDE (end = .); *(.sbss) *(.scommon) } - _end = . ; - PROVIDE (end = .); /* These are needed for ELF backends which have not yet been converted to the new style linker. */ .stab 0 : { *(.stab) } diff --git a/arch/mips/lib/.cvsignore b/arch/mips/lib/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/lib/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index ac3cf45ad..0b2eec6d9 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -1,8 +1,8 @@ # # Makefile for MIPS-specific library files.. # -# Some of these routines are just left over debugging trash of ancient -# times when I just could make my Tyne beep and so ... +# Many of these routines are just left over debugging trash of ancient +# times when I just make my Tyne beep and so ... # # ...and for when I need to get the DECStation to use the boot prom to # do things... Paul M. Antoine. 
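The new ld.script.big above, like the renamed ld.script.little, exports the traditional layout symbols (_ftext, etext, edata, end and friends) through PROVIDE so their addresses can be taken from C. The sketch below is illustrative only and is not taken from the patch; it just shows the conventional way such linker-defined symbols are referenced, and since ordinary userland linkers also provide etext, edata and end it can be built and run stand-alone.

/*
 * Illustrative sketch only -- not part of the patch.  The linker scripts
 * above PROVIDE the classic etext/edata/end symbols; C code reaches them
 * by taking the address of matching extern declarations.
 */
#include <stdio.h>

extern char etext, edata, end;  /* addresses supplied by the linker */

int main(void)
{
        printf("etext = %p\n", (void *) &etext);
        printf("edata = %p\n", (void *) &edata);
        printf("end   = %p\n", (void *) &end);
        return 0;
}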
@@ -14,9 +14,10 @@ $(CC) $(CFLAGS) -c $< -o $*.o L_TARGET = lib.a -L_OBJS = beep.o bitags.o checksum.o csum.o dump_tlb.o io.o memmove.o \ - strncpy_user.o strlen_user.o watch.o -ifdef CONFIG_MIPS_DECSTATION +L_OBJS = beep.o byteorder.o checksum.o copy_user.o csum.o dump_tlb.o io.o \ + memset.o memcpy.o strlen_user.o strncpy_user.o tags.o watch.o + +ifdef CONFIG_DECSTATION L_OBJS += pmaxcon.o pmaxio.o else L_OBJS += tinycon.o diff --git a/arch/mips/lib/bcopy.c b/arch/mips/lib/bcopy.c deleted file mode 100644 index 4afd557bf..000000000 --- a/arch/mips/lib/bcopy.c +++ /dev/null @@ -1,20 +0,0 @@ -/* - * arch/mips/lib/bcopy.c - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 1994, 1995, 1996 by Ralf Baechle - * - * bcopy() only exists here such that it doesn't get compiled into - * lib/strings.o. Though it's more efficient ... - */ -#include <linux/string.h> - -char * bcopy(const char *src, char *dest, size_t count) -{ - __memcpy(dest, src, count); - - return dest; -} diff --git a/arch/mips/lib/bitags.c b/arch/mips/lib/bitags.c deleted file mode 100644 index 4427c4195..000000000 --- a/arch/mips/lib/bitags.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * milo/bitags.c -- handles the tags passed to the kernel - * - * Copyright (C) 1995 by Stoned Elipot <Stoned.Elipot@fnet.fr> - * written by Stoned Elipot from an original idea of - * Ralf Baechle <ralf@waldorf-gmbh.de> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive for more - */ -#include <stdio.h> -#include <asm/bootinfo.h> - -static unsigned int debuglevel = 0; - -extern unsigned long mach_mem_upper; /* from milo.c */ -extern unsigned long mach_mem_lower; /* from milo.c */ - -static unsigned long next_tag = (unsigned long)NULL; - -/* - * Create a tag - * - * Parameters: tag - tag type to create - * size - tag's data size - * tagdata - pointer on tag's data - * - * returns : 0 - success - * 1 - failure - */ -int -bi_TagAdd(enum bi_tag type, unsigned long size, void *data) -{ - tag t; - unsigned long addr; - - t.tag = type; - t.size = size; - if (next_tag == (unsigned long)NULL) /* HuHo... first tag to create */ - { - if (mach_mem_upper != (unsigned long)NULL) /* RAM detection code had run */ - { - next_tag = mach_mem_upper; - } - else - /* RAM dectection code had not run, let's hope the - * tag we are creating is a memupper one, else fail - * ...miserably, hopelessly, lonely - */ - { - if (type != tag_memupper) - { - return 1; - } - else - { - /* - * saved, it's a memupper tag: put it's value in - * mach_mem_upper so launch() can pass it to the kernel - * in a0 and well we're going to create a tag anyway... 
- */ - next_tag = *(unsigned long*)data; - memcpy((void*)&mach_mem_upper, data, size); - } - } - } - addr = next_tag - (sizeof(tag)); - if (debuglevel >=2) - { - printk("bi_TagAdd: adding tag struct at %08x, tag: %d, size: %08x\r\n", addr, t.tag, t.size); - } - memcpy((void*)addr, (void*)&t, (size_t)(sizeof(tag))); - if (size != 0) - { - addr = addr - size; - if (debuglevel >=2) - { - printk("bi_TagAdd: adding tag value at %08x\r\n", addr); - } - memcpy((void*)addr, data, (size_t)(t.size)); - } - next_tag = addr; - return 0; -} - -/* - * Create tags from a "null-terminated" array of tag - * (tag type of the tag_def struct in array must be 'dummy') - * - * Parameter: taglist - tag array pointer - * - * returns : 0 - success - * 1 - failure - */ -int -bi_TagAddList(tag_def* taglist) -{ - int ret_val = 0; - for(; (taglist->t.tag != tag_dummy) && (!ret_val); taglist++) - { - /* - * we assume this tag is present in the default taglist - * for the machine we're running on - */ - if (taglist->t.tag == tag_memlower) - { - mach_mem_lower = (unsigned long)(*((unsigned long*)taglist->d)); -/* ajouter detection de memupper pour simplifier le code de bi_TagAdd: soit mach_mem_upper - a ete initialise par <machine_ident>() soit est initialise par le pre;ier tag de la list - par default pour la machine */ - } - ret_val = bi_TagAdd(taglist->t.tag, taglist->t.size, taglist->d); - } - return ret_val; -} - -/* - * Parse the tags present in upper memory to find out - * a pecular one. - * - * Parameter: type - tag type to find - * - * returns : NULL - failure - * !NULL - pointer on the tag structure found - * - * Note: Just a 'prototype', the kernel's one is in - * arch/mips/kernel/setup.c - */ -/* tag* */ -/* bi_TagFind(enum bi_tag type) */ -/* { */ -/* tag* t; */ -/* t = (tag*)(mach_mem_upper - sizeof(tag)); */ -/* while((t->tag != tag_dummy) && (t->tag != type)) */ -/* t = (tag*)(NEXTTAGPTR(t)); */ -/* if (t->tag == tag_dummy) */ -/* { */ -/* return (tag*)NULL; */ -/* } */ -/* return t; */ -/* } */ - - -/* - * Make a listing of tags available in memory: debug helper. - */ -/* void */ -/* bi_TagWalk(void) */ -/* { */ -/* tag* t; */ -/* int i=0; */ -/* t = (tag*)(mach_mem_upper - sizeof(tag)); */ -/* while(t->tag != tag_dummy) */ -/* { */ -/* printk("tag #02%dm addr: %08x, type %d, size %u\n\r", i, (void*)t, t->tag, t->size); */ -/* t = (tag*)(NEXTTAGPTR(t)); */ -/* i++; */ -/* } */ -/* return; */ -/* } */ - diff --git a/arch/mips/lib/byteorder.c b/arch/mips/lib/byteorder.c new file mode 100644 index 000000000..188ed4b44 --- /dev/null +++ b/arch/mips/lib/byteorder.c @@ -0,0 +1,13 @@ + +/* + * Library versions of the ntoh and hton functions. These are needed + * so that the address of the functions can be used. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1996 by Ralf Baechle + */ +#define __EXTERN_INLINE +#include <asm/byteorder.h> diff --git a/arch/mips/lib/checksum.c b/arch/mips/lib/checksum.c index 0e04ac5e8..dd1583892 100644 --- a/arch/mips/lib/checksum.c +++ b/arch/mips/lib/checksum.c @@ -17,124 +17,76 @@ #include <net/checksum.h> #include <asm/string.h> +static inline unsigned short from32to16(unsigned long x) +{ + /* 32 bits --> 16 bits + carry */ + x = (x & 0xffff) + (x >> 16); + /* 16 bits + carry --> 16 bits including carry */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline unsigned long do_csum(const unsigned char * buff, int len) +{ + int odd, count; + unsigned long result = 0; + + if (len <= 0) + goto out; + odd = 1 & (unsigned long) buff; + if (odd) { + result = *buff; + len--; + buff++; + } + count = len >> 1; /* nr of 16-bit words.. */ + if (count) { + if (2 & (unsigned long) buff) { + result += *(unsigned short *) buff; + count--; + len -= 2; + buff += 2; + } + count >>= 1; /* nr of 32-bit words.. */ + if (count) { + unsigned long carry = 0; + do { + unsigned long w = *(unsigned long *) buff; + count--; + buff += 4; + result += carry; + result += w; + carry = (w > result); + } while (count); + result += carry; + result = (result & 0xffff) + (result >> 16); + } + if (len & 2) { + result += *(unsigned short *) buff; + buff += 2; + } + } + if (len & 1) + result += (*buff << 8); + result = from32to16(result); + if (odd) + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); +out: + return result; +} + /* * computes a partial checksum, e.g. for TCP/UDP fragments */ unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) { - unsigned long scratch1; - unsigned long scratch2; - - /* - * This is for 32-bit MIPS processors. - */ - __asm__(" - .set noreorder - .set noat - andi $1,%4,2 # Check alignment - beqz $1,2f # Branch if ok - nop # delay slot - subu $1,%3,2 # Alignment uses up two bytes - bgez $1,1f # Jump if we had at least two bytes - move %3,$1 # delay slot - j 4f - addiu %3,2 # delay slot; len was < 2. Deal with it - -1: lhu %2,(%4) - addiu %4,2 - addu %0,%2 - sltu $1,%0,%2 - addu %0,$1 - -2: srl %1,%3,5 - beqz %1,2f - sll %1,%1,5 # delay slot - - addu %1,%4 -1: lw %2,0(%4) - addu %4,32 - addu %0,%2 - sltu $1,%0,%2 + unsigned long result = do_csum(buff, len); - lw %2,-28(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-24(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-20(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-16(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-12(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-8(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - lw %2,-4(%4) - addu %0,$1 - addu %0,%2 - sltu $1,%0,%2 - - bne %4,%1,1b - addu %0,$1 # delay slot - -2: andi %1,%3,0x1c - beqz %1,4f - addu %1,%4 # delay slot -3: lw %2,0(%4) - addu %4,4 - addu %0,%2 - sltu $1,%0,%2 - bne %4,%1,3b - addu %0,$1 # delay slot - -4: andi $1,%3,3 - beqz $1,7f - andi $1,%3,2 # delay slot - beqz $1,5f - move %2,$0 # delay slot - lhu %2,(%4) - addiu %4,2 # delay slot - -5: andi $1,%3,1 - beqz $1,6f - nop # delay slot - lbu %1,(%4) - sll %2,16\n\t" -#ifdef __MIPSEB__ - "sll %1,8\n\t" -#endif - "or %2,%1 -6: addu %0,%2 - sltu $1,%0,%2 - addu %0,$1 -7: .set at - .set reorder" - : "=r"(sum), - "=&r" (scratch1), - "=&r" (scratch2), - "=r" (len), - "=r" (buff) - : "0"(sum), "3"(len), "4"(buff) - : "$1"); - - return sum; + /* add in old sum, and carry.. 
*/ + result += sum; + if(sum > result) + result += 1; + return result; } /* @@ -144,12 +96,10 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum) { /* - * It's 2:30 am and I don't feel like doing it right ... + * It's 2:30 am and I don't feel like doing it real ... * This is lots slower than the real thing (tm) * * XXX This may nuke the kernel for unaligned src addresses!!! - * (Due to software address error fixing no longer true, but - * seems to happen very rarely only anyway.) */ sum = csum_partial(src, len, sum); memcpy(dst, src, len); diff --git a/arch/mips/mips1/memcpy.S b/arch/mips/lib/copy_user.S index 9685fa8df..ea691f4fa 100644 --- a/arch/mips/mips1/memcpy.S +++ b/arch/mips/lib/copy_user.S @@ -18,7 +18,7 @@ #define EX(addr,handler) \ .section __ex_table,"a"; \ PTR addr, handler; \ - .text + .previous #define UEX(addr,handler) \ EX(addr,handler); \ EX(addr+4,handler) @@ -107,7 +107,7 @@ not_w_aligned: andi v1,3 sltu t0,v0,v1 MOVN(v1,v0,t0) - beqz v1,align4 # -> finished + beqz v1,3f # -> finished LONG_ADDU v1,a0 # delay slot 1: lb $1,(a1) EX(1b, fault) @@ -119,6 +119,7 @@ not_w_aligned: LONG_SUBU v0,1 # delay slot b align4 nop # delay slot +3: /* ---------------------------------------------------------------------- */ diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index e7082e1ac..1ee4c79f0 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -10,12 +10,12 @@ #include <linux/string.h> #include <asm/bootinfo.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/mipsconfig.h> #include <asm/mipsregs.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/uaccess.h> +#include <asm/segment.h> void dump_tlb(int first, int last) diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S new file mode 100644 index 000000000..8039d21ae --- /dev/null +++ b/arch/mips/lib/memcpy.S @@ -0,0 +1,222 @@ +/* memcpy.S: Mips optimized memcpy based upon SparcLinux code. + * + * Copyright(C) 1995 Linus Torvalds + * Copyright(C) 1996 David S. Miller + * Copyright(C) 1996 Eddie C. Dost + * + * derived from: + * e-mail between David and Eddie. + */ + +#include <asm/asm.h> +#include <asm/regdef.h> +#include <asm/segment.h> + +#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5) \ + lw t0, (offset + 0x18)(src); \ + lw t1, (offset + 0x1c)(src); \ + sw t0, (offset + 0x18)(dst); \ + lw t2, (offset + 0x10)(src); \ + sw t1, (offset + 0x1c)(dst); \ + lw t3, (offset + 0x14)(src); \ + sw t2, (offset + 0x10)(dst); \ + lw t4, (offset + 0x08)(src); \ + sw t3, (offset + 0x14)(dst); \ + lw t5, (offset + 0x0c)(src); \ + sw t4, (offset + 0x08)(dst); \ + lw t0, (offset + 0x00)(src); \ + sw t5, (offset + 0x0c)(dst); \ + lw t1, (offset + 0x04)(src); \ + sw t0, (offset + 0x00)(dst); \ + sw t1, (offset + 0x04)(dst); \ + + /* Alignment cases are: + * 1) (src&0x3)=0x0 (dst&0x3)=0x0 can optimize + * 2) (src&0x3)=0x1 (dst&0x3)=0x1 can optimize + * 3) (src&0x3)=0x2 (dst&0x3)=0x2 can optimize + * 4) (src&0x3)=0x3 (dst&0x3)=0x3 can optimize + * 5) anything else cannot optimize + */ + + /* I hate MIPS register names... AIEEE, it's a SPARC! 
*/ +#define o0 a0 +#define o1 a1 +#define o2 a2 +#define o3 a3 +#define o4 t0 +#define o5 t1 +#define o6 sp +#define o7 ra +#define g0 zero +#define g1 t2 +#define g2 t3 +#define g3 t4 +#define g4 t5 +#define g5 t6 +#define g6 t7 +#define g7 t8 + + .text + .set noreorder + .set noat + + .globl bcopy + .globl amemmove + .globl memmove + .globl memcpy + .align 2 +bcopy: + move o3, o0 + move o0, o1 + move o1, o3 + +amemmove: +memmove: +memcpy: /* o0=dst o1=src o2=len */ + xor o4, o0, o1 + andi o4, o4, 0x3 + move g6, o0 + beq o4, g0, can_align + sltiu g7, o2, 0x8 + + b cannot_optimize + move g1, o2 + +can_align: + bne g7, g0, cannot_optimize + move g1, o2 + + beq o2, g0, out + andi g7, o1, 0x1 + +hword_align: + beq g7, g0, word_align + andi g7, o1, 0x2 + + lbu o4, 0x00(o1) + subu o2, o2, 0x1 + sb o4, 0x00(o0) + addu o1, o1, 0x1 + addu o0, o0, 0x1 + andi g7, o1, 0x2 + +word_align: + beq g7, g0, dword_align + sltiu g7, o2, 56 + + lhu o4, 0x00(o1) + subu o2, o2, 0x2 + sh o4, 0x00(o0) + sltiu g7, o2, 56 + addu o0, o0, 0x2 + addu o1, o1, 0x2 + +dword_align: + bne g7, g0, do_end_words + move g7, o2 + + andi g7, o1, 0x4 + beq g7, zero, qword_align + andi g7, o1, 0x8 + + lw o4, 0x00(o1) + subu o2, o2, 0x4 + sw o4, 0x00(o0) + addu o1, o1, 0x4 + addu o0, o0, 0x4 + andi g7, o1, 0x8 + +qword_align: + beq g7, g0, oword_align + andi g7, o1, 0x10 + + lw o4, 0x00(o1) + lw o5, 0x04(o1) + subu o2, o2, 0x8 + sw o4, 0x00(o0) + addu o1, o1, 0x8 + sw o5, 0x04(o0) + andi g7, o1, 0x10 + addu o0, o0, 0x8 + +oword_align: + beq g7, g0, begin_movement + srl g7, o2, 0x7 + + lw g2, 0x08(o1) + lw g3, 0x0c(o1) + lw o4, 0x00(o1) + lw o5, 0x04(o1) + sw g2, 0x08(o0) + subu o2, o2, 0x10 + sw g3, 0x0c(o0) + addu o1, o1, 0x10 + sw o4, 0x00(o0) + srl g7, o2, 0x7 + addu o0, o0, 0x10 + sw o5, -0x0c(o0) + +begin_movement: + beq g7, g0, 0f + andi g1, o2, 0x40 + +move_128bytes: + MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x40, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x60, o4, o5, g2, g3, g4, g5) + subu g7, g7, 0x01 + addu o1, o1, 0x80 + bne g7, g0, move_128bytes + addu o0, o0, 0x80 + +0: + beq g1, g0, 1f + andi g1, o2, 0x20 + +move_64bytes: + MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5) + addu o1, o1, 0x40 + addu o0, o0, 0x40 + +1: + beq g1, g0, do_end_words + andi g7, o2, 0x1c + +move_32bytes: + MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5) + andi g7, o2, 0x1c + addu o1, o1, 0x20 + addu o0, o0, 0x20 + +do_end_words: + beq g7, g0, maybe_end_cruft + srl g7, g7, 0x2 + +end_words: + lw o4, 0x00(o1) + subu g7, g7, 0x1 + sw o4, 0x00(o0) + addu o1, o1, 0x4 + bne g7, g0, end_words + addu o0, o0, 0x4 + +maybe_end_cruft: + andi g1, o2, 0x3 + +cannot_optimize: + beq g1, g0, out + move o2, g1 + +end_bytes: + lbu o4, 0x00(o1) + subu o2, o2, 0x1 + sb o4, 0x00(o0) + addu o1, o1, 0x1 + bne o2, g0, end_bytes + addu o0, o0, 0x1 + +out: + jr o7 + move v0, g6 diff --git a/arch/mips/lib/memmove.c b/arch/mips/lib/memmove.c deleted file mode 100644 index c3927ad49..000000000 --- a/arch/mips/lib/memmove.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * arch/mips/lib/memmove.c - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 1994, 1995, 1996 by Ralf Baechle - * - * Less stupid implementation of memmove. 
- */ -#include <linux/string.h> - -void __memmove(void *dest, const void *src, size_t n) -{ - if (!n) - return; - - if (dest < src - || dest > src + n) - /* Copy forward */ - __memcpy(dest, src, n); - else - /* Copy backwards */ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n" - "1:\tlbu\t$1,-1(%1)\n\t" - "subu\t%1,1\n\t" - "sb\t$1,-1(%0)\n\t" - "subu\t%2,1\n\t" - "bnez\t%2,1b\n\t" - "subu\t%0,1\n\t" - ".set\tat\n\t" - ".set\treorder" - : "=r" (dest), "=r" (src), "=r" (n) - : "0" (dest + n), "1" (src + n), "2" (n) - : "$1","memory" ); -} diff --git a/arch/mips/lib/memset.c b/arch/mips/lib/memset.c new file mode 100644 index 000000000..bbdbcbb31 --- /dev/null +++ b/arch/mips/lib/memset.c @@ -0,0 +1,71 @@ +/* linux/arch/mips/lib/memset.c + * + * This is from GNU libc. + */ + +#include <linux/types.h> + +#define op_t unsigned long int +#define OPSIZ (sizeof(op_t)) + +typedef unsigned char byte; + +void *memset(void *dstpp, char c, size_t len) +{ + long int dstp = (long int) dstpp; + + if (len >= 8) { + size_t xlen; + op_t cccc; + + cccc = (unsigned char) c; + cccc |= cccc << 8; + cccc |= cccc << 16; + + /* There are at least some bytes to set. + No need to test for LEN == 0 in this alignment loop. */ + while (dstp % OPSIZ != 0) { + ((byte *) dstp)[0] = c; + dstp += 1; + len -= 1; + } + + /* Write 8 `op_t' per iteration until less + * than 8 `op_t' remain. + */ + xlen = len / (OPSIZ * 8); + while (xlen > 0) { + ((op_t *) dstp)[0] = cccc; + ((op_t *) dstp)[1] = cccc; + ((op_t *) dstp)[2] = cccc; + ((op_t *) dstp)[3] = cccc; + ((op_t *) dstp)[4] = cccc; + ((op_t *) dstp)[5] = cccc; + ((op_t *) dstp)[6] = cccc; + ((op_t *) dstp)[7] = cccc; + dstp += 8 * OPSIZ; + xlen -= 1; + } + len %= OPSIZ * 8; + + /* Write 1 `op_t' per iteration until less than + * OPSIZ bytes remain. + */ + xlen = len / OPSIZ; + while (xlen > 0) { + ((op_t *) dstp)[0] = cccc; + dstp += OPSIZ; + xlen -= 1; + } + len %= OPSIZ; + } + + /* Write the last few bytes. 
*/ + while (len > 0) { + ((byte *) dstp)[0] = c; + dstp += 1; + len -= 1; + } + + return dstpp; +} diff --git a/arch/mips/lib/pmaxio.S b/arch/mips/lib/pmaxio.S index 97ee46a23..66e216a55 100644 --- a/arch/mips/lib/pmaxio.S +++ b/arch/mips/lib/pmaxio.S @@ -1,5 +1,5 @@ #include <asm/regdef.h> -#include <asm/dec/decstation.h> +#include <asm/decstation.h> .text .set reorder @@ -31,43 +31,6 @@ pmax_putch: j v0 /* - * pmax_callfn - call the PROM function - */ - .globl pmax_callfn -pmax_callfn: - lw v0,pmax_rex_base - addu v0,v0,a0 - lw v0,(v0) - j v0 - -/* - * pmax_getbitmap - call the PROM for memory bitmap function - */ - .globl pmax_getbitmap -pmax_getbitmap: - lw v0,pmax_rex_base - lw v0,REX_GETBITMAP(v0) - j v0 - -/* - * pmax_getgetenv - call the PROM to get environment variable - */ - .globl pmax_getenv -pmax_getenv: - lw v0,pmax_rex_base - lw v0,REX_GETENV(v0) - j v0 - -/* - * pmax_getsysid - call the PROM to get system id - */ - .globl pmax_getsysid -pmax_getsysid: - lw v0,pmax_rex_base - lw v0,REX_GETSYSID(v0) - j v0 - -/* * pmax_halt - call the PROM halt() function */ .globl pmax_halt @@ -75,4 +38,3 @@ pmax_halt: lw v0,pmax_rex_base lw v0,REX_HALT(v0) j v0 - diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S index 3ef8ed18d..3569313ba 100644 --- a/arch/mips/lib/strlen_user.S +++ b/arch/mips/lib/strlen_user.S @@ -23,7 +23,7 @@ LEAF(__strlen_user) LONG_ADDIU a0,1 bnez t0,1b jr ra - END(strlen_user) + END(__strlen_user) .section __ex_table,"a" PTR 1b,fault diff --git a/arch/mips/lib/tags.c b/arch/mips/lib/tags.c new file mode 100644 index 000000000..5425db712 --- /dev/null +++ b/arch/mips/lib/tags.c @@ -0,0 +1,74 @@ +/* + * linux/arch/mips/kernel/tags.c + * + * Copyright (C) 1996 Stoned Elipot + */ +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <asm/bootinfo.h> + +/* + * Parse the tags present in upper memory to find out + * a pecular one. + * + * Parameter: type - tag type to find + * + * returns : NULL - failure + * !NULL - pointer on the tag structure found + */ +tag * +bi_TagFind(enum bi_tag type) +{ + tag* t = (tag*)(mips_memory_upper - sizeof(tag)); + + while((t->tag != tag_dummy) && (t->tag != type)) + t = (tag*)(NEXTTAGPTR(t)); + + if (t->tag == tag_dummy) /* tag not found */ + return (tag*)NULL; + + return t; +} + +/* + * Snarf from the tag list in memory end some tags needed + * before the kernel reachs setup_arch() + * + * add yours here if you want to, but *beware*: the kernel var + * that will hold the values you want to snarf have to be + * in .data section of the kernel, so initialized in to whatever + * value in the kernel's sources. 
+ */ +void bi_EarlySnarf(void) +{ + tag* atag; + + /* for wire_mappings() */ + atag = bi_TagFind(tag_machgroup); + if (atag) + memcpy(&mips_machgroup, TAGVALPTR(atag), atag->size); + else { + /* useless for boxes without text video mode but....*/ + panic("machine group not specified by bootloader"); + } + + atag = bi_TagFind(tag_machtype); + if (atag) + memcpy(&mips_machtype, TAGVALPTR(atag), atag->size); + else { + /* useless for boxes without text video mode but....*/ + panic("machine type not specified by bootloader"); + } + + /* for tlbflush() */ + atag = bi_TagFind(tag_tlb_entries); + if (atag) + memcpy(&mips_tlb_entries, TAGVALPTR(atag), atag->size); + else { + /* useless for boxes without text video mode but....*/ + panic("number of TLB entries not specified by bootloader"); + } + + return; +} diff --git a/arch/mips/lib/tinycon.c b/arch/mips/lib/tinycon.c index 4b75dec20..ba25982df 100644 --- a/arch/mips/lib/tinycon.c +++ b/arch/mips/lib/tinycon.c @@ -33,12 +33,10 @@ extern struct screen_info screen_info; void init_console(void) { size_x = 80; - // size_y = 50; size_y = 25; cursor_x = 0; cursor_y = 0; - // vram_addr = (unsigned short *)0xe10b8000; vram_addr = (unsigned short *)0xb00b8000; console_needs_init = 0; @@ -131,3 +129,5 @@ void print_string(const unsigned char *str) break; } } + +/* end of file */ diff --git a/arch/mips/lib/watch.S b/arch/mips/lib/watch.S index e460de6db..096375257 100644 --- a/arch/mips/lib/watch.S +++ b/arch/mips/lib/watch.S @@ -28,8 +28,9 @@ or a0,a1 mtc0 a0,CP0_WATCHLO sw a0,watch_savelo + jr ra - mtc0 zero,CP0_WATCHHI + mtc0 zero,CP0_WATCHHI END(__watch_set) /* @@ -38,7 +39,7 @@ */ LEAF(__watch_clear) jr ra - mtc0 zero,CP0_WATCHLO + mtc0 zero,CP0_WATCHLO END(__watch_clear) /* @@ -47,8 +48,9 @@ */ LEAF(__watch_reenable) lw t0,watch_savelo + jr ra - mtc0 t0,CP0_WATCHLO + mtc0 t0,CP0_WATCHLO END(__watch_reenable) /* @@ -69,7 +71,7 @@ watch_savelo: .word 0 */ LEAF(get_sp) jr ra - move v0,sp + move v0,sp END(get_sp) /* @@ -78,7 +80,7 @@ watch_savelo: .word 0 */ LEAF(get_ra) jr ra - move v0,ra + move v0,ra END(get_ra) /* @@ -87,7 +89,7 @@ watch_savelo: .word 0 */ LEAF(get_status) jr ra - mfc0 v0,CP0_STATUS + mfc0 v0,CP0_STATUS END(get_status) /* @@ -101,8 +103,9 @@ watch_savelo: .word 0 move a1,sp PRINT("$sp == %08lx\n") REG_L ra,4*SZREG(sp) + jr ra - PTR_ADDU sp,((5*SZREG)+ALSZ)&ALMASK + PTR_ADDU sp,((5*SZREG)+ALSZ)&ALMASK END(print_sp) /* @@ -116,6 +119,7 @@ watch_savelo: .word 0 mfc0 a1,CP0_STATUS PRINT("cp0_status == %08lx\n") REG_L ra,4*SZREG(sp) + jr ra - PTR_ADDU sp,((5*SZREG)+ALSZ)&ALMASK + PTR_ADDU sp,((5*SZREG)+ALSZ)&ALMASK END(print_st) diff --git a/arch/mips/man/man8/hardware.8 b/arch/mips/man/man8/hardware.8 index b19e2d4e0..46b519f69 100644 --- a/arch/mips/man/man8/hardware.8 +++ b/arch/mips/man/man8/hardware.8 @@ -20,7 +20,7 @@ .\" Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, .\" USA. .\" -.TH HARDWARE 8 "29 May 96" "Linux" "Supported hardware" +.TH HARDWARE 8 "12 May 96" "Linux" "Supported hardware" .SH ABSTRACT This file contains information about the status of hardware support @@ -29,14 +29,14 @@ of the various ports of Linux/MIPS. .TP .B "Acer PICA 61" Supported; the onboard S3 graphics is fully supported in the - text mode as well as the parallel port and the floppy (buggy). There - are no drivers available yet for the onboard Ethernet interface and - SCSI controller. 
However 35c03 Ethernet cards are working in PIO mode - after disabeling the shared memory detection code, IDE harddisk and - CD ROM have been successfully tested as well as NE2000 Ethernet cards - and the DPT PM2041W ISA SCSI hostadapter (drivers EATA-DMA and - EATA-PIO). In general all drivers that do PIO via ports are supposed - to work. + text mode as well as the onboard Ethernet interface, the parallel + port and the floppy (buggy). There is no drivers available yet for + the onboard SCSI controller. However 35c03 Ethernet cards are + working in PIO mode after disabeling the shared memory detection code, + IDE harddisk and CD ROM have been successfully tested as well as + NE2000 Ethernet cards and the DPT PM2041W ISA SCSI hostadapter + (drivers EATA-DMA and EATA-PIO). In general all drivers that do PIO + via ports are supposed to work. .TP .B "Mips Magnum 4000" Same status as Acer PICA 61; additionally the serial interface is @@ -56,12 +56,12 @@ of the various ports of Linux/MIPS. .TP .B "SNI RM 200" - Current status is that the ARC bootloader Milo is now also working - on the unusually bahaving SNI ARC BIOS 4.05 (which is supposed to - be the current version as of this writing); the kernel itself - supports the builtin VGA card and floppy controller. It boots via - an ISA NE2000 card from NFS; ISA DPT hostadapters have also - successfully been tested. Other SNI hardware isn't supported yet. + This port has just started (10 May 1996). Current status is that + the ARC bootloader Milo is now also working on the (buggy) SNI + ARC BIOS 4.05 (which is supposed to be the current version as of + this writing); the kernel itself supports the builtin VGA card and + and boots via an ISA NE2000 card from NFS. Other SNI hardware + isn't supported yet. .TP .B "Deskstation Tyne" @@ -77,11 +77,10 @@ of the various ports of Linux/MIPS. .TP .B "SGI" - There is currently no port to SGI hardware; a port is about to - start. Due to the resources SGI is willing to invest in this - project this port will probably advance rather fast and help to - improve Linux/MIPS in general so stay tuned. According to SGI - also support of SMP machines is planned. + The SGI port was started at the end of May 1996 but has stalled + later in the year. Works have now been restarted. Currenly + the port supports only Indy machines and has a complete set of + drivers. .TP .B "DECstation" diff --git a/arch/mips/mips1/Makefile b/arch/mips/mips1/Makefile deleted file mode 100644 index 3e9f13037..000000000 --- a/arch/mips/mips1/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -# -# Makefile for the MIPS I specific parts of the Linux/MIPS kernel. -# -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. DON'T put your own dependencies here -# unless it's something special (ie not a .c file). -# - -.S.s: - $(CPP) $(CFLAGS) $< -o $*.s -.S.o: - $(CC) $(CFLAGS) -c $< -o $*.o - -all: mips.o -EXTRA_ASFLAGS = -mips1 -mcpu=r3000 -O_TARGET := mips.o -O_OBJS := cache.o cpu.o memcpy.o memset.o r3000.o pagetables.o showregs.o - fp-context.o - -r3000.o: r3000.S - -fp-context.o: fp-context.S - -cache.o: cache.S - -clean: - -include $(TOPDIR)/Rules.make diff --git a/arch/mips/mips1/cache.S b/arch/mips/mips1/cache.S deleted file mode 100644 index 788a567e8..000000000 --- a/arch/mips/mips1/cache.S +++ /dev/null @@ -1,162 +0,0 @@ -# R3000 cache routines lifted from IDT documentation -# by Ruud Riem-Viis. 
Adapted for linux by Didier Frick -# (dfrick@dial.eunet.ch) - -#include <asm/addrspace.h> -#include <asm/asm.h> -#include <asm/processor.h> -#include <asm/regdef.h> -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/mipsconfig.h> -#include <asm/stackframe.h> -#include <asm/bootinfo.h> - -#define MINCACHE 0x00200 /* minimum cache size 512 */ -#define MAXCACHE 0x04000 /* maximum cache size 16K */ - -/* - * Figure out the size of the I- and D-caches, using the diagnostic isolate - * swap features. The cache size is left in an environment variable because - * the system will want to know it later. - * Flush the cache so that it is in a known state. - */ -NESTED(cache_init,8,ra) - subu sp, sp, 24 # keep sp aligned on 8 words - sw ra, 20(sp) # push return address on stack - sw s0, 16(sp) # save s0 on stack - mfc0 s0, CP0_STATUS # save sr - mtc0 zero, CP0_STATUS - nop - .set reorder - jal cache_size - sw v0, mips_dcache_size - li v0, ST0_CE # swap caches - .set noreorder - mtc0 v0, CP0_STATUS - nop - jal cache_size - nop - sw v0, mips_icache_size - mtc0 zero, CP0_STATUS # swap back caches - nop - mtc0 s0, CP0_STATUS # restore sr - nop - .set reorder - jal cache_flush - lw s0, 16(sp) # restore s0 - lw ra, 20(sp) - addu sp, sp, 24 - j ra - nop -END(cache_init) - -LEAF(cache_size) - .set noreorder - mfc0 t0, CP0_STATUS # save current SR - nop - and t0, ~ST0_SR # do not clear parity error bit - or v0, t0, ST0_DE # isolate cache - mtc0 v0, CP0_STATUS - nop - - move v0, zero - li v1, 0xa5a5a5a5 - nop - sw v1, KSEG0 # try to write in cache - lw t1, KSEG0 # try to read from cache - nop - mfc0 t2, CP0_STATUS - nop - .set reorder - and t2, (1<<19) - bne t2, zero, 3f # cache miss, must be no cache - bne v1, t1, 3f # data not equal -> no cache - -/* - * Clear cache boundries to known state. 
- */ - li v0, MINCACHE -1: - sw zero, KSEG0(v0) - sll v0, 1 - ble v0, MAXCACHE, 1b - - li v0, -1 - sw v0, KSEG0(zero) # store marker in cache - li v0, MINCACHE # MIN cache size -2: - lw v1, KSEG0(v0) # look for marker - bne v1, zero, 3f # found marker - sll v0, 1 # cache size * 2 - ble v0, MAXCACHE, 2b # keep looking - move v0, zero # must be no cache - .set noreorder -3: - mtc0 t0, CP0_STATUS # restore sr - nop - j ra - nop - .set reorder -END(cache_size) - -LEAF(cache_flush) - lw t1, mips_icache_size - lw t2, mips_dcache_size - .set noreorder - mfc0 t3, CP0_STATUS # save sr - nop - and t3, ~ST0_SR # do not clear PE - beq t1, zero, check_dcache # if no icache, check dcache - nop - li v0, ST0_DE | ST0_CE # isolate and swap - nop - mtc0 v0, CP0_STATUS - nop - li t0, KSEG0 - .set reorder - or t1, t0, t1 -1: - sb zero, 0(t0) - sb zero, 4(t0) - sb zero, 8(t0) - sb zero, 12(t0) - sb zero, 16(t0) - sb zero, 20(t0) - sb zero, 24(t0) - addu t0, 32 - sb zero, -4(t0) - bne t0, t1, 1b # continue until done - -check_dcache: - li v0, ST0_DE - nop - .set noreorder - mtc0 v0, CP0_STATUS - nop - beq t2, zero, flush_done # if no dcache, done - .set reorder - li t0, KSEG0 - or t1, t0, t2 -1: - sb zero, 0(t0) - sb zero, 4(t0) - sb zero, 8(t0) - sb zero, 12(t0) - sb zero, 16(t0) - sb zero, 20(t0) - sb zero, 24(t0) - addu t0, 32 - sb zero, -4(t0) - bne t0, t1, 1b # continue until done - - .set noreorder -flush_done: - mtc0 t3, CP0_STATUS # restore old sr - nop - j ra - nop - .set reorder -END(cache_flush) - - diff --git a/arch/mips/mips1/cpu.c b/arch/mips/mips1/cpu.c deleted file mode 100644 index fd41ce15b..000000000 --- a/arch/mips/mips1/cpu.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/sched.h> - -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/processor.h> - -extern asmlinkage void mips1_cacheflush(void *addr, int nbytes, unsigned int flags); - -void (*mips_cache_init)(void); - -static void -mips1_cache_init(void) -{ - cacheflush = mips1_cacheflush; -} - -void (*switch_to_user_mode)(struct pt_regs *regs); - -static void -mips1_switch_to_user_mode(struct pt_regs *regs) -{ - regs->cp0_status = regs->cp0_status | ST0_KUC; -} - -unsigned long (*thread_saved_pc)(struct thread_struct *t); - -/* - * Return saved PC of a blocked thread. - */ -static unsigned long mips1_thread_saved_pc(struct thread_struct *t) -{ - return ((unsigned long *)(unsigned long)t->reg29)[13]; -} - -unsigned long (*get_wchan)(struct task_struct *p); - -static unsigned long mips1_get_wchan(struct task_struct *p) -{ - /* - * This one depends on the frame size of schedule(). Do a - * "disass schedule" in gdb to find the frame size. Also, the - * code assumes that sleep_on() follows immediately after - * interruptible_sleep_on() and that add_timer() follows - * immediately after interruptible_sleep(). Ugly, isn't it? - * Maybe adding a wchan field to task_struct would be better, - * after all... 
- */ - unsigned long schedule_frame; - unsigned long pc; - - pc = thread_saved_pc(&p->tss); - if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) { - schedule_frame = ((unsigned long *)(long)p->tss.reg30)[13]; - return ((unsigned long *)schedule_frame)[11]; - } - return pc; -} - -void (*pgd_init)(unsigned long page); -void (*copy_page)(unsigned long to, unsigned long from); -asmlinkage void (*restore_fp_context)(struct sigcontext *sc); -asmlinkage void (*save_fp_context)(struct sigcontext *sc); - -void -mips1_cpu_init(void) -{ - extern void mips1_cache_init(void); - extern void mips1_pgd_init(unsigned long page); - extern void mips1_clear_page(unsigned long page); - extern void mips1_copy_page(unsigned long to, unsigned long from); - extern asmlinkage void mips1_restore_fp_context(struct sigcontext *sc); - extern asmlinkage void mips1_save_fp_context(struct sigcontext *sc); - - mips_cache_init = mips1_cache_init; - pgd_init = mips1_pgd_init; - switch_to_user_mode = mips1_switch_to_user_mode; - thread_saved_pc = mips1_thread_saved_pc; - get_wchan = mips1_get_wchan; - clear_page = mips1_clear_page; - copy_page = mips1_copy_page; - restore_fp_context = mips1_restore_fp_context; - save_fp_context = mips1_save_fp_context; -} diff --git a/arch/mips/mips1/fp-context.S b/arch/mips/mips1/fp-context.S deleted file mode 100644 index 6ff3c6be3..000000000 --- a/arch/mips/mips1/fp-context.S +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Save/restore floating point context for signal handlers. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - */ -#include <asm/asm.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/regdef.h> -#include <asm/sigcontext.h> - -#define SWC1(r,m) \ -7: swc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define SW(r,m) \ -7: sw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LWC1(r,m) \ -7: lwc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LW(r,m) \ -7: lw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - - .set noreorder -/* - * Save floating point context - */ -LEAF(mips1_save_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - nop # delay slot - cfc1 t0,fcr31 - /* - * Store the 16 odd double precision registers - */ - SWC1 ($f0,(SC_FPREGS+0)(a0)) - SWC1 ($f1,(SC_FPREGS+8)(a0)) - SWC1 ($f2,(SC_FPREGS+16)(a0)) - SWC1 ($f3,(SC_FPREGS+24)(a0)) - SWC1 ($f4,(SC_FPREGS+32)(a0)) - SWC1 ($f5,(SC_FPREGS+40)(a0)) - SWC1 ($f6,(SC_FPREGS+48)(a0)) - SWC1 ($f7,(SC_FPREGS+56)(a0)) - SWC1 ($f8,(SC_FPREGS+64)(a0)) - SWC1 ($f9,(SC_FPREGS+72)(a0)) - SWC1 ($f10,(SC_FPREGS+80)(a0)) - SWC1 ($f11,(SC_FPREGS+88)(a0)) - SWC1 ($f12,(SC_FPREGS+96)(a0)) - SWC1 ($f13,(SC_FPREGS+104)(a0)) - SWC1 ($f14,(SC_FPREGS+112)(a0)) - SWC1 ($f15,(SC_FPREGS+120)(a0)) - SWC1 ($f16,(SC_FPREGS+128)(a0)) - SWC1 ($f17,(SC_FPREGS+136)(a0)) - SWC1 ($f18,(SC_FPREGS+144)(a0)) - SWC1 ($f19,(SC_FPREGS+152)(a0)) - SWC1 ($f20,(SC_FPREGS+160)(a0)) - SWC1 ($f21,(SC_FPREGS+168)(a0)) - SWC1 ($f22,(SC_FPREGS+176)(a0)) - SWC1 ($f23,(SC_FPREGS+184)(a0)) - SWC1 ($f24,(SC_FPREGS+192)(a0)) - SWC1 ($f25,(SC_FPREGS+200)(a0)) - SWC1 ($f26,(SC_FPREGS+208)(a0)) - SWC1 ($f27,(SC_FPREGS+216)(a0)) - SWC1 ($f28,(SC_FPREGS+224)(a0)) - SWC1 ($f29,(SC_FPREGS+232)(a0)) - SWC1 ($f30,(SC_FPREGS+240)(a0)) - SWC1 
($f31,(SC_FPREGS+248)(a0)) - SW (t0,SC_FPC_CSR(a0)) - cfc1 t0,$0 # implementation/version - jr ra - .set nomacro - SW t0,SC_FPC_EIR(a0) # delay slot - .set macro - -1: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips1_save_fp_context) - -/* - * Restore fpu state: - * - fp gp registers - * - cp1 status/control register - * - * We base the decission which registers to restore from the signal stack - * frame on the current content of c0_status, not on the content of the - * stack frame which might have been changed by the user. - */ -LEAF(mips1_restore_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - nop # delay slot - bgez t0,1f - LW (t0,SC_FPC_CSR(a0)) # delay slot - /* - * Restore the 16 odd double precision registers only - * when enabled in the cp0 status register. - */ - LWC1 ($f0,(SC_FPREGS+0)(a0)) - LWC1 ($f1,(SC_FPREGS+8)(a0)) - LWC1 ($f2,(SC_FPREGS+16)(a0)) - LWC1 ($f3,(SC_FPREGS+24)(a0)) - LWC1 ($f4,(SC_FPREGS+32)(a0)) - LWC1 ($f5,(SC_FPREGS+40)(a0)) - LWC1 ($f6,(SC_FPREGS+48)(a0)) - LWC1 ($f7,(SC_FPREGS+56)(a0)) - LWC1 ($f8,(SC_FPREGS+64)(a0)) - LWC1 ($f9,(SC_FPREGS+72)(a0)) - LWC1 ($f10,(SC_FPREGS+80)(a0)) - LWC1 ($f11,(SC_FPREGS+88)(a0)) - LWC1 ($f12,(SC_FPREGS+96)(a0)) - LWC1 ($f13,(SC_FPREGS+104)(a0)) - LWC1 ($f14,(SC_FPREGS+112)(a0)) - LWC1 ($f15,(SC_FPREGS+120)(a0)) - LWC1 ($f16,(SC_FPREGS+128)(a0)) - LWC1 ($f17,(SC_FPREGS+136)(a0)) - LWC1 ($f18,(SC_FPREGS+144)(a0)) - LWC1 ($f19,(SC_FPREGS+152)(a0)) - LWC1 ($f20,(SC_FPREGS+160)(a0)) - LWC1 ($f21,(SC_FPREGS+168)(a0)) - LWC1 ($f22,(SC_FPREGS+176)(a0)) - LWC1 ($f23,(SC_FPREGS+184)(a0)) - LWC1 ($f24,(SC_FPREGS+192)(a0)) - LWC1 ($f25,(SC_FPREGS+200)(a0)) - LWC1 ($f26,(SC_FPREGS+208)(a0)) - LWC1 ($f27,(SC_FPREGS+216)(a0)) - LWC1 ($f28,(SC_FPREGS+224)(a0)) - LWC1 ($f29,(SC_FPREGS+232)(a0)) - LWC1 ($f30,(SC_FPREGS+240)(a0)) - LWC1 ($f31,(SC_FPREGS+248)(a0)) - jr ra - .set nomacro - ctc1 t0,fcr31 # delay slot - .set macro - -1: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips1_restore_fp_context) diff --git a/arch/mips/mips1/memset.S b/arch/mips/mips1/memset.S deleted file mode 100644 index 5cfb5d9a7..000000000 --- a/arch/mips/mips1/memset.S +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * Generic memset for all MIPS CPUs. - * This is time critical. Hear it crying "optimize me" ... - */ -#include <asm/asm.h> -#include <asm/regdef.h> - -LEAF(__generic_memset_b) -__generic_memset_dw = __generic_memset_b - .set noreorder - beqz a2,2f - LONG_ADDU a3,a0,a2 - .set reorder - LONG_SUBU a3,1 -1: sb a1,(a0) - .set noreorder - bne a0,a3,1b - LONG_ADDIU a0,1 - .set reorder -2: jr ra - END(__generic_memset_b) diff --git a/arch/mips/mips1/pagetables.c b/arch/mips/mips1/pagetables.c deleted file mode 100644 index 22419d1c9..000000000 --- a/arch/mips/mips1/pagetables.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * 32 bit MIPS specific page handling. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/mm.h> -#include <asm/cache.h> -#include <asm/mipsconfig.h> -#include <asm/page.h> -#include <asm/pgtable.h> - -void (*pgd_init)(unsigned long page); - -/* - * Initialize new page directory with pointers to invalid ptes - */ -void mips1_pgd_init(unsigned long page) -{ - unsigned long dummy1, dummy2; - - /* - * The plain and boring version for the R3000. No cache flushing - * stuff is needed since the R3000 has physical caches. - */ - __asm__ __volatile__( - ".set\tnoreorder\n" - "1:\tsw\t%2,(%0)\n\t" - "sw\t%2,4(%0)\n\t" - "sw\t%2,8(%0)\n\t" - "sw\t%2,12(%0)\n\t" - "sw\t%2,16(%0)\n\t" - "sw\t%2,20(%0)\n\t" - "sw\t%2,24(%0)\n\t" - "sw\t%2,28(%0)\n\t" - "subu\t%1,1\n\t" - "bnez\t%1,1b\n\t" - "addiu\t%0,32\n\t" - ".set\treorder" - :"=r" (dummy1), - "=r" (dummy2) - :"r" (((unsigned long) invalid_pte_table /* - PAGE_OFFSET */ ) | - _PAGE_TABLE), - "0" (page), - "1" (PAGE_SIZE/(sizeof(pmd_t)*8))); -} - -void (*clear_page)(unsigned long page) - -/* - * To do: cache magic ... - */ -void mips1_clear_page(unsigned long page) -{ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "addiu\t$1,%0,%2\n" - "1:\tsw\t$0,(%0)\n\t" - "sw\t$0,4(%0)\n\t" - "sw\t$0,8(%0)\n\t" - "sw\t$0,12(%0)\n\t" - "addiu\t%0,32\n\t" - "sw\t$0,-16(%0)\n\t" - "sw\t$0,-12(%0)\n\t" - "sw\t$0,-8(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sw\t$0,-4(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (page) - :"0" (page), - "I" (PAGE_SIZE) - :"$1","memory"); -} - -void (*copy_page)(unsigned long to, unsigned long from); - -void mips1_copy_page(unsigned long to, unsigned long from) -{ - memcpy((void *) to, - (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE); -} diff --git a/arch/mips/mips1/r3000.S b/arch/mips/mips1/r3000.S deleted file mode 100644 index 25529d9a0..000000000 --- a/arch/mips/mips1/r3000.S +++ /dev/null @@ -1,1125 +0,0 @@ -/* - * arch/mips/kernel/r3000.S - * - * Copyright (C) 1994, 1995 Waldorf Electronics, 1996 Paul M. Antoine - * Written by Ralf Baechle and Andreas Busse - * Modified for R3000 by Paul M. Antoine - * - * Additional R3000 support by Didier Frick <dfrick@dial.eunet.ch> - * for ACN S.A, Copyright (C) 1996 by ACN S.A - * - * This file contains most of the R3000/R3000A specific routines, which would - * probably work on the R2000 (if anyone's interested!). - * - * This code is evil magic. Read appendix f (coprocessor 0 hazards) of - * all R3000/MIPS manuals and think about that MIPS means "Microprocessor without - * Interlocked Pipeline Stages" before you even think about changing this code! - * - * Then remember that some bugs here are due to my not having completely - * converted the R4xx0 code to R3000 and that the R4xx0 CPU's are more - * forgiving than the R3000/A!! All that, and the fact that I'm not up to - * 'guru' level on R3000 - PMA. - * (Paul, I replaced all occurances of TLBMAPHI with %HI(TLBMAP) -- Ralf) - */ -#include <linux/config.h> - -#include <asm/asm.h> -#include <asm/bootinfo.h> -#include <asm/cache.h> -#include <asm/fpregdef.h> -#include <asm/mipsconfig.h> -#include <asm/mipsregs.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/mipsregs.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> - -#ifdef __SMP__ -#error "Fix this for SMP!" -#else -#define current current_set -#endif - -/* - -FIXME: - - First of all, this really screams for a light version of SAVE_ALL - and RESTORE_ALL, saving and restoring only the context actually - needed in this case. 
I'm afraid it's necessary to save some context - on the stack because on the R3000 tlb exceptions can nest in some - cases where they wouldn't on the R4000. - - - The TLB handling code should be completely rewritten for the R3000 - because too many things are different from the R4000. - For instance, the CP0_CONTEXT register has a different format - and cannot be reused with the current setup. - I really had to do a fast hack to get it to work, but no time to do - it cleanly for now, sorry. - We also introduced a tlb_softindex variable to point to the next - TLB entry to write. This variable is incremented everytime we add a - new entry to the TLB. We did this because we felt that using the - CP0_RANDOM register could be unsafe in some cases (like trashing - the TLB entry for the handler's return address in user space). - It's very possible that we are wrong on this one, but we had so - much trouble with this TLB thing that we chose the safe side. -*/ - -#define CONF_DEBUG_TLB -#undef CONFIG_TLB_SHUTDOWN -#undef TLB_LOG - -MODE_ALIAS = 0x00e0 # cachable - - .text - .set mips1 - .set noreorder - - .align 5 - NESTED(handle_tlbl, FR_SIZE, sp) - .set noat - /* - * Check whether this is a refill or an invalid exception - */ - mfc0 k0,CP0_BADVADDR - nop - mfc0 k1,CP0_ENTRYHI - ori k0,0xfff # clear ASID... - xori k0,0xfff # in BadVAddr - andi k1,0xfc0 # get current ASID - or k0,k1 # make new entryhi - mfc0 k1,CP0_ENTRYHI - nop - mtc0 k0,CP0_ENTRYHI - nop # for pipeline - tlbp - nop # for pipeline - mfc0 k0,CP0_INDEX - nop - mtc0 k1,CP0_ENTRYHI # delay slot - bgez k0,invalid_tlbl # bad addr in c0_badvaddr - nop - - - mfc0 k0,CP0_BADVADDR - lui k1,0xe000 - subu k0,k0,k1 - bgez k0,1f - nop - j real_utlb - nop - -1: - - -#ifdef CONF_DEBUG_TLB - /* - * OK, this is a double fault. Let's see whether this is - * due to an invalid entry in the page_table. - */ - - lw k0, tlbl_lock - nop - bnez k0,1f - li k1,1 - la k0, tlbl_lock - sw k1,(k0) - - - mfc0 k0,CP0_BADVADDR - lui k1,58368 - srl k0,12 # get PFN? - sll k0,2 - addu k0,k1 - lw k1,(k0) - nop - andi k1,(_PAGE_PRESENT|_PAGE_ACCESSED) - bnez k1,reload_pgd_entries - nop # delay slot - -1: - SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) - - PRINT("Double fault caused by invalid entries in pgd:\n") - mfc0 a1,CP0_BADVADDR - nop - PRINT("Double fault address : %08lx\n") - mfc0 a1,CP0_EPC - nop - PRINT("c0_epc : %08lx\n") - jal show_regs - move a0,sp - jal dump_tlb_nonwired - nop - mfc0 a0,CP0_BADVADDR - jal dump_list_current - nop - .set noat - STI - .set at - PANIC("Corrupted pagedir") - .set noat - -reload_pgd_entries: -#endif /* CONF_DEBUG_TLB */ - - /* - * Load missing pair of entries from the pgd and return. - */ - - mfc0 k0,CP0_BADVADDR - nop - lui k1,58368 - - srl k0,12 - sll k0,2 - addu k0,k1 - lw k0,(k0) - nop - mtc0 k0,CP0_ENTRYLO0 - - la k0, tlb_softIndex - lw k1,(k0) - nop - mtc0 k1,CP0_INDEX - nop - addu k1,(1<<8) - andi k0,k1,(63<<8) - bnez k0, 1f - nop - li k1,(8<<8) -1: - la k0, tlb_softIndex - sw k1,(k0) - - - nop - nop - nop # for pipeline - tlbwi - nop # for pipeline - nop - nop - - -#ifdef CONF_DEBUG_TLB - la k0, tlbl_lock - sw zero,(k0) -#endif - mfc0 k0,CP0_EPC - nop - jr k0 - rfe - nop - - - /* - * Handle invalid exception - * - * There are two possible causes for an invalid (tlbl) - * exception: - * 1) pages with present bit set but the valid bit clear - * 2) nonexistant pages - * Case one needs fast handling, therefore don't save - * registers yet. - * - * k0 contains c0_index. 
- */ -invalid_tlbl: - SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) - -#ifdef TLB_LOG - PRINT ("tlbl: invalid\n"); - nop -#endif - /* - * Test present bit in entry - */ - lw s0,FR_BADVADDR(sp) - nop - srl s0,12 - sll s0,2 - lui k1,58368 - addu s0,k1 - - lw k1,(s0) - nop - andi k1,(_PAGE_PRESENT|_PAGE_READ) - xori k1,(_PAGE_PRESENT|_PAGE_READ) - bnez k1,nopage_tlbl - nop - /* - * Present and read bits are set -> set valid and accessed bits - */ - lw k1,(s0) # delay slot - nop - ori k1,(_PAGE_VALID|_PAGE_ACCESSED) - sw k1,(s0) - - mtc0 k1,CP0_ENTRYLO0 - nop - tlbwi - nop - nop - - j return - nop - - /* - * Page doesn't exist. Lots of work which is less important - * for speed needs to be done, so hand it all over to the - * kernel memory management routines. - */ -nopage_tlbl: -/* SAVE_ALL */ - REG_S sp,FR_ORIG_REG2(sp) */ -#ifdef TLB_LOG - PRINT ("nopage_tlbl\n"); - nop -#endif -#ifdef CONFIG_TLB_SHUTDOWN - mfc0 t0,CP0_INDEX - sll t0,4 - la t1,KSEG1 - or t0,t1 - mtc0 t0,CP0_ENTRYHI - mtc0 zero,CP0_ENTRYLO0 - nop - nop - tlbwi - nop - nop -#endif - - lw a2,FR_BADVADDR(sp) - li t1,-1 # not a sys call - sw t1,FR_ORIG_REG2(sp) - nop - STI - .set at - /* - * a0 (struct pt_regs *) regs - * a1 (unsigned long) 0 for read access - * a2 (unsigned long) faulting virtual address - */ - move a0,sp - jal do_page_fault - li a1,0 # delay slot - j ret_from_sys_call - nop # delay slot - END(handle_tlbl) - - .text - .align 5 - NESTED(handle_tlbs, FR_SIZE, sp) - .set noat - /* - * It is impossible that is a nested reload exception. - * Therefore this must be a invalid exception. - * Two possible cases: - * 1) Page exists but not dirty. - * 2) Page doesn't exist yet. Hand over to the kernel. - * - * Test whether present bit in entry is set - */ - /* used to be dmfc0 */ - -#ifdef CONF_DEBUG_TLB - - la k0,tlbs_lock - lw k1,(k0) - nop - beqz k1,3f - nop - .set noat - SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) - .set at - PRINT("Nested tlbs exception:\n") - mfc0 a1,CP0_BADVADDR - nop - PRINT("Virtual address : %08lx\n") - mfc0 a1,CP0_EPC - nop - PRINT("c0_epc : %08lx\n") - jal show_regs - move a0,sp - jal dump_tlb_nonwired - nop - mfc0 a0,CP0_BADVADDR - jal dump_list_current - nop - .set noat - STI - .set at - PANIC("Nested tlbs exception") - -3: - li k1,1 - sw k1,(k0) - -#endif - .set noat - SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) - .set at - - mfc0 s0,CP0_BADVADDR - - lui k1,58368 - srl s0,12 - sll s0,2 - addu s0,k1 - nop - lw k1,(s0) # may cause nested xcpt. - nop - move k0,s0 - - lw k1,FR_ENTRYHI(sp) - nop - mtc0 k1,CP0_ENTRYHI - nop - nop - tlbp # find faulting entry - nop - lw k1,(k0) - nop - andi k1,(_PAGE_PRESENT|_PAGE_WRITE) - xori k1,(_PAGE_PRESENT|_PAGE_WRITE) - bnez k1,nopage_tlbs - nop - /* - * Present and writable bits set: set accessed and dirty bits. - */ - lw k1,(k0) # delay slot - nop - ori k1,k1,(_PAGE_ACCESSED|_PAGE_MODIFIED| \ - _PAGE_VALID|_PAGE_DIRTY) - sw k1,(k0) - /* - * Now reload the entry into the TLB - */ - mtc0 k1,CP0_ENTRYLO0 - nop - nop - nop # for pipeline - tlbwi - nop # for pipeline -#ifdef CONF_DEBUG_TLB - la k0,tlbs_lock - li k1,0 - sw k1,(k0) -#endif - j return - nop - - /* - * Page doesn't exist. Lots of work which is less important - * for speed needs to be done, so hand it all over to the - * kernel memory management routines. - */ -nowrite_mod: -nopage_tlbs: - -#ifdef CONFIG_TLB_SHUTDOWN - /* - * Remove entry so we don't need to care later - */ - mfc0 k0,CP0_INDEX - nop -#ifdef CONF_DEBUG_TLB - bgez k0,2f - nop - /* - * We got a tlbs exception but found no matching entry in - * the tlb. 
This should never happen. Paranoia makes us - * check it, though. - */ - .set noat -/* SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) */ - jal show_regs - move a0,sp - .set at - la a1,FR_BADVADDR(sp) - lw a1,(a1) - nop - PRINT("c0_badvaddr == %08lx\n") - nop - mfc0 a1,CP0_INDEX - nop - PRINT("c0_index == %08x\n") - nop - la a1,FR_ENTRYHI(sp) - lw a1,(a1) - nop - PRINT("c0_entryhi == %08x\n") - nop - jal dump_tlb_nonwired - nop - la a0,FR_BADVADDR(sp) - lw a0,(a0) - jal dump_list_current - nop - - .set noat - STI - .set at - PANIC("Tlbs or tlbm exception with no matching entry in tlb") -1: j 1b - nop -2: -#endif /* CONF_DEBUG_TLB */ - lui k1,0xa000 - sll k0,4 - or k0,k1 - xor k0,k1 - or k0,k1 # make it a KSEG1 address - mtc0 k0,CP0_ENTRYHI - nop - mtc0 zero,CP0_ENTRYLO0 - nop - nop - nop - tlbwi - nop -#endif /* CONFIG_TLB_SHUTDOWN */ - -#ifdef CONF_DEBUG_TLB - la k0,tlbs_lock - li k1,0 - sw k1,(k0) -#endif - .set noat -/* SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) */ - lw a2,FR_BADVADDR(sp) - li t1,-1 - sw t1,FR_ORIG_REG2(sp) # not a sys call - nop - STI - .set at - /* - * a0 (struct pt_regs *) regs - * a1 (unsigned long) 1 for write access - * a2 (unsigned long) faulting virtual address - */ - move a0,sp - jal do_page_fault - li a1,1 # delay slot - j ret_from_sys_call - nop # delay slot - END(handle_tlbs) - - .align 5 - NESTED(handle_mod, FR_SIZE, sp) - .set noat - /* - * Two possible cases: - * 1) Page is writable but not dirty -> set dirty and return - * 2) Page is not writable -> call C handler - */ - /* used to be dmfc0 */ - - SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) - - mfc0 s0,CP0_BADVADDR - nop - - srl s0,12 - sll s0,2 - lui k1,58368 - addu s0,k1 - lw k1,(s0) - nop - move k0,s0 - nop - - lw k1,FR_ENTRYHI(sp) - nop - mtc0 k1,CP0_ENTRYHI - nop - tlbp - nop - lw k1,(k0) - nop - andi k1,_PAGE_WRITE - beqz k1,nowrite_mod - nop - /* - * Present and writable bits set: set accessed and dirty bits. - */ - lw k1,(k0) # delay slot - nop - ori k1,(_PAGE_ACCESSED|_PAGE_DIRTY) - sw k1,(k0) - /* - * Now reload the entry into the tlb - */ - lw k0,(k0) - nop - mtc0 k0,CP0_ENTRYLO0 - nop - nop # for pipeline - nop - tlbwi - nop # for pipeline - j return - nop - END(handle_mod) - .set at - - .set reorder - LEAF(tlbflush) - - .set noreorder - - mfc0 t3,CP0_STATUS # disable interrupts... - nop - ori t4,t3,1 - xori t4,1 - mtc0 t4,CP0_STATUS - lw t1,mips_tlb_entries /* mips_tlb_enbtries is set */ - /* by bi_EarlySnarf() */ - mfc0 t0,CP0_ENTRYHI - nop - mtc0 zero,CP0_ENTRYLO0 - sll t1,t1,8 - li t2,KSEG1 - li t5,(7<<8) /* R3000 has 8 wired entries */ -1: - subu t1,(1<<8) - beq t1,t5,2f /* preserve wired entries */ - - sll t6,t1,4 - addu t6,t2 - mtc0 t6,CP0_ENTRYHI - nop - mtc0 t1,CP0_INDEX - nop - nop - nop - tlbwi - nop - b 1b - nop -2: - - mtc0 t0,CP0_ENTRYHI - nop - mtc0 t3,CP0_STATUS - nop - jr ra - nop - END(tlbflush) - -/* - * Flush a single entry from the TLB - * - * Parameters: a0 - unsigned long address - */ - .set noreorder - LEAF(tlbflush_page) - /* - * Step 1: Wipe out old TLB information. Not shure if - * we really need that step; call it paranoia ... - * In order to do that we need to disable interrupts. - */ - mfc0 t0,CP0_STATUS # interrupts off - nop - ori t1,t0,1 - xori t1,1 - mtc0 t1,CP0_STATUS - li t3,TLBMAP # then wait 3 cycles - ori t1,a0,0xfff # mask off low 12 bits - xori t1,0xfff - mfc0 t2,CP0_ENTRYHI # copy ASID into address - nop - andi t2,0xfc0 # ASID in bits 11-6 - or t2,t1 - mtc0 t2,CP0_ENTRYHI -/* FIXME: - shouldn't we save ENTRYHI before trashing it ? 
-*/ - - srl t4,a0,12 # wait again three cycles - sll t4,t4,PTRLOG - mtc0 zero,CP0_ENTRYLO0 - nop - tlbp # now query the TLB - addu t3,t4 # wait another three cycles - ori t3,0xffff - xori t3,0xffff - mfc0 t1,CP0_INDEX - nop - blez t1,1f # No old entry? - nop # delay slot - li t5, KSEG1 - sll t1,4 - addu t5,t1 - mtc0 t5,CP0_ENTRYHI - nop - nop - tlbwi - /* - * But there still might be a entry for the pgd ... - */ -1: mtc0 t3,CP0_ENTRYHI - nop # wait 3 cycles - nop - nop - tlbp # TLB lookup - nop - nop - mfc0 t1,CP0_INDEX # wait 3 cycles - nop - blez t1,1f # No old entry? - nop - li t5, KSEG1 - sll t1,4 - addu t5,t1 - mtc0 t5,CP0_ENTRYHI - nop - nop - tlbwi # gotcha ... - nop - nop - nop - -1: - mtc0 t0,CP0_STATUS - nop - jr ra - nop - - END(tlbflush_page) - - .set noreorder - LEAF(tlbload) - /* - address in a0 - pte in a1 - */ - - mfc0 t1,CP0_STATUS - nop - ori t0,t1,1 - xori t0,1 - mtc0 t0,CP0_STATUS - nop - mfc0 t0,CP0_ENTRYHI - nop - ori a0,0xfff - xori a0,0xfff - andi t2,t0,0xfc0 - or a0,t2 - mtc0 a0,CP0_ENTRYHI - nop - nop - mtc0 a1,CP0_ENTRYLO0 - - la t2, tlb_softIndex - lw t3,(t2) - nop - mtc0 t3, CP0_INDEX - nop - addu t3,(1<<8) - andi t2,t3,(63<<8) - bnez t2, 1f - nop - li t3,(8<<8) -1: - la t2, tlb_softIndex - sw t3,(t2) - - - nop - nop - nop - tlbwi - nop - nop - mtc0 t0,CP0_ENTRYHI - nop - mtc0 t1,CP0_STATUS - - jr ra - nop - - END(tlbload) - - - -/* - * Code necessary to switch tasks on an Linux/MIPS machine. - * FIXME: We don't need to disable interrupts anymore. - */ - .align 5 - LEAF(resume) - /* - * Current task's task_struct - */ - lui t5,%hi(current) - lw t0,%lo(current)(t5) - - /* - * Save status register - */ - mfc0 t1,CP0_STATUS - addu t0,a1 # Add tss offset - sw t1,TOFF_CP0_STATUS(t0) - -/* - li t2,ST0_CU0 - and t2,t1 - beqz t2,1f - nop - sw sp,TOFF_KSP(t0) -1: -*/ - /* - * Disable interrupts - */ -#ifndef __R3000__ - ori t2,t1,0x1f - xori t2,0x1e -#else - ori t2,t1,1 - xori t2,1 -#endif - mtc0 t2,CP0_STATUS - - /* - * Save non-scratch registers - * All other registers have been saved on the kernel stack - */ - sw s0,TOFF_REG16(t0) - sw s1,TOFF_REG17(t0) - sw s2,TOFF_REG18(t0) - sw s3,TOFF_REG19(t0) - sw s4,TOFF_REG20(t0) - sw s5,TOFF_REG21(t0) - sw s6,TOFF_REG22(t0) - sw s7,TOFF_REG23(t0) - sw gp,TOFF_REG28(t0) - sw sp,TOFF_REG29(t0) - sw fp,TOFF_REG30(t0) - - /* - * Save floating point state - */ - sll t2,t1,2 - bgez t2,2f - sw ra,TOFF_REG31(t0) # delay slot - sll t2,t1,5 - bgez t2,1f - swc1 $f0,(TOFF_FPU+0)(t0) # delay slot - /* - * Store the 16 odd double precision registers - */ - swc1 $f1,(TOFF_FPU+8)(t0) - swc1 $f3,(TOFF_FPU+24)(t0) - swc1 $f5,(TOFF_FPU+40)(t0) - swc1 $f7,(TOFF_FPU+56)(t0) - swc1 $f9,(TOFF_FPU+72)(t0) - swc1 $f11,(TOFF_FPU+88)(t0) - swc1 $f13,(TOFF_FPU+104)(t0) - swc1 $f15,(TOFF_FPU+120)(t0) - swc1 $f17,(TOFF_FPU+136)(t0) - swc1 $f19,(TOFF_FPU+152)(t0) - swc1 $f21,(TOFF_FPU+168)(t0) - swc1 $f23,(TOFF_FPU+184)(t0) - swc1 $f25,(TOFF_FPU+200)(t0) - swc1 $f27,(TOFF_FPU+216)(t0) - swc1 $f29,(TOFF_FPU+232)(t0) - swc1 $f31,(TOFF_FPU+248)(t0) - - /* - * Store the 16 even double precision registers - */ -1: cfc1 t1,fcr31 - swc1 $f2,(TOFF_FPU+16)(t0) - swc1 $f4,(TOFF_FPU+32)(t0) - swc1 $f6,(TOFF_FPU+48)(t0) - swc1 $f8,(TOFF_FPU+64)(t0) - swc1 $f10,(TOFF_FPU+80)(t0) - swc1 $f12,(TOFF_FPU+96)(t0) - swc1 $f14,(TOFF_FPU+112)(t0) - swc1 $f16,(TOFF_FPU+128)(t0) - swc1 $f18,(TOFF_FPU+144)(t0) - swc1 $f20,(TOFF_FPU+160)(t0) - swc1 $f22,(TOFF_FPU+176)(t0) - swc1 $f24,(TOFF_FPU+192)(t0) - swc1 $f26,(TOFF_FPU+208)(t0) - swc1 $f28,(TOFF_FPU+224)(t0) - swc1 
$f30,(TOFF_FPU+240)(t0) - sw t1,(TOFF_FPU+256)(t0) - - /* - * Switch current task - */ -2: sw a0,%lo(current)(t5) - addu a0,a1 # Add tss offset - - /* - * Switch address space - */ - - /* - * (Choose new ASID for process) - * This isn't really required, but would speed up - * context switching. - */ - - /* - * Switch the root pointer - */ - lw t0,TOFF_PG_DIR(a0) # get PFN - li t1,TLB_ROOT - mtc0 t1,CP0_ENTRYHI - nop - mtc0 zero,CP0_INDEX - ori t0,MODE_ALIAS # want cachable, dirty, valid - mtc0 t0,CP0_ENTRYLO0 - nop - nop - nop - tlbwi # delay slot - nop - - /* - * Flush tlb - * (probably not needed, doesn't clobber a0-a3) - */ - jal tlbflush - nop - - lw a2,TOFF_CP0_STATUS(a0) - nop - - /* - * Restore fpu state: - * - cp0 status register bits - * - fp gp registers - * - cp1 status/control register - */ - ori t1,a2,1 # pipeline magic - xori t1,1 - mtc0 t1,CP0_STATUS - sll t0,a2,2 - bgez t0,2f - sll t0,a2,5 # delay slot - bgez t0,1f - nop - lwc1 $f0,(TOFF_FPU+0)(a0) # delay slot - /* - * Restore the 16 odd double precision registers only - * when enabled in the cp0 status register. - */ - lwc1 $f1,(TOFF_FPU+8)(a0) - lwc1 $f3,(TOFF_FPU+24)(a0) - lwc1 $f5,(TOFF_FPU+40)(a0) - lwc1 $f7,(TOFF_FPU+56)(a0) - lwc1 $f9,(TOFF_FPU+72)(a0) - lwc1 $f11,(TOFF_FPU+88)(a0) - lwc1 $f13,(TOFF_FPU+104)(a0) - lwc1 $f15,(TOFF_FPU+120)(a0) - lwc1 $f17,(TOFF_FPU+136)(a0) - lwc1 $f19,(TOFF_FPU+152)(a0) - lwc1 $f21,(TOFF_FPU+168)(a0) - lwc1 $f23,(TOFF_FPU+184)(a0) - lwc1 $f25,(TOFF_FPU+200)(a0) - lwc1 $f27,(TOFF_FPU+216)(a0) - lwc1 $f29,(TOFF_FPU+232)(a0) - lwc1 $f31,(TOFF_FPU+248)(a0) - - /* - * Restore the 16 even double precision registers - * when cp1 was enabled in the cp0 status register. - */ -1: lw t0,(TOFF_FPU+256)(a0) - lwc1 $f2,(TOFF_FPU+16)(a0) - lwc1 $f4,(TOFF_FPU+32)(a0) - lwc1 $f6,(TOFF_FPU+48)(a0) - lwc1 $f8,(TOFF_FPU+64)(a0) - lwc1 $f10,(TOFF_FPU+80)(a0) - lwc1 $f12,(TOFF_FPU+96)(a0) - lwc1 $f14,(TOFF_FPU+112)(a0) - lwc1 $f16,(TOFF_FPU+128)(a0) - lwc1 $f18,(TOFF_FPU+144)(a0) - lwc1 $f20,(TOFF_FPU+160)(a0) - lwc1 $f22,(TOFF_FPU+176)(a0) - lwc1 $f24,(TOFF_FPU+192)(a0) - lwc1 $f26,(TOFF_FPU+208)(a0) - lwc1 $f28,(TOFF_FPU+224)(a0) - lwc1 $f30,(TOFF_FPU+240)(a0) - ctc1 t0,fcr31 - - /* - * Restore non-scratch registers - */ -2: lw s0,TOFF_REG16(a0) - lw s1,TOFF_REG17(a0) - lw s2,TOFF_REG18(a0) - lw s3,TOFF_REG19(a0) - lw s4,TOFF_REG20(a0) - lw s5,TOFF_REG21(a0) - lw s6,TOFF_REG22(a0) - lw s7,TOFF_REG23(a0) - lw gp,TOFF_REG28(a0) - lw sp,TOFF_REG29(a0) - lw fp,TOFF_REG30(a0) - lw ra,TOFF_REG31(a0) - - /* - * Restore status register - */ - lw t0,TOFF_KSP(a0) - nop - sw t0,kernelsp - - mtc0 a2,CP0_STATUS # delay slot - jr ra - nop - END(resume) - - /* - * Load a new root pointer into the tlb - */ - .set noreorder - LEAF(load_pgd) - /* - * Switch the root pointer - */ - mfc0 t0,CP0_STATUS - nop - ori t1,t0,1 - xori t1,1 - mtc0 t1,CP0_STATUS - - ori a0,MODE_ALIAS - li t1,TLB_ROOT - mtc0 t1,CP0_ENTRYHI - nop - mtc0 zero,CP0_INDEX - nop - mtc0 a0,CP0_ENTRYLO0 - nop - nop - nop - tlbwi - nop - nop - mtc0 t0,CP0_STATUS - nop - jr ra - nop - END(load_pgd) - -/* - * Some bits in the config register - */ -#define CONFIG_DB (1<<4) -#define CONFIG_IB (1<<5) - -/* - * Flush instruction/data caches - FIXME: Don't know how to do this on R[236]000! - * (Actually most of this flushing stuff isn't needed for the R2000/R3000/R6000 - * since these CPUs have physical indexed caches unlike R4000 and better - * which have virtual indexed caches.) 
- * - * Parameters: a0 - starting address to flush - * a1 - size of area to be flushed - * a2 - which caches to be flushed - * - * FIXME: - ignores parameters in a0/a1 - * - doesn't know about second level caches - - */ - .set noreorder - LEAF(mips1_cacheflush) - -done: j cache_flush - nop - END(mips1_cacheflush) - -/* - * Invalidate virtual addresses. - FIXME: Don't know how on R[236]000 yet! - * (Flushing is relativly expensive; it isn't required at all if a - * particular machines' chipset keeps the external cache in a state that is - * consistent with memory -- Ralf) - * - * Parameters: a0 - starting address to flush - * a1 - size of area to be flushed - * - * FIXME: - ignores parameters in a0/a1 - * - doesn't know about second level caches - */ - .set noreorder - LEAF(fd_cacheflush) - jr ra - nop - END(fd_cacheflush) - -/* - * do_syscall calls the function in a1 with upto 7 arguments. If over - * four arguments are being requested, the additional arguments will - * be copied from the user stack pointed to by a0->reg29. - * - * a0 (struct pt_regs *) pointer to user registers - * a1 (syscall_t) pointer to syscall to do - * a2 (int) number of arguments to syscall - */ - .set reorder - .text -NESTED(do_syscalls, 32, sp) - subu sp,32 - sw ra,28(sp) - sll a2,a2,PTRLOG - lw t1,dst(a2) - move t2,a1 - lw t0,FR_REG29(a0) # get old user stack pointer - jalr t1 - -7: lw t1,24(t0) # parameter #7 from usp - sw t1,24(sp) -6: lw t1,20(t0) # parameter #6 from usp - sw t1,20(sp) -5: lw t1,16(t0) # parameter #5 from usp - sw t1,16(sp) -4: lw a3,FR_REG7(a0) # 4 args -3: lw a2,FR_REG6(a0) # 3 args -2: lw a1,FR_REG5(a0) # 2 args -1: lw a0,FR_REG4(a0) # delay slot - jalr t2 # 1 args - lw ra,28(sp) - addiu sp,32 - jr ra -0: jalr t2 # 0 args, just pass a0 - lw ra,28(sp) - addiu sp,32 - jr ra - END(do_syscalls) - - .rdata - .align PTRLOG -dst: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b - - .section __ex_table,"a" - PTR 7b,bad_stack - PTR 6b,bad_stack - PTR 5b,bad_stack - - .data - - EXPORT(tlbl_lock) - .word 0 - -tlbs_lock: - .word 0 - - EXPORT(tlb_softIndex) - .word 0 diff --git a/arch/mips/mips1/showregs.c b/arch/mips/mips1/showregs.c deleted file mode 100644 index 0be6db80e..000000000 --- a/arch/mips/mips1/showregs.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * linux/arch/mips/mips1/showregs.c - * - * Copyright (C) 1995, 1996 Ralf Baechle, Paul M. Antoine. 
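For reference, the do_syscalls helper above dispatches through a small jump table indexed by the argument count and pulls arguments five to seven from the saved user stack pointer. A rough host-C sketch of that control flow follows; struct fake_regs and the register indices are stand-ins for the real pt_regs layout, and the user-access fault handling done by the __ex_table entries is not modelled.

#include <stdint.h>
#include <stdio.h>

typedef long (*syscall_t)(long, long, long, long, long, long, long);

/* Hypothetical stand-in for the saved register frame: reg[4]..reg[7]
 * hold a0..a3, reg[29] holds the saved user stack pointer. */
struct fake_regs {
	intptr_t reg[32];
};

static long do_syscalls_model(struct fake_regs *r, syscall_t fn, int nargs)
{
	long a[7] = { 0 };
	const long *usp = (const long *)r->reg[29];

	switch (nargs) {		/* mirrors the 'dst' jump table */
	case 7: a[6] = usp[6];		/* #5..#7 live on the user stack;  */
		/* fall through */	/* offsets 16/20/24 in the real o32 */
	case 6: a[5] = usp[5];		/* frame                            */
		/* fall through */
	case 5: a[4] = usp[4];
		/* fall through */
	case 4: a[3] = (long)r->reg[7];
		/* fall through */
	case 3: a[2] = (long)r->reg[6];
		/* fall through */
	case 2: a[1] = (long)r->reg[5];
		/* fall through */
	case 1: a[0] = (long)r->reg[4];
		/* fall through */
	default:
		break;
	}
	return fn(a[0], a[1], a[2], a[3], a[4], a[5], a[6]);
}

static long sys_demo(long a, long b, long c, long d, long e, long f, long g)
{
	return a + b + c + d + e + f + g;
}

int main(void)
{
	long ustack[8] = { 0, 0, 0, 0, 50, 60, 70, 0 };
	struct fake_regs r = { { 0 } };

	r.reg[4] = 1; r.reg[5] = 2; r.reg[6] = 3; r.reg[7] = 4;
	r.reg[29] = (intptr_t)ustack;
	printf("%ld\n", do_syscalls_model(&r, sys_demo, 7));	/* 190 */
	return 0;
}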
- */ -#include <linux/kernel.h> -#include <linux/ptrace.h> - -void show_regs(struct pt_regs * regs) -{ - /* - * Saved main processor registers - */ - printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - 0, regs->regs[1], regs->regs[2], regs->regs[3], - regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); - printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], - regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); - printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], - regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); - printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[24], regs->regs[25], regs->regs[28], regs->regs[29], - regs->regs[30], regs->regs[31]); - - /* - * Saved cp0 registers - */ - printk("epc : %08lx\nStatus: %08x\nCause : %08x\nBadVAdddr : %08x\n", - regs->cp0_epc, regs->cp0_status, regs->cp0_cause,regs->cp0_badvaddr); -} diff --git a/arch/mips/mips2/Makefile b/arch/mips/mips2/Makefile deleted file mode 100644 index 1d53d027c..000000000 --- a/arch/mips/mips2/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# -# Makefile for the MIPS II specific parts of the Linux/MIPS kernel. -# -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. DON'T put your own dependencies here -# unless it's something special (ie not a .c file). -# - -.S.s: - $(CPP) $(CFLAGS) $< -o $*.s -.S.o: - $(CC) $(CFLAGS) -c $< -o $*.o - -all: mips.o -EXTRA_ASFLAGS = -mips2 -mcpu=r6000 -O_TARGET := mips.o -O_OBJS := cpu.o pagetables.o showregs.o - -clean: - -include $(TOPDIR)/Rules.make diff --git a/arch/mips/mips2/README b/arch/mips/mips2/README deleted file mode 100644 index c64808874..000000000 --- a/arch/mips/mips2/README +++ /dev/null @@ -1,3 +0,0 @@ -This directory contains the R6000 specific part. I (Ralf) tried to support -this CPU as good as possible without a machine and without detailed -documentation. So don't look to close on this part ... diff --git a/arch/mips/mips2/cpu.c b/arch/mips/mips2/cpu.c deleted file mode 100644 index bcc021ee7..000000000 --- a/arch/mips/mips2/cpu.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/sched.h> - -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/processor.h> - -extern asmlinkage void mips2_cacheflush(void *addr, int nbytes, unsigned int flags); - -void (*mips_cache_init)(void); - -static void -mips2_cache_init(void) -{ - cacheflush = mips2_cacheflush; -} - -void (*switch_to_user_mode)(struct pt_regs *regs); - -static void -mips2_switch_to_user_mode(struct pt_regs *regs) -{ - regs->cp0_status = regs->cp0_status | ST0_KUC; -} - -unsigned long (*thread_saved_pc)(struct thread_struct *t); - -/* - * Return saved PC of a blocked thread. - */ -static unsigned long mips2_thread_saved_pc(struct thread_struct *t) -{ - return ((unsigned long *)(unsigned long)t->reg29)[13]; -} - -unsigned long (*get_wchan)(struct task_struct *p); - -static unsigned long mips2_get_wchan(struct task_struct *p) -{ - /* - * This one depends on the frame size of schedule(). Do a - * "disass schedule" in gdb to find the frame size. 
Also, the - * code assumes that sleep_on() follows immediately after - * interruptible_sleep_on() and that add_timer() follows - * immediately after interruptible_sleep(). Ugly, isn't it? - * Maybe adding a wchan field to task_struct would be better, - * after all... - */ - unsigned long schedule_frame; - unsigned long pc; - - pc = thread_saved_pc(&p->tss); - if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) { - schedule_frame = ((unsigned long *)(long)p->tss.reg30)[13]; - return ((unsigned long *)schedule_frame)[11]; - } - return pc; -} - -void (*pgd_init)(unsigned long page); -void (*copy_page)(unsigned long to, unsigned long from); -asmlinkage void (*restore_fp_context)(struct sigcontext *sc); -asmlinkage void (*save_fp_context)(struct sigcontext *sc); - -void -mips2_cpu_init(void) -{ - extern void mips2_cache_init(void); - extern void mips2_pgd_init(unsigned long page); - extern void mips2_clear_page(unsigned long page); - extern void mips2_copy_page(unsigned long to, unsigned long from); - extern asmlinkage void mips2_restore_fp_context(struct sigcontext *sc); - extern asmlinkage void mips2_save_fp_context(struct sigcontext *sc); - - mips_cache_init = mips2_cache_init; - pgd_init = mips2_pgd_init; - switch_to_user_mode = mips2_switch_to_user_mode; - thread_saved_pc = mips2_thread_saved_pc; - get_wchan = mips2_get_wchan; - clear_page = mips2_clear_page; - copy_page = mips2_copy_page; - restore_fp_context = mips2_restore_fp_context; - save_fp_context = mips2_save_fp_context; -} diff --git a/arch/mips/mips2/fp-context.S b/arch/mips/mips2/fp-context.S deleted file mode 100644 index fc11a7fed..000000000 --- a/arch/mips/mips2/fp-context.S +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Save/restore floating point context for signal handlers. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1996 by Ralf Baechle - */ -#include <asm/asm.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/regdef.h> -#include <asm/sigcontext.h> - -#define SDC1(r,m) \ -7: sdc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define SW(r,m) \ -7: sw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LDC1(r,m) \ -7: ldc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LW(r,m) \ -7: lw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - - .set noreorder -/* - * Save floating point context - */ -LEAF(mips2_save_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - nop # delay slot - cfc1 t1,fcr31 - /* - * Store the 16 double precision registers - */ - SDC1 ($f0,(SC_FPREGS+0)(a0)) - SDC1 ($f2,(SC_FPREGS+16)(a0)) - SDC1 ($f4,(SC_FPREGS+32)(a0)) - SDC1 ($f6,(SC_FPREGS+48)(a0)) - SDC1 ($f8,(SC_FPREGS+64)(a0)) - SDC1 ($f10,(SC_FPREGS+80)(a0)) - SDC1 ($f12,(SC_FPREGS+96)(a0)) - SDC1 ($f14,(SC_FPREGS+112)(a0)) - SDC1 ($f16,(SC_FPREGS+128)(a0)) - SDC1 ($f18,(SC_FPREGS+144)(a0)) - SDC1 ($f20,(SC_FPREGS+160)(a0)) - SDC1 ($f22,(SC_FPREGS+176)(a0)) - SDC1 ($f24,(SC_FPREGS+192)(a0)) - SDC1 ($f26,(SC_FPREGS+208)(a0)) - SDC1 ($f28,(SC_FPREGS+224)(a0)) - SDC1 ($f30,(SC_FPREGS+240)(a0)) - SW (t0,SC_FPC_CSR(a0)) - cfc1 t0,$0 # implementation/version - jr ra - .set nomacro - SW (t0,SC_FPC_EIR(a0)) # delay slot - .set macro - -1: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips2_save_fp_context) - -/* - * Restore fpu state: - * - fp gp registers - * - cp1 status/control register - * - * We base the decission which registers to restore from the signal stack - * frame on the current content of c0_status, not on the content of the - * stack frame which might have been changed by the user. - */ -LEAF(mips2_restore_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - LW t0,SC_FPC_CSR(a0) # delay slot - /* - * Restore the 16 double precision registers - */ - LDC1 ($f0,(SC_FPREGS+0)(a0)) - LDC1 ($f2,(SC_FPREGS+16)(a0)) - LDC1 ($f4,(SC_FPREGS+32)(a0)) - LDC1 ($f6,(SC_FPREGS+48)(a0)) - LDC1 ($f8,(SC_FPREGS+64)(a0)) - LDC1 ($f10,(SC_FPREGS+80)(a0)) - LDC1 ($f12,(SC_FPREGS+96)(a0)) - LDC1 ($f14,(SC_FPREGS+112)(a0)) - LDC1 ($f16,(SC_FPREGS+128)(a0)) - LDC1 ($f18,(SC_FPREGS+144)(a0)) - LDC1 ($f20,(SC_FPREGS+160)(a0)) - LDC1 ($f22,(SC_FPREGS+176)(a0)) - LDC1 ($f24,(SC_FPREGS+192)(a0)) - LDC1 ($f26,(SC_FPREGS+208)(a0)) - LDC1 ($f28,(SC_FPREGS+224)(a0)) - LDC1 ($f30,(SC_FPREGS+240)(a0)) - jr ra - .set nomacro - ctc1 t0,fcr31 # delay slot - .set macro - -1: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips2_restore_fp_context) diff --git a/arch/mips/mips2/pagetables.c b/arch/mips/mips2/pagetables.c deleted file mode 100644 index 2c0061d39..000000000 --- a/arch/mips/mips2/pagetables.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * 32 bit MIPS specific page handling. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/mm.h> -#include <asm/cache.h> -#include <asm/mipsconfig.h> -#include <asm/page.h> -#include <asm/pgtable.h> - -void (*pgd_init)(unsigned long page); - -/* - * Initialize new page directory with pointers to invalid ptes - */ -void mips2_pgd_init(unsigned long page) -{ - unsigned long dummy1, dummy2; - - /* - * This version is optimized for the R6000. 
We generate dirty lines - * in the datacache, overwrite these lines with zeros and then flush - * the cache. Sounds horribly complicated but is just a trick to - * avoid unnecessary loads of from memory and uncached stores which - * are very expensive. Not tested yet as the R6000 is a rare CPU only - * available in SGI machines and I don't have one. - */ - __asm__ __volatile__( - ".set\tnoreorder\n" - "1:\t" - "cache\t%5,(%0)\n\t" - "sw\t%2,(%0)\n\t" - "sw\t%2,4(%0)\n\t" - "sw\t%2,8(%0)\n\t" - "sw\t%2,12(%0)\n\t" - "cache\t%5,16(%0)\n\t" - "sw\t%2,16(%0)\n\t" - "sw\t%2,20(%0)\n\t" - "sw\t%2,24(%0)\n\t" - "sw\t%2,28(%0)\n\t" - "subu\t%1,1\n\t" - "bnez\t%1,1b\n\t" - "addiu\t%0,32\n\t" - ".set\treorder" - :"=r" (dummy1), - "=r" (dummy2) - :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) | - _PAGE_TABLE), - "0" (page), - "1" (PAGE_SIZE/(sizeof(pmd_t)*8)), - "i" (Create_Dirty_Excl_D)); -#endif - /* - * Now force writeback to ashure values are in the RAM. - */ - cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_PHYSICAL); -} - -void (*clear_page)(unsigned long page); - -/* - * To do: cache magic, maybe FPU for 64 accesses when clearing cache pages. - */ -void mips2_clear_page(unsigned long page) -{ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "addiu\t$1,%0,%2\n" - "1:\tsw\t$0,(%0)\n\t" - "sw\t$0,4(%0)\n\t" - "sw\t$0,8(%0)\n\t" - "sw\t$0,12(%0)\n\t" - "addiu\t%0,32\n\t" - "sw\t$0,-16(%0)\n\t" - "sw\t$0,-12(%0)\n\t" - "sw\t$0,-8(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sw\t$0,-4(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (page) - :"0" (page), - "I" (PAGE_SIZE) - :"$1","memory"); -} - -void (*copy_page)(unsigned long to, unsigned long from); - -void mips2_copy_page(unsigned long to, unsigned long from) -{ - memcpy((void *) to, - (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE); -} diff --git a/arch/mips/mips2/showregs.c b/arch/mips/mips2/showregs.c deleted file mode 100644 index d207d231b..000000000 --- a/arch/mips/mips2/showregs.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * linux/arch/mips/mips1/showregs.c - * - * Copyright (C) 1995, 1996 Ralf Baechle, Paul M. Antoine. - */ -#include <linux/kernel.h> -#include <linux/ptrace.h> - -void show_regs(struct pt_regs * regs) -{ - /* - * Saved main processor registers - */ - printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - 0, regs->regs[1], regs->regs[2], regs->regs[3], - regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); - printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], - regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); - printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], - regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); - printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n", - regs->regs[24], regs->regs[25], regs->regs[28], regs->regs[29], - regs->regs[30], regs->regs[31]); - - /* - * Saved cp0 registers - */ - printk("epc : %08lx\nStatus: %08x\nCause : %08x\n", - regs->cp0_epc, regs->cp0_status, regs->cp0_cause); -} diff --git a/arch/mips/mips3/Makefile b/arch/mips/mips3/Makefile deleted file mode 100644 index e0f6bcf29..000000000 --- a/arch/mips/mips3/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# -# Makefile for the MIPS III specific parts of the Linux/MIPS kernel. -# -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. 
DON'T put your own dependencies here -# unless it's something special (ie not a .c file). -# - -.S.s: - $(CPP) $(CFLAGS) $< -o $*.s -.S.o: - $(CC) $(CFLAGS) -c $< -o $*.o - -all: mips.o -EXTRA_ASFLAGS = -mips3 -mcpu=r4400 -O_TARGET := mips.o -O_OBJS := cache.o cpu.o fp-context.o memcpy.o memset.o r4xx0.o pagetables.o \ - showregs.o - -copy_user.o: copy_user.S -r4xx0.o: r4xx0.S - -clean: - -include $(TOPDIR)/Rules.make diff --git a/arch/mips/mips3/cache.c b/arch/mips/mips3/cache.c deleted file mode 100644 index 3fac95b75..000000000 --- a/arch/mips/mips3/cache.c +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Cache maintenance for R4000/R4400/R4600 CPUs. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * (C) Copyright 1996 by Ralf Baechle - * FIXME: Support for SC/MC version is missing. - */ -#include <linux/kernel.h> -#include <asm/addrspace.h> -#include <asm/asm.h> -#include <asm/bootinfo.h> -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/system.h> - -#define STR(x) __STR(x) -#define __STR(x) #x - -unsigned long page_colour_mask; - -/* - * Size of the caches - * Line size of the caches - */ -unsigned int dcache_size, icache_size; -unsigned int dcache_line_size, icache_line_size; -unsigned long dcache_line_mask, icache_line_mask; - -/* - * Profiling counter - */ -extern unsigned int dflushes; -extern unsigned int iflushes; - -/* - * Pointers to code for particular CPU sub family. - */ -static void (*wb_inv_d_cache)(void); -static void (*inv_i_cache)(void); - -#define CACHELINES 512 /* number of cachelines (kludgy) */ - -extern inline void cache(unsigned int cacheop, unsigned long addr, - unsigned long offset, void *fault) -{ - __asm__ __volatile__ ( - "1:\tcache\t%0,%2+%1\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%3\n\t" - ".text" - : /* no outputs */ - :"ri" (cacheop), - "o" (*(unsigned char *)addr), - "ri" (offset), - "ri" (fault)); -} - -/* - * Code for R4000 style primary caches. - * - * R4000 style caches are direct-mapped, virtual indexed and physical tagged. - * The size of cache line is either 16 or 32 bytes. - * SC/MC versions of the CPUs add support for an second level cache with - * upto 4mb configured as either joint or split I/D. These level two - * caches with direct support from CPU aren't yet supported. - */ - -static void r4000_wb_inv_d_cache(void) -{ - unsigned long addr = KSEG0; - int i; - - for (i=CACHELINES;i;i--) { - cache(Index_Writeback_Inv_D, addr, 0, &&fault); - addr += 32; - } - if (read_32bit_cp0_register(CP0_CONFIG) & CONFIG_DB) - return; - for (i=CACHELINES;i;i--) { - cache(Index_Writeback_Inv_D, addr, 16, &&fault); - addr += 32; - } -fault: -} - -static void r4000_inv_i_cache(void) -{ - unsigned long addr = KSEG0; - int i; - - for (i=CACHELINES;i;i--) { - cache(Index_Invalidate_I, addr, 0, &&fault); - addr += 32; - } - if (read_32bit_cp0_register(CP0_CONFIG) & CONFIG_IB) - return; - for (i=CACHELINES;i;i--) { - cache(Index_Invalidate_I, addr, 16, &&fault); - addr += 32; - } -fault: -} - -/* - * Code for R4600 style primary caches. - * - * R4600 has two way primary caches with 32 bytes line size. The way to - * flush is selected by bith 12 of the physical address given as argument - * to an Index_* cache operation. CPU supported second level caches are - * not available. - * - * R4600 v1.0 bug: Flush way 2, then way 1 of the instruction cache when - * using Index_Invalidate_I. 
IDT says this should work but is untested. - * If this should not work, we have to disable interrupts for the broken - * chips. The CPU might otherwise execute code from the wrong cache way - * during an interrupt. - */ -static void r4600_wb_inv_d_cache(void) -{ - unsigned long addr = KSEG0; - int i; - - for (i=CACHELINES;i;i-=2) { - cache(Index_Writeback_Inv_D, addr, 8192, &&fault); - cache(Index_Writeback_Inv_D, addr, 0, &&fault); - addr += 32; - } -fault: -} - -static void r4600_inv_i_cache(void) -{ - unsigned long addr = KSEG0; - int i; - - for (i=CACHELINES;i;i-=2) { - cache(Index_Invalidate_I, addr, 8192, &&fault); - cache(Index_Invalidate_I, addr, 0, &&fault); - addr += 32; - } -fault: -} - -/* - * Flush the cache of R4x00. - * - * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Invalidate_D, - * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only - * operate correctly if the internal data cache refill buffer is empty. These - * CACHE instructions should be separated from any potential data cache miss - * by a load instruction to an uncached address to empty the response buffer." - * (Revision 2.0 device errata from IDT available on http://www.idt.com/ - * in .pdf format.) - * - * To do: Use Hit_Invalidate where possible to be more economic. - * Handle SC & MC versions. - * The decission to nuke the entire cache might be based on a better - * decission algorithem based on the real costs. - * Handle different cache sizes. - * Combine the R4000 and R4600 cases. - */ -extern inline void -flush_d_cache(unsigned long addr, unsigned long size) -{ - unsigned long end; - unsigned long a; - - dflushes++; - if (1 || size >= dcache_size) { - wb_inv_d_cache(); - return; - } - - /* - * Workaround for R4600 bug. Explanation see above. - */ - *(volatile unsigned long *)KSEG1; - - /* - * Ok, we only have to invalidate parts of the cache. - */ - a = addr & dcache_line_mask; - end = (addr + size) & dcache_line_mask; - while (1) { - cache(Hit_Writeback_Inv_D, a, 0, &&fault); - if (a == end) break; - a += dcache_line_size; - } -fault: - return; -} - -extern inline void -flush_i_cache(unsigned long addr, unsigned long size) -{ - unsigned long end; - unsigned long a; - - iflushes++; - if (1 || size >= icache_size) { - inv_i_cache(); - return; - } - - /* - * Ok, we only have to invalidate parts of the cache. - */ - a = addr & icache_line_mask; - end = (addr + size) & dcache_line_mask; - while (1) { - cache(Hit_Invalidate_I, a, 0, &&fault); - if (a == end) break; - a += icache_line_size; - } -fault: - return; -} - -asmlinkage void -mips3_cacheflush(unsigned long addr, unsigned long size, unsigned int flags) -{ - if (!(flags & CF_ALL)) - printk("mips3_cacheflush called without cachetype parameter\n"); - if (!(flags & CF_VIRTUAL)) - return; /* Nothing to do */ - if (flags & CF_DCACHE) - flush_d_cache(addr, size); - if (flags & CF_ICACHE) - flush_i_cache(addr, size); -} - -/* Going away. */ -asmlinkage void fd_cacheflush(unsigned long addr, unsigned long size) -{ - cacheflush(addr, size, CF_DCACHE|CF_VIRTUAL); -} - -void mips3_cache_init(void) -{ - extern asmlinkage void handle_vcei(void); - extern asmlinkage void handle_vced(void); - unsigned int c0_config = read_32bit_cp0_register(CP0_CONFIG); - - switch (mips_cputype) { - case CPU_R4000MC: case CPU_R4400MC: - case CPU_R4000SC: case CPU_R4400SC: - /* - * Handlers not implemented yet. - */ - set_except_vector(14, handle_vcei); - set_except_vector(31, handle_vced); - break; - default: - } - - /* - * Which CPU are we running on? 
There are different styles - * of primary caches in the MIPS R4xx0 CPUs. - */ - switch (mips_cputype) { - case CPU_R4000MC: case CPU_R4400MC: - case CPU_R4000SC: case CPU_R4400SC: - case CPU_R4000PC: case CPU_R4400PC: - inv_i_cache = r4000_inv_i_cache; - wb_inv_d_cache = r4000_wb_inv_d_cache; - break; - case CPU_R4600: case CPU_R4700: - inv_i_cache = r4600_inv_i_cache; - wb_inv_d_cache = r4600_wb_inv_d_cache; - break; - default: - panic("Don't know about cache type ..."); - } - cacheflush = mips3_cacheflush; - - /* - * Find the size of primary instruction and data caches. - * For most CPUs these sizes are the same. - */ - dcache_size = 1 << (12 + ((c0_config >> 6) & 7)); - icache_size = 1 << (12 + ((c0_config >> 9) & 7)); - page_colour_mask = (dcache_size - 1) & ~(PAGE_SIZE - 1); - - /* - * Cache line sizes - */ - dcache_line_size = (c0_config & CONFIG_DB) ? 32 : 16; - dcache_line_mask = ~(dcache_line_size - 1); - icache_line_size = (c0_config & CONFIG_IB) ? 32 : 16; - icache_line_mask = ~(icache_line_size - 1); - - printk("Primary D-cache size %dkb bytes, %d byte lines.\n", - dcache_size >> 10, dcache_line_size); - printk("Primary I-cache size %dkb bytes, %d byte lines.\n", - icache_size >> 10, icache_line_size); - - /* - * Second level cache. - * FIXME ... - */ - if (!(c0_config & CONFIG_SC)) { - printk("S-cache detected. This type of of cache is not " - "supported yet.\n"); - } -} diff --git a/arch/mips/mips3/cpu.c b/arch/mips/mips3/cpu.c deleted file mode 100644 index cd8a293e5..000000000 --- a/arch/mips/mips3/cpu.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/sched.h> - -#include <asm/bootinfo.h> -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/processor.h> -#include <asm/system.h> - -extern asmlinkage void mips3_cacheflush(unsigned long addr, unsigned long nbytes, unsigned int flags); - -void (*mips_cache_init)(void); -void (*switch_to_user_mode)(struct pt_regs *regs); - -static void -mips3_switch_to_user_mode(struct pt_regs *regs) -{ - regs->cp0_status = (regs->cp0_status & ~(ST0_CU0|ST0_KSU)) | KSU_USER; -} - -unsigned long (*thread_saved_pc)(struct thread_struct *t); - -/* - * Return saved PC of a blocked thread. - */ -static unsigned long mips3_thread_saved_pc(struct thread_struct *t) -{ - return ((unsigned long long *)(unsigned long)t->reg29)[11]; -} - -unsigned long (*get_wchan)(struct task_struct *p); - -static unsigned long mips3_get_wchan(struct task_struct *p) -{ - /* - * This one depends on the frame size of schedule(). Do a - * "disass schedule" in gdb to find the frame size. Also, the - * code assumes that sleep_on() follows immediately after - * interruptible_sleep_on() and that add_timer() follows - * immediately after interruptible_sleep(). Ugly, isn't it? - * Maybe adding a wchan field to task_struct would be better, - * after all... 
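For reference, mips3_cache_init above recovers the primary cache geometry straight from c0_config: bits 8:6 encode log2 of the D-cache size minus 12, bits 11:9 do the same for the I-cache, and the DB/IB bits select 32-byte instead of 16-byte lines. A small self-contained decoder using exactly those fields follows; the sample register value in main() is made up for illustration.

#include <stdio.h>

#define CONFIG_DB (1u << 4)	/* 32-byte D-cache lines when set */
#define CONFIG_IB (1u << 5)	/* 32-byte I-cache lines when set */

static void decode_config(unsigned int c0_config)
{
	unsigned int dcache_size = 1u << (12 + ((c0_config >> 6) & 7));
	unsigned int icache_size = 1u << (12 + ((c0_config >> 9) & 7));
	unsigned int dline = (c0_config & CONFIG_DB) ? 32 : 16;
	unsigned int iline = (c0_config & CONFIG_IB) ? 32 : 16;

	printf("D-cache %ukB, %u byte lines; I-cache %ukB, %u byte lines\n",
	       dcache_size >> 10, dline, icache_size >> 10, iline);
}

int main(void)
{
	/* Illustrative value: 8kB caches with 32-byte lines (DB and IB set). */
	decode_config(CONFIG_DB | CONFIG_IB | (1u << 6) | (1u << 9));
	return 0;
}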
- */ - unsigned long schedule_frame; - unsigned long pc; - - pc = thread_saved_pc(&p->tss); - if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) { - schedule_frame = ((unsigned long long *)(long)p->tss.reg30)[10]; - return (unsigned long)((unsigned long long *)schedule_frame)[9]; - } - return pc; -} - -void (*pgd_init)(unsigned long page); -void (*copy_page)(unsigned long to, unsigned long from); -asmlinkage void (*restore_fp_context)(struct sigcontext *sc); -asmlinkage void (*save_fp_context)(struct sigcontext *sc); - -void -mips3_cpu_init(void) -{ - extern void mips3_cache_init(void); - extern void mips3_pgd_init_32byte_lines(unsigned long page); - extern void mips3_pgd_init_16byte_lines(unsigned long page); - extern void mips3_clear_page_32byte_lines(unsigned long page); - extern void mips3_clear_page_16byte_lines(unsigned long page); - extern void mips3_copy_page_32byte_lines(unsigned long to, unsigned long from); - extern void mips3_copy_page_16byte_lines(unsigned long to, unsigned long from); - extern void mips3_copy_page(unsigned long to, unsigned long from); - extern asmlinkage void mips3_restore_fp_context(struct sigcontext *sc); - extern asmlinkage void mips3_save_fp_context(struct sigcontext *sc); - - mips_cache_init = mips3_cache_init; - if (read_32bit_cp0_register(CP0_CONFIG) & CONFIG_DB) { - pgd_init = mips3_pgd_init_32byte_lines; - clear_page = mips3_clear_page_32byte_lines; - copy_page = mips3_copy_page_32byte_lines; - } else { - pgd_init = mips3_pgd_init_16byte_lines; - clear_page = mips3_clear_page_16byte_lines; - copy_page = mips3_copy_page_16byte_lines; - } - switch_to_user_mode = mips3_switch_to_user_mode; - thread_saved_pc = mips3_thread_saved_pc; - get_wchan = mips3_get_wchan; - restore_fp_context = mips3_restore_fp_context; - save_fp_context = mips3_save_fp_context; -} diff --git a/arch/mips/mips3/fp-context.S b/arch/mips/mips3/fp-context.S deleted file mode 100644 index 5e04aa61f..000000000 --- a/arch/mips/mips3/fp-context.S +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Save/restore floating point context for signal handlers. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1996 by Ralf Baechle - */ -#include <asm/asm.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/regdef.h> -#include <asm/sigcontext.h> - -#define SDC1(r,m) \ -7: sdc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define SW(r,m) \ -7: sw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LDC1(r,m) \ -7: ldc1 r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - -#define LW(r,m) \ -7: lw r,m; \ - .section __ex_table,"a"; \ - PTR 7b,bad_stack; \ - .text - - .set noreorder -/* - * Save floating point context - */ -LEAF(mips3_save_fp_context) - mfc0 t1,CP0_STATUS - sll t2,t1,2 - bgez t2,2f - sll t2,t1,5 # delay slot - bgez t2,1f - cfc1 t1,fcr31 # delay slot - /* - * Store the 16 odd double precision registers - */ - SDC1 ($f1,(SC_FPREGS+8)(a0)) - SDC1 ($f3,(SC_FPREGS+24)(a0)) - SDC1 ($f5,(SC_FPREGS+40)(a0)) - SDC1 ($f7,(SC_FPREGS+56)(a0)) - SDC1 ($f9,(SC_FPREGS+72)(a0)) - SDC1 ($f11,(SC_FPREGS+88)(a0)) - SDC1 ($f13,(SC_FPREGS+104)(a0)) - SDC1 ($f15,(SC_FPREGS+120)(a0)) - SDC1 ($f17,(SC_FPREGS+136)(a0)) - SDC1 ($f19,(SC_FPREGS+152)(a0)) - SDC1 ($f21,(SC_FPREGS+168)(a0)) - SDC1 ($f23,(SC_FPREGS+184)(a0)) - SDC1 ($f25,(SC_FPREGS+200)(a0)) - SDC1 ($f27,(SC_FPREGS+216)(a0)) - SDC1 ($f29,(SC_FPREGS+232)(a0)) - SDC1 ($f31,(SC_FPREGS+248)(a0)) - - /* - * Store the 16 even double precision registers - */ -1: SDC1 ($f0,(SC_FPREGS+0)(a0)) - SDC1 ($f2,(SC_FPREGS+16)(a0)) - SDC1 ($f4,(SC_FPREGS+32)(a0)) - SDC1 ($f6,(SC_FPREGS+48)(a0)) - SDC1 ($f8,(SC_FPREGS+64)(a0)) - SDC1 ($f10,(SC_FPREGS+80)(a0)) - SDC1 ($f12,(SC_FPREGS+96)(a0)) - SDC1 ($f14,(SC_FPREGS+112)(a0)) - SDC1 ($f16,(SC_FPREGS+128)(a0)) - SDC1 ($f18,(SC_FPREGS+144)(a0)) - SDC1 ($f20,(SC_FPREGS+160)(a0)) - SDC1 ($f22,(SC_FPREGS+176)(a0)) - SDC1 ($f24,(SC_FPREGS+192)(a0)) - SDC1 ($f26,(SC_FPREGS+208)(a0)) - SDC1 ($f28,(SC_FPREGS+224)(a0)) - SDC1 ($f30,(SC_FPREGS+240)(a0)) - SW t1,SC_FPC_CSR(a0) - cfc1 t0,$0 # implementation/version - jr ra - .set nomacro - SW t1,SC_FPC_EIR(a0) # delay slot - .set macro - -2: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips3_save_fp_context) - -/* - * Restore fpu state: - * - fp gp registers - * - cp1 status/control register - * - * We base the decission which registers to restore from the signal stack - * frame on the current content of c0_status, not on the content of the - * stack frame which might have been changed by the user. - */ -LEAF(mips3_restore_fp_context) - mfc0 t1,CP0_STATUS - sll t0,t1,2 - bgez t0,2f - sll t0,t1,5 # delay slot - bgez t0,1f - LW t0,SC_FPC_CSR(a0) # delay slot - /* - * Restore the 16 odd double precision registers only - * when enabled in the cp0 status register. - */ - LDC1 ($f1,(SC_FPREGS+8)(a0)) - LDC1 ($f3,(SC_FPREGS+24)(a0)) - LDC1 ($f5,(SC_FPREGS+40)(a0)) - LDC1 ($f7,(SC_FPREGS+56)(a0)) - LDC1 ($f9,(SC_FPREGS+72)(a0)) - LDC1 ($f11,(SC_FPREGS+88)(a0)) - LDC1 ($f13,(SC_FPREGS+104)(a0)) - LDC1 ($f15,(SC_FPREGS+120)(a0)) - LDC1 ($f17,(SC_FPREGS+136)(a0)) - LDC1 ($f19,(SC_FPREGS+152)(a0)) - LDC1 ($f21,(SC_FPREGS+168)(a0)) - LDC1 ($f23,(SC_FPREGS+184)(a0)) - LDC1 ($f25,(SC_FPREGS+200)(a0)) - LDC1 ($f27,(SC_FPREGS+216)(a0)) - LDC1 ($f29,(SC_FPREGS+232)(a0)) - LDC1 ($f31,(SC_FPREGS+248)(a0)) - - /* - * Restore the 16 even double precision registers - * when cp1 was enabled in the cp0 status register. 
- */ -1: LDC1 ($f0,(SC_FPREGS+0)(a0)) - LDC1 ($f2,(SC_FPREGS+16)(a0)) - LDC1 ($f4,(SC_FPREGS+32)(a0)) - LDC1 ($f6,(SC_FPREGS+48)(a0)) - LDC1 ($f8,(SC_FPREGS+64)(a0)) - LDC1 ($f10,(SC_FPREGS+80)(a0)) - LDC1 ($f12,(SC_FPREGS+96)(a0)) - LDC1 ($f14,(SC_FPREGS+112)(a0)) - LDC1 ($f16,(SC_FPREGS+128)(a0)) - LDC1 ($f18,(SC_FPREGS+144)(a0)) - LDC1 ($f20,(SC_FPREGS+160)(a0)) - LDC1 ($f22,(SC_FPREGS+176)(a0)) - LDC1 ($f24,(SC_FPREGS+192)(a0)) - LDC1 ($f26,(SC_FPREGS+208)(a0)) - LDC1 ($f28,(SC_FPREGS+224)(a0)) - LDC1 ($f30,(SC_FPREGS+240)(a0)) - jr ra - .set nomacro - ctc1 t0,fcr31 # delay slot - .set macro - -2: jr ra - .set nomacro - nop # delay slot - .set macro - END(mips3_restore_fp_context) - .set reorder diff --git a/arch/mips/mips3/memcpy.S b/arch/mips/mips3/memcpy.S deleted file mode 100644 index 6f03032a6..000000000 --- a/arch/mips/mips3/memcpy.S +++ /dev/null @@ -1,185 +0,0 @@ -/* - * arch/mips/mips3/memcpy.S - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 1996 by Ralf Baechle - * - * Less stupid memcpy/user_copy implementation for 64 bit MIPS CPUs. - * Much faster than the old memcpy but definately work in progress. - * The list of tricks for a good memcpy is long ... - */ -#include <asm/asm.h> -#include <asm/regdef.h> -#include <asm/mipsregs.h> - -#define BLOCK_SIZE (SZREG*4) - -#define SWAP(x,y) \ - subu x,x,y; \ - addu y,x,y; \ - subu x,y,x - -#define EX(addr,handler) \ - .section __ex_table,"a"; \ - PTR addr, handler; \ - .text -#define UEX(addr,handler) \ - EX(addr,handler); \ - EX(addr+4,handler) - - .set noreorder - .set noat - -/* ---------------------------------------------------------------------- */ - -not_dw_aligned: -/* - * At least one address is missaligned. - * Let's see if we can fix the alignment. - */ - LONG_SUBU v1,zero,a0 - andi v1,SZREG-1 - sltu t0,v0,v1 - MOVN(v1,v0,t0) - beqz v1,still_not_aligned - LONG_ADDU v1,a0 # delay slot -1: lb $1,(a1) - EX(1b, fault) - LONG_ADDIU a1,1 -2: sb $1,(a0) - EX(2b, fault) - LONG_ADDIU a0,1 - bne a0,v1,1b - LONG_SUBU v0,1 # delay slot - -/* - * Ok, now the destination address is 8-byte aligned. - * Is the source also aligned? - */ - andi t0,a1,SZREG-1 - beqz t0,align8 # fine ... - -/* - * Bad. We could only fix the alignment of the destination address. - * Now let's copy in the usual BLOCK_SIZE byte blocks using unaligned - * load and aligned stores. 
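For reference, the copy loop above works in three stages: byte copies until the destination is doubleword aligned, BLOCK_SIZE chunks using unaligned loads and aligned stores when the source cannot also be aligned, and a bytewise tail. A portable C approximation of the same staging follows; memcpy() stands in for the uld/sd pairs, and the user-access fault fixup handled by the __ex_table entries is deliberately left out.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE (4 * sizeof(uint64_t))	/* SZREG*4 on 64-bit MIPS */

static void *copy_blocks(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Stage 1: byte copies until the destination is 8-byte aligned. */
	while (n && ((uintptr_t)d & (sizeof(uint64_t) - 1))) {
		*d++ = *s++;
		n--;
	}
	/* Stage 2: whole blocks; memcpy() models the unaligned loads and
	 * aligned stores of the assembly's inner loop. */
	while (n >= BLOCK_SIZE) {
		memcpy(d, s, BLOCK_SIZE);
		d += BLOCK_SIZE;
		s += BLOCK_SIZE;
		n -= BLOCK_SIZE;
	}
	/* Stage 3: whatever is left over, again bytewise. */
	while (n--)
		*d++ = *s++;
	return dst;
}

int main(void)
{
	char src[67], dst[67];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (char)i;
	copy_blocks(dst, src, sizeof(src));
	return memcmp(dst, src, sizeof(dst)) ? 1 : 0;
}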
- */ -still_not_aligned: - ori v1,v0,BLOCK_SIZE-1 # delay slot - xori v1,BLOCK_SIZE-1 - beqz v1,copy_left_over - nop # delay slot - LONG_SUBU v0,v1 - LONG_ADDU v1,a0 - -1: uld t0,(a1) - UEX(1b, fault) -2: uld t1,8(a1) - UEX(2b, fault) -2: uld t2,16(a1) - UEX(2b, fault) -2: uld t3,24(a1) - UEX(2b, fault) -2: sd t0,(a0) - EX(2b, fault) -2: sd t1,8(a0) - EX(2b, fault_plus_one_reg) -2: sd t2,16(a0) - EX(2b, fault_plus_two_reg) -2: sd t3,24(a0) - EX(2b, fault_plus_three_reg) - LONG_ADDIU a0,BLOCK_SIZE - bne a0,v1,1b - LONG_ADDIU a1,BLOCK_SIZE # delay slot -9: - b copy_left_over # < BLOCK_SIZE bytes left - nop # delay slot - -/* ---------------------------------------------------------------------- */ - -LEAF(__copy_user) - - or t1,a0,a1 - andi t1,SZREG-1 - bnez t1,not_dw_aligned - move v0,a2 # delay slot - -align8: - ori v1,v0,BLOCK_SIZE-1 - xori v1,BLOCK_SIZE-1 - beqz v1,copy_left_over - nop # delay slot - LONG_SUBU v0,v1 - LONG_ADDU v1,a0 - -1: ld t0,(a1) - EX(1b, fault) -2: ld t1,8(a1) - EX(2b, fault) -2: ld t2,16(a1) - EX(2b, fault) -2: ld t3,24(a1) - EX(2b, fault) -2: sd t0,(a0) - EX(2b, fault) -2: sd t1,8(a0) - EX(2b, fault_plus_one_reg) -2: sd t2,16(a0) - EX(2b, fault_plus_two_reg) -2: sd t3,24(a0) - EX(2b, fault_plus_three_reg) - LONG_ADDIU a0,BLOCK_SIZE - bne a0,v1,1b - LONG_ADDIU a1,BLOCK_SIZE # delay slot -9: - -/* - * We've got upto 31 bytes left to copy ... - */ -copy_left_over: - beqz v0,3f - nop # delay slot -1: lb $1,(a1) - EX(1b, fault) - LONG_ADDIU a1,1 -2: sb $1,(a0) - EX(2b, fault) - LONG_SUBU v0,1 - bnez v0,1b - LONG_ADDIU a0,1 -3: jr ra - nop # delay slot - - END(__copy_user) - .set at - .set reorder - -/* ---------------------------------------------------------------------- */ - -/* - * Access fault. The number of not copied bytes is in v0. We have to - * correct the number of the not copied bytes in v0 in case of a access - * fault in an unrolled loop, then return. - */ - -fault: jr ra -fault_plus_one_reg: LONG_ADDIU v0,SZREG - jr ra -fault_plus_two_reg: LONG_ADDIU v0,SZREG*2 - jr ra -fault_plus_three_reg: LONG_ADDIU v0,SZREG*3 - jr ra - -/* ---------------------------------------------------------------------- */ - -/* - * For now we use __copy_user for __memcpy, too. This is efficient (one - * instruction penatly) and smaller but adds unwanted error checking we don't - * need. This hopefully doesn't cover any bugs. The memcpy() wrapper in - * <asm/string.h> takes care of the return value in a way GCC can optimize. - */ - .globl __memcpy -__memcpy = __copy_user diff --git a/arch/mips/mips3/memset.c b/arch/mips/mips3/memset.c deleted file mode 100644 index e92a0907f..000000000 --- a/arch/mips/mips3/memset.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * Less stupid memset for 64 bit MIPS CPUs. 
- */ -#include <linux/linkage.h> -#include <linux/string.h> - -static void __inline__ b_memset(void *s, unsigned long long c, size_t count) -{ - unsigned char *p = s; - - while(count--) - *(p++) = c; -} - -static void __inline__ dw_memset(void *s, unsigned long long c, size_t count) -{ - unsigned long long *p = s; - - count >>= 3; - while(count--) - *(p++) = c; -} - -asm( ".globl\t__generic_memset_b\n\t" - ".align\t2\n\t" - ".type\t__generic_memset_b,@function\n\t" - ".ent\t__generic_memset_b,0\n\t" - ".frame\t$29,0,$31\n" - "__generic_memset_b:\n\t" - "andi\t$5,0xff\n\t" - "dsll\t$2,$5,8\n\t" - "or\t$5,$2\n\t" - "dsll\t$2,$5,16\n\t" - "or\t$5,$2\n\t" - "dsll32\t$2,$5,0\n\t" - "or\t$5,$2\n\t" - ".end\t__generic_memset_b\n\t" - ".size\t__generic_memset_b,.-t__generic_memset_b"); - -/* - * Fill small area bytewise. For big areas fill the source bytewise - * until the pointer is doubleword aligned, then fill in doublewords. - * Fill the rest again in single bytes. - */ -void __generic_memset_dw(void *s, unsigned long long c, size_t count) -{ - unsigned long i; - - /* - * Fill small areas bytewise. - */ - if (count <= 16) { - b_memset(s, c, count); - return; - } - - /* - * Pad for 8 byte boundary - */ - i = 8 - ((unsigned long)s & 7); - b_memset(s, c, i); - s += i; - count -= i; - - /* - * Now start filling with aligned doublewords - */ - dw_memset(s, c, count); - s += (count | 7) ^ 7; - count &= 7; - - /* - * And do what ever is left over again with single bytes. - */ - b_memset(s, c, count); -} diff --git a/arch/mips/mips3/pagetables.c b/arch/mips/mips3/pagetables.c deleted file mode 100644 index e8cb83d03..000000000 --- a/arch/mips/mips3/pagetables.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - * - * Functions that manipulate entire pages. - * - * Not nice to have all these functions in two versions for cpus with - * different cache line size but it seems to be by far the fastest thing - * to schedule the cache instructions immediately before the store - * instructions. For example using the clear_page() version for 16 byte - * lines on machine with 32 byte lines gives a measured penalty of - * ~1280 cycles per page. - */ -#include <linux/mm.h> -#include <asm/cache.h> -#include <asm/mipsconfig.h> -#include <asm/page.h> -#include <asm/pgtable.h> - -extern unsigned int dflushes; - -/* - * Initialize new page directory with pointers to invalid ptes. - */ -void mips3_pgd_init_32byte_lines(unsigned long page) -{ - unsigned long dummy1, dummy2; - - /* - * We generate dirty lines in the datacache, overwrite them - * then writeback the cache. 
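For reference, the tail handling in __generic_memset_dw above leans on a small bit trick: s += (count | 7) ^ 7 advances the pointer by count rounded down to a multiple of eight, while count &= 7 keeps the remainder for the final byte loop. A short self-contained check of that arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* For any count, (count | 7) ^ 7 is count rounded down to a multiple
	 * of 8 and count & 7 is the remainder -- the two pieces consumed by
	 * the doubleword loop and the byte tail of __generic_memset_dw. */
	unsigned long count;

	for (count = 0; count < 64; count++) {
		unsigned long body = (count | 7) ^ 7;
		unsigned long tail = count & 7;

		assert(body % 8 == 0);
		assert(body + tail == count);
	}
	printf("tail arithmetic checks out for counts 0..63\n");
	return 0;
}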
- */ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "dsll32\t$1,%2,0\n\t" - "dsrl32\t%2,$1,0\n\t" - "or\t%2,$1\n" - "1:\tcache\t%5,(%0)\n\t" - "sd\t%2,(%0)\n\t" - "sd\t%2,8(%0)\n\t" - "sd\t%2,16(%0)\n\t" - "sd\t%2,24(%0)\n\t" - "daddiu\t%0,64\n\t" - "cache\t%5,-32(%0)\n\t" - "sd\t%2,-32(%0)\n\t" - "sd\t%2,-24(%0)\n\t" - "sd\t%2,-16(%0)\n\t" - "bne\t%0,%1,1b\n\t" - "sd\t%2,-8(%0)\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=&r" (dummy1), - "=&r" (dummy2) - :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) | - _PAGE_TABLE), - "0" (page), - "1" (page + PAGE_SIZE - 64), - "i" (Create_Dirty_Excl_D) - :"$1"); - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "1:\tcache\t%3,(%0)\n\t" - "bne\t%0,%1,1b\n\t" - "daddiu\t%0,32\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (dummy1) - :"0" (page), - "r" (page + PAGE_SIZE - 32), - "i" (Hit_Writeback_D)); - dflushes++; - -#if 0 - cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_VIRTUAL); -#endif -} - -/* - * Initialize new page directory with pointers to invalid ptes - */ -void mips3_pgd_init_16byte_lines(unsigned long page) -{ - unsigned long dummy1, dummy2; - - /* - * We generate dirty lines in the datacache, overwrite them - * then writeback the cache. - */ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "dsll32\t$1,%2,0\n\t" - "dsrl32\t%2,$1,0\n\t" - "or\t%2,$1\n" - "1:\tcache\t%5,(%0)\n\t" - "sd\t%2,(%0)\n\t" - "sd\t%2,8(%0)\n\t" - "cache\t%5,16(%0)\n\t" - "sd\t%2,16(%0)\n\t" - "sd\t%2,24(%0)\n\t" - "daddiu\t%0,64\n\t" - "cache\t%5,-32(%0)\n\t" - "sd\t%2,-32(%0)\n\t" - "sd\t%2,-24(%0)\n\t" - "cache\t%5,-16(%0)\n\t" - "sd\t%2,-16(%0)\n\t" - "bne\t%0,%1,1b\n\t" - "sd\t%2,-8(%0)\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=&r" (dummy1), - "=&r" (dummy2) - :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) | - _PAGE_TABLE), - "0" (page), - "1" (page + PAGE_SIZE - 64), - "i" (Create_Dirty_Excl_D) - :"$1"); - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "1:\tcache\t%3,(%0)\n\t" - "bne\t%0,%1,1b\n\t" - "daddiu\t%0,16\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (dummy1) - :"0" (page), - "r" (page + PAGE_SIZE - 16), - "i" (Hit_Writeback_D)); - dflushes++; - -#if 0 - cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_VIRTUAL); -#endif -} - -/* - * Zero an entire page. 
- */ - -void (*clear_page)(unsigned long page); - -void mips3_clear_page_32byte_lines(unsigned long page) -{ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "daddiu\t$1,%0,%2\n" - "1:\tcache\t%3,(%0)\n\t" - "sd\t$0,(%0)\n\t" - "sd\t$0,8(%0)\n\t" - "sd\t$0,16(%0)\n\t" - "sd\t$0,24(%0)\n\t" - "daddiu\t%0,64\n\t" - "cache\t%3,-32(%0)\n\t" - "sd\t$0,-32(%0)\n\t" - "sd\t$0,-24(%0)\n\t" - "sd\t$0,-16(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sd\t$0,-8(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (page) - :"0" (page), - "I" (PAGE_SIZE), - "i" (Create_Dirty_Excl_D) - :"$1","memory"); -} - -void mips3_clear_page_16byte_lines(unsigned long page) -{ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "daddiu\t$1,%0,%2\n" - "1:\tcache\t%3,(%0)\n\t" - "sd\t$0,(%0)\n\t" - "sd\t$0,8(%0)\n\t" - "cache\t%3,16(%0)\n\t" - "sd\t$0,16(%0)\n\t" - "sd\t$0,24(%0)\n\t" - "daddiu\t%0,64\n\t" - "cache\t%3,-32(%0)\n\t" - "sd\t$0,-32(%0)\n\t" - "sd\t$0,-24(%0)\n\t" - "cache\t%3,-16(%0)\n\t" - "sd\t$0,-16(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sd\t$0,-8(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (page) - :"0" (page), - "I" (PAGE_SIZE), - "i" (Create_Dirty_Excl_D) - :"$1","memory"); -} - -/* - * This is still inefficient. We only can do better if we know the - * virtual address where the copy will be accessed. - */ -void (*copy_page)(unsigned long to, unsigned long from); - -void mips3_copy_page_32byte_lines(unsigned long to, unsigned long from) -{ - unsigned long dummy1, dummy2; - unsigned long reg1, reg2, reg3, reg4; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "daddiu\t$1,%0,%8\n" - "1:\tcache\t%9,(%0)\n\t" - "ld\t%2,(%1)\n\t" - "ld\t%3,8(%1)\n\t" - "ld\t%4,16(%1)\n\t" - "ld\t%5,24(%1)\n\t" - "sd\t%2,(%0)\n\t" - "sd\t%3,8(%0)\n\t" - "sd\t%4,16(%0)\n\t" - "sd\t%5,24(%0)\n\t" - "cache\t%9,32(%0)\n\t" - "daddiu\t%0,64\n\t" - "daddiu\t%1,64\n\t" - "ld\t%2,-32(%1)\n\t" - "ld\t%3,-24(%1)\n\t" - "ld\t%4,-16(%1)\n\t" - "ld\t%5,-8(%1)\n\t" - "sd\t%2,-32(%0)\n\t" - "sd\t%3,-24(%0)\n\t" - "sd\t%4,-16(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sd\t%5,-8(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (dummy1), "=r" (dummy2), - "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4) - :"0" (to), "1" (from), - "I" (PAGE_SIZE), - "i" (Create_Dirty_Excl_D)); -} - -void mips3_copy_page_16byte_lines(unsigned long to, unsigned long from) -{ - unsigned dummy1, dummy2; - unsigned long reg1, reg2; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "daddiu\t$1,%0,%6\n" - "1:\tcache\t%7,(%0)\n\t" - "ld\t%2,(%1)\n\t" - "ld\t%3,8(%1)\n\t" - "sd\t%2,(%0)\n\t" - "sd\t%3,8(%0)\n\t" - "cache\t%7,16(%0)\n\t" - "ld\t%2,16(%1)\n\t" - "ld\t%3,24(%1)\n\t" - "sd\t%2,16(%0)\n\t" - "sd\t%3,24(%0)\n\t" - "cache\t%7,32(%0)\n\t" - "daddiu\t%0,64\n\t" - "daddiu\t%1,64\n\t" - "ld\t%2,-32(%1)\n\t" - "ld\t%3,-24(%1)\n\t" - "sd\t%2,-32(%0)\n\t" - "sd\t%3,-24(%0)\n\t" - "cache\t%7,-16(%0)\n\t" - "ld\t%2,-16(%1)\n\t" - "ld\t%3,-8(%1)\n\t" - "sd\t%2,-16(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sd\t%3,-8(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (dummy1), "=r" (dummy2), - "=&r" (reg1), "=&r" (reg2) - :"0" (to), "1" (from), - "I" (PAGE_SIZE), - "i" (Create_Dirty_Excl_D)); -} diff --git a/arch/mips/mips3/r4xx0.S b/arch/mips/mips3/r4xx0.S deleted file mode 100644 index a282998ac..000000000 --- a/arch/mips/mips3/r4xx0.S +++ /dev/null @@ -1,841 +0,0 @@ -/* - * arch/mips/mips3/r4xx0.S - * - * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse - * - * This file contains most 
of the R4xx0 specific routines. Due to the - * similarities this should hopefully also be fine for the R10000. For - * now we especially support the R10000 by not invalidating entries out of - * the TLB before calling the C handlers. - * - * This code is evil magic. Read appendix F (coprocessor 0 hazards) of - * all R4xx0 manuals and think about that MIPS means "Microprocessor without - * Interlocked Pipeline Stages" before you even think about changing this code! - * - * CAVEATS: The R4000/R4400/R4600 manual say that the operation of a memory - * reference associated with a instruction immediately after a tlpb - * instruction is undefined. It seems that the R4600 v2.0 also - * failes to handle the case where a tlbp instruction follows a - * (mapped???) memory reference. In this case c0_index gets - * overwritten by some value which I suppose to be the entry - * mapping the associated instruction's memory reference. - * - * This needs a complete overhaul; it was written for a Linux kernel that - * handled it's page tables the old i386 way ... - */ -#include <linux/config.h> - -#include <asm/asm.h> -#include <asm/bootinfo.h> -#include <asm/cache.h> -#include <asm/fpregdef.h> -#include <asm/mipsconfig.h> -#include <asm/mipsregs.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> - -/* - * Use this to activate extra TLB error checking - */ -#define CONF_DEBUG_TLB - -/* - * Use this to activate extra TLB profiling code - * (not implemented yet) - */ -#undef CONF_PROFILE_TLB - -#ifdef __SMP__ -#error "Fix this for SMP" -#else -#define current current_set -#endif - -MODE_ALIAS = 0x0016 # uncachable - - .text - .set noreorder - - .align 5 - NESTED(handle_tlbl, FR_SIZE, sp) - .set noat - /* - * Check whether this is a refill or an invalid exception - * - * NOTE: Some MIPS manuals say that the R4x00 sets the - * BadVAddr only when EXL == 0. This is wrong - BadVAddr - * is being set for all Reload, Invalid and Modified - * exceptions. - */ - mfc0 k0,CP0_BADVADDR - mfc0 k1,CP0_ENTRYHI - ori k0,0x1fff - xori k0,0x1fff - andi k1,0xff - or k0,k1 - mfc0 k1,CP0_ENTRYHI - mtc0 k0,CP0_ENTRYHI - nop # for R4[04]00 pipeline - nop - nop - nop - tlbp - nop - nop # for R4[04]00 pipeline - nop - mfc0 k0,CP0_INDEX - bgez k0,invalid_tlbl # bad addr in c0_badvaddr - mtc0 k1,CP0_ENTRYHI # delay slot - /* - * Damn... The next nop is required on my R4400PC V5.0, but - * I don't know why - at least there is no documented - * reason as for the others :-( - */ - nop - -#ifdef CONF_DEBUG_TLB - /* - * OK, this is a double fault. Let's see whether this is - * due to an invalid entry in the page_table. - */ - dmfc0 k0,CP0_BADVADDR - srl k0,12 - sll k0,2 - lui k1,%HI(TLBMAP) - addu k0,k1 - lw k1,(k0) - andi k1,(_PAGE_PRESENT|_PAGE_ACCESSED) - bnez k1,reload_pgd_entries - nop # delay slot - - .set noat - SAVE_ALL - .set at - PRINT("Double fault caused by invalid entries in pgd:\n") - dmfc0 a1,CP0_BADVADDR - PRINT("Double fault address : %016Lx\n") - dmfc0 a1,CP0_EPC - PRINT("c0_epc : %016Lx\n") - jal show_regs - move a0,sp - jal dump_tlb_nonwired - nop - dmfc0 a0,CP0_BADVADDR - jal dump_list_current - nop - .set noat - STI - .set at - PANIC("Corrupted pagedir") - .set noat - -reload_pgd_entries: -#endif /* CONF_DEBUG_TLB */ - - /* - * Load missing pair of entries from the pgd and return. 
- */ - dmfc0 k1,CP0_CONTEXT -// mfc0 k1,CP0_CONTEXT - dsra k1,1 - lwu k0,(k1) # Never causes nested exception - lwu k1,4(k1) - dsrl k0,6 # Convert to EntryLo format - dsrl k1,6 # Convert to EntryLo format - dmtc0 k0,CP0_ENTRYLO0 - dmtc0 k1,CP0_ENTRYLO1 - nop # for R4[04]00 pipeline - tlbwr - nop # for R4[04]00 pipeline - nop - nop - /* - * We don't know whether the original access was read or - * write, so return and see what happens... - */ - eret - - /* - * Handle invalid exception - * - * There are two possible causes for an invalid (tlbl) - * exception: - * 1) pages with present bit set but the valid bit clear - * 2) nonexistant pages - * Case one needs fast handling, therefore don't save - * registers yet. - * - * k0 contains c0_index. - */ -invalid_tlbl: -#ifdef CONFIG_TLB_SHUTDOWN - /* - * Remove entry so we don't need to care later - * For sake of the R4000 V2.2 pipeline the tlbwi insn - * has been moved down. Moving it around is juggling with - * explosives... - */ - lui k1,0x0008 - or k0,k1 - dsll k0,13 - dmtc0 k0,CP0_ENTRYHI - dmtc0 zero,CP0_ENTRYLO0 - dmtc0 zero,CP0_ENTRYLO1 -#endif - /* - * Test present bit in entry - */ - dmfc0 k0,CP0_BADVADDR - srl k0,12 - sll k0,2 -#ifdef CONFIG_TLB_SHUTDOWN - tlbwi # do not move! -#endif - lui k1,%HI(TLBMAP) - addu k0,k1 - lw k1,(k0) - andi k1,(_PAGE_PRESENT|_PAGE_READ) - xori k1,(_PAGE_PRESENT|_PAGE_READ) - bnez k1,nopage_tlbl - /* - * Present and read bits are set -> set valid and accessed bits - */ - lw k1,(k0) # delay slot - ori k1,(_PAGE_VALID|_PAGE_ACCESSED) - sw k1,(k0) - eret - - /* - * Page doesn't exist. Lots of work which is less important - * for speed needs to be done, so hand it all over to the - * kernel memory management routines. - */ -nopage_tlbl: SAVE_ALL - dmfc0 a2,CP0_BADVADDR - STI - REG_S sp,FR_ORIG_REG2(sp) - .set at - /* - * a0 (struct pt_regs *) regs - * a1 (unsigned long) 0 for read access - * a2 (unsigned long) faulting virtual address - */ - move a0,sp - jal do_page_fault - li a1,0 # delay slot - j ret_from_sys_call - nop # delay slot - END(handle_tlbl) - - .text - .align 5 - NESTED(handle_tlbs, FR_SIZE, sp) - .set noat - /* - * It is impossible that is a nested reload exception. - * Therefore this must be a invalid exception. - * Two possible cases: - * 1) Page exists but not dirty. - * 2) Page doesn't exist yet. Hand over to the kernel. - * - * Test whether present bit in entry is set - */ - dmfc0 k0,CP0_BADVADDR - srl k0,12 - sll k0,2 - lui k1,%HI(TLBMAP) - addu k0,k1 - lw k1,(k0) - nop - nop - tlbp # find faulting entry - nop - andi k1,(_PAGE_PRESENT|_PAGE_WRITE) - xori k1,(_PAGE_PRESENT|_PAGE_WRITE) - bnez k1,nopage_tlbs - /* - * Present and writable bits set: set accessed and dirty bits. - */ - lw k1,(k0) # delay slot - ori k1,k1,(_PAGE_ACCESSED|_PAGE_MODIFIED| \ - _PAGE_VALID|_PAGE_DIRTY) - sw k1,(k0) - /* - * Now reload the entry into the TLB - */ - ori k0,0x0004 - xori k0,0x0004 - lw k1,4(k0) - lw k0,(k0) - srl k1,6 - srl k0,6 - dmtc0 k1,CP0_ENTRYLO1 - dmtc0 k0,CP0_ENTRYLO0 - nop # for R4[04]00 pipeline - tlbwi - nop # for R4[04]00 pipeline - nop - nop - eret - - /* - * Page doesn't exist. Lots of work which is less important - * for speed needs to be done, so hand it all over to the - * kernel memory management routines. - */ -nopage_tlbs: -nowrite_mod: -#ifdef CONFIG_TLB_SHUTDOWN - /* - * Remove entry so we don't need to care later - */ - mfc0 k0,CP0_INDEX -#ifdef CONF_DEBUG_TLB - bgez k0,2f - nop - /* - * We got a tlbs exception but found no matching entry in - * the tlb. This should never happen. 
Paranoia makes us - * check it, though. - */ - SAVE_ALL - jal show_regs - move a0,sp - .set at - dmfc0 a1,CP0_BADVADDR - PRINT("c0_badvaddr == %016Lx\n") - mfc0 a1,CP0_INDEX - PRINT("c0_index == %08x\n") - dmfc0 a1,CP0_ENTRYHI - PRINT("c0_entryhi == %016Lx\n") - .set noat - STI - .set at - PANIC("Tlbs or tlbm exception with no matching entry in tlb") -1: j 1b - nop -2: -#endif /* CONF_DEBUG_TLB */ - lui k1,0x0008 - or k0,k1 - dsll k0,13 - dmtc0 k0,CP0_ENTRYHI - dmtc0 zero,CP0_ENTRYLO0 - dmtc0 zero,CP0_ENTRYLO1 - nop # for R4[04]00 pipeline - nop # R4000 V2.2 requires 4 NOPs - nop - nop - tlbwi -#endif /* CONFIG_TLB_SHUTDOWN */ - .set noat - SAVE_ALL - dmfc0 a2,CP0_BADVADDR - STI - REG_S sp,FR_ORIG_REG2(sp) - .set at - /* - * a0 (struct pt_regs *) regs - * a1 (unsigned long) 1 for write access - * a2 (unsigned long) faulting virtual address - */ - move a0,sp - jal do_page_fault - li a1,1 # delay slot - j ret_from_sys_call - nop # delay slot - END(handle_tlbs) - - .align 5 - NESTED(handle_mod, FR_SIZE, sp) - .set noat - /* - * Two possible cases: - * 1) Page is writable but not dirty -> set dirty and return - * 2) Page is not writable -> call C handler - */ - dmfc0 k0,CP0_BADVADDR - srl k0,12 - sll k0,2 - lui k1,%HI(TLBMAP) - addu k0,k1 - lw k1,(k0) - nop - nop - tlbp # find faulting entry - nop - andi k1,_PAGE_WRITE - beqz k1,nowrite_mod - /* - * Present and writable bits set: set accessed and dirty bits. - */ - lw k1,(k0) # delay slot - ori k1,(_PAGE_ACCESSED|_PAGE_DIRTY) - sw k1,(k0) - /* - * Now reload the entry into the tlb - */ - ori k0,0x0004 - xori k0,0x0004 - lw k1,4(k0) - lw k0,(k0) - srl k1,6 - srl k0,6 - dmtc0 k1,CP0_ENTRYLO1 - dmtc0 k0,CP0_ENTRYLO0 - nop # for R4[04]00 pipeline - nop - nop - tlbwi - nop # for R4[04]00 pipeline - nop - nop - eret - END(handle_mod) - .set at - -/* - * Flush the complete TLB - */ - .set noreorder - LEAF(tlbflush) - li t0,PM_4K - mtc0 t0,CP0_PAGEMASK - lw t0,mips_tlb_entries /* mips_tlb_enbtries is set */ - /* by bi_EarlySnarf() */ - dmtc0 zero,CP0_ENTRYLO0 - dmtc0 zero,CP0_ENTRYLO1 - mfc0 t2,CP0_WIRED -1: subu t0,1 - mtc0 t0,CP0_INDEX - lui t1,0x0008 - or t1,t0,t1 - dsll t1,13 - dmtc0 t1,CP0_ENTRYHI - bne t2,t0,1b - tlbwi # delay slot - jr ra - nop # delay slot - END(tlbflush) - -/* - * Flush a single entry from the TLB - * - * Parameters: a0 - unsigned long address - */ - .set noreorder - LEAF(tlbflush_page) - /* - * Step 1: Wipe out old TLB information. Not shure if - * we really need that step; call it paranoia ... - * In order to do that we need to disable interrupts. - */ - li t3,TLBMAP # then wait 3 cycles - ori t1,a0,0xfff # mask off low 12 bits - xori t1,0xfff - mfc0 t2,CP0_ENTRYHI # copy ASID into address - andi t2,0xff - or t2,t1 - mtc0 t2,CP0_ENTRYHI - srl t4,a0,12 # wait again three cycles - sll t4,t4,PTRLOG - dmtc0 zero,CP0_ENTRYLO0 - nop - tlbp # now query the TLB - nop - addu t3,t4 # wait another three cycles - ori t3,0xffff - xori t3,0xffff - mfc0 t1,CP0_INDEX - bltz t1,1f # No old entry? - dmtc0 zero,CP0_ENTRYLO1 - or t3,t1 # wait one cycle - tlbwi - /* - * But there still might be an entry for the pgd ... - */ -1: mtc0 t3,CP0_ENTRYHI - nop # wait 3 cycles - nop - nop - nop - tlbp # TLB lookup - nop - nop - nop - mfc0 t1,CP0_INDEX # wait 3 cycles - bltz t1,1f # No old entry? - nop - tlbwi # gotcha ... -1: jr ra - nop # delay slot - END(tlbflush_page) - -/* - * Code necessary to switch tasks on an Linux/MIPS machine. - * FIXME: We don't need to disable interrupts anymore. - * FIXME: Do some cache magic for faster loading/saving. 
- */ - .align 5 - LEAF(resume) - /* - * Current task's task_struct - */ - lui t5,%hi(current) - lw t0,%lo(current)(t5) - - /* - * Save status register - */ - mfc0 t1,CP0_STATUS - addu t0,a1 # Add tss offset - sw t1,TOFF_CP0_STATUS(t0) - - /* - * Disable interrupts - */ - ori t2,t1,0x1f - xori t2,0x1e - mtc0 t2,CP0_STATUS - - /* - * Save non-scratch registers - * All other registers have been saved on the kernel stack - */ - sd s0,TOFF_REG16(t0) - sd s1,TOFF_REG17(t0) - sd s2,TOFF_REG18(t0) - sd s3,TOFF_REG19(t0) - sd s4,TOFF_REG20(t0) - sd s5,TOFF_REG21(t0) - sd s6,TOFF_REG22(t0) - sd s7,TOFF_REG23(t0) - sd gp,TOFF_REG28(t0) - sd sp,TOFF_REG29(t0) - sd fp,TOFF_REG30(t0) - - /* - * Save floating point state - */ - sll t2,t1,2 - bgez t2,2f - sd ra,TOFF_REG31(t0) # delay slot - sll t2,t1,5 - bgez t2,1f - sdc1 $f0,(TOFF_FPU+0)(t0) # delay slot - /* - * Store the 16 odd double precision registers - */ - sdc1 $f1,(TOFF_FPU+8)(t0) - sdc1 $f3,(TOFF_FPU+24)(t0) - sdc1 $f5,(TOFF_FPU+40)(t0) - sdc1 $f7,(TOFF_FPU+56)(t0) - sdc1 $f9,(TOFF_FPU+72)(t0) - sdc1 $f11,(TOFF_FPU+88)(t0) - sdc1 $f13,(TOFF_FPU+104)(t0) - sdc1 $f15,(TOFF_FPU+120)(t0) - sdc1 $f17,(TOFF_FPU+136)(t0) - sdc1 $f19,(TOFF_FPU+152)(t0) - sdc1 $f21,(TOFF_FPU+168)(t0) - sdc1 $f23,(TOFF_FPU+184)(t0) - sdc1 $f25,(TOFF_FPU+200)(t0) - sdc1 $f27,(TOFF_FPU+216)(t0) - sdc1 $f29,(TOFF_FPU+232)(t0) - sdc1 $f31,(TOFF_FPU+248)(t0) - - /* - * Store the 16 even double precision registers - */ -1: cfc1 t1,fcr31 - sdc1 $f2,(TOFF_FPU+16)(t0) - sdc1 $f4,(TOFF_FPU+32)(t0) - sdc1 $f6,(TOFF_FPU+48)(t0) - sdc1 $f8,(TOFF_FPU+64)(t0) - sdc1 $f10,(TOFF_FPU+80)(t0) - sdc1 $f12,(TOFF_FPU+96)(t0) - sdc1 $f14,(TOFF_FPU+112)(t0) - sdc1 $f16,(TOFF_FPU+128)(t0) - sdc1 $f18,(TOFF_FPU+144)(t0) - sdc1 $f20,(TOFF_FPU+160)(t0) - sdc1 $f22,(TOFF_FPU+176)(t0) - sdc1 $f24,(TOFF_FPU+192)(t0) - sdc1 $f26,(TOFF_FPU+208)(t0) - sdc1 $f28,(TOFF_FPU+224)(t0) - sdc1 $f30,(TOFF_FPU+240)(t0) - sw t1,(TOFF_FPU+256)(t0) - - /* - * Switch current task - */ -2: sw a0,%lo(current)(t5) - addu a0,a1 # Add tss offset - - /* - * Switch address space - */ - - /* - * (Choose new ASID for process) - * This isn't really required, but would speed up - * context switching. - */ - - /* - * Switch the root pointer - */ - lw t0,TOFF_PG_DIR(a0) - li t1,TLB_ROOT - mtc0 t1,CP0_ENTRYHI - mtc0 zero,CP0_INDEX - srl t0,6 - ori t0,MODE_ALIAS - mtc0 t0,CP0_ENTRYLO0 - mtc0 zero,CP0_ENTRYLO1 - lw a2,TOFF_CP0_STATUS(a0) - - /* - * Flush tlb - * (probably not needed, doesn't clobber a0-a3) - */ - jal tlbflush - tlbwi # delay slot - - /* - * Restore fpu state: - * - cp0 status register bits - * - fp gp registers - * - cp1 status/control register - */ - ori t1,a2,1 # pipeline magic - xori t1,1 - mtc0 t1,CP0_STATUS - sll t0,a2,2 - bgez t0,2f - sll t0,a2,5 # delay slot - bgez t0,1f - ldc1 $f0,(TOFF_FPU+0)(a0) # delay slot - /* - * Restore the 16 odd double precision registers only - * when enabled in the cp0 status register. - */ - ldc1 $f1,(TOFF_FPU+8)(a0) - ldc1 $f3,(TOFF_FPU+24)(a0) - ldc1 $f5,(TOFF_FPU+40)(a0) - ldc1 $f7,(TOFF_FPU+56)(a0) - ldc1 $f9,(TOFF_FPU+72)(a0) - ldc1 $f11,(TOFF_FPU+88)(a0) - ldc1 $f13,(TOFF_FPU+104)(a0) - ldc1 $f15,(TOFF_FPU+120)(a0) - ldc1 $f17,(TOFF_FPU+136)(a0) - ldc1 $f19,(TOFF_FPU+152)(a0) - ldc1 $f21,(TOFF_FPU+168)(a0) - ldc1 $f23,(TOFF_FPU+184)(a0) - ldc1 $f25,(TOFF_FPU+200)(a0) - ldc1 $f27,(TOFF_FPU+216)(a0) - ldc1 $f29,(TOFF_FPU+232)(a0) - ldc1 $f31,(TOFF_FPU+248)(a0) - - /* - * Restore the 16 even double precision registers - * when cp1 was enabled in the cp0 status register. 
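 *
 * The "sll ,2" tests in this routine check the CU1 bit of c0_status
 * (is coprocessor 1 usable at all?), the "sll ,5" tests check the FR
 * bit (are the odd-numbered FP registers accessible?).  In outline:
 *
 *	if (status & CU1) {
 *		if (status & FR)
 *			handle $f1, $f3, ..., $f31;
 *		handle $f0, $f2, ..., $f30 and fcr31;
 *	}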
- */ -1: lw t0,(TOFF_FPU+256)(a0) - ldc1 $f2,(TOFF_FPU+16)(a0) - ldc1 $f4,(TOFF_FPU+32)(a0) - ldc1 $f6,(TOFF_FPU+48)(a0) - ldc1 $f8,(TOFF_FPU+64)(a0) - ldc1 $f10,(TOFF_FPU+80)(a0) - ldc1 $f12,(TOFF_FPU+96)(a0) - ldc1 $f14,(TOFF_FPU+112)(a0) - ldc1 $f16,(TOFF_FPU+128)(a0) - ldc1 $f18,(TOFF_FPU+144)(a0) - ldc1 $f20,(TOFF_FPU+160)(a0) - ldc1 $f22,(TOFF_FPU+176)(a0) - ldc1 $f24,(TOFF_FPU+192)(a0) - ldc1 $f26,(TOFF_FPU+208)(a0) - ldc1 $f28,(TOFF_FPU+224)(a0) - ldc1 $f30,(TOFF_FPU+240)(a0) - ctc1 t0,fcr31 - - /* - * Restore non-scratch registers - */ -2: ld s0,TOFF_REG16(a0) - ld s1,TOFF_REG17(a0) - ld s2,TOFF_REG18(a0) - ld s3,TOFF_REG19(a0) - ld s4,TOFF_REG20(a0) - ld s5,TOFF_REG21(a0) - ld s6,TOFF_REG22(a0) - ld s7,TOFF_REG23(a0) - ld gp,TOFF_REG28(a0) - ld sp,TOFF_REG29(a0) - ld fp,TOFF_REG30(a0) - ld ra,TOFF_REG31(a0) - - /* - * Restore status register - */ - lw t0,TOFF_KSP(a0) - sw t0,kernelsp - - jr ra - mtc0 a2,CP0_STATUS # delay slot - END(resume) - -/* - * Load a new root pointer into the tlb - */ - .set noreorder - LEAF(load_pgd) - /* - * Switch the root pointer - */ - mfc0 t0,CP0_STATUS - ori t1,t0,1 - xori t1,1 - mtc0 t1,CP0_STATUS - srl a0,6 - ori a0,MODE_ALIAS - li t1,TLB_ROOT - mtc0 t1,CP0_ENTRYHI - mtc0 zero,CP0_INDEX - mtc0 a0,CP0_ENTRYLO0 - mtc0 zero,CP0_ENTRYLO1 - mtc0 t0,CP0_STATUS - jr ra - tlbwi # delay slot - END(load_pgd) - -/* - * do_syscall calls the function in a1 with upto 7 arguments. If over - * four arguments are being requested, the additional arguments will - * be copied from the user stack pointed to by a0->reg29. - * - * FIXME: This function will fail for syscalls with more than four - * arguments from kernelspace. - * - * a0 (struct pt_regs *) pointer to user registers - * a1 (syscall_t) pointer to syscall to do - * a2 (int) number of arguments to syscall - */ -#if defined (__MIPSEB__) && defined (__mips64) -#define bo 4 -#else -#define bo 0 -#endif - .set reorder - .text -NESTED(do_syscalls, 64, sp) - LONG_SUBU sp,64 - REG_S ra,56(sp) - dsll a2,a2,PTRLOG - lw t1,dst(a2) - move t2,a1 - REG_L t0,FR_REG29(a0) # get old user stack pointer - jalr t1 - -7: lw t1,24+bo(t0) # parameter #7 from usp - REG_S t1,SZREG*6(sp) -6: lw t1,20+bo(t0) # parameter #6 from usp - REG_S t1,SZREG*5(sp) -5: lw t1,16+bo(t0) # parameter #5 from usp - REG_S t1,SZREG*4(sp) - -4: lw a3,FR_REG7+bo(a0) # 4 args -3: lw a2,FR_REG6+bo(a0) # 3 args -2: lw a1,FR_REG5+bo(a0) # 2 args -1: lw a0,FR_REG4+bo(a0) # 1 args - jalr t2 - ld ra,56(sp) - addiu sp,64 - jr ra -0: jalr t2 # 0 args, just pass a0 - ld ra,56(sp) - LONG_ADDIU sp,64 - jr ra - END(do_syscalls) - - .rdata - .align PTRLOG -dst: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b - - .section __ex_table,"a" - PTR 7b,bad_stack - PTR 6b,bad_stack - PTR 5b,bad_stack - .text - -#ifdef CONFIG_REVERSE - -/* - * Same as above but for processes running with reverse byte order. - * When exec(2)ing a file the system detects normal/reverse byteorder in - * an executable and - * - sets the appropriate vector for syscalls and other low level - * routines that depend of userspace byteorder. - * - set the reverse byteorder flag in c0_status - * - marks the process as reverse in the process table. 
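 *
 * The REG_SWAP macro below byte-swaps one 32-bit syscall argument;
 * for an unsigned 32-bit word x it is simply the usual swap (sketch):
 *
 *	x = (x << 24) | ((x & 0xff00) << 8) |
 *	    ((x >> 8) & 0xff00) | (x >> 24);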
- */ -#define REG_SWAP(r,t) \ - .set push; \ - .set noat; \ - sll $1,r,24; \ - andi t,r,0xff00; \ - sll t,$t,8; \ - or $1,t; \ - srl t,r,8; \ - andi t,$t,0xff00; \ - or $1,t; \ - srl r,r,24; \ - or $1,r; \ - .set pop - - .set reorder - .text -NESTED(do_syscalls_rev, 64, sp) - LONG_SUBU sp,64 - REG_S ra,56(sp) - dsll a2,a2,PTRLOG - lw t1,dst_rev(a2) - move t2,a1 - REG_L t0,FR_REG29(a0) # get old user stack pointer - jalr t1 - -7: lw t1,24+bo(t0) # parameter #7 from usp - REG_SWAP(t1,v0) # convert byteorder - REG_S t1,SZREG*6(sp) -6: lw t1,20+bo(t0) # parameter #6 from usp - REG_SWAP(t1,v0) # convert byteorder - REG_S t1,SZREG*5(sp) -5: lw t1,16+bo(t0) # parameter #5 from usp - REG_SWAP(t1,v0) # convert byteorder - REG_S t1,SZREG*4(sp) - -4: lw a3,FR_REG7+bo(a0) # 4 args -3: lw a2,FR_REG6+bo(a0) # 3 args -2: lw a1,FR_REG5+bo(a0) # 2 args -1: lw a0,FR_REG4+bo(a0) # 1 args - jalr t2 - ld ra,56(sp) - addiu sp,64 - jr ra -0: jalr t2 # 0 args, just pass a0 - ld ra,56(sp) - LONG_ADDIU sp,64 - jr ra - END(do_syscalls) - - .rdata - .align PTRLOG -dst_rev: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b - - .section __ex_table,"a" - PTR 7b,bad_stack - PTR 6b,bad_stack - PTR 5b,bad_stack - .text - -#endif /* CONFIG_REVERSE */ diff --git a/arch/mips/mips3/showregs.c b/arch/mips/mips3/showregs.c deleted file mode 100644 index 651cb6a24..000000000 --- a/arch/mips/mips3/showregs.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * linux/arch/mips/mips3/showregs.c - * - * Copyright (C) 1995, 1996 Ralf Baechle - */ -#include <linux/kernel.h> -#include <linux/ptrace.h> - -void show_regs(struct pt_regs * regs) -{ - /* - * Saved main processor registers - */ - printk("$0 : %016Lx %016Lx %016Lx %016Lx\n", - 0ULL, regs->regs[1], regs->regs[2], regs->regs[3]); - printk("$4 : %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); - printk("$8 : %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]); - printk("$12: %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); - printk("$16: %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); - printk("$20: %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); - printk("$24: %016Lx %016Lx\n", - regs->regs[24], regs->regs[25]); - printk("$28: %016Lx %016Lx %016Lx %016Lx\n", - regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); - - /* - * Saved cp0 registers - */ - printk("epc : %016Lx\nStatus: %08x\nCause : %08x\n", - regs->cp0_epc, regs->cp0_status, regs->cp0_cause); -} diff --git a/arch/mips/mips4/Makefile b/arch/mips/mips4/Makefile deleted file mode 100644 index d5827861b..000000000 --- a/arch/mips/mips4/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# -# Makefile for the MIPS IV specific parts of the Linux/MIPS kernel. -# -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. DON'T put your own dependencies here -# unless it's something special (ie not a .c file). 
-# - -.S.s: - $(CPP) $(CFLAGS) $< -o $*.s -.S.o: - $(CC) $(CFLAGS) -c $< -o $*.o - -all: mips.o -EXTRA_ASFLAGS = -mips4 -mcpu=r8000 -O_TARGET := mips.o -O_OBJS := cpu.o pagetables.o showregs.o - -clean: - -include $(TOPDIR)/Rules.make diff --git a/arch/mips/mips4/README b/arch/mips/mips4/README deleted file mode 100644 index 22e90921b..000000000 --- a/arch/mips/mips4/README +++ /dev/null @@ -1,3 +0,0 @@ -This directory contains the start of the R8000/R10000 specific part. I -tried to support this CPU as good as possible without a machine and -without detailed documentation. diff --git a/arch/mips/mips4/cpu.c b/arch/mips/mips4/cpu.c deleted file mode 100644 index ef5a3f8db..000000000 --- a/arch/mips/mips4/cpu.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/sched.h> - -#include <asm/cache.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/processor.h> - -extern asmlinkage void mips4_cacheflush(void *addr, int nbytes, unsigned int flags); - -unsigned long page_colour_mask; - -void (*mips_cache_init)(void); - -static void -mips4_cache_init(void) -{ - /* - * The R10000 is in most aspects similar to the R4400. It - * should get some special optimizations. - */ - write_32bit_cp0_register(CP0_FRAMEMASK, 0); - set_cp0_status(ST0_XX, ST0_XX); - /* - * Actually this mask stands for only 16k cache. This is - * correct since the R10000 has multiple ways in it's cache. - */ - page_colour_mask = 0x3000; - cacheflush = mips4_cacheflush; - /* - * The R10k might even work for Linux/MIPS - but we're paranoid - * and refuse to run until this is tested on real silicon - */ - panic("CPU too expensive - making holiday in the ANDES!"); -} - -void (*switch_to_user_mode)(struct pt_regs *regs); - -static void -mips4_switch_to_user_mode(struct pt_regs *regs) -{ - regs->cp0_status = (regs->cp0_status & ~(ST0_CU0|ST0_KSU)) | KSU_USER; -} - -unsigned long (*thread_saved_pc)(struct thread_struct *t); - -/* - * Return saved PC of a blocked thread. - * XXX This works only for 64 bit kernels. - */ -static unsigned long mips4_thread_saved_pc(struct thread_struct *t) -{ - return ((unsigned long long *)(unsigned long)t->reg29)[11]; -} - -unsigned long (*get_wchan)(struct task_struct *p); - -static unsigned long mips4_get_wchan(struct task_struct *p) -{ - /* - * This one depends on the frame size of schedule(). Do a - * "disass schedule" in gdb to find the frame size. Also, the - * code assumes that sleep_on() follows immediately after - * interruptible_sleep_on() and that add_timer() follows - * immediately after interruptible_sleep(). Ugly, isn't it? - * Maybe adding a wchan field to task_struct would be better, - * after all... 
- */ - unsigned long schedule_frame; - unsigned long pc; - - pc = thread_saved_pc(&p->tss); - if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) { - schedule_frame = ((unsigned long long *)(long)p->tss.reg30)[10]; - return (unsigned long)((unsigned long long *)schedule_frame)[9]; - } - return pc; -} - -extern void mips4_clear_page(unsigned long page); -extern void mips4_copy_page(unsigned long to, unsigned long from); -asmlinkage void (*restore_fp_context)(struct sigcontext *sc); -asmlinkage void (*save_fp_context)(struct sigcontext *sc); - -void -mips4_cpu_init(void) -{ - extern void mips4_pgd_init(unsigned long page); - extern asmlinkage void mips4_restore_fp_context(struct sigcontext *sc); - extern asmlinkage void mips4_save_fp_context(struct sigcontext *sc); - - mips_cache_init = mips4_cache_init; - pgd_init = mips1_pgd_init; - switch_to_user_mode = mips4_switch_to_user_mode; - thread_saved_pc = mips4_thread_saved_pc; - get_wchan = mips4_get_wchan; - clear_page = mips4_clear_page; - restore_fp_context = mips4_restore_fp_context; - save_fp_context = mips4_save_fp_context; -} diff --git a/arch/mips/mips4/pagetables.c b/arch/mips/mips4/pagetables.c deleted file mode 100644 index b1b86290a..000000000 --- a/arch/mips/mips4/pagetables.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * 64 bit MIPS specific page handling. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 Ralf Baechle - */ -#include <linux/mm.h> -#include <asm/cache.h> -#include <asm/mipsconfig.h> -#include <asm/page.h> -#include <asm/pgtable.h> - -void (*pgd_init)(unsigned long page); - -/* - * Initialize new page directory with pointers to invalid ptes - */ -void mips4_pgd_init(unsigned long page) -{ - unsigned long dummy1, dummy2; - - /* - * We generate dirty lines in the datacache, overwrite these lines - * with zeros and then flush the cache. Sounds horribly complicated - * but is just a trick to avoid unnecessary loads of from memory - * and uncached stores which are very expensive. - * FIXME: This is the same like the R4000 version. We could do some - * R10000 trickery using caching mode "uncached accelerated". - */ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "dsll32\t$1,%2,0\n\t" - "dsrl32\t%2,$1,0\n\t" - "or\t%2,$1\n" - "1:\t" - "cache\t%5,(%0)\n\t" - "sd\t%2,(%0)\n\t" - "sd\t%2,8(%0)\n\t" - "cache\t%5,16(%0)\n\t" - "sd\t%2,16(%0)\n\t" - "sd\t%2,24(%0)\n\t" - "cache\t%5,32(%0)\n\t" - "sd\t%2,32(%0)\n\t" - "sd\t%2,40(%0)\n\t" - "cache\t%5,48(%0)\n\t" - "sd\t%2,48(%0)\n\t" - "sd\t%2,56(%0)\n\t" - "subu\t%1,1\n\t" - "bnez\t%1,1b\n\t" - "addiu\t%0,64\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=&r" (dummy1), - "=&r" (dummy2) - :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) | - _PAGE_TABLE), - "0" (page), - "1" (PAGE_SIZE/(sizeof(pmd_t)*16)), - "i" (Create_Dirty_Excl_D) - :"$1"); - /* - * Now force writeback to ashure values are in the RAM. 
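 * Until that flush the freshly initialized page directory exists only
 * in the dirty data cache lines created above and is not yet visible
 * in memory.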
- */ - cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_VIRTUAL); -} - -void (*clear_page)(unsigned long page); - -/* - * To do: cache magic - */ -void mips4_clear_page(unsigned long page) -{ - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "daddiu\t$1,%0,%2\n" - "1:\tsd\t$0,(%0)\n\t" - "sd\t$0,8(%0)\n\t" - "sd\t$0,16(%0)\n\t" - "sd\t$0,24(%0)\n\t" - "daddiu\t%0,64\n\t" - "sd\t$0,-32(%0)\n\t" - "sd\t$0,-24(%0)\n\t" - "sd\t$0,-16(%0)\n\t" - "bne\t$1,%0,1b\n\t" - "sd\t$0,-8(%0)\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=r" (page) - :"0" (page), - "I" (PAGE_SIZE) - :"$1","memory"); -} - -void (*copy_page)(unsigned long to, unsigned long from); - -/* - * This is horribly inefficient ... - */ -void mips4_copy_page(unsigned long to, unsigned long from) -{ - /* - * Force writeback of old page to memory. We don't know the - * virtual address, so we have to flush the entire cache ... - */ - cacheflush(0, ~0, CF_DCACHE|CF_VIRTUAL); - sync_mem(); - memcpy((void *) to, - (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE); - /* - * Now writeback the page again if colour has changed. - */ - if (page_colour(from) != page_colour(to)) - cacheflush(0, ~0, CF_DCACHE|CF_VIRTUAL); -} diff --git a/arch/mips/mm/.cvsignore b/arch/mips/mm/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/mm/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 6f69a0ee6..1205b2bf3 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -8,6 +8,7 @@ # Note 2! The CFLAGS definition is now in the main makefile... O_TARGET := mm.o -O_OBJS := extable.o init.o fault.o stack.o +O_OBJS := extable.o init.o fault.o r4xx0.o r2300.o r6000.o tfp.o \ + andes.o loadmmu.o include $(TOPDIR)/Rules.make diff --git a/arch/mips/mm/andes.c b/arch/mips/mm/andes.c new file mode 100644 index 000000000..977fa1a30 --- /dev/null +++ b/arch/mips/mm/andes.c @@ -0,0 +1,102 @@ +/* $Id: andes.c,v 1.3 1996/07/29 11:10:06 dm Exp $ + * andes.c: MMU and cache operations for the R10000 (ANDES). + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/sgialib.h> + +extern unsigned long mips_tlb_entries; + +/* Cache operations. XXX Write these dave... */ +static inline void andes_flush_cache_all(void) +{ + /* XXX */ +} + +static void andes_flush_cache_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void andes_flush_cache_range(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void andes_flush_cache_page(struct vm_area_struct *vma, + unsigned long page) +{ + /* XXX */ +} + +static void andes_flush_page_to_ram(unsigned long page) +{ + /* XXX */ +} + +static void andes_flush_cache_sigtramp(unsigned long page) +{ + /* XXX */ +} + +/* TLB operations. XXX Write these dave... 
*/ +static inline void andes_flush_tlb_all(void) +{ + /* XXX */ +} + +static void andes_flush_tlb_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void andes_flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void andes_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + /* XXX */ +} + +static void andes_load_pgd(unsigned long pg_dir) +{ +} + +static void andes_pgd_init(unsigned long page) +{ +} + +void ld_mmu_andes(void) +{ + flush_cache_all = andes_flush_cache_all; + flush_cache_mm = andes_flush_cache_mm; + flush_cache_range = andes_flush_cache_range; + flush_cache_page = andes_flush_cache_page; + flush_cache_sigtramp = andes_flush_cache_sigtramp; + flush_page_to_ram = andes_flush_page_to_ram; + + flush_tlb_all = andes_flush_tlb_all; + flush_tlb_mm = andes_flush_tlb_mm; + flush_tlb_range = andes_flush_tlb_range; + flush_tlb_page = andes_flush_tlb_page; + + load_pgd = andes_load_pgd; + pgd_init = andes_pgd_init; + + flush_cache_all(); + flush_tlb_all(); +} diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index f1462eff9..1ad0ff48e 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -1,7 +1,7 @@ /* * arch/mips/mm/fault.c * - * Copyright (C) 1995, 1996 by Ralf Baechle + * Copyright (C) 1995, 1996, 1997 by Ralf Baechle */ #include <linux/signal.h> #include <linux/sched.h> @@ -14,13 +14,15 @@ #include <linux/mman.h> #include <linux/mm.h> -#include <asm/cache.h> +#include <asm/pgtable.h> +#include <asm/mmu_context.h> #include <asm/system.h> #include <asm/uaccess.h> -#include <asm/pgtable.h> extern void die_if_kernel(char *, struct pt_regs *, long); +unsigned long asid_cache = ASID_FIRST_VERSION; + /* * Macro for exception fixup code to access integer registers. */ @@ -31,9 +33,8 @@ extern void die_if_kernel(char *, struct pt_regs *, long); * and the problem, and then passes it off to one of the appropriate * routines. */ -asmlinkage void -do_page_fault(struct pt_regs *regs, unsigned long writeaccess, - unsigned long address) +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, + unsigned long address) { struct vm_area_struct * vma; struct task_struct *tsk = current; @@ -41,9 +42,8 @@ do_page_fault(struct pt_regs *regs, unsigned long writeaccess, unsigned long fixup; #if 0 - printk("do_page_fault() #1: %s %08lx (epc == %08lx, ra == %08lx)\n", - writeaccess ? "writeaccess to" : "readaccess from", - address, regs->cp0_epc, regs->regs[31]); + printk("[%s:%d:%08lx:%ld:%08lx]", current->comm, current->pid, + address, writeaccess, regs->cp0_epc); #endif down(&mm->mmap_sem); vma = find_vma(mm, address); @@ -70,7 +70,7 @@ good_area: handle_mm_fault(vma, address, writeaccess); up(&mm->mmap_sem); - return; + return; /* * Something tried to access memory that isn't in our memory map.. @@ -89,6 +89,7 @@ bad_area: regs->cp0_epc = new_epc; return; } + if (user_mode(regs)) { tsk->tss.cp0_badvaddr = address; tsk->tss.error_code = writeaccess; @@ -101,6 +102,9 @@ bad_area: (unsigned long) regs->cp0_epc, (unsigned long) regs->regs[31]); #endif + + current->tss.cp0_badvaddr = address; + current->tss.error_code = writeaccess; force_sig(SIGSEGV, tsk); return; } @@ -109,11 +113,7 @@ bad_area: * terminate things with extreme prejudice. 
*/ printk(KERN_ALERT "Unable to handle kernel paging request at virtual " -#ifdef __mips64 - "address %08lx, epc == %08Lx\n", address, regs->cp0_epc); -#else - "address %08lx, epc == %016lx\n", address, regs->cp0_epc); -#endif + "address %08lx, epc == %08lx\n", address, regs->cp0_epc); die_if_kernel("Oops", regs, writeaccess); do_exit(SIGKILL); } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 149349102..cfcfcaf00 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -21,23 +21,32 @@ #endif #include <asm/bootinfo.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/dma.h> #include <asm/jazzdma.h> #include <asm/vector.h> #include <asm/system.h> -#include <asm/uaccess.h> +#include <asm/segment.h> #include <asm/pgtable.h> -#include <asm/page.h> +#ifdef CONFIG_SGI +#include <asm/sgialib.h> +#endif extern void deskstation_tyne_dma_init(void); extern void show_net_buffers(void); +asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) +{ + /* XXX Just get it working for now... */ + flush_cache_all(); + return 0; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a * do_exit(), but using this instead means there is less risk - * for a process dying in kernel mode, possibly leaving an inode + * for a process dying in kernel mode, possibly leaving a inode * unused etc.. * * BAD_PAGETABLE is the accompanying page-table: it is initialized @@ -56,7 +65,6 @@ pte_t * __bad_pagetable(void) #endif page = (unsigned long) empty_bad_page_table; - page = page_to_ptp(page); /* * As long as we only save the low 32 bit of the 64 bit wide * R4000 registers on interrupt we cannot use 64 bit memory accesses @@ -104,13 +112,59 @@ pte_t * __bad_pagetable(void) return (pte_t *)page; } +static inline void +__zeropage(unsigned long page) +{ + unsigned long dummy1, dummy2; + +#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) + /* + * Use 64bit code even for Linux/MIPS 32bit on R4000 + */ + __asm__ __volatile__( + ".set\tnoreorder\n" + ".set\tnoat\n\t" + ".set\tmips3\n" + "1:\tsd\t$0,(%0)\n\t" + "subu\t%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,8\n\t" + ".set\tmips0\n\t" + ".set\tat\n" + ".set\treorder" + :"=r" (dummy1), + "=r" (dummy2) + :"0" (page), + "1" (PAGE_SIZE/8)); +#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */ + __asm__ __volatile__( + ".set\tnoreorder\n" + "1:\tsw\t$0,(%0)\n\t" + "subu\t%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,4\n\t" + ".set\treorder" + :"=r" (dummy1), + "=r" (dummy2) + :"0" (page), + "1" (PAGE_SIZE/4)); +#endif +} + +static inline void +zeropage(unsigned long page) +{ + flush_page_to_ram(page); + sync_mem(); + __zeropage(page); +} + pte_t __bad_page(void) { extern char empty_bad_page[PAGE_SIZE]; unsigned long page = (unsigned long)empty_bad_page; - clear_page(page_to_ptp(page)); - cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_VIRTUAL); + zeropage(page); return pte_mkdirty(mk_pte(page, PAGE_SHARED)); } @@ -146,8 +200,7 @@ extern unsigned long free_area_init(unsigned long, unsigned long); unsigned long paging_init(unsigned long start_mem, unsigned long end_mem) { - mips_cache_init(); - pgd_init((unsigned long)swapper_pg_dir - (PT_OFFSET - PAGE_OFFSET)); + pgd_init((unsigned long)swapper_pg_dir); return free_area_init(start_mem, end_mem); } @@ -156,7 +209,7 @@ void mem_init(unsigned long start_mem, unsigned long end_mem) int codepages = 0; int datapages = 0; unsigned long tmp; - extern int _etext; + extern int _etext, _ftext; #ifdef 
CONFIG_MIPS_JAZZ if (mips_machgroup == MACH_GROUP_JAZZ) @@ -177,9 +230,10 @@ void mem_init(unsigned long start_mem, unsigned long end_mem) clear_bit(PG_reserved, &mem_map[tmp].flags); /* - * For rPC44 we've reserved some memory too much. Free the memory - * from PAGE_SIZE to PAGE_OFFSET + 0xa0000 again. We don't free the - * lowest page where the exception handlers will reside. + * For rPC44 and RM200 we've reserved some memory too much. Free + * the memory from PAGE_SIZE to PAGE_OFFSET + 0xa0000 again. We + * don't free the lowest page where the exception handlers will + * reside. */ if (mips_machgroup == MACH_GROUP_ARC && mips_machtype == MACH_DESKSTATION_RPC44) @@ -187,10 +241,15 @@ void mem_init(unsigned long start_mem, unsigned long end_mem) tmp < MAP_NR(PAGE_OFFSET + 0xa000); tmp++) clear_bit(PG_reserved, &mem_map[tmp].flags); + +#ifdef CONFIG_SGI + prom_fixup_mem_map(start_mem, high_memory); +#endif + #ifdef CONFIG_DESKSTATION_TYNE - if (mips_machtype == MACH_DESKSTATION_TYNE) - deskstation_tyne_dma_init(); + deskstation_tyne_dma_init(); #endif + for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) { /* * This is only for PC-style DMA. The onboard DMA @@ -200,9 +259,11 @@ void mem_init(unsigned long start_mem, unsigned long end_mem) if (tmp >= MAX_DMA_ADDRESS) clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags); if (PageReserved(mem_map+MAP_NR(tmp))) { - if (tmp < (unsigned long) &_etext) + if ((tmp < (unsigned long) &_etext) && + (tmp >= (unsigned long) &_ftext)) codepages++; - else if (tmp < start_mem) + else if ((tmp < start_mem) && + (tmp > (unsigned long) &_etext)) datapages++; continue; } diff --git a/arch/mips/mm/loadmmu.c b/arch/mips/mm/loadmmu.c new file mode 100644 index 000000000..9a0c33328 --- /dev/null +++ b/arch/mips/mm/loadmmu.c @@ -0,0 +1,105 @@ +/* $Id: loadmmu.c,v 1.6 1996/07/29 11:10:07 dm Exp $ + * loadmmu.c: Setup cpu/cache specific function ptrs at boot time. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/bootinfo.h> +#include <asm/sgialib.h> + +/* memory functions */ +void (*clear_page)(unsigned long page); +void (*copy_page)(unsigned long to, unsigned long from); + +/* Cache operations. */ +void (*flush_cache_all)(void); +void (*flush_cache_mm)(struct mm_struct *mm); +void (*flush_cache_range)(struct mm_struct *mm, unsigned long start, + unsigned long end); +void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page); +void (*flush_cache_sigtramp)(unsigned long addr); +void (*flush_page_to_ram)(unsigned long page); + +/* TLB operations. */ +void (*flush_tlb_all)(void); +void (*flush_tlb_mm)(struct mm_struct *mm); +void (*flush_tlb_range)(struct mm_struct *mm, unsigned long start, + unsigned long end); +void (*flush_tlb_page)(struct vm_area_struct *vma, unsigned long page); + +/* Miscellaneous. 
*/ +void (*load_pgd)(unsigned long pg_dir); +void (*pgd_init)(unsigned long page); +void (*update_mmu_cache)(struct vm_area_struct * vma, + unsigned long address, pte_t pte); + +void (*show_regs)(struct pt_regs *); +asmlinkage void (*resume)(void *tsk); + +extern void ld_mmu_r2300(void); +extern void ld_mmu_r4xx0(void); +extern void ld_mmu_r6000(void); +extern void ld_mmu_tfp(void); +extern void ld_mmu_andes(void); + +void loadmmu(void) +{ + switch(mips_cputype) { + case CPU_R2000: + case CPU_R3000: + printk("Loading R[23]00 MMU routines.\n"); + ld_mmu_r2300(); + break; + + case CPU_R4000PC: + case CPU_R4000SC: + case CPU_R4000MC: + case CPU_R4200: + case CPU_R4300: + case CPU_R4400PC: + case CPU_R4400SC: + case CPU_R4400MC: + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_R5000A: + printk("Loading R4000 MMU routines.\n"); + ld_mmu_r4xx0(); + break; + + case CPU_R6000: + case CPU_R6000A: + printk("Loading R6000 MMU routines.\n"); + ld_mmu_r6000(); + break; + + case CPU_R8000: + printk("Loading TFP MMU routines.\n"); + ld_mmu_tfp(); + break; + + case CPU_R10000: + printk("Loading R10000 MMU routines.\n"); + ld_mmu_andes(); + break; + + default: + /* XXX We need an generic routine in the MIPS port + * XXX to jabber stuff onto the screen on all machines + * XXX before the console is setup. The ARCS prom + * XXX routines look good for this, but only the SGI + * XXX code has a full library for that at this time. + */ + panic("Yeee, unsupported mmu/cache architecture."); + } +} diff --git a/arch/mips/mm/r2300.c b/arch/mips/mm/r2300.c new file mode 100644 index 000000000..b43d7a2bc --- /dev/null +++ b/arch/mips/mm/r2300.c @@ -0,0 +1,271 @@ +/* $Id: r2300.c,v 1.5 1996/07/29 11:10:07 dm Exp $ + * r2300.c: R2000 and R3000 specific mmu/cache code. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/sgialib.h> + +extern unsigned long mips_tlb_entries; + +/* page functions */ +void r2300_clear_page(unsigned long page) +{ + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + "addiu\t$1,%0,%2\n" + "1:\tsw\t$0,(%0)\n\t" + "sw\t$0,4(%0)\n\t" + "sw\t$0,8(%0)\n\t" + "sw\t$0,12(%0)\n\t" + "addiu\t%0,32\n\t" + "sw\t$0,-16(%0)\n\t" + "sw\t$0,-12(%0)\n\t" + "sw\t$0,-8(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sw\t$0,-4(%0)\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (page) + :"0" (page), + "I" (PAGE_SIZE) + :"$1","memory"); +} + +static void r2300_copy_page(unsigned long to, unsigned long from) +{ + unsigned long dummy1, dummy2; + unsigned long reg1, reg2, reg3, reg4; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + "addiu\t$1,%0,%8\n" + "1:\tlw\t%2,(%1)\n\t" + "lw\t%3,4(%1)\n\t" + "lw\t%4,8(%1)\n\t" + "lw\t%5,12(%1)\n\t" + "sw\t%2,(%0)\n\t" + "sw\t%3,4(%0)\n\t" + "sw\t%4,8(%0)\n\t" + "sw\t%5,12(%0)\n\t" + "lw\t%2,16(%1)\n\t" + "lw\t%3,20(%1)\n\t" + "lw\t%4,24(%1)\n\t" + "lw\t%5,28(%1)\n\t" + "sw\t%2,16(%0)\n\t" + "sw\t%3,20(%0)\n\t" + "sw\t%4,24(%0)\n\t" + "sw\t%5,28(%0)\n\t" + "addiu\t%0,64\n\t" + "addiu\t%1,64\n\t" + "lw\t%2,-32(%1)\n\t" + "lw\t%3,-28(%1)\n\t" + "lw\t%4,-24(%1)\n\t" + "lw\t%5,-20(%1)\n\t" + "sw\t%2,-32(%0)\n\t" + "sw\t%3,-28(%0)\n\t" + "sw\t%4,-24(%0)\n\t" + "sw\t%5,-20(%0)\n\t" + "lw\t%2,-16(%1)\n\t" + "lw\t%3,-12(%1)\n\t" + "lw\t%4,-8(%1)\n\t" + "lw\t%5,-4(%1)\n\t" + "sd\t%2,-16(%0)\n\t" + "sd\t%3,-12(%0)\n\t" + "sd\t%4,-8(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sd\t%5,-4(%0)\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (dummy1), "=r" (dummy2), + "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4) + :"0" (to), "1" (from), + "I" (PAGE_SIZE)); +} + +/* Cache operations. */ +static inline void r2300_flush_cache_all(void) { } +static void r2300_flush_cache_mm(struct mm_struct *mm) { } +static void r2300_flush_cache_range(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ +} + +static void r2300_flush_cache_page(struct vm_area_struct *vma, + unsigned long page) +{ +} + +static void r2300_flush_page_to_ram(unsigned long page) +{ + /* XXX What we want to do here is perform a displacement + * XXX flush because there are circumstances where you do + * XXX indeed want to remove stale data from the cache. + * XXX (DMA operations for example, where the cache cannot + * XXX "see" this data get changed.) + */ +} + +static void r2300_flush_cache_sigtramp(unsigned long page) +{ +} + +/* TLB operations. 
*/ +static inline void r2300_flush_tlb_all(void) +{ + unsigned long flags; + int entry; + + save_flags(flags); cli(); + write_32bit_cp0_register(CP0_ENTRYLO0, 0); + for(entry = 0; entry < mips_tlb_entries; entry++) { + write_32bit_cp0_register(CP0_INDEX, entry); + write_32bit_cp0_register(CP0_ENTRYHI, ((entry | 0x8) << 12)); + __asm__ __volatile__("tlbwi"); + } + restore_flags(flags); +} + +static void r2300_flush_tlb_mm(struct mm_struct *mm) +{ + if(mm == current->mm) + r2300_flush_tlb_all(); +} + +static void r2300_flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + if(mm == current->mm) + r2300_flush_tlb_all(); +} + +static void r2300_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if(vma->vm_mm == current->mm) + r2300_flush_tlb_all(); +} + +/* Load a new root pointer into the TLB. */ +static void r2300_load_pgd(unsigned long pg_dir) +{ + unsigned long flags; + + save_flags(flags); cli(); + write_32bit_cp0_register(CP0_ENTRYHI, TLB_ROOT); + write_32bit_cp0_register(CP0_INDEX, 0); + write_32bit_cp0_register(CP0_ENTRYLO0, ((pg_dir >> 6) | 0x00e0)); + __asm__ __volatile__("tlbwi"); + restore_flags(flags); +} + +/* + * Initialize new page directory with pointers to invalid ptes + */ +static void r2300_pgd_init(unsigned long page) +{ + unsigned long dummy1, dummy2; + + /* + * The plain and boring version for the R3000. No cache flushing + * stuff is implemented since the R3000 has physical caches. + */ + __asm__ __volatile__( + ".set\tnoreorder\n" + "1:\tsw\t%2,(%0)\n\t" + "sw\t%2,4(%0)\n\t" + "sw\t%2,8(%0)\n\t" + "sw\t%2,12(%0)\n\t" + "sw\t%2,16(%0)\n\t" + "sw\t%2,20(%0)\n\t" + "sw\t%2,24(%0)\n\t" + "sw\t%2,28(%0)\n\t" + "subu\t%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,32\n\t" + ".set\treorder" + :"=r" (dummy1), + "=r" (dummy2) + :"r" ((unsigned long) invalid_pte_table), + "0" (page), + "1" (PAGE_SIZE/(sizeof(pmd_t)*8))); +} + +static void r2300_update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ + r2300_flush_tlb_page(vma, address); + /* + * FIXME: We should also reload a new entry into the TLB to + * avoid unnecessary exceptions. 
+ */ +} + +static void r2300_show_regs(struct pt_regs * regs) +{ + /* + * Saved main processor registers + */ + printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + 0, (unsigned long) regs->regs[1], (unsigned long) regs->regs[2], + (unsigned long) regs->regs[3], (unsigned long) regs->regs[4], + (unsigned long) regs->regs[5], (unsigned long) regs->regs[6], + (unsigned long) regs->regs[7]); + printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[8], (unsigned long) regs->regs[9], + (unsigned long) regs->regs[10], (unsigned long) regs->regs[11], + (unsigned long) regs->regs[12], (unsigned long) regs->regs[13], + (unsigned long) regs->regs[14], (unsigned long) regs->regs[15]); + printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[16], (unsigned long) regs->regs[17], + (unsigned long) regs->regs[18], (unsigned long) regs->regs[19], + (unsigned long) regs->regs[20], (unsigned long) regs->regs[21], + (unsigned long) regs->regs[22], (unsigned long) regs->regs[23]); + printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[24], (unsigned long) regs->regs[25], + (unsigned long) regs->regs[28], (unsigned long) regs->regs[29], + (unsigned long) regs->regs[30], (unsigned long) regs->regs[31]); + + /* + * Saved cp0 registers + */ + printk("epc : %08lx\nStatus: %08x\nCause : %08x\n", + (unsigned long) regs->cp0_epc, (unsigned int) regs->cp0_status, + (unsigned int) regs->cp0_cause); +} + +void ld_mmu_r2300(void) +{ + clear_page = r2300_clear_page; + copy_page = r2300_copy_page; + + flush_cache_all = r2300_flush_cache_all; + flush_cache_mm = r2300_flush_cache_mm; + flush_cache_range = r2300_flush_cache_range; + flush_cache_page = r2300_flush_cache_page; + flush_cache_sigtramp = r2300_flush_cache_sigtramp; + flush_page_to_ram = r2300_flush_page_to_ram; + + flush_tlb_all = r2300_flush_tlb_all; + flush_tlb_mm = r2300_flush_tlb_mm; + flush_tlb_range = r2300_flush_tlb_range; + flush_tlb_page = r2300_flush_tlb_page; + + load_pgd = r2300_load_pgd; + pgd_init = r2300_pgd_init; + update_mmu_cache = r2300_update_mmu_cache; + + show_regs = r2300_show_regs; + + flush_tlb_all(); +} diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c new file mode 100644 index 000000000..ea983656b --- /dev/null +++ b/arch/mips/mm/r4xx0.c @@ -0,0 +1,2456 @@ +/* $Id: r4xx0.c,v 1.19 1996/08/02 11:11:36 dm Exp $ + * r4xx0.c: R4000 processor variant specific MMU/Cache routines. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <linux/config.h> + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/autoconf.h> + +#include <asm/sgi.h> +#include <asm/sgimc.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/bootinfo.h> +#include <asm/sgialib.h> +#include <asm/mmu_context.h> + +/* CP0 hazard avoidance. */ +#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \ + "nop; nop; nop; nop; nop; nop;\n\t" \ + ".set reorder\n\t") + +/* Primary cache parameters. */ +static int icache_size, dcache_size; /* Size in bytes */ +static int ic_lsize, dc_lsize; /* LineSize in bytes */ + +/* Secondary cache (if present) parameters. */ +static scache_size, sc_lsize; /* Again, in bytes */ + +#include <asm/cacheops.h> +#include <asm/r4kcache.h> + +#undef DEBUG_CACHE + +/* + * Zero an entire page. 
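 *
 * The d16/d32 variants below differ only in the data cache line size
 * found at boot time.  Each issues a Create_Dirty_Excl_D cache op on a
 * line before storing into it, so the line is allocated dirty in the
 * cache without first being fetched from memory.  In outline (cache_op
 * is just shorthand for the cache instruction used below):
 *
 *	for (p = page; p < page + PAGE_SIZE; p += dc_lsize) {
 *		cache_op(Create_Dirty_Excl_D, p);
 *		store zeros over the whole line;
 *	}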
+ */ + +static void r4k_clear_page_d16(unsigned long page) +{ + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + ".set\tmips3\n\t" + "daddiu\t$1,%0,%2\n" + "1:\tcache\t%3,(%0)\n\t" + "sd\t$0,(%0)\n\t" + "sd\t$0,8(%0)\n\t" + "cache\t%3,16(%0)\n\t" + "sd\t$0,16(%0)\n\t" + "sd\t$0,24(%0)\n\t" + "daddiu\t%0,64\n\t" + "cache\t%3,-32(%0)\n\t" + "sd\t$0,-32(%0)\n\t" + "sd\t$0,-24(%0)\n\t" + "cache\t%3,-16(%0)\n\t" + "sd\t$0,-16(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sd\t$0,-8(%0)\n\t" + ".set\tmips0\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (page) + :"0" (page), + "I" (PAGE_SIZE), + "i" (Create_Dirty_Excl_D) + :"$1","memory"); +} + +static void r4k_clear_page_d32(unsigned long page) +{ + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + ".set\tmips3\n\t" + "daddiu\t$1,%0,%2\n" + "1:\tcache\t%3,(%0)\n\t" + "sd\t$0,(%0)\n\t" + "sd\t$0,8(%0)\n\t" + "sd\t$0,16(%0)\n\t" + "sd\t$0,24(%0)\n\t" + "daddiu\t%0,64\n\t" + "cache\t%3,-32(%0)\n\t" + "sd\t$0,-32(%0)\n\t" + "sd\t$0,-24(%0)\n\t" + "sd\t$0,-16(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sd\t$0,-8(%0)\n\t" + ".set\tmips0\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (page) + :"0" (page), + "I" (PAGE_SIZE), + "i" (Create_Dirty_Excl_D) + :"$1","memory"); +} + + +/* + * This is still inefficient. We only can do better if we know the + * virtual address where the copy will be accessed. + */ + +static void r4k_copy_page_d16(unsigned long to, unsigned long from) +{ + unsigned long dummy1, dummy2; + unsigned long reg1, reg2, reg3, reg4; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + ".set\tmips3\n\t" + "daddiu\t$1,%0,%8\n" + "1:\tcache\t%9,(%0)\n\t" + "lw\t%2,(%1)\n\t" + "lw\t%3,4(%1)\n\t" + "lw\t%4,8(%1)\n\t" + "lw\t%5,12(%1)\n\t" + "sw\t%2,(%0)\n\t" + "sw\t%3,4(%0)\n\t" + "sw\t%4,8(%0)\n\t" + "sw\t%5,12(%0)\n\t" + "cache\t%9,16(%0)\n\t" + "lw\t%2,16(%1)\n\t" + "lw\t%3,20(%1)\n\t" + "lw\t%4,24(%1)\n\t" + "lw\t%5,28(%1)\n\t" + "sw\t%2,16(%0)\n\t" + "sw\t%3,20(%0)\n\t" + "sw\t%4,24(%0)\n\t" + "sw\t%5,28(%0)\n\t" + "cache\t%9,32(%0)\n\t" + "daddiu\t%0,64\n\t" + "daddiu\t%1,64\n\t" + "lw\t%2,-32(%1)\n\t" + "lw\t%3,-28(%1)\n\t" + "lw\t%4,-24(%1)\n\t" + "lw\t%5,-20(%1)\n\t" + "sw\t%2,-32(%0)\n\t" + "sw\t%3,-28(%0)\n\t" + "sw\t%4,-24(%0)\n\t" + "sw\t%5,-20(%0)\n\t" + "cache\t%9,-16(%0)\n\t" + "lw\t%2,-16(%1)\n\t" + "lw\t%3,-12(%1)\n\t" + "lw\t%4,-8(%1)\n\t" + "lw\t%5,-4(%1)\n\t" + "sd\t%2,-16(%0)\n\t" + "sd\t%3,-12(%0)\n\t" + "sd\t%4,-8(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sd\t%5,-4(%0)\n\t" + ".set\tmips0\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (dummy1), "=r" (dummy2), + "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4) + :"0" (to), "1" (from), + "I" (PAGE_SIZE), + "i" (Create_Dirty_Excl_D)); +} + +static void r4k_copy_page_d32(unsigned long to, unsigned long from) +{ + unsigned long dummy1, dummy2; + unsigned long reg1, reg2, reg3, reg4; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + ".set\tmips3\n\t" + "daddiu\t$1,%0,%8\n" + "1:\tcache\t%9,(%0)\n\t" + "lw\t%2,(%1)\n\t" + "lw\t%3,4(%1)\n\t" + "lw\t%4,8(%1)\n\t" + "lw\t%5,12(%1)\n\t" + "sw\t%2,(%0)\n\t" + "sw\t%3,4(%0)\n\t" + "sw\t%4,8(%0)\n\t" + "sw\t%5,12(%0)\n\t" + "lw\t%2,16(%1)\n\t" + "lw\t%3,20(%1)\n\t" + "lw\t%4,24(%1)\n\t" + "lw\t%5,28(%1)\n\t" + "sw\t%2,16(%0)\n\t" + "sw\t%3,20(%0)\n\t" + "sw\t%4,24(%0)\n\t" + "sw\t%5,28(%0)\n\t" + "cache\t%9,32(%0)\n\t" + "daddiu\t%0,64\n\t" + "daddiu\t%1,64\n\t" + "lw\t%2,-32(%1)\n\t" + "lw\t%3,-28(%1)\n\t" + "lw\t%4,-24(%1)\n\t" + "lw\t%5,-20(%1)\n\t" + "sw\t%2,-32(%0)\n\t" + 
"sw\t%3,-28(%0)\n\t" + "sw\t%4,-24(%0)\n\t" + "sw\t%5,-20(%0)\n\t" + "lw\t%2,-16(%1)\n\t" + "lw\t%3,-12(%1)\n\t" + "lw\t%4,-8(%1)\n\t" + "lw\t%5,-4(%1)\n\t" + "sd\t%2,-16(%0)\n\t" + "sd\t%3,-12(%0)\n\t" + "sd\t%4,-8(%0)\n\t" + "bne\t$1,%0,1b\n\t" + "sd\t%5,-4(%0)\n\t" + ".set\tmips0\n\t" + ".set\tat\n\t" + ".set\treorder" + :"=r" (dummy1), "=r" (dummy2), + "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4) + :"0" (to), "1" (from), + "I" (PAGE_SIZE), + "i" (Create_Dirty_Excl_D)); +} + +/* + * If you think for one second that this stuff coming up is a lot + * of bulky code eating too many kernel cache lines. Think _again_. + * + * Consider: + * 1) Taken branches have a 3 cycle penalty on R4k + * 2) The branch itself is a real dead cycle on even R4600/R5000. + * 3) Only one of the following variants of each type is even used by + * the kernel based upon the cache parameters we detect at boot time. + * + * QED. + */ + +static inline void r4k_flush_cache_all_s16d16i16(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); blast_scache16(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s32d16i16(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); blast_scache32(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s64d16i16(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); blast_scache64(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s128d16i16(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); blast_scache128(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s16d32i32(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); blast_scache16(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s32d32i32(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); blast_scache32(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s64d32i32(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); blast_scache64(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_s128d32i32(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); blast_scache128(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_d16i16(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); + restore_flags(flags); +} + +static inline void r4k_flush_cache_all_d32i32(void) +{ + unsigned long flags; + + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); + restore_flags(flags); +} + +static inline struct vm_area_struct * +find_mm_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct * result = NULL; + + if (mm) { + struct vm_area_struct * tree = mm->mmap_avl; + for (;;) { + if (tree == avl_empty) + break; + if (tree->vm_end > addr) { + result = tree; + if (tree->vm_start <= addr) + break; + tree = tree->vm_avl_left; + } else + tree = tree->vm_avl_right; + } + } + return result; +} + +static void +r4k_flush_cache_range_s16d16i16(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + 
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s16d16i16(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache16_page(start); + if(text) + blast_icache16_page(start); + blast_scache16_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void +r4k_flush_cache_range_s32d16i16(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s32d16i16(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache16_page(start); + if(text) + blast_icache16_page(start); + blast_scache32_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s64d16i16(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s64d16i16(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache16_page(start); + if(text) + blast_icache16_page(start); + blast_scache64_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s128d16i16(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s128d16i16(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache16_page(start); + if(text) + blast_icache16_page(start); + blast_scache128_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s16d32i32(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= 
PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s16d32i32(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache32_page(start); + if(text) + blast_icache32_page(start); + blast_scache16_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s32d32i32(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s32d32i32(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache32_page(start); + if(text) + blast_icache32_page(start); + blast_scache32_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s64d32i32(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s64d32i32(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache32_page(start); + if(text) + blast_icache32_page(start); + blast_scache64_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_s128d32i32(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long flags; + + if(mm->context == 0) + return; + + start &= PAGE_MASK; +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + vma = find_mm_vma(mm, start); + if(vma) { + if(mm->context != current->mm->context) { + r4k_flush_cache_all_s128d32i32(); + } else { + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int text; + + save_flags(flags); cli(); + text = vma->vm_flags & VM_EXEC; + while(start < end) { + pgd = pgd_offset(mm, start); + pmd = pmd_offset(pgd, start); + pte = pte_offset(pmd, start); + + if(pte_val(*pte) & _PAGE_VALID) { + blast_dcache32_page(start); + if(text) + blast_icache32_page(start); + blast_scache128_page(start); + } + start += PAGE_SIZE; + } + restore_flags(flags); + } + } +} + +static void r4k_flush_cache_range_d16i16(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + if(mm->context != 0) { + unsigned long flags; + +#ifdef DEBUG_CACHE + 
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + save_flags(flags); cli(); + blast_dcache16(); blast_icache16(); + restore_flags(flags); + } +} + +static void r4k_flush_cache_range_d32i32(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + if(mm->context != 0) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end); +#endif + save_flags(flags); cli(); + blast_dcache32(); blast_icache32(); + restore_flags(flags); + } +} + +/* + * On architectures like the Sparc, we could get rid of lines in + * the cache created only by a certain context, but on the MIPS + * (and actually certain Sparc's) we cannot. + */ +static void r4k_flush_cache_mm_s16d16i16(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s16d16i16(); + } +} + +static void r4k_flush_cache_mm_s32d16i16(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s32d16i16(); + } +} + +static void r4k_flush_cache_mm_s64d16i16(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s64d16i16(); + } +} + +static void r4k_flush_cache_mm_s128d16i16(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s128d16i16(); + } +} + +static void r4k_flush_cache_mm_s16d32i32(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s16d32i32(); + } +} + +static void r4k_flush_cache_mm_s32d32i32(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s32d32i32(); + } +} + +static void r4k_flush_cache_mm_s64d32i32(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s64d32i32(); + } +} + +static void r4k_flush_cache_mm_s128d32i32(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_s128d32i32(); + } +} + +static void r4k_flush_cache_mm_d16i16(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_d16i16(); + } +} + +static void r4k_flush_cache_mm_d32i32(struct mm_struct *mm) +{ + if(mm->context != 0) { +#ifdef DEBUG_CACHE + printk("cmm[%d]", (int)mm->context); +#endif + r4k_flush_cache_all_d32i32(); + } +} + +static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. 
+ */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache16_page_indexed(page); + if(text) + blast_icache16_page_indexed(page); + blast_scache16_page_indexed(page); + } else { + blast_dcache16_page(page); + if(text) + blast_icache16_page(page); + blast_scache16_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache16_page_indexed(page); + if(text) + blast_icache16_page_indexed(page); + blast_scache32_page_indexed(page); + } else { + blast_dcache16_page(page); + if(text) + blast_icache16_page(page); + blast_scache32_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. 
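+	 *
+	 * For illustration only, with an assumed 512K scache:
+	 * scache_size - 1 == 0x7ffff, so a user page at 0x00484000
+	 * becomes KSEG0 + 0x04000 == 0x80004000 below, a cached kernel
+	 * address with the same low bits.  The indexed flush on that
+	 * alias therefore needs no TLB entry for the foreign ASID.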
+ */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache16_page_indexed(page); + if(text) + blast_icache16_page_indexed(page); + blast_scache64_page_indexed(page); + } else { + blast_dcache16_page(page); + if(text) + blast_icache16_page(page); + blast_scache64_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* + * Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache16_page_indexed(page); + if(text) + blast_icache16_page_indexed(page); + blast_scache128_page_indexed(page); + } else { + blast_dcache16_page(page); + if(text) + blast_icache16_page(page); + blast_scache128_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s16d32i32(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. 
+ */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache32_page_indexed(page); + if(text) + blast_icache32_page_indexed(page); + blast_scache16_page_indexed(page); + } else { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + blast_scache16_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* + * Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache32_page_indexed(page); + if(text) + blast_icache32_page_indexed(page); + blast_scache32_page_indexed(page); + } else { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + blast_scache32_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* + * Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. 
+ */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache32_page_indexed(page); + if(text) + blast_icache32_page_indexed(page); + blast_scache64_page_indexed(page); + } else { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + blast_scache64_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm->context != current->mm->context) { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (scache_size - 1))); + blast_dcache32_page_indexed(page); + if(text) + blast_icache32_page_indexed(page); + blast_scache128_page_indexed(page); + } else { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + blast_scache128_page(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_VALID)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if(mm == current->mm) { + blast_dcache16_page(page); + if(text) + blast_icache16_page(page); + } else { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. 
+ */ + page = (KSEG0 + (page & (dcache_size - 1))); + blast_dcache16_page_indexed(page); + blast_dcache16_page_indexed(page ^ 0x2000); + if(text) { + blast_icache16_page_indexed(page); + blast_icache16_page_indexed(page ^ 0x2000); + } + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_PRESENT)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if((mm == current->mm) && (pte_val(*ptep) & _PAGE_VALID)) { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + } else { + /* + * Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (dcache_size - 1))); + blast_dcache32_page_indexed(page); + if(text) + blast_icache32_page_indexed(page); + } +out: + restore_flags(flags); +} + +static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma, + unsigned long page) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int text; + + /* + * If ownes no valid ASID yet, cannot possibly have gotten + * this page into the cache. + */ + if(mm->context == 0) + return; + +#ifdef DEBUG_CACHE + printk("cpage[%d,%08lx]", (int)mm->context, page); +#endif + save_flags(flags); cli(); + page &= PAGE_MASK; + pgdp = pgd_offset(mm, page); + pmdp = pmd_offset(pgdp, page); + ptep = pte_offset(pmdp, page); + + /* + * If the page isn't marked valid, the page cannot possibly be + * in the cache. + */ + if(!(pte_val(*ptep) & _PAGE_PRESENT)) + goto out; + + text = (vma->vm_flags & VM_EXEC); + /* + * Doing flushes for another ASID than the current one is + * too difficult since stupid R4k caches do a TLB translation + * for every cache flush operation. So we do indexed flushes + * in that case, which doesn't overly flush the cache too much. + */ + if((mm == current->mm) && (pte_val(*ptep) & _PAGE_VALID)) { + blast_dcache32_page(page); + if(text) + blast_icache32_page(page); + } else { + /* Do indexed flush, too much work to get the (possible) + * tlb refills to work correctly. + */ + page = (KSEG0 + (page & (dcache_size - 1))); + blast_dcache32_page_indexed(page); + blast_dcache32_page_indexed(page ^ 0x2000); + if(text) { + blast_icache32_page_indexed(page); + blast_icache32_page_indexed(page ^ 0x2000); + } + } +out: + restore_flags(flags); +} + +/* If the addresses passed to these routines are valid, they are + * either: + * + * 1) In KSEG0, so we can do a direct flush of the page. + * 2) In KSEG2, and since every process can translate those + * addresses all the time in kernel mode we can do a direct + * flush. + * 3) In KSEG1, no flush necessary. 
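+ *
+ *    KSEG1 is the uncached window, so nothing accessed through it can
+ *    be sitting in the caches in the first place; that is why the
+ *    routines below simply do nothing for KSEG1 addresses.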
+ */ +static void r4k_flush_page_to_ram_s16d16i16(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache16_page(page); + blast_scache16_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s32d16i16(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache16_page(page); + blast_scache32_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s64d16i16(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache16_page(page); + blast_scache64_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s128d16i16(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache16_page(page); + blast_scache128_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s16d32i32(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache32_page(page); + blast_scache16_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s32d32i32(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache32_page(page); + blast_scache32_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s64d32i32(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache32_page(page); + blast_scache64_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_s128d32i32(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache32_page(page); + blast_scache128_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_d16i16(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache16_page(page); + restore_flags(flags); + } +} + +static void r4k_flush_page_to_ram_d32i32(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags; + +#ifdef DEBUG_CACHE + printk("cram[%08lx]", page); +#endif + save_flags(flags); cli(); + blast_dcache32_page(page); + restore_flags(flags); + } +} + +/* + * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Invalidate_D, + * Hit_Writeback_D, 
Hit_Invalidate_D and Create_Dirty_Exclusive_D will only + * operate correctly if the internal data cache refill buffer is empty. These + * CACHE instructions should be separated from any potential data cache miss + * by a load instruction to an uncached address to empty the response buffer." + * (Revision 2.0 device errata from IDT available on http://www.idt.com/ + * in .pdf format.) + */ +static void r4k_flush_page_to_ram_d32i32_r4600(unsigned long page) +{ + page &= PAGE_MASK; + if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) { + unsigned long flags, tmp1, tmp2; + +#ifdef DEBUG_CACHE + /* #if 1 */ + printk("r4600_cram[%08lx]", page); +#endif + /* + * Workaround for R4600 bug. Explanation see above. + */ + *(volatile unsigned long *)KSEG1; + + save_flags(flags); cli(); + blast_dcache32_page(page); + blast_dcache32_page(page ^ 0x2000); +#ifdef CONFIG_SGI + /* + * SGI goo. Have to check this closer ... + */ + __asm__ __volatile__(" + .set noreorder + .set mips3 + li %0, 0x1 + dsll %0, 31 + or %0, %0, %2 + lui %1, 0x9000 + dsll32 %1, 0 + or %0, %0, %1 + daddu %1, %0, 0x0fe0 + li %2, 0x80 + mtc0 %2, $12 + nop; nop; nop; nop; +1: sw $0, 0(%0) + bltu %0, %1, 1b + daddu %0, 32 + mtc0 $0, $12 + nop; nop; nop; nop; + mtc0 %3, $12 + nop; nop; nop; nop; + .set mips0 + .set reorder" + : "=&r" (tmp1), "=&r" (tmp2), + "=&r" (page), "=&r" (flags) + : "2" (page & 0x0007f000), "3" (flags)); +#endif /* CONFIG_SGI */ + } +} + +static void r4k_flush_cache_sigtramp(unsigned long addr) +{ + addr &= ~(dc_lsize - 1); + flush_dcache_line(addr); + flush_dcache_line(addr + dc_lsize); + flush_icache_line(addr); + flush_icache_line(addr + dc_lsize); +} + +#undef DEBUG_TLB +#undef DEBUG_TLBUPDATE + +#define NTLB_ENTRIES 48 /* Fixed on all R4XX0 variants... */ +#define NTLB_ENTRIES_HALF 24 /* Fixed on all R4XX0 variants... */ + +static inline void r4k_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ctx; + int entry; + +#ifdef DEBUG_TLB + printk("[tlball]"); +#endif + + save_flags(flags); cli(); + /* Save old context and create impossible VPN2 value */ + old_ctx = (get_entryhi() & 0xff); + set_entryhi(KSEG0); + set_entrylo0(0); + set_entrylo1(0); + BARRIER; + + entry = 0; + + /* Blast 'em all away. 
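+	 *
+	 * Every one of the 48 slots gets the "impossible" KSEG0 VPN2
+	 * and the zeroed EntryLo values set up above written into it,
+	 * which is the closest thing to a global TLB invalidate the
+	 * R4x00 offers.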
*/ + while(entry < NTLB_ENTRIES) { + set_index(entry); + BARRIER; + tlb_write_indexed(); + BARRIER; + entry++; + } + BARRIER; + set_entryhi(old_ctx); + restore_flags(flags); +} + +static void r4k_flush_tlb_mm(struct mm_struct *mm) +{ + if(mm->context != 0) { + unsigned long flags; + +#ifdef DEBUG_TLB + printk("[tlbmm<%d>]", mm->context); +#endif + save_flags(flags); cli(); + get_new_mmu_context(mm, asid_cache); + if(mm == current->mm) + set_entryhi(mm->context & 0xff); + restore_flags(flags); + } +} + +static void r4k_flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + if(mm->context != 0) { + unsigned long flags; + int size; + +#ifdef DEBUG_TLB + printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff), + start, end); +#endif + save_flags(flags); cli(); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + if(size <= NTLB_ENTRIES_HALF) { + int oldpid = (get_entryhi() & 0xff); + int newpid = (mm->context & 0xff); + + start &= (PAGE_MASK << 1); + end += ((PAGE_SIZE << 1) - 1); + end &= (PAGE_MASK << 1); + while(start < end) { + int idx; + + set_entryhi(start | newpid); + start += (PAGE_SIZE << 1); + BARRIER; + tlb_probe(); + BARRIER; + idx = get_index(); + set_entrylo0(0); + set_entrylo1(0); + set_entryhi(KSEG0); + BARRIER; + if(idx < 0) + continue; + tlb_write_indexed(); + BARRIER; + } + set_entryhi(oldpid); + } else { + get_new_mmu_context(mm, asid_cache); + if(mm == current->mm) + set_entryhi(mm->context & 0xff); + } + restore_flags(flags); + } +} + +static void r4k_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if(vma->vm_mm->context != 0) { + unsigned long flags; + int oldpid, newpid, idx; + +#ifdef DEBUG_TLB + printk("[tlbpage<%d,%08lx>]", vma->vm_mm->context, page); +#endif + newpid = (vma->vm_mm->context & 0xff); + page &= (PAGE_MASK << 1); + save_flags(flags); cli(); + oldpid = (get_entryhi() & 0xff); + set_entryhi(page | newpid); + BARRIER; + tlb_probe(); + BARRIER; + idx = get_index(); + set_entrylo0(0); + set_entrylo1(0); + set_entryhi(KSEG0); + if(idx < 0) + goto finish; + BARRIER; + tlb_write_indexed(); + + finish: + BARRIER; + set_entryhi(oldpid); + restore_flags(flags); + } +} + +/* Load a new root pointer into the TLB. */ +static void r4k_load_pgd(unsigned long pg_dir) +{ +} + +static void r4k_pgd_init(unsigned long page) +{ + unsigned long *p = (unsigned long *) page; + int i; + + for(i = 0; i < 1024; i+=8) { + p[i + 0] = (unsigned long) invalid_pte_table; + p[i + 1] = (unsigned long) invalid_pte_table; + p[i + 2] = (unsigned long) invalid_pte_table; + p[i + 3] = (unsigned long) invalid_pte_table; + p[i + 4] = (unsigned long) invalid_pte_table; + p[i + 5] = (unsigned long) invalid_pte_table; + p[i + 6] = (unsigned long) invalid_pte_table; + p[i + 7] = (unsigned long) invalid_pte_table; + } +} + +#ifdef DEBUG_TLBUPDATE +static unsigned long ehi_debug[NTLB_ENTRIES]; +static unsigned long el0_debug[NTLB_ENTRIES]; +static unsigned long el1_debug[NTLB_ENTRIES]; +#endif + +/* We will need multiple versions of update_mmu_cache(), one that just + * updates the TLB with the new pte(s), and another which also checks + * for the R4k "end of page" hardware bug and does the needy. 
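+ *
+ * Roughly, the version below probes for the even/odd page pair that
+ * covers the faulting address, loads EntryLo0/EntryLo1 from the two
+ * ptes (pte_val() >> 6 being the EntryLo format) and then either
+ * rewrites the matching slot with tlb_write_indexed() or, if the
+ * probe missed, drops the mapping into a random slot with
+ * tlb_write_random().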
+ */ +static void r4k_update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int idx, pid; + + pid = (get_entryhi() & 0xff); + +#ifdef DEBUG_TLB + if((pid != (vma->vm_mm->context & 0xff)) || (vma->vm_mm->context == 0)) { + printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n", + (int) (vma->vm_mm->context & 0xff), pid); + } +#endif + + save_flags(flags); cli(); + address &= (PAGE_MASK << 1); + set_entryhi(address | (pid)); + pgdp = pgd_offset(vma->vm_mm, address); + BARRIER; + tlb_probe(); + BARRIER; + pmdp = pmd_offset(pgdp, address); + idx = get_index(); + ptep = pte_offset(pmdp, address); + BARRIER; + set_entrylo0(pte_val(*ptep++) >> 6); + set_entrylo1(pte_val(*ptep) >> 6); + set_entryhi(address | (pid)); + BARRIER; + if(idx < 0) { + tlb_write_random(); +#if 0 + BARRIER; + printk("[MISS]"); +#endif + } else { + tlb_write_indexed(); +#if 0 + BARRIER; + printk("[HIT]"); +#endif + } +#if 0 + if(!strcmp(current->comm, "args")) { + printk("<"); + for(idx = 0; idx < NTLB_ENTRIES; idx++) { + BARRIER; + set_index(idx); BARRIER; + tlb_read(); BARRIER; + address = get_entryhi(); BARRIER; + if((address & 0xff) != 0) + printk("[%08lx]", address); + } + printk(">"); + } + BARRIER; +#endif + BARRIER; + set_entryhi(pid); + BARRIER; + restore_flags(flags); +} + +#if 0 +static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int idx; + + save_flags(flags); cli(); + address &= (PAGE_MASK << 1); + set_entryhi(address | (get_entryhi() & 0xff)); + pgdp = pgd_offset(vma->vm_mm, address); + tlb_probe(); + pmdp = pmd_offset(pgdp, address); + idx = get_index(); + ptep = pte_offset(pmdp, address); + set_entrylo0(pte_val(*ptep++) >> 6); + set_entrylo1(pte_val(*ptep) >> 6); + BARRIER; + if(idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + BARRIER; + restore_flags(flags); +} +#endif + +static void r4k_show_regs(struct pt_regs * regs) +{ + /* Saved main processor registers. */ + printk("$0 : %08lx %08lx %08lx %08lx\n", + 0UL, regs->regs[1], regs->regs[2], regs->regs[3]); + printk("$4 : %08lx %08lx %08lx %08lx\n", + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + printk("$8 : %08lx %08lx %08lx %08lx\n", + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]); + printk("$12: %08lx %08lx %08lx %08lx\n", + regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); + printk("$16: %08lx %08lx %08lx %08lx\n", + regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); + printk("$20: %08lx %08lx %08lx %08lx\n", + regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); + printk("$24: %08lx %08lx\n", + regs->regs[24], regs->regs[25]); + printk("$28: %08lx %08lx %08lx %08lx\n", + regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); + + /* Saved cp0 registers. */ + printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\n", + regs->cp0_epc, regs->cp0_status, regs->cp0_cause); +} + +/* Detect and size the various r4k caches. 
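+ *
+ * The primary cache geometry comes straight out of the CP0 Config
+ * register: a three bit size field gives 2^(12 + field) bytes and a
+ * single bit selects a 16 or 32 byte line, exactly as decoded by
+ * probe_icache() and probe_dcache() below.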
*/ +static void probe_icache(unsigned long config) +{ + unsigned long tmp; + + tmp = (config >> 9) & 7; + icache_size = (1 << (12 + tmp)); + if((config >> 5) & 1) + ic_lsize = 32; + else + ic_lsize = 16; + + printk("Primary ICACHE %dK (linesize %d bytes)\n", + (int)(icache_size >> 10), (int)ic_lsize); +} + +static void probe_dcache(unsigned long config) +{ + unsigned long tmp; + + tmp = (config >> 6) & 7; + dcache_size = (1 << (12 + tmp)); + if((config >> 4) & 1) + dc_lsize = 32; + else + dc_lsize = 16; + + printk("Primary DCACHE %dK (linesize %d bytes)\n", + (int)(dcache_size >> 10), (int)dc_lsize); +} + +static int probe_scache_eeprom(unsigned long config) +{ +#ifdef CONFIG_SGI + volatile unsigned int *cpu_control; + unsigned short cmd = 0xc220; + unsigned long data = 0; + int i, n; + +#ifdef __MIPSEB__ + cpu_control = (volatile unsigned int *) KSEG1ADDR(0x1fa00034); +#else + cpu_control = (volatile unsigned int *) KSEG1ADDR(0x1fa00030); +#endif +#define DEASSERT(bit) (*(cpu_control) &= (~(bit))) +#define ASSERT(bit) (*(cpu_control) |= (bit)) +#define DELAY for(n = 0; n < 100000; n++) __asm__ __volatile__("") + DEASSERT(SGIMC_EEPROM_PRE); + DEASSERT(SGIMC_EEPROM_SDATAO); + DEASSERT(SGIMC_EEPROM_SECLOCK); + DEASSERT(SGIMC_EEPROM_PRE); + DELAY; + ASSERT(SGIMC_EEPROM_CSEL); ASSERT(SGIMC_EEPROM_SECLOCK); + for(i = 0; i < 11; i++) { + if(cmd & (1<<15)) + ASSERT(SGIMC_EEPROM_SDATAO); + else + DEASSERT(SGIMC_EEPROM_SDATAO); + DEASSERT(SGIMC_EEPROM_SECLOCK); + ASSERT(SGIMC_EEPROM_SECLOCK); + cmd <<= 1; + } + DEASSERT(SGIMC_EEPROM_SDATAO); + for(i = 0; i < (sizeof(unsigned short) * 8); i++) { + unsigned int tmp; + + DEASSERT(SGIMC_EEPROM_SECLOCK); + DELAY; + ASSERT(SGIMC_EEPROM_SECLOCK); + DELAY; + data <<= 1; + tmp = *cpu_control; + if(tmp & SGIMC_EEPROM_SDATAI) + data |= 1; + } + DEASSERT(SGIMC_EEPROM_SECLOCK); + DEASSERT(SGIMC_EEPROM_CSEL); + ASSERT(SGIMC_EEPROM_PRE); + ASSERT(SGIMC_EEPROM_SECLOCK); + data <<= PAGE_SHIFT; + printk("R4600/R5000 SCACHE size %dK ", (int) (data >> 10)); + switch(mips_cputype) { + case CPU_R4600: + case CPU_R4640: + sc_lsize = 32; + break; + + default: + sc_lsize = 128; + break; + } + printk("linesize %d bytes\n", sc_lsize); + scache_size = data; + if(data) { + unsigned long addr, tmp1, tmp2; + + /* Enable r4600/r5000 cache. But flush it first. */ + for(addr = KSEG0; addr < (KSEG0 + dcache_size); + addr += dc_lsize) + flush_dcache_line_indexed(addr); + for(addr = KSEG0; addr < (KSEG0 + icache_size); + addr += ic_lsize) + flush_icache_line_indexed(addr); + for(addr = KSEG0; addr < (KSEG0 + scache_size); + addr += sc_lsize) + flush_scache_line_indexed(addr); + + /* R5000 scache enable is in CP0 config, on R4600 variants + * the scache is enable by the memory mapped cache controller. + */ + if(mips_cputype == CPU_R5000) { + unsigned long config; + + config = read_32bit_cp0_register(CP0_CONFIG); + config |= 0x1000; + write_32bit_cp0_register(CP0_CONFIG, config); + } else { + /* This is really cool... 
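+			 * Reading of the assembly below: it builds the
+			 * uncached XKPHYS address 0x9000000080000000, sets
+			 * the KX bit in c0_status so the 64-bit address is
+			 * usable from kernel mode, and does one store byte
+			 * there, which appears to be what prods the memory
+			 * mapped cache controller on R4600 class boards
+			 * into switching the scache on.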
*/ + printk("Enabling R4600 SCACHE\n"); + __asm__ __volatile__(" + .set noreorder + .set mips3 + li %0, 0x1 + dsll %0, 31 + lui %1, 0x9000 + dsll32 %1, 0 + or %0, %1, %0 + mfc0 %2, $12 + nop; nop; nop; nop; + li %1, 0x80 + mtc0 %1, $12 + nop; nop; nop; nop; + sb $0, 0(%0) + mtc0 $0, $12 + nop; nop; nop; nop; + mtc0 %2, $12 + nop; nop; nop; nop; + .set mips0 + .set reorder + " : "=r" (tmp1), "=r" (tmp2), "=r" (addr)); + } + + return 1; + } else { + if(mips_cputype == CPU_R5000) + return -1; + else + return 0; + } +#else + /* + * XXX For now we don't panic and assume that existing chipset + * controlled caches are setup correnctly and are completly + * transparent. Works fine for those MIPS machines I know. + * Morituri the salutant ... + */ + return 0; + + panic("Cannot probe SCACHE on this machine."); +#endif +} + +/* If you even _breathe_ on this function, look at the gcc output + * and make sure it does not pop things on and off the stack for + * the cache sizing loop that executes in KSEG1 space or else + * you will crash and burn badly. You have been warned. + */ +static int probe_scache(unsigned long config) +{ + extern unsigned long stext; + unsigned long flags, addr, begin, end, pow2; + int tmp; + + tmp = ((config >> 17) & 1); + if(tmp) + return 0; + tmp = ((config >> 22) & 3); + switch(tmp) { + case 0: + sc_lsize = 16; + break; + case 1: + sc_lsize = 32; + break; + case 2: + sc_lsize = 64; + break; + case 3: + sc_lsize = 128; + break; + } + + begin = (unsigned long) &stext; + begin &= ~((4 * 1024 * 1024) - 1); + end = begin + (4 * 1024 * 1024); + + /* This is such a bitch, you'd think they would make it + * easy to do this. Away you daemons of stupidity! + */ + save_flags(flags); cli(); + + /* Fill each size-multiple cache line with a valid tag. */ + pow2 = (64 * 1024); + for(addr = begin; addr < end; addr = (begin + pow2)) { + unsigned long *p = (unsigned long *) addr; + __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */ + pow2 <<= 1; + } + + /* Load first line with zero (therefore invalid) tag. */ + set_taglo(0); + set_taghi(0); + __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */ + __asm__ __volatile__("\n\t.set noreorder\n\t" + ".set mips3\n\t" + "cache 8, (%0)\n\t" + ".set mips0\n\t" + ".set reorder\n\t" : : "r" (begin)); + __asm__ __volatile__("\n\t.set noreorder\n\t" + ".set mips3\n\t" + "cache 9, (%0)\n\t" + ".set mips0\n\t" + ".set reorder\n\t" : : "r" (begin)); + __asm__ __volatile__("\n\t.set noreorder\n\t" + ".set mips3\n\t" + "cache 11, (%0)\n\t" + ".set mips0\n\t" + ".set reorder\n\t" : : "r" (begin)); + + /* Now search for the wrap around point. */ + pow2 = (128 * 1024); + tmp = 0; + for(addr = (begin + (128 * 1024)); addr < (end); addr = (begin + pow2)) { + __asm__ __volatile__("\n\t.set noreorder\n\t" + ".set mips3\n\t" + "cache 7, (%0)\n\t" + ".set mips0\n\t" + ".set reorder\n\t" : : "r" (addr)); + __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... 
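+		 *
+		 * The zero tag planted at 'begin' above is the sentinel:
+		 * once begin + pow2 wraps around to that very scache line
+		 * (i.e. pow2 has reached the cache size), the index load
+		 * tag reads the zero back and the loop exits, so
+		 * addr - begin is the secondary cache size.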
*/ + if(!get_taglo()) + break; + pow2 <<= 1; + } + restore_flags(flags); + addr -= begin; + printk("Secondary cache sized at %dK linesize %d\n", (int) (addr >> 10), + sc_lsize); + scache_size = addr; + return 1; +} + +static void setup_noscache_funcs(void) +{ + switch(dc_lsize) { + case 16: + clear_page = r4k_clear_page_d16; + copy_page = r4k_copy_page_d16; + flush_cache_all = r4k_flush_cache_all_d16i16; + flush_cache_mm = r4k_flush_cache_mm_d16i16; + flush_cache_range = r4k_flush_cache_range_d16i16; + flush_cache_page = r4k_flush_cache_page_d16i16; + flush_page_to_ram = r4k_flush_page_to_ram_d16i16; + break; + case 32: + clear_page = r4k_clear_page_d32; + copy_page = r4k_copy_page_d32; + flush_cache_all = r4k_flush_cache_all_d32i32; + flush_cache_mm = r4k_flush_cache_mm_d32i32; + flush_cache_range = r4k_flush_cache_range_d32i32; + flush_cache_page = r4k_flush_cache_page_d32i32; + flush_page_to_ram = r4k_flush_page_to_ram_d32i32; + break; + } +} + +static void setup_scache_funcs(void) +{ + switch(sc_lsize) { + case 16: + switch(dc_lsize) { + case 16: + clear_page = r4k_clear_page_d16; + copy_page = r4k_copy_page_d16; + flush_cache_all = r4k_flush_cache_all_s16d16i16; + flush_cache_mm = r4k_flush_cache_mm_s16d16i16; + flush_cache_range = r4k_flush_cache_range_s16d16i16; + flush_cache_page = r4k_flush_cache_page_s16d16i16; + flush_page_to_ram = r4k_flush_page_to_ram_s16d16i16; + break; + case 32: + clear_page = r4k_clear_page_d32; + copy_page = r4k_copy_page_d32; + flush_cache_all = r4k_flush_cache_all_s16d32i32; + flush_cache_mm = r4k_flush_cache_mm_s16d32i32; + flush_cache_range = r4k_flush_cache_range_s16d32i32; + flush_cache_page = r4k_flush_cache_page_s16d32i32; + flush_page_to_ram = r4k_flush_page_to_ram_s16d32i32; + break; + }; + break; + case 32: + switch(dc_lsize) { + case 16: + clear_page = r4k_clear_page_d16; + copy_page = r4k_copy_page_d16; + flush_cache_all = r4k_flush_cache_all_s32d16i16; + flush_cache_mm = r4k_flush_cache_mm_s32d16i16; + flush_cache_range = r4k_flush_cache_range_s32d16i16; + flush_cache_page = r4k_flush_cache_page_s32d16i16; + flush_page_to_ram = r4k_flush_page_to_ram_s32d16i16; + break; + case 32: + clear_page = r4k_clear_page_d32; + copy_page = r4k_copy_page_d32; + flush_cache_all = r4k_flush_cache_all_s32d32i32; + flush_cache_mm = r4k_flush_cache_mm_s32d32i32; + flush_cache_range = r4k_flush_cache_range_s32d32i32; + flush_cache_page = r4k_flush_cache_page_s32d32i32; + flush_page_to_ram = r4k_flush_page_to_ram_s32d32i32; + break; + }; + case 64: + switch(dc_lsize) { + case 16: + clear_page = r4k_clear_page_d16; + copy_page = r4k_copy_page_d16; + flush_cache_all = r4k_flush_cache_all_s64d16i16; + flush_cache_mm = r4k_flush_cache_mm_s64d16i16; + flush_cache_range = r4k_flush_cache_range_s64d16i16; + flush_cache_page = r4k_flush_cache_page_s64d16i16; + flush_page_to_ram = r4k_flush_page_to_ram_s64d16i16; + break; + case 32: + clear_page = r4k_clear_page_d32; + copy_page = r4k_copy_page_d32; + flush_cache_all = r4k_flush_cache_all_s64d32i32; + flush_cache_mm = r4k_flush_cache_mm_s64d32i32; + flush_cache_range = r4k_flush_cache_range_s64d32i32; + flush_cache_page = r4k_flush_cache_page_s64d32i32; + flush_page_to_ram = r4k_flush_page_to_ram_s64d32i32; + break; + }; + case 128: + switch(dc_lsize) { + case 16: + clear_page = r4k_clear_page_d16; + copy_page = r4k_copy_page_d16; + flush_cache_all = r4k_flush_cache_all_s128d16i16; + flush_cache_mm = r4k_flush_cache_mm_s128d16i16; + flush_cache_range = r4k_flush_cache_range_s128d16i16; + flush_cache_page = 
r4k_flush_cache_page_s128d16i16; + flush_page_to_ram = r4k_flush_page_to_ram_s128d16i16; + break; + case 32: + clear_page = r4k_clear_page_d32; + copy_page = r4k_copy_page_d32; + flush_cache_all = r4k_flush_cache_all_s128d32i32; + flush_cache_mm = r4k_flush_cache_mm_s128d32i32; + flush_cache_range = r4k_flush_cache_range_s128d32i32; + flush_cache_page = r4k_flush_cache_page_s128d32i32; + flush_page_to_ram = r4k_flush_page_to_ram_s128d32i32; + break; + }; + break; + } +} + +typedef int (*probe_func_t)(unsigned long); +static probe_func_t probe_scache_kseg1; + +void ld_mmu_r4xx0(void) +{ + unsigned long cfg = read_32bit_cp0_register(CP0_CONFIG); + int sc_present = 0; + + printk("CPU REVISION IS: %08x\n", read_32bit_cp0_register(CP0_PRID)); + + probe_icache(cfg); + probe_dcache(cfg); + + switch(mips_cputype) { + case CPU_R4000PC: + case CPU_R4000SC: + case CPU_R4000MC: + case CPU_R4400PC: + case CPU_R4400SC: + case CPU_R4400MC: +try_again: + probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache)); + sc_present = probe_scache_kseg1(cfg); + break; + + case CPU_R4600: + case CPU_R4640: + case CPU_R4700: + case CPU_R5000: + probe_scache_kseg1 = (probe_func_t) + (KSEG1ADDR(&probe_scache_eeprom)); + sc_present = probe_scache_eeprom(cfg); + + /* Try using tags if eeprom give us bogus data. */ + if(sc_present == -1) + goto try_again; + break; + }; + + if(!sc_present) { + /* Lacks secondary cache. */ + setup_noscache_funcs(); + } else { + /* Has a secondary cache. */ + if(mips_cputype != CPU_R4600 && + mips_cputype != CPU_R4640 && + mips_cputype != CPU_R4700 && + mips_cputype != CPU_R5000) { + setup_scache_funcs(); + } else { + setup_noscache_funcs(); + if((mips_cputype != CPU_R5000)) { + flush_cache_page = + r4k_flush_cache_page_d32i32_r4600; + flush_page_to_ram = + r4k_flush_page_to_ram_d32i32_r4600; + } + } + } + + flush_cache_sigtramp = r4k_flush_cache_sigtramp; + + flush_tlb_all = r4k_flush_tlb_all; + flush_tlb_mm = r4k_flush_tlb_mm; + flush_tlb_range = r4k_flush_tlb_range; + flush_tlb_page = r4k_flush_tlb_page; + + load_pgd = r4k_load_pgd; + pgd_init = r4k_pgd_init; + update_mmu_cache = r4k_update_mmu_cache; + + show_regs = r4k_show_regs; + + flush_cache_all(); + write_32bit_cp0_register(CP0_WIRED, 0); + flush_tlb_all(); +} diff --git a/arch/mips/mm/r6000.c b/arch/mips/mm/r6000.c new file mode 100644 index 000000000..ad9332d3f --- /dev/null +++ b/arch/mips/mm/r6000.c @@ -0,0 +1,182 @@ +/* $Id: r6000.c,v 1.5 1996/07/29 11:10:08 dm Exp $ + * r6000.c: MMU and cache routines for the R6000 processors. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> + +#include <asm/cacheops.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/sgialib.h> + +__asm__(".set mips3"); /* because we know... */ + +/* Cache operations. XXX Write these dave... */ +static inline void r6000_flush_cache_all(void) +{ + /* XXX */ +} + +static void r6000_flush_cache_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void r6000_flush_cache_range(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void r6000_flush_cache_page(struct vm_area_struct *vma, + unsigned long page) +{ + /* XXX */ +} + +static void r6000_flush_page_to_ram(unsigned long page) +{ + /* XXX */ +} + +static void r6000_flush_cache_sigtramp(unsigned long page) +{ + /* XXX */ +} + +/* TLB operations. XXX Write these dave... 
*/ +static inline void r6000_flush_tlb_all(void) +{ + /* XXX */ +} + +static void r6000_flush_tlb_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void r6000_flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void r6000_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + /* XXX */ +} + +static void r6000_load_pgd(unsigned long pg_dir) +{ +} + +static void r6000_pgd_init(unsigned long page) +{ + unsigned long dummy1, dummy2; + + /* + * This version is optimized for the R6000. We generate dirty lines + * in the datacache, overwrite these lines with zeros and then flush + * the cache. Sounds horribly complicated but is just a trick to + * avoid unnecessary loads of from memory and uncached stores which + * are very expensive. Not tested yet as the R6000 is a rare CPU only + * available in SGI machines and I don't have one. + */ + __asm__ __volatile__( + ".set\tnoreorder\n" + "1:\t" + "cache\t%5,(%0)\n\t" + "sw\t%2,(%0)\n\t" + "sw\t%2,4(%0)\n\t" + "sw\t%2,8(%0)\n\t" + "sw\t%2,12(%0)\n\t" + "cache\t%5,16(%0)\n\t" + "sw\t%2,16(%0)\n\t" + "sw\t%2,20(%0)\n\t" + "sw\t%2,24(%0)\n\t" + "sw\t%2,28(%0)\n\t" + "subu\t%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,32\n\t" + ".set\treorder" + :"=r" (dummy1), + "=r" (dummy2) + :"r" ((unsigned long) invalid_pte_table), + "0" (page), + "1" (PAGE_SIZE/(sizeof(pmd_t)*8)), + "i" (Create_Dirty_Excl_D)); +} + +static void r6000_update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ + r6000_flush_tlb_page(vma, address); + /* + * FIXME: We should also reload a new entry into the TLB to + * avoid unnecessary exceptions. + */ +} + +static void r6000_show_regs(struct pt_regs * regs) +{ + /* + * Saved main processor registers + */ + printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + 0, (unsigned long) regs->regs[1], (unsigned long) regs->regs[2], + (unsigned long) regs->regs[3], (unsigned long) regs->regs[4], + (unsigned long) regs->regs[5], (unsigned long) regs->regs[6], + (unsigned long) regs->regs[7]); + printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[8], (unsigned long) regs->regs[9], + (unsigned long) regs->regs[10], (unsigned long) regs->regs[11], + (unsigned long) regs->regs[12], (unsigned long) regs->regs[13], + (unsigned long) regs->regs[14], (unsigned long) regs->regs[15]); + printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[16], (unsigned long) regs->regs[17], + (unsigned long) regs->regs[18], (unsigned long) regs->regs[19], + (unsigned long) regs->regs[20], (unsigned long) regs->regs[21], + (unsigned long) regs->regs[22], (unsigned long) regs->regs[23]); + printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n", + (unsigned long) regs->regs[24], (unsigned long) regs->regs[25], + (unsigned long) regs->regs[28], (unsigned long) regs->regs[29], + (unsigned long) regs->regs[30], (unsigned long) regs->regs[31]); + + /* + * Saved cp0 registers + */ + printk("epc : %08lx\nStatus: %08x\nCause : %08x\n", + (unsigned long) regs->cp0_epc, (unsigned int) regs->cp0_status, + (unsigned int) regs->cp0_cause); +} + +void ld_mmu_r6000(void) +{ + flush_cache_all = r6000_flush_cache_all; + flush_cache_mm = r6000_flush_cache_mm; + flush_cache_range = r6000_flush_cache_range; + flush_cache_page = r6000_flush_cache_page; + flush_cache_sigtramp = r6000_flush_cache_sigtramp; + flush_page_to_ram = r6000_flush_page_to_ram; + + flush_tlb_all = r6000_flush_tlb_all; + flush_tlb_mm = 
r6000_flush_tlb_mm; + flush_tlb_range = r6000_flush_tlb_range; + flush_tlb_page = r6000_flush_tlb_page; + + load_pgd = r6000_load_pgd; + pgd_init = r6000_pgd_init; + update_mmu_cache = r6000_update_mmu_cache; + + show_regs = r6000_show_regs; + + flush_cache_all(); + flush_tlb_all(); +} diff --git a/arch/mips/mm/stack.c b/arch/mips/mm/stack.c deleted file mode 100644 index 3a4dccd26..000000000 --- a/arch/mips/mm/stack.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Kernel stack allocation/deallocation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * (This is _bad_ if the free page pool is fragmented ...) - */ -#include <linux/sched.h> -#include <linux/mm.h> - -extern unsigned long alloc_kernel_stack(void) -{ - unsigned long stack; - stack = __get_free_pages(GFP_KERNEL, 1, 0); - - return stack; -} - -extern void free_kernel_stack(unsigned long stack) -{ - free_pages(stack, 1); -} diff --git a/arch/mips/mm/tfp.c b/arch/mips/mm/tfp.c new file mode 100644 index 000000000..9bd7f2b4a --- /dev/null +++ b/arch/mips/mm/tfp.c @@ -0,0 +1,103 @@ +/* $Id: tfp.c,v 1.3 1996/07/29 11:10:08 dm Exp $ + * tfp.c: MMU and cache routines specific to the r8000 (TFP). + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/sgialib.h> + +extern unsigned long mips_tlb_entries; + +/* Cache operations. XXX Write these dave... */ +static inline void tfp_flush_cache_all(void) +{ + /* XXX */ +} + +static void tfp_flush_cache_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void tfp_flush_cache_range(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void tfp_flush_cache_page(struct vm_area_struct *vma, + unsigned long page) +{ + /* XXX */ +} + +static void tfp_flush_page_to_ram(unsigned long page) +{ + /* XXX */ +} + +static void tfp_flush_cache_sigtramp(unsigned long page) +{ + /* XXX */ +} + +/* TLB operations. XXX Write these dave... 
*/ +static inline void tfp_flush_tlb_all(void) +{ + /* XXX */ +} + +static void tfp_flush_tlb_mm(struct mm_struct *mm) +{ + /* XXX */ +} + +static void tfp_flush_tlb_range(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + /* XXX */ +} + +static void tfp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + /* XXX */ +} + +static void tfp_load_pgd(unsigned long pg_dir) +{ +} + +static void tfp_pgd_init(unsigned long page) +{ +} + +void ld_mmu_tfp(void) +{ + flush_cache_all = tfp_flush_cache_all; + flush_cache_mm = tfp_flush_cache_mm; + flush_cache_range = tfp_flush_cache_range; + flush_cache_page = tfp_flush_cache_page; + flush_cache_sigtramp = tfp_flush_cache_sigtramp; + flush_page_to_ram = tfp_flush_page_to_ram; + + flush_tlb_all = tfp_flush_tlb_all; + flush_tlb_mm = tfp_flush_tlb_mm; + flush_tlb_range = tfp_flush_tlb_range; + flush_tlb_page = tfp_flush_tlb_page; + + load_pgd = tfp_load_pgd; + pgd_init = tfp_pgd_init; + + flush_cache_all(); + flush_tlb_all(); +} + diff --git a/arch/mips/sgi/kernel/.cvsignore b/arch/mips/sgi/kernel/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/sgi/kernel/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/sgi/kernel/Makefile b/arch/mips/sgi/kernel/Makefile new file mode 100644 index 000000000..c79023b23 --- /dev/null +++ b/arch/mips/sgi/kernel/Makefile @@ -0,0 +1,30 @@ +# $Id: Makefile,v 1.5 1996/06/08 12:08:38 dm Exp $ +# Makefile for the SGI specific kernel interface routines +# under Linux. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definitions are now in the main makefile... + +.S.s: + $(CPP) $(CFLAGS) $< -o $*.s +.S.o: + $(CC) $(CFLAGS) -c $< -o $*.o + +OBJS = indy_mc.o indy_hpc.o indy_int.o system.o indy_timer.o indyIRQ.o \ + reset.o setup.o time.o + +all: sgikern.a + +sgikern.a: $(OBJS) + $(AR) rcs sgikern.a $(OBJS) + sync + +indyIRQ.o: indyIRQ.S + +dep: + $(CPP) -M *.c > .depend + +include $(TOPDIR)/Rules.make diff --git a/arch/mips/sgi/kernel/indyIRQ.S b/arch/mips/sgi/kernel/indyIRQ.S new file mode 100644 index 000000000..a7058d3ee --- /dev/null +++ b/arch/mips/sgi/kernel/indyIRQ.S @@ -0,0 +1,138 @@ +/* $Id: indyIRQ.S,v 1.5 1996/06/29 12:41:12 dm Exp $ + * indyIRQ.S: Interrupt exception dispatch code for FullHouse and + * Guiness. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <asm/asm.h> +#include <asm/mipsconfig.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> +#include <asm/stackframe.h> + + /* A lot of complication here is taken away because: + * + * 1) We handle one interrupt and return, sitting in a loop + * and moving across all the pending IRQ bits in the cause + * register is _NOT_ the answer, the common case is one + * pending IRQ so optimize in that direction. + * + * 2) We need not check against bits in the status register + * IRQ mask, that would make this routine slow as hell. + * + * 3) Linux only thinks in terms of all IRQs on or all IRQs + * off, nothing in between like BSD spl() brain-damage. 
+ * + * Furthermore, the IRQs on the INDY look basically (barring + * software IRQs which we don't use at all) like: + * + * MIPS IRQ Source + * -------- ------ + * 0 Software (ignored) + * 1 Software (ignored) + * 2 Local IRQ level zero + * 3 Local IRQ level one + * 4 8254 Timer zero + * 5 8254 Timer one + * 6 Bus Error + * 7 R4k timer (what we use) + * + * We handle the IRQ according to _our_ priority which is: + * + * Highest ---- R4k Timer + * Local IRQ zero + * Local IRQ one + * Bus Error + * 8254 Timer zero + * Lowest ---- 8254 Timer one + * + * then we just return, if multiple IRQs are pending then + * we will just take another exception, big deal. + */ + + .text + .set noreorder + .set noat + .align 5 + NESTED(indyIRQ, PT_SIZE, sp) + SAVE_ALL + CLI + .set at + mfc0 s0, CP0_CAUSE # get irq mask + lui s3, %hi(intr_count) + + /* First we check for r4k counter/timer IRQ. */ + andi a0, s0, CAUSEF_IP7 + beq a0, zero, 1f + andi a0, s0, CAUSEF_IP2 # delay slot, check local level zero + + /* Wheee, a timer interrupt. */ + lw s7, %lo(intr_count)(s3) + move a0, sp + addiu t0, s7, 1 + jal indy_timer_interrupt + sw t0, %lo(intr_count)(s3) # delay slot, set intr_count + + j ret_from_sys_call + sw s7, %lo(intr_count)(s3) # delay slot, restore intr_count + +1: + beq a0, zero, 1f + andi a0, s0, CAUSEF_IP3 # delay slot, check local level one + + /* Wheee, local level zero interrupt. */ + lw s7, %lo(intr_count)(s3) + move a0, sp + addiu t0, s7, 1 + + jal indy_local0_irqdispatch + sw t0, %lo(intr_count)(s3) + + j ret_from_sys_call + sw s7, %lo(intr_count)(s3) # delay slot, restore intr_count + +1: + beq a0, zero, 1f + andi a0, s0, CAUSEF_IP6 # delay slot, check bus error + + /* Wheee, local level one interrupt. */ + lw s7, %lo(intr_count)(s3) + move a0, sp + addiu t0, s7, 1 + jal indy_local1_irqdispatch + sw t0, %lo(intr_count)(s3) + + j ret_from_sys_call + sw s7, %lo(intr_count)(s3) + +1: + beq a0, zero, 1f + lw s7, %lo(intr_count)(s3) + + /* Wheee, an asynchronous bus error... */ + addiu t0, s7, 1 + move a0, sp + jal indy_buserror_irq + sw t0, %lo(intr_count)(s3) + + j ret_from_sys_call + sw s7, %lo(intr_count)(s3) + +1: + /* Here by mistake? This is possible, what can happen + * is that by the time we take the exception the IRQ + * pin goes low, so just leave if this is the case. + */ + andi a0, s0, (CAUSEF_IP4 | CAUSEF_IP5) + beq a0, zero, 1f + addiu t0, s7, 1 + + /* Must be one of the 8254 timers... */ + move a0, sp + jal indy_8254timer_irq + sw t0, %lo(intr_count)(s3) +1: + j ret_from_sys_call + sw s7, %lo(intr_count)(s3) + END(indyIRQ) diff --git a/arch/mips/sgi/kernel/indy_hpc.c b/arch/mips/sgi/kernel/indy_hpc.c new file mode 100644 index 000000000..30f3fe36b --- /dev/null +++ b/arch/mips/sgi/kernel/indy_hpc.c @@ -0,0 +1,114 @@ +/* $Id: indy_hpc.c,v 1.4 1996/06/29 07:06:50 dm Exp $ + * indy_hpc.c: Routines for generic manipulation of the HPC controllers. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <asm/segment.h> +#include <asm/ptrace.h> +#include <asm/processor.h> +#include <asm/sgihpc.h> +#include <asm/sgint23.h> +#include <asm/sgialib.h> + +/* #define DEBUG_SGIHPC */ + +struct hpc3_regs *hpc3c0, *hpc3c1; +struct hpc3_miscregs *hpc3mregs; + +/* We need software copies of these because they are write only. */ +static unsigned long write1, write2; + +/* Machine specific identifier knobs. 
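+ *
+ * sgihpc_init() below derives them from the HPC3 system ID register:
+ * bit 0 tells Guiness from Fullhouse, bits 1-4 are the board revision
+ * and bits 5-7 the chip revision, and the combination decides whether
+ * an IOC2 is present.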
*/ +int sgi_has_ioc2 = 0; +int sgi_guiness = 0; +int sgi_boardid; + +void sgihpc_write1_modify(int set, int clear) +{ + write1 |= set; + write1 &= ~clear; + hpc3mregs->write1 = write1; +} + +void sgihpc_write2_modify(int set, int clear) +{ + write2 |= set; + write2 &= ~clear; + hpc3mregs->write2 = write2; +} + +void sgihpc_init(void) +{ + unsigned long sid, crev, brev; + + hpc3c0 = (struct hpc3_regs *) (KSEG1 + HPC3_CHIP0_PBASE); + hpc3c1 = (struct hpc3_regs *) (KSEG1 + HPC3_CHIP1_PBASE); + hpc3mregs = (struct hpc3_miscregs *) (KSEG1 + HPC3_MREGS_PBASE); + sid = hpc3mregs->sysid; + + sid &= 0xff; + crev = (sid & 0xe0) >> 5; + brev = (sid & 0x1e) >> 1; + +#ifdef DEBUG_SGIHPC + prom_printf("sgihpc_init: crev<%2x> brev<%2x>\n", crev, brev); + prom_printf("sgihpc_init: "); +#endif + + if(sid & 1) { +#ifdef DEBUG_SGIHPC + prom_printf("GUINESS "); +#endif + sgi_guiness = 1; + } else { +#ifdef DEBUG_SGIHPC + prom_printf("FULLHOUSE "); +#endif + sgi_guiness = 0; + } + sgi_boardid = brev; + +#ifdef DEBUG_SGIHPC + prom_printf("sgi_boardid<%d> ", sgi_boardid); +#endif + + if(crev == 1) { + if((sid & 1) || (brev >= 2)) { +#ifdef DEBUG_SGIHPC + prom_printf("IOC2 "); +#endif + sgi_has_ioc2 = 1; + } else { +#ifdef DEBUG_SGIHPC + prom_printf("IOC1 revision 1 "); +#endif + } + } else { +#ifdef DEBUG_SGIHPC + prom_printf("IOC1 revision 0 "); +#endif + } +#ifdef DEBUG_SGIHPC + prom_printf("\n"); +#endif + + write1 = (HPC3_WRITE1_PRESET | + HPC3_WRITE1_KMRESET | + HPC3_WRITE1_ERESET | + HPC3_WRITE1_LC0OFF); + + write2 = (HPC3_WRITE2_EASEL | + HPC3_WRITE2_NTHRESH | + HPC3_WRITE2_TPSPEED | + HPC3_WRITE2_EPSEL | + HPC3_WRITE2_U0AMODE | + HPC3_WRITE2_U1AMODE); + + if(!sgi_guiness) + write1 |= HPC3_WRITE1_GRESET; + hpc3mregs->write1 = write1; + hpc3mregs->write2 = write2; + + hpc3c0->pbus_piocfgs[0][6] |= HPC3_PIOPCFG_HW; +} diff --git a/arch/mips/sgi/kernel/indy_int.c b/arch/mips/sgi/kernel/indy_int.c new file mode 100644 index 000000000..45600d2de --- /dev/null +++ b/arch/mips/sgi/kernel/indy_int.c @@ -0,0 +1,579 @@ +/* $Id: indy_int.c,v 1.12 1996/08/07 02:54:11 dm Exp $ + * indy_int.c: Routines for generic manipulation of the INT[23] ASIC + * found on INDY workstations.. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ +#include <linux/config.h> + +#include <linux/errno.h> +#include <linux/kernel_stat.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/timex.h> +#include <linux/malloc.h> +#include <linux/random.h> + +#include <asm/bitops.h> +#include <asm/bootinfo.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/mipsregs.h> +#include <asm/system.h> +#include <asm/vector.h> + +#include <asm/segment.h> +#include <asm/ptrace.h> +#include <asm/processor.h> +#include <asm/sgi.h> +#include <asm/sgihpc.h> +#include <asm/sgint23.h> +#include <asm/sgialib.h> + +/* #define DEBUG_SGINT */ + +struct sgi_int2_regs *sgi_i2regs; +struct sgi_int3_regs *sgi_i3regs; +struct sgi_ioc_ints *ioc_icontrol; +struct sgi_ioc_timers *ioc_timers; +volatile unsigned char *ioc_tclear; + +static char lc0msk_to_irqnr[256]; +static char lc1msk_to_irqnr[256]; +static char lc2msk_to_irqnr[256]; +static char lc3msk_to_irqnr[256]; + +extern asmlinkage void indyIRQ(void); + +#ifdef CONFIG_REMOTE_DEBUG +extern void rs_kgdb_hook(int); +#endif + +unsigned long spurious_count = 0; + +/* Local IRQ's are layed out logically like this: + * + * 0 --> 7 == local 0 interrupts + * 8 --> 15 == local 1 interrupts + * 16 --> 23 == vectored level 2 interrupts + * 24 --> 31 == vectored level 3 interrupts (not used) + */ +void disable_local_irq(unsigned int irq_nr) +{ + unsigned long flags; + + save_flags(flags); + cli(); + switch(irq_nr) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + ioc_icontrol->imask0 &= ~(1 << irq_nr); + break; + + case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: + ioc_icontrol->imask1 &= ~(1 << (irq_nr - 8)); + break; + + case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: + ioc_icontrol->cmeimask0 &= ~(1 << (irq_nr - 16)); + break; + + default: + /* This way we'll see if anyone would ever want vectored + * level 3 interrupts. Highly unlikely. 
+ */ + printk("Yeeee, got passed irq_nr %d at disable_irq\n", irq_nr); + panic("INVALID IRQ level!"); + }; + restore_flags(flags); +} + +void enable_local_irq(unsigned int irq_nr) +{ + unsigned long flags; + save_flags(flags); + cli(); + switch(irq_nr) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + ioc_icontrol->imask0 |= (1 << irq_nr); + break; + + case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: + ioc_icontrol->imask1 |= (1 << (irq_nr - 8)); + break; + + case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: + enable_local_irq(7); + ioc_icontrol->cmeimask0 |= (1 << (irq_nr - 16)); + break; + + default: + printk("Yeeee, got passed irq_nr %d at disable_irq\n", irq_nr); + panic("INVALID IRQ level!"); + }; + restore_flags(flags); +} + +void disable_gio_irq(unsigned int irq_nr) +{ + /* XXX TODO XXX */ +} + +void enable_gio_irq(unsigned int irq_nr) +{ + /* XXX TODO XXX */ +} + +void disable_hpcdma_irq(unsigned int irq_nr) +{ + /* XXX TODO XXX */ +} + +void enable_hpcdma_irq(unsigned int irq_nr) +{ + /* XXX TODO XXX */ +} + +void disable_irq(unsigned int irq_nr) +{ + unsigned int n = irq_nr; + if(n >= SGINT_END) { + printk("whee, invalid irq_nr %d\n", irq_nr); + panic("IRQ, you lose..."); + } + if(n >= SGINT_LOCAL0 && n < SGINT_GIO) { + disable_local_irq(n - SGINT_LOCAL0); + } else if(n >= SGINT_GIO && n < SGINT_HPCDMA) { + disable_gio_irq(n - SGINT_GIO); + } else if(n >= SGINT_HPCDMA && n < SGINT_END) { + disable_hpcdma_irq(n - SGINT_HPCDMA); + } else { + panic("how did I get here?"); + } +} + +void enable_irq(unsigned int irq_nr) +{ + unsigned int n = irq_nr; + if(n >= SGINT_END) { + printk("whee, invalid irq_nr %d\n", irq_nr); + panic("IRQ, you lose..."); + } + if(n >= SGINT_LOCAL0 && n < SGINT_GIO) { + enable_local_irq(n - SGINT_LOCAL0); + } else if(n >= SGINT_GIO && n < SGINT_HPCDMA) { + enable_gio_irq(n - SGINT_GIO); + } else if(n >= SGINT_HPCDMA && n < SGINT_END) { + enable_hpcdma_irq(n - SGINT_HPCDMA); + } else { + panic("how did I get here?"); + } +} + +static void local_unex(int irq, void *data, struct pt_regs *regs) +{ + printk("Whee: unexpected local IRQ at %08lx\n", + (unsigned long) regs->cp0_epc); + printk("DUMP: stat0<%x> stat1<%x> vmeistat<%x>\n", + ioc_icontrol->istat0, ioc_icontrol->istat1, + ioc_icontrol->vmeistat); +} + +static struct irqaction *local_irq_action[24] = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL +}; + +int setup_indy_irq(int irq, struct irqaction * new) +{ + printk("setup_indy_irq: Yeee, don't know how to setup irq<%d> for %s %p\n", + irq, new->name, new->handler); + return 0; +} + +static struct irqaction r4ktimer_action = { + NULL, 0, 0, "R4000 timer/counter", NULL, NULL, +}; + +static struct irqaction indy_berr_action = { + NULL, 0, 0, "IP22 Bus Error", NULL, NULL, +}; + +static struct irqaction *irq_action[16] = { + NULL, NULL, NULL, NULL, + NULL, NULL, &indy_berr_action, &r4ktimer_action, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL +}; + +int get_irq_list(char *buf) +{ + int i, len = 0; + int num = 0; + struct irqaction * action; + + for (i = 0 ; i < 16 ; i++, num++) { + action = irq_action[i]; + if (!action) + continue; + len += sprintf(buf+len, "%2d: %8d %c %s", + num, kstat.interrupts[num], + (action->flags & SA_INTERRUPT) ? 
'+' : ' ', + action->name); + for (action=action->next; action; action = action->next) { + len += sprintf(buf+len, ",%s %s", + (action->flags & SA_INTERRUPT) ? " +" : "", + action->name); + } + len += sprintf(buf+len, " [on-chip]\n"); + } + for (i = 0 ; i < 24 ; i++, num++) { + action = local_irq_action[i]; + if (!action) + continue; + len += sprintf(buf+len, "%2d: %8d %c %s", + num, kstat.interrupts[num], + (action->flags & SA_INTERRUPT) ? '+' : ' ', + action->name); + for (action=action->next; action; action = action->next) { + len += sprintf(buf+len, ",%s %s", + (action->flags & SA_INTERRUPT) ? " +" : "", + action->name); + } + len += sprintf(buf+len, " [local]\n"); + } + return len; +} + +/* + * do_IRQ handles IRQ's that have been installed without the + * SA_INTERRUPT flag: it uses the full signal-handling return + * and runs with other interrupts enabled. All relatively slow + * IRQ's should use this format: notably the keyboard/timer + * routines. + */ +asmlinkage void do_IRQ(int irq, struct pt_regs * regs) +{ + struct irqaction * action = *(irq + irq_action); + kstat.interrupts[irq]++; + printk("Got irq %d, press a key.", irq); + prom_getchar(); + romvec->imode(); + while (action) { + if (action->flags & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + action->handler(irq, action->dev_id, regs); + action = action->next; + } +} + +/* + * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return + * stuff - the handler is also running with interrupts disabled unless + * it explicitly enables them later. + */ +asmlinkage void do_fast_IRQ(int irq) +{ + struct irqaction * action = *(irq + irq_action); + + printk("Got irq %d, press a key.", irq); + prom_getchar(); + romvec->imode(); + kstat.interrupts[irq]++; + while (action) { + if (action->flags & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + action->handler(irq, action->dev_id, NULL); + action = action->next; + } +} + +int request_local_irq(unsigned int lirq, void (*func)(int, void *, struct pt_regs *), + unsigned long iflags, const char *dname, void *devid) +{ + struct irqaction *action; + + lirq -= SGINT_LOCAL0; + if(lirq >= 24 || !func) + return -EINVAL; + + action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); + if(!action) + return -ENOMEM; + + action->handler = func; + action->flags = iflags; + action->mask = 0; + action->name = dname; + action->dev_id = devid; + action->next = 0; + local_irq_action[lirq] = action; + enable_irq(lirq + SGINT_LOCAL0); + return 0; +} + +void free_local_irq(unsigned int lirq, void *dev_id) +{ + struct irqaction *action; + + lirq -= SGINT_LOCAL0; + if(lirq >= 24) { + printk("Aieee: trying to free bogus local irq %d\n", + lirq + SGINT_LOCAL0); + return; + } + action = local_irq_action[lirq]; + local_irq_action[lirq] = NULL; + disable_irq(lirq + SGINT_LOCAL0); + kfree(action); +} + +int request_irq(unsigned int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char * devname, + void *dev_id) +{ + int retval; + struct irqaction * action; + + if (irq >= SGINT_END) + return -EINVAL; + if (!handler) + return -EINVAL; + + if((irq >= SGINT_LOCAL0) && (irq < SGINT_GIO)) + return request_local_irq(irq, handler, irqflags, devname, dev_id); + + action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->flags = irqflags; + action->mask = 0; + action->name = devname; + action->next = NULL; + action->dev_id = dev_id; + + retval = 
setup_indy_irq(irq, action); + + if (retval) + kfree(action); + return retval; +} + +void free_irq(unsigned int irq, void *dev_id) +{ + struct irqaction * action, **p; + unsigned long flags; + + if (irq >= SGINT_END) { + printk("Trying to free IRQ%d\n",irq); + return; + } + if((irq >= SGINT_LOCAL0) && (irq < SGINT_GIO)) { + free_local_irq(irq, dev_id); + return; + } + for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) { + if (action->dev_id != dev_id) + continue; + + /* Found it - now free it */ + save_flags(flags); + cli(); + *p = action->next; + restore_flags(flags); + kfree(action); + return; + } + printk("Trying to free free IRQ%d\n",irq); +} + +void init_IRQ(void) +{ + int i; + + for (i = 0; i < 16 ; i++) + set_int_vector(i, 0); + irq_setup(); +} + +void indy_local0_irqdispatch(struct pt_regs *regs) +{ + struct irqaction *action; + unsigned char mask = ioc_icontrol->istat0; + unsigned char mask2 = 0; + int irq; + + mask &= ioc_icontrol->imask0; + if(mask & ISTAT0_LIO2) { + mask2 = ioc_icontrol->vmeistat; + mask2 &= ioc_icontrol->cmeimask0; + irq = lc2msk_to_irqnr[mask2]; + action = local_irq_action[irq]; + } else { + irq = lc0msk_to_irqnr[mask]; + action = local_irq_action[irq]; + } +#if 0 + printk("local0_dispatch: got irq %d mask %2x mask2 %2x\n", + irq, mask, mask2); + prom_getchar(); +#endif + kstat.interrupts[irq + 16]++; + action->handler(irq, action->dev_id, regs); +} + +void indy_local1_irqdispatch(struct pt_regs *regs) +{ + struct irqaction *action; + unsigned char mask = ioc_icontrol->istat1; + unsigned char mask2 = 0; + int irq; + + mask &= ioc_icontrol->imask1; + if(mask & ISTAT1_LIO3) { + printk("WHee: Got an LIO3 irq, winging it...\n"); + mask2 = ioc_icontrol->vmeistat; + mask2 &= ioc_icontrol->cmeimask1; + irq = lc3msk_to_irqnr[ioc_icontrol->vmeistat]; + action = local_irq_action[irq]; + } else { + irq = lc1msk_to_irqnr[mask]; + action = local_irq_action[irq]; + } +#if 0 + printk("local1_dispatch: got irq %d mask %2x mask2 %2x\n", + irq, mask, mask2); + prom_getchar(); +#endif + kstat.interrupts[irq + 24]++; + action->handler(irq, action->dev_id, regs); +} + +void indy_buserror_irq(struct pt_regs *regs) +{ + kstat.interrupts[6]++; + printk("Got a bus error IRQ, shouldn't happen yet\n"); + show_regs(regs); + printk("Spinning...\n"); + while(1) + ; +} + +/* Misc. crap just to keep the kernel linking... */ +unsigned long probe_irq_on (void) +{ + return 0; +} + +int probe_irq_off (unsigned long irqs) +{ + return 0; +} + +void sgint_init(void) +{ + int i; +#ifdef CONFIG_REMOTE_DEBUG + char *ctype; +#endif + + sgi_i2regs = (struct sgi_int2_regs *) (KSEG1 + SGI_INT2_BASE); + sgi_i3regs = (struct sgi_int3_regs *) (KSEG1 + SGI_INT3_BASE); + + /* Init local mask --> irq tables. 
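 *
 * Each table maps an 8-bit pending-interrupt mask to the IRQ number
 * of its highest set bit, biased by 0, 8, 16 or 24 for the four local
 * groups; entry 0 stays 0.  The unrolled if/else chain below is
 * equivalent to this sketch:
 *
 *     for (i = 1; i < 256; i++) {
 *             int bit = 7;
 *
 *             while (!(i & (1 << bit)))
 *                     bit--;
 *             lc0msk_to_irqnr[i] = bit;
 *             lc1msk_to_irqnr[i] = bit + 8;
 *             lc2msk_to_irqnr[i] = bit + 16;
 *             lc3msk_to_irqnr[i] = bit + 24;
 *     }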
*/ + for(i = 0; i < 256; i++) { + if(i & 0x80) { + lc0msk_to_irqnr[i] = 7; + lc1msk_to_irqnr[i] = 15; + lc2msk_to_irqnr[i] = 23; + lc3msk_to_irqnr[i] = 31; + } else if(i & 0x40) { + lc0msk_to_irqnr[i] = 6; + lc1msk_to_irqnr[i] = 14; + lc2msk_to_irqnr[i] = 22; + lc3msk_to_irqnr[i] = 30; + } else if(i & 0x20) { + lc0msk_to_irqnr[i] = 5; + lc1msk_to_irqnr[i] = 13; + lc2msk_to_irqnr[i] = 21; + lc3msk_to_irqnr[i] = 29; + } else if(i & 0x10) { + lc0msk_to_irqnr[i] = 4; + lc1msk_to_irqnr[i] = 12; + lc2msk_to_irqnr[i] = 20; + lc3msk_to_irqnr[i] = 28; + } else if(i & 0x08) { + lc0msk_to_irqnr[i] = 3; + lc1msk_to_irqnr[i] = 11; + lc2msk_to_irqnr[i] = 19; + lc3msk_to_irqnr[i] = 27; + } else if(i & 0x04) { + lc0msk_to_irqnr[i] = 2; + lc1msk_to_irqnr[i] = 10; + lc2msk_to_irqnr[i] = 18; + lc3msk_to_irqnr[i] = 26; + } else if(i & 0x02) { + lc0msk_to_irqnr[i] = 1; + lc1msk_to_irqnr[i] = 9; + lc2msk_to_irqnr[i] = 17; + lc3msk_to_irqnr[i] = 25; + } else if(i & 0x01) { + lc0msk_to_irqnr[i] = 0; + lc1msk_to_irqnr[i] = 8; + lc2msk_to_irqnr[i] = 16; + lc3msk_to_irqnr[i] = 24; + } else { + lc0msk_to_irqnr[i] = 0; + lc1msk_to_irqnr[i] = 0; + lc2msk_to_irqnr[i] = 0; + lc3msk_to_irqnr[i] = 0; + } + } + + ioc_icontrol = &sgi_i3regs->ints; + ioc_timers = &sgi_i3regs->timers; + ioc_tclear = &sgi_i3regs->tclear; + + /* Mask out all interrupts. */ + ioc_icontrol->imask0 = 0; + ioc_icontrol->imask1 = 0; + ioc_icontrol->cmeimask0 = 0; + ioc_icontrol->cmeimask1 = 0; + + /* Now safe to set the exception vector. */ + set_except_vector(0, indyIRQ); + +#ifdef CONFIG_REMOTE_DEBUG + ctype = prom_getcmdline(); + for(i = 0; i < strlen(ctype); i++) { + if(ctype[i]=='k' && ctype[i+1]=='g' && + ctype[i+2]=='d' && ctype[i+3]=='b' && + ctype[i+4]=='=' && ctype[i+5]=='t' && + ctype[i+6]=='t' && ctype[i+7]=='y' && + ctype[i+8]=='d' && + (ctype[i+9] == '1' || ctype[i+9] == '2')) { + printk("KGDB: Using serial line /dev/ttyd%d for " + "session\n", (ctype[i+9] - '0')); + if(ctype[i+9]=='1') + rs_kgdb_hook(1); + else if(ctype[i+9]=='2') + rs_kgdb_hook(0); + else { + printk("KGDB: whoops bogon tty line " + "requested, disabling session\n"); + } + + } + } +#endif +} diff --git a/arch/mips/sgi/kernel/indy_mc.c b/arch/mips/sgi/kernel/indy_mc.c new file mode 100644 index 000000000..b2f35018c --- /dev/null +++ b/arch/mips/sgi/kernel/indy_mc.c @@ -0,0 +1,153 @@ +/* $Id: indy_mc.c,v 1.5 1996/06/29 07:06:51 dm Exp $ + * indy_mc.c: Routines for manipulating the INDY memory controller. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <asm/segment.h> +#include <asm/ptrace.h> +#include <asm/processor.h> +#include <asm/sgimc.h> +#include <asm/sgihpc.h> +#include <asm/sgialib.h> + +/* #define DEBUG_SGIMC */ + +struct sgimc_misc_ctrl *mcmisc_regs; +unsigned long *rpsscounter; +struct sgimc_dma_ctrl *dmactrlregs; + +static inline char *mconfig_string(unsigned long val) +{ + switch(val & SGIMC_MCONFIG_RMASK) { + case SGIMC_MCONFIG_FOURMB: + return "4MB"; + + case SGIMC_MCONFIG_EIGHTMB: + return "8MB"; + + case SGIMC_MCONFIG_SXTEENMB: + return "16MB"; + + case SGIMC_MCONFIG_TTWOMB: + return "32MB"; + + case SGIMC_MCONFIG_SFOURMB: + return "64MB"; + + case SGIMC_MCONFIG_OTEIGHTMB: + return "128MB"; + + default: + return "wheee, unknown"; + }; +} + +void sgimc_init(void) +{ + unsigned long tmpreg; + + mcmisc_regs = (struct sgimc_misc_ctrl *)(KSEG1+0x1fa00000); + rpsscounter = (unsigned long *) (KSEG1 + 0x1fa01004); + dmactrlregs = (struct sgimc_dma_ctrl *) (KSEG1+0x1fa02000); + + printk("MC: SGI memory controller Revision %d\n", + (int) mcmisc_regs->systemid & SGIMC_SYSID_MASKREV); + +#if 0 /* XXX Until I figure out what this bit really indicates XXX */ + /* XXX Is this systemid bit reliable? */ + if(mcmisc_regs->systemid & SGIMC_SYSID_EPRESENT) { + EISA_bus = 1; + printk("with EISA\n"); + } else { + EISA_bus = 0; + printk("no EISA\n"); + } +#endif + +#ifdef DEBUG_SGIMC + prom_printf("sgimc_init: memconfig0<%s> mconfig1<%s>\n", + mconfig_string(mcmisc_regs->mconfig0), + mconfig_string(mcmisc_regs->mconfig1)); + + prom_printf("mcdump: cpuctrl0<%08lx> cpuctrl1<%08lx>\n", + mcmisc_regs->cpuctrl0, mcmisc_regs->cpuctrl1); + prom_printf("mcdump: divider<%08lx>, gioparm<%04x>\n", + mcmisc_regs->divider, mcmisc_regs->gioparm); +#endif + + /* Place the MC into a known state. This must be done before + * interrupts are first enabled etc. + */ + + /* Step 1: The CPU/GIO error status registers will not latch + * up a new error status until the register has been + * cleared by the cpu. These status registers are + * cleared by writing any value to them. + */ + mcmisc_regs->cstat = mcmisc_regs->gstat = 0; + + /* Step 2: Enable all parity checking in cpu control register + * zero. + */ + tmpreg = mcmisc_regs->cpuctrl0; + tmpreg |= (SGIMC_CCTRL0_EPERRGIO | SGIMC_CCTRL0_EPERRMEM | + SGIMC_CCTRL0_R4KNOCHKPARR); + mcmisc_regs->cpuctrl0 = tmpreg; + + /* Step 3: Setup the MC write buffer depth, this is controlled + * in cpu control register 1 in the lower 4 bits. + */ + tmpreg = mcmisc_regs->cpuctrl1; + tmpreg &= ~0xf; + tmpreg |= 0xd; + mcmisc_regs->cpuctrl1 = tmpreg; + + /* Step 4: Initialize the RPSS divider register to run as fast + * as it can correctly operate. The register is laid + * out as follows: + * + * ---------------------------------------- + * | RESERVED | INCREMENT | DIVIDER | + * ---------------------------------------- + * 31 16 15 8 7 0 + * + * DIVIDER determines how often a 'tick' happens, + * INCREMENT determines by how the RPSS increment + * registers value increases at each 'tick'. Thus, + * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101 + */ + mcmisc_regs->divider = 0x101; + + /* Step 5: Initialize GIO64 arbitrator configuration register. + * + * NOTE: If you dork with startup code the HPC init code in + * sgihpc_init() must run before us because of how we + * need to know Guiness vs. FullHouse and the board + * revision on this machine. You have been warned. + */ + + /* First the basic invariants across all gio64 implementations. 
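 *
 * As a worked example of what the code below produces: on a Guiness
 * (Indy) board the value finally written to gioparm works out to
 *
 *     SGIMC_GIOPARM_HPC64  | SGIMC_GIOPARM_ONEBUS |
 *     SGIMC_GIOPARM_EISA64 | SGIMC_GIOPARM_MASTEREISA
 *
 * while Fullhouse boards instead pick up the HPC264 and pipelining
 * bits according to sgi_boardid.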
*/ + tmpreg = SGIMC_GIOPARM_HPC64; /* All 1st HPC's interface at 64bits. */ + tmpreg |= SGIMC_GIOPARM_ONEBUS; /* Only one physical GIO bus exists. */ + + if(sgi_guiness) { + /* Guiness specific settings. */ + tmpreg |= SGIMC_GIOPARM_EISA64; /* MC talks to EISA at 64bits */ + tmpreg |= SGIMC_GIOPARM_MASTEREISA; /* EISA bus can act as master */ + } else { + /* Fullhouse specific settings. */ + if(sgi_boardid < 2) { + tmpreg |= SGIMC_GIOPARM_HPC264; /* 2nd HPC at 64bits */ + tmpreg |= SGIMC_GIOPARM_PLINEEXP0; /* exp0 pipelines */ + tmpreg |= SGIMC_GIOPARM_MASTEREXP1;/* exp1 masters */ + tmpreg |= SGIMC_GIOPARM_RTIMEEXP0; /* exp0 is realtime */ + } else { + tmpreg |= SGIMC_GIOPARM_HPC264; /* 2nd HPC 64bits */ + tmpreg |= SGIMC_GIOPARM_PLINEEXP0; /* exp[01] pipelined */ + tmpreg |= SGIMC_GIOPARM_PLINEEXP1; + tmpreg |= SGIMC_GIOPARM_MASTEREISA;/* EISA masters */ + } + } + mcmisc_regs->gioparm = tmpreg; /* poof */ +} diff --git a/arch/mips/sgi/kernel/indy_timer.c b/arch/mips/sgi/kernel/indy_timer.c new file mode 100644 index 000000000..9aa88cb18 --- /dev/null +++ b/arch/mips/sgi/kernel/indy_timer.c @@ -0,0 +1,298 @@ +/* $Id: indy_timer.c,v 1.10 1996/08/07 02:54:11 dm Exp $ + * indy_timer.c: Setting up the clock on the INDY 8254 controller. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/timex.h> +#include <linux/kernel_stat.h> + +#include <asm/bootinfo.h> +#include <asm/segment.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/sgi.h> +#include <asm/sgialib.h> +#include <asm/sgihpc.h> +#include <asm/sgint23.h> + +/* The layout of registers for the INDY Dallas 1286 clock chipset. */ +struct indy_clock { + volatile unsigned int hsec; + volatile unsigned int sec; + volatile unsigned int min; + volatile unsigned int malarm; + volatile unsigned int hr; + volatile unsigned int halarm; + volatile unsigned int day; + volatile unsigned int dalarm; + volatile unsigned int date; + volatile unsigned int month; + volatile unsigned int year; + volatile unsigned int cmd; + volatile unsigned int whsec; + volatile unsigned int wsec; + volatile unsigned int _unused0[50]; +}; + +#define INDY_CLOCK_REGS ((struct indy_clock *)(KSEG1ADDR(0x1fbe0000))) + +/* Because of a bug in the i8254 timer we need to use the onchip r4k + * counter as our system wide timer interrupt running at 100HZ. 
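 *
 * The scheme is simple: the CP0 count register increments at a fixed
 * rate, and writing CP0 compare to (count + r4k_offset) raises the
 * timer interrupt r4k_offset ticks later.  Re-arming it from the
 * handler keeps the interrupts coming at HZ; roughly (see
 * indy_timer_interrupt() below):
 *
 *     r4k_cur = read_32bit_cp0_register(CP0_COUNT) + r4k_offset;
 *     ack_r4ktimer(r4k_cur);     (writes CP0_COMPARE, clears the irq)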
+ */ +static unsigned long r4k_offset; /* Amount to increment compare reg each time */ +static unsigned long r4k_cur; /* What counter should be at next timer irq */ + +static inline void ack_r4ktimer(unsigned long newval) +{ + write_32bit_cp0_register(CP0_COMPARE, newval); +} + +static int set_rtc_mmss(unsigned long nowtime) +{ + struct indy_clock *clock = INDY_CLOCK_REGS; + int retval = 0; + int real_seconds, real_minutes, clock_minutes; + +#define FROB_FROM_CLOCK(x) (((x) & 0xf) | ((((x) & 0xf0) >> 4) * 10)); +#define FROB_TO_CLOCK(x) ((((((x) & 0xff) / 10)<<4) | (((x) & 0xff) % 10)) & 0xff) + + clock->cmd &= ~(0x80); + clock_minutes = clock->min; + clock->cmd |= (0x80); + + clock_minutes = FROB_FROM_CLOCK(clock_minutes); + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + + if(((abs(real_minutes - clock_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + + real_minutes %= 60; + if(abs(real_minutes - clock_minutes) < 30) { + /* Force clock oscillator to be on. */ + clock->month &= ~(0x80); + + /* Write real_seconds and real_minutes into the Dallas. */ + clock->cmd &= ~(0x80); + clock->sec = real_seconds; + clock->min = real_minutes; + clock->cmd |= (0x80); + } else + return -1; + +#undef FROB_FROM_CLOCK +#undef FROB_TO_CLOCK + + return retval; +} + +static long last_rtc_update = 0; + +void indy_timer_interrupt(struct pt_regs *regs) +{ + /* Ack timer and compute new compare. */ + r4k_cur = (read_32bit_cp0_register(CP0_COUNT) + r4k_offset); + ack_r4ktimer(r4k_cur); + kstat.interrupts[7]++; + do_timer(regs); + + /* We update the Dallas time of day approx. every 11 minutes, + * because of how the numbers work out we need to make + * absolutely sure we do this update within 500ms before the + * next second starts, thus the following code. + */ + if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 && + xtime.tv_usec > 500000 - (tick >> 1) && + xtime.tv_usec < 500000 + (tick >> 1)) + if (set_rtc_mmss(xtime.tv_sec) == 0) + last_rtc_update = xtime.tv_sec; + else + last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */ +} + +static inline unsigned long dosample(volatile unsigned char *tcwp, + volatile unsigned char *tc2p) +{ + unsigned long ct0, ct1; + unsigned char msb, lsb; + + /* Start the counter. */ + *tcwp = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | SGINT_TCWORD_MRGEN); + *tc2p = (SGINT_TCSAMP_COUNTER & 0xff); + *tc2p = (SGINT_TCSAMP_COUNTER >> 8); + + /* Get initial counter invariant */ + ct0 = read_32bit_cp0_register(CP0_COUNT); + + /* Latch and spin until top byte of counter2 is zero */ + *tcwp = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT); + ct1 = read_32bit_cp0_register(CP0_COUNT); + lsb = *tc2p; + msb = *tc2p; + while(msb) { + *tcwp = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT); + ct1 = read_32bit_cp0_register(CP0_COUNT); + lsb = *tc2p; + msb = *tc2p; + } + + /* Stop the counter. */ + *tcwp = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | SGINT_TCWORD_MSWST); + + /* Return the difference, this is how far the r4k counter increments + * for every one HZ. + */ + return ct1 - ct0; +} + +/* Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] 
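 *
 * Quick sanity check: mktime(1970, 1, 1, 0, 0, 0) works out to 0.
 * January maps to month 11 of 1969, so the day count below is
 * 1969/4 - 1969/100 + 1969/400 + 367*11/12 + 1 + 1969*365 = 719499,
 * which the subtracted constant cancels exactly, leaving the epoch
 * at second zero.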
+ * + * This algorithm was first published by Gauss (I think). + * + * WARNING: this function will overflow on 2106-02-07 06:28:16 on + * machines were long is 32-bit! (However, as time_t is signed, we + * will already get problems at other places on 2038-01-19 03:14:08) + */ +static inline unsigned long mktime(unsigned int year, unsigned int mon, + unsigned int day, unsigned int hour, + unsigned int min, unsigned int sec) +{ + if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */ + mon += 12; /* Puts Feb last since it has leap day */ + year -= 1; + } + return ((( + (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) + + year*365 - 719499 + )*24 + hour /* now have hours */ + )*60 + min /* now have minutes */ + )*60 + sec; /* finally seconds */ +} + +unsigned long get_indy_time(void) +{ + struct indy_clock *clock = INDY_CLOCK_REGS; + unsigned int year, mon, day, hour, min, sec; + + /* Freeze it. */ + clock->cmd &= ~(0x80); + + /* Read regs. */ + sec = clock->sec; + min = clock->min; + hour = (clock->hr & 0x3f); + day = (clock->date & 0x3f); + mon = (clock->month & 0x1f); + year = clock->year; + + /* Unfreeze clock. */ + clock->cmd |= 0x80; + + /* Frob the bits. */ +#define FROB1(x) (((x) & 0xf) + ((((x) & 0xf0) >> 4) * 10)); +#define FROB2(x) (((x) & 0xf) + (((((x) & 0xf0) >> 4) & 0x3) * 10)); + + /* XXX Should really check that secs register is the same + * XXX as when we first read it and if not go back and + * XXX read the regs above again. + */ + sec = FROB1(sec); min = FROB1(min); day = FROB1(day); + mon = FROB1(mon); year = FROB1(year); + hour = FROB2(hour); + +#undef FROB1 +#undef FROB2 + + /* Wheee... */ + if(year < 45) + year += 30; + if ((year += 1940) < 1970) + year += 100; + + return mktime(year, mon, day, hour, min, sec); +} + +#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) + +void indy_timer_init(void) +{ + struct sgi_ioc_timers *p; + volatile unsigned char *tcwp, *tc2p; + + /* Figure out the r4k offset, the algorithm is very simple + * and works in _all_ cases as long as the 8254 counter + * register itself works ok (as an interrupt driving timer + * it does not because of bug, this is why we are using + * the onchip r4k counter/compare register to serve this + * purpose, but for r4k_offset calculation it will work + * ok for us). There are other very complicated ways + * of performing this calculation but this one works just + * fine so I am not going to futz around. ;-) + */ + p = ioc_timers; + tcwp = &p->tcword; + tc2p = &p->tcnt2; + + printk("calculating r4koff... "); + r4k_offset = dosample(tcwp, tc2p); /* First sample. */ + dosample(tcwp, tc2p); /* Eat one... */ + r4k_offset += dosample(tcwp, tc2p); /* Second sample. */ + r4k_offset = (r4k_offset >> 1); /* Get average. */ + r4k_offset = HZ * r4k_offset; /* Multiply by HZ */ + + printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset); + + r4k_cur = (read_32bit_cp0_register(CP0_COUNT) + r4k_offset); + write_32bit_cp0_register(CP0_COMPARE, r4k_cur); + set_cp0_status(ST0_IM, ALLINTS); + sti(); + + /* Read time from the dallas chipset. 
*/ + xtime.tv_sec = get_indy_time(); + xtime.tv_usec = 0; +} + +void indy_8254timer_irq(void) +{ + kstat.interrupts[4]++; + printk("indy_8254timer_irq: Whoops, should not have gotten this IRQ\n"); + prom_getchar(); + prom_imode(); +} + +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + + save_flags(flags); cli(); + *tv = xtime; + restore_flags(flags); +} + +void do_settimeofday(struct timeval *tv) +{ + cli(); + xtime = *tv; + time_state = TIME_BAD; + time_maxerror = MAXPHASE; + time_esterror = MAXPHASE; + sti(); +} + diff --git a/arch/mips/sgi/kernel/reset.c b/arch/mips/sgi/kernel/reset.c new file mode 100644 index 000000000..4e7455952 --- /dev/null +++ b/arch/mips/sgi/kernel/reset.c @@ -0,0 +1,14 @@ +/* + * linux/arch/mips/sgi/kernel/process.c + * + * Reset a SGI. + */ +#include <asm/io.h> +#include <asm/system.h> + +void +sgi_hard_reset_now(void) +{ + for(;;) + prom_imode(); +} diff --git a/arch/mips/sgi/kernel/setup.c b/arch/mips/sgi/kernel/setup.c new file mode 100644 index 000000000..61b67df75 --- /dev/null +++ b/arch/mips/sgi/kernel/setup.c @@ -0,0 +1,83 @@ +/* $Id: setup.c,v 1.11 1996/06/29 07:06:51 dm Exp $ + * setup.c: SGI specific setup, including init of the feature struct. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#ifndef __GOGOGO__ +#error "... about to fuckup your Indy?" +#endif +#include <linux/kernel.h> +#include <linux/sched.h> + +#include <asm/vector.h> +#include <asm/sgialib.h> +#include <asm/sgi.h> +#include <asm/sgimc.h> +#include <asm/sgihpc.h> +#include <asm/sgint23.h> + +extern int serial_console; /* in console.c, of course */ + +struct feature sgi_feature = { +}; + +static void sgi_irq_setup(void) +{ + sgint_init(); +} + +#if 0 +extern void register_console(void (*proc)(const char *)); + +static void sgi_print(const char *p) +{ + char c; + + while((c = *p++) != 0) { + if(c == '\n') + prom_putchar('\r'); + prom_putchar(c); + } +} +#endif + +void sgi_setup(void) +{ + char *ctype; + + irq_setup = sgi_irq_setup; + feature = &sgi_feature; + hard_reset_now = acn_hard_reset_now; + + /* register_console(sgi_print); */ + + sgi_sysinit(); + + /* Init the INDY HPC I/O controller. Need to call this before + * fucking with the memory controller because it needs to know the + * boardID and whether this is a Guiness or a FullHouse machine. + */ + sgihpc_init(); + + /* Init INDY memory controller. */ + sgimc_init(); + + /* ARCS console environment variable is set to "g?" for + * graphics console, it is set to "d" for the first serial + * line and "d2" for the second serial line. + */ + ctype = prom_getenv("console"); + serial_console = 0; + if(*ctype == 'd') { + if(*(ctype+1)=='2') + serial_console = 1; + else + serial_console = 2; + if(!serial_console) { + prom_printf("Weird console env setting %s\n", ctype); + prom_printf("Press a key to reboot.\n"); + prom_getchar(); + prom_imode(); + } + } +} diff --git a/arch/mips/sgi/kernel/system.c b/arch/mips/sgi/kernel/system.c new file mode 100644 index 000000000..051ad09c5 --- /dev/null +++ b/arch/mips/sgi/kernel/system.c @@ -0,0 +1,168 @@ +/* $Id: system.c,v 1.2 1996/06/10 16:38:32 dm Exp $ + * system.c: Probe the system type using ARCS prom interface library. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> + +#include <asm/sgi.h> +#include <asm/sgialib.h> +#include <asm/bootinfo.h> + +#ifndef __GOGOGO__ +#error "... You're fearless, aren't you?" 
+#endif + +enum sgi_mach sgimach; + +struct smatch { + char *name; + int type; +}; + +static struct smatch sgi_mtable[] = { + { "SGI-IP4", ip4 }, + { "SGI-IP5", ip5 }, + { "SGI-IP6", ip6 }, + { "SGI-IP7", ip7 }, + { "SGI-IP9", ip9 }, + { "SGI-IP12", ip12 }, + { "SGI-IP15", ip15 }, + { "SGI-IP17", ip17 }, + { "SGI-IP19", ip19 }, + { "SGI-IP20", ip20 }, + { "SGI-IP21", ip21 }, + { "SGI-IP22", ip22 }, + { "SGI-IP25", ip25 }, + { "SGI-IP26", ip26 }, + { "SGI-IP28", ip28 }, + { "SGI-IP30", ip30 }, + { "SGI-IP32", ip32 } +}; + +#define NUM_MACHS 17 /* for now */ + +static struct smatch sgi_cputable[] = { + { "MIPS-R2000", CPU_R2000 }, + { "MIPS-R3000", CPU_R3000 }, + { "MIPS-R3000A", CPU_R3000A }, + { "MIPS-R4000", CPU_R4000SC }, + { "MIPS-R4400", CPU_R4400SC }, + { "MIPS-R4600", CPU_R4600 }, + { "MIPS-R8000", CPU_R8000 }, + { "MIPS-R5000", CPU_R5000 }, + { "MIPS-R5000A", CPU_R5000A } +}; + +#define NUM_CPUS 9 /* for now */ + +static enum sgi_mach string_to_mach(char *s) +{ + int i; + + for(i = 0; i < NUM_MACHS; i++) { + if(!strcmp(s, sgi_mtable[i].name)) + return (enum sgi_mach) sgi_mtable[i].type; + } + prom_printf("\nYeee, could not determine SGI architecture type <%s>\n", s); + prom_printf("press a key to reboot\n"); + prom_getchar(); + romvec->imode(); + return (enum sgi_mach) 0; +} + +static int string_to_cpu(char *s) +{ + int i; + + for(i = 0; i < NUM_CPUS; i++) { + if(!strcmp(s, sgi_cputable[i].name)) + return sgi_mtable[i].type; + } + prom_printf("\nYeee, could not determine MIPS cpu type <%s>\n", s); + prom_printf("press a key to reboot\n"); + prom_getchar(); + romvec->imode(); + return 0; +} + +void sgi_sysinit(void) +{ + pcomponent *p, *toplev, *cpup = 0; + int cputype = -1; + + + /* The root component tells us what machine architecture we + * have here. + */ + p = prom_getchild(PROM_NULL_COMPONENT); + printk("ARCH: %s\n", p->iname); + sgimach = string_to_mach(p->iname); + + /* Now scan for cpu(s). */ + toplev = p = prom_getchild(p); + while(p) { + int ncpus = 0; + + if(p->type == Cpu) { + if(++ncpus > 1) { + prom_printf("\nYeee, SGI MP not ready yet\n"); + prom_printf("press a key to reboot\n"); + prom_getchar(); + romvec->imode(); + } + printk("CPU: %s ", p->iname); + cpup = p; + cputype = string_to_cpu(cpup->iname); + } + p = prom_getsibling(p); + } + if(cputype == -1) { + prom_printf("\nYeee, could not find cpu ARCS component\n"); + prom_printf("press a key to reboot\n"); + prom_getchar(); + romvec->imode(); + } + p = prom_getchild(cpup); + while(p) { + switch(p->class) { + case processor: + switch(p->type) { + case Fpu: + printk("FPU<%s> ", p->iname); + break; + + default: + break; + }; + break; + + case cache: + switch(p->type) { + case picache: + printk("ICACHE "); + break; + + case pdcache: + printk("DCACHE "); + break; + + case sccache: + printk("SCACHE "); + break; + + default: + break; + + }; + break; + + default: + break; + }; + p = prom_getsibling(p); + } + printk("\n"); +} diff --git a/arch/mips/sgi/kernel/time.c b/arch/mips/sgi/kernel/time.c new file mode 100644 index 000000000..1f5137c27 --- /dev/null +++ b/arch/mips/sgi/kernel/time.c @@ -0,0 +1,14 @@ +/* $Id: time.c,v 1.1 1996/06/08 12:07:08 dm Exp $ + * time.c: Generic SGI time_init() code, this will dispatch to the + * appropriate per-architecture time/counter init code. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +extern void indy_timer_init(void); + +void time_init(void) +{ + /* XXX assume INDY for now XXX */ + indy_timer_init(); +} diff --git a/arch/mips/sgi/prom/.cvsignore b/arch/mips/sgi/prom/.cvsignore new file mode 100644 index 000000000..4671378ae --- /dev/null +++ b/arch/mips/sgi/prom/.cvsignore @@ -0,0 +1 @@ +.depend diff --git a/arch/mips/sgi/prom/Makefile b/arch/mips/sgi/prom/Makefile new file mode 100644 index 000000000..8dbfedf80 --- /dev/null +++ b/arch/mips/sgi/prom/Makefile @@ -0,0 +1,23 @@ +# $Id: Makefile,v 1.6 1996/06/08 04:48:41 dm Exp $ +# Makefile for the SGI arcs prom monitor library routines +# under Linux. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definitions are now in the main makefile... + +OBJS = console.o init.o printf.o memory.o tree.o tags.o env.o \ + cmdline.o misc.o time.o file.o + +all: promlib.a + +promlib.a: $(OBJS) + $(AR) rcs promlib.a $(OBJS) + sync + +dep: + $(CPP) -M *.c > .depend + +include $(TOPDIR)/Rules.make diff --git a/arch/mips/sgi/prom/cmdline.c b/arch/mips/sgi/prom/cmdline.c new file mode 100644 index 000000000..4c7da5e43 --- /dev/null +++ b/arch/mips/sgi/prom/cmdline.c @@ -0,0 +1,60 @@ +/* $Id: cmdline.c,v 1.1 1996/06/08 03:23:10 dm Exp $ + * cmdline.c: Kernel command line creation using ARCS argc/argv. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/string.h> + +#include <asm/sgialib.h> +#include <asm/bootinfo.h> + +/* #define DEBUG_CMDLINE */ + +extern char arcs_cmdline[CL_SIZE]; + +char *prom_getcmdline(void) +{ + return &(arcs_cmdline[0]); +} + +static char *ignored[] = { + "ConsoleIn=", + "ConsoleOut=", + "SystemPartition=", + "OSLoader=", + "OSLoadPartition=", + "OSLoadFilename=" +}; +#define NENTS(foo) ((sizeof((foo)) / (sizeof((foo[0]))))) + +void prom_init_cmdline(void) +{ + char *cp; + int actr, i; + + actr = 1; /* Always ignore argv[0] */ + + cp = &(arcs_cmdline[0]); + while(actr < prom_argc) { + for(i = 0; i < NENTS(ignored); i++) { + int len = strlen(ignored[i]); + + if(!strncmp(prom_argv[actr], ignored[i], len)) + goto pic_cont; + } + /* Ok, we want it. */ + strcpy(cp, prom_argv[actr]); + cp += strlen(prom_argv[actr]); + *cp++ = ' '; + + pic_cont: + actr++; + } + *cp = '\0'; + +#ifdef DEBUG_CMDLINE + prom_printf("prom_init_cmdline: %s\n", &(arcs_cmdline[0])); +#endif +} diff --git a/arch/mips/sgi/prom/console.c b/arch/mips/sgi/prom/console.c new file mode 100644 index 000000000..3f4d69f45 --- /dev/null +++ b/arch/mips/sgi/prom/console.c @@ -0,0 +1,24 @@ +/* $Id: console.c,v 1.1 1996/06/04 00:57:05 dm Exp $ + * console.c: SGI arcs console code. + * + * Copyright (C) 1996 David S. Miller (dm@sgi.com) + */ + +#include <asm/sgialib.h> + +void prom_putchar(char c) +{ + long cnt; + char it = c; + + romvec->write(1, &it, 1, &cnt); +} + +char prom_getchar(void) +{ + long cnt; + char c; + + romvec->read(0, &c, 1, &cnt); + return c; +} diff --git a/arch/mips/sgi/prom/env.c b/arch/mips/sgi/prom/env.c new file mode 100644 index 000000000..5aff47efd --- /dev/null +++ b/arch/mips/sgi/prom/env.c @@ -0,0 +1,20 @@ +/* $Id: env.c,v 1.2 1996/06/08 04:48:41 dm Exp $ + * env.c: ARCS environment variable routines. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/string.h> + +#include <asm/sgialib.h> + +char *prom_getenv(char *name) +{ + return romvec->get_evar(name); +} + +long prom_setenv(char *name, char *value) +{ + return romvec->set_evar(name, value); +} diff --git a/arch/mips/sgi/prom/file.c b/arch/mips/sgi/prom/file.c new file mode 100644 index 000000000..b62d33dda --- /dev/null +++ b/arch/mips/sgi/prom/file.c @@ -0,0 +1,58 @@ +/* $Id: file.c,v 1.1 1996/06/08 04:47:22 dm Exp $ + * file.c: ARCS firmware interface to files. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <asm/sgialib.h> + +long prom_getvdirent(unsigned long fd, struct linux_vdirent *ent, unsigned long num, + unsigned long *cnt) +{ + return romvec->get_vdirent(fd, ent, num, cnt); +} + +long prom_open(char *name, enum linux_omode md, unsigned long *fd) +{ + return romvec->open(name, md, fd); +} + +long prom_close(unsigned long fd) +{ + return romvec->close(fd); +} + +long prom_read(unsigned long fd, void *buf, unsigned long num, unsigned long *cnt) +{ + return romvec->read(fd, buf, num, cnt); +} + +long prom_getrstatus(unsigned long fd) +{ + return romvec->get_rstatus(fd); +} + +long prom_write(unsigned long fd, void *buf, unsigned long num, unsigned long *cnt) +{ + return romvec->write(fd, buf, num, cnt); +} + +long prom_seek(unsigned long fd, struct linux_bigint *off, enum linux_seekmode sm) +{ + return romvec->seek(fd, off, sm); +} + +long prom_mount(char *name, enum linux_mountops op) +{ + return romvec->mount(name, op); +} + +long prom_getfinfo(unsigned long fd, struct linux_finfo *buf) +{ + return romvec->get_finfo(fd, buf); +} + +long prom_setfinfo(unsigned long fd, unsigned long flags, unsigned long msk) +{ + return romvec->set_finfo(fd, flags, msk); +} diff --git a/arch/mips/sgi/prom/init.c b/arch/mips/sgi/prom/init.c new file mode 100644 index 000000000..6b6167efd --- /dev/null +++ b/arch/mips/sgi/prom/init.c @@ -0,0 +1,59 @@ +/* $Id: init.c,v 1.6 1996/06/10 16:38:33 dm Exp $ + * init.c: PROM library initialisation code. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> + +#include <asm/sgialib.h> + +/* #define DEBUG_PROM_INIT */ + +/* Master romvec interface. */ +struct linux_romvec *romvec; +struct linux_promblock *sgi_pblock; +int prom_argc; +char **prom_argv, **prom_envp; +unsigned short prom_vers, prom_rev; + +extern void prom_testtree(void); + +int prom_init(int argc, char **argv, char **envp) +{ + struct linux_promblock *pb; + + romvec = ROMVECTOR; + pb = sgi_pblock = PROMBLOCK; + prom_argc = argc; + prom_argv = argv; + prom_envp = envp; + + if(pb->magic != 0x53435241) { + prom_printf("Aieee, bad prom vector magic %08lx\n", pb->magic); + while(1) + ; + } + + prom_init_cmdline(); + + prom_vers = pb->ver; + prom_rev = pb->rev; + printk("PROMLIB: SGI ARCS firmware Version %d Revision %d\n", + prom_vers, prom_rev); + prom_meminit(); + prom_setup_archtags(); + +#if 0 + prom_testtree(); +#endif + +#ifdef DEBUG_PROM_INIT + { + prom_printf("Press a key to reboot\n"); + (void) prom_getchar(); + romvec->imode(); + } +#endif + return 0; +} diff --git a/arch/mips/sgi/prom/memory.c b/arch/mips/sgi/prom/memory.c new file mode 100644 index 000000000..cb392a805 --- /dev/null +++ b/arch/mips/sgi/prom/memory.c @@ -0,0 +1,129 @@ +/* $Id: memory.c,v 1.5 1996/06/10 16:38:33 dm Exp $ + * memory.c: PROM library functions for acquiring/using memory descriptors + * given to us from the ARCS firmware. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/swap.h> + +#include <asm/sgialib.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/bootinfo.h> + +/* #define DEBUG */ + +struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr) +{ + return romvec->get_mdesc(curr); +} + +#ifdef DEBUG /* convenient for debugging */ +static char *mtypes[8] = { + "Exception Block", + "ARCS Romvec Page", + "Free/Contig RAM", + "Generic Free RAM", + "Bad Memory", + "Standlong Program Pages", + "ARCS Temp Storage Area", + "ARCS Permanent Storage Area" +}; +#endif + +static struct prom_pmemblock prom_pblocks[PROM_MAX_PMEMBLOCKS]; + +struct prom_pmemblock *prom_getpblock_array(void) +{ + return &prom_pblocks[0]; +} + +static void prom_setup_memupper(void) +{ + struct prom_pmemblock *p, *highest; + + for(p = prom_getpblock_array(), highest = 0; p->size != 0; p++) { + if(p->base == 0xdeadbeef) + prom_printf("WHEEE, bogus pmemblock\n"); + if(!highest || p->base > highest->base) + highest = p; + } + mips_memory_upper = highest->base + highest->size; +#ifdef DEBUG + prom_printf("prom_setup_memupper: mips_memory_upper = %08lx\n", + mips_memory_upper); +#endif +} + +void prom_meminit(void) +{ + struct linux_mdesc *p; + int totram; + int i = 0; + + p = prom_getmdesc(PROM_NULL_MDESC); +#ifdef DEBUG + prom_printf("ARCS MEMORY DESCRIPTOR dump:\n"); + while(p) { + prom_printf("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n", + i, p, p->base, p->pages, mtypes[p->type]); + p = prom_getmdesc(p); + i++; + } +#endif + p = prom_getmdesc(PROM_NULL_MDESC); + totram = 0; + i = 0; + while(p) { + if(p->type == free || p->type == fcontig) { + prom_pblocks[i].base = + ((p->base<<PAGE_SHIFT) + 0x80000000); + prom_pblocks[i].size = p->pages << PAGE_SHIFT; + totram += prom_pblocks[i].size; +#ifdef DEBUG + prom_printf("free_chunk[%d]: base=%08lx size=%d\n", + i, prom_pblocks[i].base, + prom_pblocks[i].size); +#endif + i++; + } + p = prom_getmdesc(p); + } + prom_pblocks[i].base = 0xdeadbeef; + prom_pblocks[i].size = 0; /* indicates last elem. of array */ + printk("PROMLIB: Total free ram %d bytes (%dK,%dMB)\n", + totram, (totram/1024), (totram/1024/1024)); + + /* Setup upper physical memory bound. */ + prom_setup_memupper(); +} + +/* Called from mem_init() to fixup the mem_map page settings. */ +void prom_fixup_mem_map(unsigned long start, unsigned long end) +{ + struct prom_pmemblock *p; + int i, nents; + + /* Determine number of pblockarray entries. */ + p = prom_getpblock_array(); + for(i = 0; p[i].size; i++) + ; + nents = i; + while(start < end) { + for(i = 0; i < nents; i++) { + if((start >= (p[i].base)) && + (start < (p[i].base + p[i].size))) { + start = p[i].base + p[i].size; + start &= PAGE_MASK; + continue; + } + } + set_bit(PG_reserved, &mem_map[MAP_NR(start)].flags); + start += PAGE_SIZE; + } +} diff --git a/arch/mips/sgi/prom/misc.c b/arch/mips/sgi/prom/misc.c new file mode 100644 index 000000000..47051a1b3 --- /dev/null +++ b/arch/mips/sgi/prom/misc.c @@ -0,0 +1,109 @@ +/* $Id: misc.c,v 1.3 1996/08/07 02:54:12 dm Exp $ + * misc.c: Miscellaneous ARCS PROM routines. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> + +#include <asm/sgialib.h> +#include <asm/bootinfo.h> +#include <asm/system.h> + +extern unsigned long mips_cputype; +extern int initialize_kbd(void); +extern void *sgiwd93_host; +extern void reset_wd33c93(void *instance); + +static inline void shutoff_r4600_cache(void) +{ + unsigned long tmp1, tmp2, tmp3; + + if(mips_cputype != CPU_R4600 && + mips_cputype != CPU_R4640 && + mips_cputype != CPU_R4700) + return; + printk("Disabling R4600 SCACHE\n"); + __asm__ __volatile__(" + .set noreorder + .set mips3 + li %0, 0x1 + dsll %0, 31 + lui %1, 0x9000 + dsll32 %1, 0 + or %0, %1, %0 + mfc0 %2, $12 + nop; nop; nop; nop; + li %1, 0x80 + mtc0 %1, $12 + nop; nop; nop; nop; + sh $0, 0(%0) + mtc0 $0, $12 + nop; nop; nop; nop; + mtc0 %2, $12 + nop; nop; nop; nop; + .set mips2 + .set reorder + " : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)); +} + +void prom_halt(void) +{ + shutoff_r4600_cache(); + initialize_kbd(); + reset_wd33c93(sgiwd93_host); + cli(); + romvec->halt(); +} + +void prom_powerdown(void) +{ + shutoff_r4600_cache(); + initialize_kbd(); + reset_wd33c93(sgiwd93_host); + cli(); + romvec->pdown(); +} + +/* XXX is this a soft reset basically? XXX */ +void prom_restart(void) +{ + shutoff_r4600_cache(); + initialize_kbd(); + reset_wd33c93(sgiwd93_host); + cli(); + romvec->restart(); +} + +void prom_reboot(void) +{ + shutoff_r4600_cache(); + initialize_kbd(); + reset_wd33c93(sgiwd93_host); + cli(); + romvec->reboot(); +} + +void prom_imode(void) +{ + shutoff_r4600_cache(); + initialize_kbd(); + reset_wd33c93(sgiwd93_host); + cli(); + romvec->imode(); +} + +long prom_cfgsave(void) +{ + return romvec->cfg_save(); +} + +struct linux_sysid *prom_getsysid(void) +{ + return romvec->get_sysid(); +} + +void prom_cacheflush(void) +{ + romvec->cache_flush(); +} diff --git a/arch/mips/sgi/prom/printf.c b/arch/mips/sgi/prom/printf.c new file mode 100644 index 000000000..02e7e4734 --- /dev/null +++ b/arch/mips/sgi/prom/printf.c @@ -0,0 +1,34 @@ +/* $Id: printf.c,v 1.1 1996/06/04 00:57:06 dm Exp $ + * printf.c: Putting things on the screen using SGI arcs + * PROM facilities. + * + * Copyright (C) 1996 David S. Miller (dm@sgi.com) + */ + +#include <linux/kernel.h> + +#include <asm/sgialib.h> + +static char ppbuf[1024]; + +void +prom_printf(char *fmt, ...) +{ + va_list args; + char ch, *bptr; + int i; + + va_start(args, fmt); + i = vsprintf(ppbuf, fmt, args); + + bptr = ppbuf; + + while((ch = *(bptr++)) != 0) { + if(ch == '\n') + prom_putchar('\r'); + + prom_putchar(ch); + } + va_end(args); + return; +} diff --git a/arch/mips/sgi/prom/salone.c b/arch/mips/sgi/prom/salone.c new file mode 100644 index 000000000..4f120af3a --- /dev/null +++ b/arch/mips/sgi/prom/salone.c @@ -0,0 +1,24 @@ +/* $Id: salone.c,v 1.1 1996/06/08 04:47:22 dm Exp $ + * salone.c: Routines to load into memory and execute stand-along + * program images using ARCS PROM firmware. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <asm/sgialib.h> + +long prom_load(char *name, unsigned long end, unsigned long *pc, unsigned long *eaddr) +{ + return romvec->load(name, end, pc, eaddr); +} + +long prom_invoke(unsigned long pc, unsigned long sp, long argc, + char **argv, char **envp) +{ + return romvec->invoke(pc, sp, argc, argv, envp); +} + +long prom_exec(char *name, long argc, char **argv, char **envp) +{ + return romvec->exec(name, argc, argv, envp); +} diff --git a/arch/mips/sgi/prom/tags.c b/arch/mips/sgi/prom/tags.c new file mode 100644 index 000000000..4a9fe0b2b --- /dev/null +++ b/arch/mips/sgi/prom/tags.c @@ -0,0 +1,67 @@ +/* $Id: tags.c,v 1.5 1996/06/24 07:12:22 dm Exp $ + * tags.c: Initialize the arch tags the way the MIPS kernel setup + * expects. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <linux/kernel.h> +#include <linux/string.h> + +#include <asm/segment.h> +#include <asm/sgialib.h> +#include <asm/bootinfo.h> +#include <asm/sgimc.h> + +/* XXX This tag thing is a fucking rats nest, I'm very inclined to completely + * XXX rework the MIPS people's multi-arch code _NOW_. + */ + +static unsigned long machtype_SGI_INDY = MACH_SGI_INDY; +static unsigned long machgroup_SGI = MACH_GROUP_SGI; +static unsigned long memlower_SGI_INDY = (KSEG0 + SGIMC_SEG0_BADDR); +static unsigned long cputype_SGI_INDY = CPU_R4400SC; +static unsigned long tlb_entries_SGI_INDY = 48; +static unsigned long dummy_SGI_INDY = 0; +static struct drive_info_struct dummy_dinfo_SGI_INDY = { { 0, }, }; +char arcs_cmdline[CL_SIZE]; + +#define TAG(t,l) {tag_##t,(l)} /* XXX RATS NEST CODE!!! XXX */ +#define TAGVAL(v) (void*)&(v) /* XXX FUCKING LOSING!!! XXX */ + +tag_def taglist_sgi_indy[] = { + {TAG(machtype, ULONGSIZE), TAGVAL(machtype_SGI_INDY)}, + {TAG(machgroup, ULONGSIZE), TAGVAL(machgroup_SGI)}, + {TAG(memlower, ULONGSIZE), TAGVAL(memlower_SGI_INDY)}, + {TAG(cputype, ULONGSIZE), TAGVAL(cputype_SGI_INDY)}, + {TAG(tlb_entries, ULONGSIZE), TAGVAL(tlb_entries_SGI_INDY)}, + {TAG(vram_base, ULONGSIZE), TAGVAL(dummy_SGI_INDY)}, + {TAG(drive_info, DRVINFOSIZE), TAGVAL(dummy_dinfo_SGI_INDY)}, + {TAG(mount_root_rdonly, ULONGSIZE), TAGVAL(dummy_SGI_INDY)}, + {TAG(command_line, CL_SIZE), TAGVAL(arcs_cmdline[0])}, + {TAG(dummy, 0), NULL} + /* XXX COLOSTOMY BAG!!!! XXX */ +}; + +void prom_setup_archtags(void) +{ + tag_def *tdp = &taglist_sgi_indy[0]; + tag *tp; + + tp = (tag *) (mips_memory_upper - sizeof(tag)); + while(tdp->t.tag != tag_dummy) { + unsigned long size; + char *d; + + *tp = tdp->t; + size = tp->size; + d = (char *) tdp->d; + tp = (tag *)(((unsigned long)tp) - (tp->size)); + if(size) + memcpy(tp, d, size); + + tp--; + tdp++; + } + *tp = tdp->t; /* copy last dummy element over */ +} diff --git a/arch/mips/sgi/prom/time.c b/arch/mips/sgi/prom/time.c new file mode 100644 index 000000000..9a836b810 --- /dev/null +++ b/arch/mips/sgi/prom/time.c @@ -0,0 +1,17 @@ +/* $Id: time.c,v 1.1 1996/06/08 04:47:23 dm Exp $ + * time.c: Extracting time information from ARCS prom. + * + * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) + */ + +#include <asm/sgialib.h> + +struct linux_tinfo *prom_gettinfo(void) +{ + return romvec->get_tinfo(); +} + +unsigned long prom_getrtime(void) +{ + return romvec->get_rtime(); +} diff --git a/arch/mips/sgi/prom/tree.c b/arch/mips/sgi/prom/tree.c new file mode 100644 index 000000000..1cefd4964 --- /dev/null +++ b/arch/mips/sgi/prom/tree.c @@ -0,0 +1,107 @@ +/* $Id: tree.c,v 1.4 1996/06/08 04:48:41 dm Exp $ + * tree.c: PROM component device tree code. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + */ + +#include <asm/sgialib.h> + +#define DEBUG_PROM_TREE + +pcomponent *prom_getsibling(pcomponent *this) +{ + if(this == PROM_NULL_COMPONENT) + return PROM_NULL_COMPONENT; + return romvec->next_component(this); +} + +pcomponent *prom_getchild(pcomponent *this) +{ + return romvec->child_component(this); +} + +pcomponent *prom_getparent(pcomponent *child) +{ + if(child == PROM_NULL_COMPONENT) + return PROM_NULL_COMPONENT; + return romvec->parent_component(child); +} + +long prom_getcdata(void *buffer, pcomponent *this) +{ + return romvec->component_data(buffer, this); +} + +pcomponent *prom_childadd(pcomponent *this, pcomponent *tmp, void *data) +{ + return romvec->child_add(this, tmp, data); +} + +long prom_delcomponent(pcomponent *this) +{ + return romvec->comp_del(this); +} + +pcomponent *prom_componentbypath(char *path) +{ + return romvec->component_by_path(path); +} + +#ifdef DEBUG_PROM_TREE +static char *classes[] = { + "system", "processor", "cache", "adapter", "controller", "peripheral", + "memory" +}; + +static char *types[] = { + "arc", "cpu", "fpu", "picache", "pdcache", "sicache", "sdcache", "sccache", + "memdev", "eisa adapter", "tc adapter", "scsi adapter", "dti adapter", + "multi-func adapter", "disk controller", "tp controller", + "cdrom controller", "worm controller", "serial controller", + "net controller", "display controller", "parallel controller", + "pointer controller", "keyboard controller", "audio controller", + "misc controller", "disk peripheral", "floppy peripheral", + "tp peripheral", "modem peripheral", "monitor peripheral", + "printer peripheral", "pointer peripheral", "keyboard peripheral", + "terminal peripheral", "line peripheral", "net peripheral", + "misc peripheral", "anonymous" +}; + +static char *iflags[] = { + "bogus", "read only", "removable", "console in", "console out", + "input", "output" +}; + +static void dump_component(pcomponent *p) +{ + prom_printf("[%p]:class<%s>type<%s>flags<%s>ver<%d>rev<%d>", + p, classes[p->class], types[p->type], + iflags[p->iflags], p->vers, p->rev); + prom_printf("key<%08lx>\n\tamask<%08lx>cdsize<%d>ilen<%d>iname<%s>\n", + p->key, p->amask, (int)p->cdsize, (int)p->ilen, p->iname); +} + +static void traverse(pcomponent *p, int op) +{ + dump_component(p); + if(prom_getchild(p)) + traverse(prom_getchild(p), 1); + if(prom_getsibling(p) && op) + traverse(prom_getsibling(p), 1); +} + +void prom_testtree(void) +{ + pcomponent *p; + + p = prom_getchild(PROM_NULL_COMPONENT); + dump_component(p); + p = prom_getchild(p); + while(p) { + dump_component(p); + p = prom_getsibling(p); + } + prom_printf("press a key\n"); + prom_getchar(); +} +#endif diff --git a/arch/mips/sni/Makefile b/arch/mips/sni/Makefile index bfa8e2cad..3821d7b62 100644 --- a/arch/mips/sni/Makefile +++ b/arch/mips/sni/Makefile @@ -11,16 +11,10 @@ .S.o: $(CC) $(CFLAGS) -c $< -o $*.o -include ../../../.config - all: sni.o O_TARGET := sni.o O_OBJS := hw-access.o int-handler.o reset.o setup.o -ifdef CONFIG_PCI -O_OBJS += 
bios32.o -endif - int-handler.o: int-handler.S clean: diff --git a/arch/mips/sni/bios32.c b/arch/mips/sni/bios32.c deleted file mode 100644 index 366347902..000000000 --- a/arch/mips/sni/bios32.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * bios32.c - Fake PCI BIOS functions for RM200 C systems. Chainsawed - * from the Alpha version. - * - * Written by Ralf Baechle (ralf@gnu.ai.mit.edu) - * - * For more information, please consult - * - * PCI BIOS Specification Revision - * PCI Local Bus Specification - * PCI System Design Guide - * - * PCI Special Interest Group - * M/S HF3-15A - * 5200 N.E. Elam Young Parkway - * Hillsboro, Oregon 97124-6497 - * +1 (503) 696-2000 - * +1 (800) 433-5177 - * - * Manuals are $25 each or $50 for all three, plus $7 shipping - * within the United States, $35 abroad. - */ -#include <linux/config.h> - -#include <linux/kernel.h> -#include <linux/bios32.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <linux/malloc.h> -#include <linux/mm.h> - -#include <asm/ptrace.h> -#include <asm/system.h> -#include <asm/io.h> - -/* - * This is a table of the onboard PCI devices of the RM200 C - * onboard devices. - */ -static struct board { - unsigned short vendor; - unsigned short device_id; - unsigned int memory; - unsigned int io; - unsigned char irq; - unsigned char revision; - } boards[] = { - { - PCI_VENDOR_ID_NCR, - PCI_DEVICE_ID_NCR_53C810, - 0x1b000000, - 0x00000000, - 5, - 0x11 - }, - { - PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_LANCE, - 0x1b000100, - 0x17beffe0, - 6, - 0x16 - }, - { - PCI_VENDOR_ID_CIRRUS, - PCI_DEVICE_ID_CIRRUS_5434_8, - 0x18000000, - 0x00000000, - 4, - 0x8e - }, - { 0xffff, } -}; - -/* - * Given the vendor and device ids, find the n'th instance of that device - * in the system. - */ -int pcibios_find_device (unsigned short vendor, unsigned short device_id, - unsigned short index, unsigned char *bus, - unsigned char *devfn) -{ - unsigned int curr = 0; - struct board *p; - - for (p = pci_devices; p->vendor != 0xffff; p++) { - if (p->vendor == vendor && p->device == device_id) { - if (curr == index) { - *devfn = p->devfn; - *bus = 0; - return PCIBIOS_SUCCESSFUL; - } - ++curr; - } - } - - return PCIBIOS_DEVICE_NOT_FOUND; -} - -/* - * Given the class, find the n'th instance of that device - * in the system. 
- */ -int pcibios_find_class (unsigned int class_code, unsigned short index, - unsigned char *bus, unsigned char *devfn) -{ - unsigned int curr = 0; - struct pci_dev *dev; - - for (dev = pci_devices; dev; dev = dev->next) { - if (dev->class == class_code) { - if (curr == index) { - *devfn = dev->devfn; - *bus = dev->bus->number; - return PCIBIOS_SUCCESSFUL; - } - ++curr; - } - } - return PCIBIOS_DEVICE_NOT_FOUND; -} - -int pcibios_present(void) -{ - return 1; -} - -unsigned long pcibios_init(unsigned long mem_start, - unsigned long mem_end) -{ - printk("SNI RM200 C BIOS32 fake implementation\n"); - - return mem_start; -} - -const char *pcibios_strerror (int error) -{ - static char buf[80]; - - switch (error) { - case PCIBIOS_SUCCESSFUL: - return "SUCCESSFUL"; - - case PCIBIOS_FUNC_NOT_SUPPORTED: - return "FUNC_NOT_SUPPORTED"; - - case PCIBIOS_BAD_VENDOR_ID: - return "SUCCESSFUL"; - - case PCIBIOS_DEVICE_NOT_FOUND: - return "DEVICE_NOT_FOUND"; - - case PCIBIOS_BAD_REGISTER_NUMBER: - return "BAD_REGISTER_NUMBER"; - - default: - sprintf (buf, "UNKNOWN RETURN 0x%x", error); - return buf; - } -} - -/* - * BIOS32-style PCI interface: - */ - -int pcibios_read_config_byte (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned char *value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - *value = 0xff; - - if (mk_conf_addr(bus, device_fn, where, &pci_addr) < 0) { - return PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x00; - *value = conf_read(addr) >> ((where & 3) * 8); - return PCIBIOS_SUCCESSFUL; -} - - -int pcibios_read_config_word (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned short *value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - *value = 0xffff; - - if (where & 0x1) { - return PCIBIOS_BAD_REGISTER_NUMBER; - } - if (mk_conf_addr(bus, device_fn, where, &pci_addr)) { - return PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x08; - *value = conf_read(addr) >> ((where & 3) * 8); - return PCIBIOS_SUCCESSFUL; -} - - -int pcibios_read_config_dword (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned int *value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - *value = 0xffffffff; - if (where & 0x3) { - return PCIBIOS_BAD_REGISTER_NUMBER; - } - if (mk_conf_addr(bus, device_fn, where, &pci_addr)) { - return PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x18; - *value = conf_read(addr); - return PCIBIOS_SUCCESSFUL; -} - - -int pcibios_write_config_byte (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned char value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - if (mk_conf_addr(bus, device_fn, where, &pci_addr) < 0) { - return PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x00; - conf_write(addr, value << ((where & 3) * 8)); - return PCIBIOS_SUCCESSFUL; -} - - -int pcibios_write_config_word (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned short value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - if (mk_conf_addr(bus, device_fn, where, &pci_addr) < 0) { - return PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x08; - conf_write(addr, value << ((where & 3) * 8)); - return PCIBIOS_SUCCESSFUL; -} - - -int pcibios_write_config_dword (unsigned char bus, unsigned char device_fn, - unsigned char where, unsigned int value) -{ - unsigned long addr = LCA_CONF; - unsigned long pci_addr; - - if (mk_conf_addr(bus, device_fn, where, &pci_addr) < 0) { - return 
PCIBIOS_SUCCESSFUL; - } - addr |= (pci_addr << 5) + 0x18; - conf_write(addr, value << ((where & 3) * 8)); - return PCIBIOS_SUCCESSFUL; -} diff --git a/arch/mips/sni/hw-access.c b/arch/mips/sni/hw-access.c index 42c69590a..39cbec410 100644 --- a/arch/mips/sni/hw-access.c +++ b/arch/mips/sni/hw-access.c @@ -11,12 +11,14 @@ #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/types.h> +#include <linux/mm.h> #include <asm/bootinfo.h> -#include <asm/cache.h> +#include <asm/cachectl.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mc146818rtc.h> +#include <asm/pgtable.h> #include <asm/vector.h> extern int FLOPPY_IRQ; @@ -109,7 +111,7 @@ fd_disable_irq(void) void sni_fd_cacheflush(const void *addr, size_t size) { - cacheflush((unsigned long)addr, (unsigned long)size, CF_DCACHE|CF_ALL); + flush_cache_all(); } /* diff --git a/arch/mips/sni/int-handler.S b/arch/mips/sni/int-handler.S index 367c07d84..c01c34c9d 100644 --- a/arch/mips/sni/int-handler.S +++ b/arch/mips/sni/int-handler.S @@ -12,21 +12,22 @@ #include <asm/mipsconfig.h> #include <asm/mipsregs.h> #include <asm/regdef.h> +#include <asm/sni.h> #include <asm/stackframe.h> .text .set noreorder .set noat .align 5 - NESTED(sni_rm200_pci_handle_int, FR_SIZE, sp) + NESTED(sni_rm200_pci_handle_int, PT_SIZE, sp) SAVE_ALL - REG_S sp,FR_ORIG_REG2(sp) + REG_S sp,PT_OR2(sp) CLI .set at - lui s0,%hi(PORT_BASE_SNI) + lui s0,%hi(SNI_PORT_BASE) li a0,0x0f - sb a0,%lo(PORT_BASE_SNI+0x20)(s0) # poll command - lb a0,%lo(PORT_BASE_SNI+0x20)(s0) # read result + sb a0,%lo(SNI_PORT_BASE+0x20)(s0) # poll command + lb a0,%lo(SNI_PORT_BASE+0x20)(s0) # read result bgtz a0,poll_second andi a0,7 beq a0,2,poll_second # cascade? @@ -34,17 +35,17 @@ /* * Acknowledge first pic */ - lb t2,%lo(PORT_BASE_SNI+0x21)(s0) + lb t2,%lo(SNI_PORT_BASE+0x21)(s0) lui s4,%hi(cache_21) lb t0,%lo(cache_21)(s4) sllv s1,s1,a0 or t0,s1 sb t0,%lo(cache_21)(s4) - sb t0,%lo(PORT_BASE_SNI+0x21)(s0) + sb t0,%lo(SNI_PORT_BASE+0x21)(s0) lui s3,%hi(intr_count) lw s7,%lo(intr_count)(s3) li t2,0x20 - sb t2,%lo(PORT_BASE_SNI+0x20)(s0) + sb t2,%lo(SNI_PORT_BASE+0x20)(s0) /* * Now call the real handler */ @@ -59,38 +60,38 @@ /* * Unblock first pic */ - lbu t1,%lo(PORT_BASE_SNI+0x21)(s0) + lbu t1,%lo(SNI_PORT_BASE+0x21)(s0) lb t1,%lo(cache_21)(s4) nor s1,zero,s1 and t1,s1 sb t1,%lo(cache_21)(s4) jr v0 - sb t1,%lo(PORT_BASE_SNI+0x21)(s0) # delay slot + sb t1,%lo(SNI_PORT_BASE+0x21)(s0) # delay slot /* * Cascade interrupt from second PIC */ .align 5 poll_second: li a0,0x0f - sb a0,%lo(PORT_BASE_SNI+0xa0)(s0) # poll command - lb a0,%lo(PORT_BASE_SNI+0xa0)(s0) # read result + sb a0,%lo(SNI_PORT_BASE+0xa0)(s0) # poll command + lb a0,%lo(SNI_PORT_BASE+0xa0)(s0) # read result bgtz a0,3f andi a0,7 /* * Acknowledge second pic */ - lbu t2,%lo(PORT_BASE_SNI+0xa1)(s0) + lbu t2,%lo(SNI_PORT_BASE+0xa1)(s0) lui s4,%hi(cache_A1) lb t3,%lo(cache_A1)(s4) sllv s1,s1,a0 or t3,s1 sb t3,%lo(cache_A1)(s4) - sb t3,%lo(PORT_BASE_SNI+0xa1)(s0) + sb t3,%lo(SNI_PORT_BASE+0xa1)(s0) li t3,0x20 - sb t3,%lo(PORT_BASE_SNI+0xa0)(s0) + sb t3,%lo(SNI_PORT_BASE+0xa0)(s0) lui s3,%hi(intr_count) lw s7,%lo(intr_count)(s3) - sb t3,%lo(PORT_BASE_SNI+0x20)(s0) + sb t3,%lo(SNI_PORT_BASE+0x20)(s0) /* * Now call the real handler */ @@ -106,14 +107,14 @@ poll_second: li a0,0x0f /* * Unblock second pic */ - lb t1,%lo(PORT_BASE_SNI+0xa1)(s0) + lb t1,%lo(SNI_PORT_BASE+0xa1)(s0) lb t1,%lo(cache_A1)(s4) subu t0,1 nor s1,zero,s1 and t1,t1,s1 sb t1,%lo(cache_A1)(s4) jr v0 - sb t1,%lo(PORT_BASE_SNI+0xa1)(s0) # 
delay slot + sb t1,%lo(SNI_PORT_BASE+0xa1)(s0) # delay slot /* * FIXME: This is definatly wrong but I'll have to do it this way diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c index a4cc4a57f..ca958ca2a 100644 --- a/arch/mips/sni/setup.c +++ b/arch/mips/sni/setup.c @@ -16,6 +16,7 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> +#include <asm/sni.h> #include <asm/vector.h> /* @@ -93,7 +94,7 @@ sni_rm200_pci_setup(void) irq_setup = sni_irq_setup; fd_cacheflush = sni_fd_cacheflush; // Will go away feature = &sni_rm200_pci_feature; - port_base = PORT_BASE_SNI; + port_base = SNI_PORT_BASE; isa_slot_offset = 0xb0000000; request_region(0x00,0x20,"dma1"); request_region(0x40,0x20,"timer"); diff --git a/arch/mips/tools/Makefile b/arch/mips/tools/Makefile new file mode 100644 index 000000000..7216277b3 --- /dev/null +++ b/arch/mips/tools/Makefile @@ -0,0 +1,26 @@ +# Makefile for MIPS kernel build tools. +# +# Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) +# Copyright (C) 1997 Ralf Baechle (ralf@gnu.ai.mit.edu) +# +TARGET := $(TOPDIR)/include/asm-$(ARCH)/offset.h + +.S.s: + $(CPP) $(CFLAGS) $< -o $*.s +.S.o: + $(CC) $(CFLAGS) -c $< -o $*.o + +all: $(TARGET) + +$(TARGET): offset.h + cmp -s $^ $@ || (cp $^ $(TARGET).new && mv $(TARGET).new $(TARGET)) + +offset.h: offset.s + sed -n '/^@@@/s///p' $^ >$@ + +offset.s: offset.c + +clean: + rm -f offset.s $(TARGET).new + +include $(TOPDIR)/Rules.make diff --git a/arch/mips/tools/offset.c b/arch/mips/tools/offset.c new file mode 100644 index 000000000..3bb46de6e --- /dev/null +++ b/arch/mips/tools/offset.c @@ -0,0 +1,128 @@ +/* + * offset.c: Calculate pt_regs and task_struct offsets. + * + * Copyright (C) 1996 David S. Miller + * Made portable by Ralf Baechle + */ + +#include <linux/types.h> +#include <linux/sched.h> + +#include <asm/ptrace.h> +#include <asm/processor.h> + +#define text(t) __asm__("\n@@@" t) +#define _offset(type, member) (&(((type *)NULL)->member)) + +#define offset(string, ptr, member) \ + __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member))) +#define size(string, size) \ + __asm__("\n@@@" string "%0" : : "i" (sizeof(size))) +#define linefeed text("") + +text("/* DO NOT TOUCH, AUTOGENERATED BY OFFSET.C */"); +text(""); +text("#ifndef _MIPS_OFFSET_H"); +text("#define _MIPS_OFFSET_H"); +text(""); + +void output_ptreg_defines(void) +{ + text("/* MIPS pt_regs offsets. 
*/"); + offset("#define PT_R0 ", struct pt_regs, regs[0]); + offset("#define PT_R1 ", struct pt_regs, regs[1]); + offset("#define PT_R2 ", struct pt_regs, regs[2]); + offset("#define PT_R3 ", struct pt_regs, regs[3]); + offset("#define PT_R4 ", struct pt_regs, regs[4]); + offset("#define PT_R5 ", struct pt_regs, regs[5]); + offset("#define PT_R6 ", struct pt_regs, regs[6]); + offset("#define PT_R7 ", struct pt_regs, regs[7]); + offset("#define PT_R8 ", struct pt_regs, regs[8]); + offset("#define PT_R9 ", struct pt_regs, regs[9]); + offset("#define PT_R10 ", struct pt_regs, regs[10]); + offset("#define PT_R11 ", struct pt_regs, regs[11]); + offset("#define PT_R12 ", struct pt_regs, regs[12]); + offset("#define PT_R13 ", struct pt_regs, regs[13]); + offset("#define PT_R14 ", struct pt_regs, regs[14]); + offset("#define PT_R15 ", struct pt_regs, regs[15]); + offset("#define PT_R16 ", struct pt_regs, regs[16]); + offset("#define PT_R17 ", struct pt_regs, regs[17]); + offset("#define PT_R18 ", struct pt_regs, regs[18]); + offset("#define PT_R19 ", struct pt_regs, regs[19]); + offset("#define PT_R20 ", struct pt_regs, regs[20]); + offset("#define PT_R21 ", struct pt_regs, regs[21]); + offset("#define PT_R22 ", struct pt_regs, regs[22]); + offset("#define PT_R23 ", struct pt_regs, regs[23]); + offset("#define PT_R24 ", struct pt_regs, regs[24]); + offset("#define PT_R25 ", struct pt_regs, regs[25]); + offset("#define PT_R26 ", struct pt_regs, regs[26]); + offset("#define PT_R27 ", struct pt_regs, regs[27]); + offset("#define PT_R28 ", struct pt_regs, regs[28]); + offset("#define PT_R29 ", struct pt_regs, regs[29]); + offset("#define PT_R30 ", struct pt_regs, regs[30]); + offset("#define PT_R31 ", struct pt_regs, regs[31]); + offset("#define PT_LO ", struct pt_regs, lo); + offset("#define PT_HI ", struct pt_regs, hi); + offset("#define PT_OR2 ", struct pt_regs, orig_reg2); + offset("#define PT_OR7 ", struct pt_regs, orig_reg7); + offset("#define PT_EPC ", struct pt_regs, cp0_epc); + offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); + offset("#define PT_STATUS ", struct pt_regs, cp0_status); + offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); + size("#define PT_SIZE ", struct pt_regs); + linefeed; +} + +void output_task_defines(void) +{ + text("/* MIPS task_struct offsets. */"); + offset("#define TASK_STATE ", struct task_struct, state); + offset("#define TASK_PRIORITY ", struct task_struct, priority); + offset("#define TASK_SIGNAL ", struct task_struct, signal); + offset("#define TASK_BLOCKED ", struct task_struct, blocked); + offset("#define TASK_FLAGS ", struct task_struct, flags); + offset("#define TASK_SAVED_KSTACK ", struct task_struct, saved_kernel_stack); + offset("#define TASK_KSTACK_PG ", struct task_struct, kernel_stack_page); + offset("#define TASK_MM ", struct task_struct, mm); + linefeed; +} + +void output_thread_defines(void) +{ + text("/* MIPS specific thread_struct offsets. 
*/"); + offset("#define THREAD_REG16 ", struct task_struct, tss.reg16); + offset("#define THREAD_REG17 ", struct task_struct, tss.reg17); + offset("#define THREAD_REG18 ", struct task_struct, tss.reg18); + offset("#define THREAD_REG19 ", struct task_struct, tss.reg19); + offset("#define THREAD_REG20 ", struct task_struct, tss.reg20); + offset("#define THREAD_REG21 ", struct task_struct, tss.reg21); + offset("#define THREAD_REG22 ", struct task_struct, tss.reg22); + offset("#define THREAD_REG23 ", struct task_struct, tss.reg23); + offset("#define THREAD_REG28 ", struct task_struct, tss.reg28); + offset("#define THREAD_REG29 ", struct task_struct, tss.reg29); + offset("#define THREAD_REG30 ", struct task_struct, tss.reg30); + offset("#define THREAD_REG31 ", struct task_struct, tss.reg31); + offset("#define THREAD_STATUS ", struct task_struct, tss.cp0_status); + offset("#define THREAD_FPU ", struct task_struct, tss.fpu); + offset("#define THREAD_BVADDR ", struct task_struct, tss.cp0_badvaddr); + offset("#define THREAD_ECODE ", struct task_struct, tss.error_code); + offset("#define THREAD_TRAPNO ", struct task_struct, tss.trap_no); + offset("#define THREAD_KSP ", struct task_struct, tss.ksp); + offset("#define THREAD_PGDIR ", struct task_struct, tss.pg_dir); + offset("#define THREAD_MFLAGS ", struct task_struct, tss.mflags); + offset("#define THREAD_CURDS ", struct task_struct, tss.current_ds); + offset("#define THREAD_TRAMP ", struct task_struct, tss.irix_trampoline); + offset("#define THREAD_OLDCTX ", struct task_struct, tss.irix_oldctx); + linefeed; +} + +void output_mm_defines(void) +{ + text("/* Linux mm_struct offsets. */"); + offset("#define MM_COUNT ", struct mm_struct, count); + offset("#define MM_PGD ", struct mm_struct, pgd); + offset("#define MM_CONTEXT ", struct mm_struct, context); + linefeed; +} + +text("#endif /* !(_MIPS_OFFSET_H) */"); diff --git a/arch/mips/tools/offset.h b/arch/mips/tools/offset.h new file mode 100644 index 000000000..c5e6f37fa --- /dev/null +++ b/arch/mips/tools/offset.h @@ -0,0 +1,89 @@ +/* DO NOT TOUCH, AUTOGENERATED BY OFFSET.C */ + +#ifndef _MIPS_OFFSET_H +#define _MIPS_OFFSET_H + +/* MIPS pt_regs offsets. */ +#define PT_R0 24 +#define PT_R1 28 +#define PT_R2 32 +#define PT_R3 36 +#define PT_R4 40 +#define PT_R5 44 +#define PT_R6 48 +#define PT_R7 52 +#define PT_R8 56 +#define PT_R9 60 +#define PT_R10 64 +#define PT_R11 68 +#define PT_R12 72 +#define PT_R13 76 +#define PT_R14 80 +#define PT_R15 84 +#define PT_R16 88 +#define PT_R17 92 +#define PT_R18 96 +#define PT_R19 100 +#define PT_R20 104 +#define PT_R21 108 +#define PT_R22 112 +#define PT_R23 116 +#define PT_R24 120 +#define PT_R25 124 +#define PT_R26 128 +#define PT_R27 132 +#define PT_R28 136 +#define PT_R29 140 +#define PT_R30 144 +#define PT_R31 148 +#define PT_LO 152 +#define PT_HI 156 +#define PT_OR2 160 +#define PT_OR7 164 +#define PT_EPC 168 +#define PT_BVADDR 172 +#define PT_STATUS 176 +#define PT_CAUSE 180 +#define PT_SIZE 184 + +/* MIPS task_struct offsets. */ +#define TASK_STATE 0 +#define TASK_PRIORITY 8 +#define TASK_SIGNAL 12 +#define TASK_BLOCKED 16 +#define TASK_FLAGS 20 +#define TASK_SAVED_KSTACK 84 +#define TASK_KSTACK_PG 88 +#define TASK_MM 912 + +/* MIPS specific thread_struct offsets. 
*/ +#define THREAD_REG16 544 +#define THREAD_REG17 548 +#define THREAD_REG18 552 +#define THREAD_REG19 556 +#define THREAD_REG20 560 +#define THREAD_REG21 564 +#define THREAD_REG22 568 +#define THREAD_REG23 572 +#define THREAD_REG28 576 +#define THREAD_REG29 580 +#define THREAD_REG30 584 +#define THREAD_REG31 588 +#define THREAD_STATUS 592 +#define THREAD_FPU 600 +#define THREAD_BVADDR 864 +#define THREAD_ECODE 868 +#define THREAD_TRAPNO 872 +#define THREAD_KSP 876 +#define THREAD_PGDIR 880 +#define THREAD_MFLAGS 884 +#define THREAD_CURDS 888 +#define THREAD_TRAMP 892 +#define THREAD_OLDCTX 896 + +/* Linux mm_struct offsets. */ +#define MM_COUNT 0 +#define MM_PGD 4 +#define MM_CONTEXT 8 + +#endif /* !(_MIPS_OFFSET_H) */
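
The new arch/mips/tools files above implement the usual trick for exporting C structure offsets to assembler code: offset.c is only ever compiled to assembly, each asm() statement with an "i" constraint makes the compiler print the constant offset into a line tagged @@@, and the sed rule in the Makefile strips that tag to produce include/asm-mips/offset.h, which int-handler.S and other assembler sources can then use (PT_SIZE, PT_OR2, and so on). Below is a minimal, self-contained sketch of the same mechanism, not part of the patch; the struct, macro, and file names (example_regs, EX_*, sketch-offset.c) are purely illustrative, and on targets other than MIPS the immediate operand may be printed with a prefix such as "$" that the sed command would also have to strip.

/* sketch-offset.c -- illustrative only, not part of this patch. */
#include <stddef.h>

struct example_regs {
	unsigned long regs[32];
	unsigned long lo, hi;
};

/* Emit a tagged marker line into the generated assembly. */
#define text(t)	__asm__("\n@@@" t)

/* Emit "@@@<string><offset>"; the "i" constraint forces a compile-time constant. */
#define offset(string, type, member) \
	__asm__("\n@@@" string "%0" : : "i" (offsetof(type, member)))

int main(void)
{
	text("/* Autogenerated example -- do not edit. */");
	offset("#define EX_R0 ", struct example_regs, regs[0]);
	offset("#define EX_LO ", struct example_regs, lo);
	offset("#define EX_HI ", struct example_regs, hi);
	return 0;
}

Compiling with something like "gcc -S sketch-offset.c -o sketch-offset.s" and then running "sed -n '/^@@@/s///p' sketch-offset.s > example-offset.h" mirrors the offset.s and offset.h rules in the new tools/Makefile: the program is never linked or executed, the build only harvests the compiler's assembly output, so the generated offsets always match the structure layout the kernel was actually compiled with.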