From 116674acc97ba75a720329996877077d988443a2 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Fri, 9 Mar 2001 20:33:35 +0000
Subject: Merge with Linux 2.4.2.

---
 arch/s390x/Makefile                |   70 +
 arch/s390x/boot/Makefile           |   37 +
 arch/s390x/boot/ipldump.S          |  178 ++
 arch/s390x/boot/ipleckd.S          |  303 +++
 arch/s390x/boot/iplfba.S           |  131 ++
 arch/s390x/config.in               |   73 +
 arch/s390x/defconfig               |  219 ++
 arch/s390x/kernel/Makefile         |   38 +
 arch/s390x/kernel/binfmt_elf32.c   |  203 ++
 arch/s390x/kernel/bitmap.S         |   37 +
 arch/s390x/kernel/cpcmd.c          |   49 +
 arch/s390x/kernel/cpcmd.h          |   14 +
 arch/s390x/kernel/cpprintk.c       |   25 +
 arch/s390x/kernel/debug.c          | 1167 ++++++++++
 arch/s390x/kernel/ebcdic.c         |  391 ++++
 arch/s390x/kernel/entry.S          |  868 ++++++++
 arch/s390x/kernel/exec32.c         |   85 +
 arch/s390x/kernel/gdb-stub.c       |  575 +++++
 arch/s390x/kernel/head.S           |  598 +++++
 arch/s390x/kernel/ieee.h           |   90 +
 arch/s390x/kernel/init_task.c      |   32 +
 arch/s390x/kernel/ioctl32.c        |  563 +++++
 arch/s390x/kernel/irq.c            |  423 ++++
 arch/s390x/kernel/irqextras390.c   |   35 +
 arch/s390x/kernel/linux32.c        | 4326 ++++++++++++++++++++++++++++++++++++
 arch/s390x/kernel/linux32.h        |  246 ++
 arch/s390x/kernel/lowcore.S        |   28 +
 arch/s390x/kernel/mathemu.c        |  920 ++++++++
 arch/s390x/kernel/process.c        |  516 +++++
 arch/s390x/kernel/ptrace.c         |  613 +++++
 arch/s390x/kernel/reipl.S          |   94 +
 arch/s390x/kernel/s390_ext.c       |   77 +
 arch/s390x/kernel/s390_ksyms.c     |  157 ++
 arch/s390x/kernel/s390fpu.c        |   87 +
 arch/s390x/kernel/semaphore.c      |  302 +++
 arch/s390x/kernel/setup.c          |  380 ++++
 arch/s390x/kernel/signal.c         |  595 +++++
 arch/s390x/kernel/signal32.c       |  725 ++++++
 arch/s390x/kernel/smp.c            |  760 +++++++
 arch/s390x/kernel/sys_s390.c       |  205 ++
 arch/s390x/kernel/time.c           |  258 +++
 arch/s390x/kernel/traps.c          |  248 +++
 arch/s390x/kernel/wrapper32.S      | 1071 +++++++++
 arch/s390x/lib/Makefile            |   18 +
 arch/s390x/lib/checksum.c          |   40 +
 arch/s390x/lib/delay.c             |   50 +
 arch/s390x/lib/memset.S            |   30 +
 arch/s390x/lib/strcmp.S            |   27 +
 arch/s390x/lib/strncpy.S           |   30 +
 arch/s390x/lib/uaccess.S           |   45 +
 arch/s390x/mm/Makefile             |   14 +
 arch/s390x/mm/extable.c            |   61 +
 arch/s390x/mm/fault.c              |  266 +++
 arch/s390x/mm/init.c               |  405 ++++
 arch/s390x/mm/ioremap.c            |  129 ++
 arch/s390x/tools/dasdfmt/Makefile  |    9 +
 arch/s390x/tools/dasdfmt/dasdfmt.8 |   68 +
 arch/s390x/tools/dasdfmt/dasdfmt.c |  830 +++++++
 arch/s390x/tools/silo/Makefile     |   15 +
 arch/s390x/tools/silo/cfg.c        |  373 ++++
 arch/s390x/tools/silo/cfg.h        |   58 +
 arch/s390x/tools/silo/silo.c       |  573 +++++
 arch/s390x/tools/silo/silo.conf    |    7 +
 arch/s390x/vmlinux.lds             |   78 +
 64 files changed, 20938 insertions(+)
 create mode 100644 arch/s390x/Makefile
 create mode 100644 arch/s390x/boot/Makefile
 create mode 100644 arch/s390x/boot/ipldump.S
 create mode 100644 arch/s390x/boot/ipleckd.S
 create mode 100644 arch/s390x/boot/iplfba.S
 create mode 100644 arch/s390x/config.in
 create mode 100644 arch/s390x/defconfig
 create mode 100644 arch/s390x/kernel/Makefile
 create mode 100644 arch/s390x/kernel/binfmt_elf32.c
 create mode 100644 arch/s390x/kernel/bitmap.S
 create mode 100644 arch/s390x/kernel/cpcmd.c
 create mode 100644 arch/s390x/kernel/cpcmd.h
 create mode 100644 arch/s390x/kernel/cpprintk.c
 create mode 100644 arch/s390x/kernel/debug.c
 create mode 100644 arch/s390x/kernel/ebcdic.c
 create mode 100644 arch/s390x/kernel/entry.S
 create mode 100644 arch/s390x/kernel/exec32.c
 create mode 100644 arch/s390x/kernel/gdb-stub.c
 create mode 100644 arch/s390x/kernel/head.S
 create mode 100644 arch/s390x/kernel/ieee.h
 create mode 100644 arch/s390x/kernel/init_task.c
 create mode 100644 arch/s390x/kernel/ioctl32.c
 create mode 100644 arch/s390x/kernel/irq.c
 create mode 100644 arch/s390x/kernel/irqextras390.c
 create mode 100644 arch/s390x/kernel/linux32.c
 create mode 100644 arch/s390x/kernel/linux32.h
 create mode 100644 arch/s390x/kernel/lowcore.S
 create mode 100644 arch/s390x/kernel/mathemu.c
 create mode 100644 arch/s390x/kernel/process.c
 create mode 100644 arch/s390x/kernel/ptrace.c
 create mode 100644 arch/s390x/kernel/reipl.S
 create mode 100644 arch/s390x/kernel/s390_ext.c
 create mode 100644 arch/s390x/kernel/s390_ksyms.c
 create mode 100644 arch/s390x/kernel/s390fpu.c
 create mode 100644 arch/s390x/kernel/semaphore.c
 create mode 100644 arch/s390x/kernel/setup.c
 create mode 100644 arch/s390x/kernel/signal.c
 create mode 100644 arch/s390x/kernel/signal32.c
 create mode 100644 arch/s390x/kernel/smp.c
 create mode 100644 arch/s390x/kernel/sys_s390.c
 create mode 100644 arch/s390x/kernel/time.c
 create mode 100644 arch/s390x/kernel/traps.c
 create mode 100644 arch/s390x/kernel/wrapper32.S
 create mode 100644 arch/s390x/lib/Makefile
 create mode 100644 arch/s390x/lib/checksum.c
 create mode 100644 arch/s390x/lib/delay.c
 create mode 100644 arch/s390x/lib/memset.S
 create mode 100644 arch/s390x/lib/strcmp.S
 create mode 100644 arch/s390x/lib/strncpy.S
 create mode 100644 arch/s390x/lib/uaccess.S
 create mode 100644 arch/s390x/mm/Makefile
 create mode 100644 arch/s390x/mm/extable.c
 create mode 100644 arch/s390x/mm/fault.c
 create mode 100644 arch/s390x/mm/init.c
 create mode 100644 arch/s390x/mm/ioremap.c
 create mode 100644 arch/s390x/tools/dasdfmt/Makefile
 create mode 100644 arch/s390x/tools/dasdfmt/dasdfmt.8
 create mode 100644 arch/s390x/tools/dasdfmt/dasdfmt.c
 create mode 100644 arch/s390x/tools/silo/Makefile
 create mode 100644 arch/s390x/tools/silo/cfg.c
 create mode 100644 arch/s390x/tools/silo/cfg.h
 create mode 100644 arch/s390x/tools/silo/silo.c
 create mode 100644 arch/s390x/tools/silo/silo.conf
 create mode 100644 arch/s390x/vmlinux.lds

diff --git a/arch/s390x/Makefile b/arch/s390x/Makefile
new file mode 100644
index 000000000..7ea330273
--- /dev/null
+++ b/arch/s390x/Makefile
@@ -0,0 +1,70 @@
+
+# s390/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+LD=$(CROSS_COMPILE)ld -m elf64_s390
+CPP=$(CC) -E
+OBJCOPY=$(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
+LDFLAGS=-e start
+LINKFLAGS =-T $(TOPDIR)/arch/s390x/vmlinux.lds $(LDFLAGS)
+MODFLAGS += -fpic
+
+CFLAGS_PIPE := -pipe
+CFLAGS_NSR := -fno-strength-reduce
+CFLAGS := $(CFLAGS) $(CFLAGS_PIPE) $(CFLAGS_NSR)
+
+HEAD := arch/s390x/kernel/head.o arch/s390x/kernel/init_task.o
+
+SUBDIRS := $(SUBDIRS) arch/s390x/mm arch/s390x/kernel arch/s390x/lib \
+	   drivers/s390
+CORE_FILES := arch/s390x/mm/mm.o arch/s390x/kernel/kernel.o $(CORE_FILES) \
+	      drivers/s390/io.o
+LIBS := $(TOPDIR)/arch/s390x/lib/lib.a $(LIBS) $(TOPDIR)/arch/s390x/lib/lib.a
+
+all: image listing
+
+listing: vmlinux
+	@$(MAKEBOOT) listing
+
+arch/s390x/kernel: dummy
+	$(MAKE) linuxsubdirs SUBDIRS=arch/s390x/kernel
+
+arch/s390x/mm: dummy
+	$(MAKE) linuxsubdirs SUBDIRS=arch/s390x/mm
+
+drivers/s390: dummy
+	$(MAKE) linuxsubdirs SUBDIRS=drivers/s390
+
+MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
+MAKESILO = $(MAKE) -C arch/$(ARCH)/tools/silo
+
+MAKEDASDFMT = $(MAKE) -C arch/$(ARCH)/tools/dasdfmt
+
+silo:
+	@$(MAKE) -C arch/$(ARCH)/tools/silo
+
+dasdfmt:
+	@$(MAKE) -C arch/$(ARCH)/tools/dasdfmt
+
+image: vmlinux
+	@$(MAKEBOOT) image
+
+archclean:
+	@$(MAKEBOOT) clean
+
+archmrproper:
+
+archdep:
+	@$(MAKEBOOT) dep
diff --git a/arch/s390x/boot/Makefile b/arch/s390x/boot/Makefile
new file mode 100644
index 000000000..fb112b964
--- /dev/null
+++ b/arch/s390x/boot/Makefile
@@ -0,0 +1,37 @@
+#
+# Makefile for the linux s390-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+OBJCOPY = $(CROSS_COMPILE)objcopy
+
+O_TARGET :=
+
+include $(TOPDIR)/Rules.make
+
+.S.o:
+	$(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
+
+%.lnk: %.o
+	$(LD) -Ttext 0x0 -o $@ $<
+
+%.boot: %.lnk
+	$(OBJCOPY) -O binary $< $@
+
+image: $(CONFIGURE) $(TOPDIR)/vmlinux \
+	iplfba.boot ipleckd.boot ipldump.boot
+	$(OBJCOPY) -O binary $(TOPDIR)/vmlinux image
+	$(NM) $(TOPDIR)/vmlinux | grep -v '\(compiled\)\|\( [aU] \)\|\(\.\)\|\(LASH[RL]DI\)' | sort > $(TOPDIR)/System.map
+
+listing: ../../../vmlinux
+	$(OBJDUMP) --disassemble --disassemble-all --disassemble-zeroes --reloc $(TOPDIR)/vmlinux > listing
+
+dep:
+
+clean:
+	rm -f image listing iplfba.boot ipleckd.boot ipldump.boot
+
diff --git a/arch/s390x/boot/ipldump.S b/arch/s390x/boot/ipldump.S
new file mode 100644
index 000000000..84150b5af
--- /dev/null
+++ b/arch/s390x/boot/ipldump.S
@@ -0,0 +1,178 @@
+/*
+ *  arch/s390/boot/ipldump.S
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * Tape dump ipl record. Put it on a tape and ipl from it and it will
+ * write a dump of the real storage after the ipl record on that tape.
+ */
+
+#include
+#include
+
+#define IPL_BS 1024
+	.org	0
+	.long	0x00080000,0x80000000+_start # The first 24 bytes are loaded
+	.long	0x07000000,0x60000001	# by ipl to addresses 0-23.
+	.long	0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000	# svc old psw
+	.long	0x00000000,0x00000000	# program check old psw
+	.long	0x00000000,0x00000000	# machine check old psw
+	.long	0x00000000,0x00000000	# io old psw
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x000a0000,0x00000058	# external new psw
+	.long	0x000a0000,0x00000060	# svc new psw
+	.long	0x000a0000,0x00000068	# program check new psw
+	.long	0x000a0000,0x00000070	# machine check new psw
+	.long	0x00080000,0x80000000+.Lioint # io new psw
+
+	.org	0x100
+	.globl	_start
+_start:
+	l	%r1,0xb8		# load ipl subchannel number
+#
+# find out memory size
+#
+	mvc	104(8),.Lpcmem0		# setup program check handler
+	slr	%r3,%r3
+	lhi	%r2,1
+	sll	%r2,20
+.Lloop0:
+	l	%r0,0(%r3)		# test page
+	ar	%r3,%r2			# add 1M
+	jnm	.Lloop0			# r1 < 0x80000000 -> loop
+.Lchkmem0:
+	n	%r3,.L4malign0		# align to multiples of 4M
+	st	%r3,.Lmemsize		# store memory size
+.Lmemok:
+
+#
+# first write a tape mark
+#
+	bras	%r14,.Ltapemark
+#
+# write real storage to tape
+#
+	slr	%r2,%r2			# start at address 0
+	bras	%r14,.Lwriter		# load ramdisk
+#
+# write another tape mark
+#
+	bras	%r14,.Ltapemark
+#
+# everything written, stop processor
+#
+	lpsw	.Lstopped
+#
+# subroutine for writing to tape
+# Parameters:
+#  R1 = device number
+#  R2 = start address
+#  R3 = length
+.Lwriter:
+	st	%r14,.Lldret
+	la	%r12,.Lorbread		# r12 = address of orb
+	la	%r5,.Lirb		# r5 = address of irb
+	st	%r2,.Lccwwrite+4	# initialize CCW data addresses
+	lctl	%c6,%c6,.Lcr6
+	slr	%r2,%r2
+.Lldlp:
+	lhi	%r6,3			# 3 retries
+.Lssch:
+	ssch	0(%r12)			# write chunk of IPL_BS bytes
+	jnz	.Llderr
+.Lw4end:
+	bras	%r14,.Lwait4io
+	tm	8(%r5),0x82		# do we have a problem ?
+	jnz	.Lrecov
+	l	%r0,.Lccwwrite+4	# update CCW data addresses
+	ahi	%r0,IPL_BS
+	st	%r0,.Lccwwrite+4
+	clr	%r0,%r3			# enough ?
+	jl	.Lldlp
+.Ldone:
+	l	%r14,.Lldret
+	br	%r14			# r2 contains the total size
+.Lrecov:
+	bras	%r14,.Lsense		# do the sensing
+	brct	%r6,.Lssch		# dec. retry count & branch
+	j	.Llderr
+.Ltapemark:
+	st	%r14,.Lldret
+	la	%r12,.Lorbmark		# r12 = address of orb
+	la	%r5,.Lirb		# r5 = address of irb
+	lctl	%c6,%c6,.Lcr6
+	ssch	0(%r12)			# write a tape mark
+	jnz	.Llderr
+	bras	%r14,.Lwait4io
+	l	%r14,.Lldret
+	br	%r14
+#
+# Sense subroutine
+#
+.Lsense:
+	st	%r14,.Lsnsret
+	la	%r7,.Lorbsense
+	ssch	0(%r7)			# start sense command
+	jnz	.Llderr
+	bras	%r14,.Lwait4io
+	l	%r14,.Lsnsret
+	tm	8(%r5),0x82		# do we have a problem ?
+	jnz	.Llderr
+	br	%r14
+#
+# Wait for interrupt subroutine
+#
+.Lwait4io:
+	lpsw	.Lwaitpsw
+.Lioint:
+	c	%r1,0xb8		# compare subchannel number
+	jne	.Lwait4io
+	tsch	0(%r5)
+	slr	%r0,%r0
+	tm	8(%r5),0x82		# do we have a problem ?
+	jnz	.Lwtexit
+	tm	8(%r5),0x04		# got device end ?
+	jz	.Lwait4io
+.Lwtexit:
+	br	%r14
+.Llderr:
+	lpsw	.Lcrash
+
+	.align	8
+.Lorbread:
+	.long	0x00000000,0x0080ff00,.Lccwwrite
+	.align	8
+.Lorbsense:
+	.long	0x00000000,0x0080ff00,.Lccwsense
+	.align	8
+.Lorbmark:
+	.long	0x00000000,0x0080ff00,.Lccwmark
+	.align	8
+.Lccwwrite:
+	.long	0x01200000+IPL_BS,0x00000000
+.Lccwsense:
+	.long	0x04200001,0x00000000
+.Lccwmark:
+	.long	0x1f200001,0x00000000
+.Lwaitpsw:
+	.long	0x020a0000,0x80000000+.Lioint
+
+.Lirb:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:	.long	0xff000000
+	.align	8
+.Lcrash:.long	0x000a0000,0x00000000
+.Lstopped: .long 0x000a0000,0x00001234
+.Lpcmem0:.long	0x00080000,0x80000000 + .Lchkmem0
+.L4malign0:.long 0xffc00000
+.Lmemsize:.long	0
+.Lldret:.long	0
+.Lsnsret: .long	0
+
+	.org	IPL_BS
+
diff --git a/arch/s390x/boot/ipleckd.S b/arch/s390x/boot/ipleckd.S
new file mode 100644
index 000000000..d66a8d684
--- /dev/null
+++ b/arch/s390x/boot/ipleckd.S
@@ -0,0 +1,303 @@
+#
+#  arch/s390/boot/ipleckd.S
+#    IPL record for 3380/3390 DASD
+#
+#  S390 version
+#    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+#    Author(s): Holger Smolinski
+#
+#
+# FIXME: should use the countarea to determine the blocksize
+# FIXME: should insert zeroes into memory when filling holes
+# FIXME: calculate blkpertrack from rdc data and blksize
+
+# change 09/20/00 removed obsolete store of ipldevice to text segment
+
+# Usage of registers
+# r1:  ipl subchannel (general use, don't overload without save/restore!)
+# r10:
+# r13: base register index to 0x0000
+# r14: callers address
+# r15: temporary save register (we have no stack!)
+
+# storage layout:
+
+#include
+
+	.org	0
+.psw:	.long	0x00080000,0x80000000+_start
+.ccw1:	.long	0x06000000,0x00001000	# Re-Read enough of bootsector to start
+.ccw2:	.long	0x00000000,0x00000000	# read countarea of record 1 to s/w.
+
+	.org	0x58
+.Lextn:	.long	0x000a0000,0x00000000+.Lextn
+.Lsvcn:	.long	0x000a0000,0x00000000+.Lsvcn
+.Lprgn:	.long	0x00080000,0x00000000+.Lecs
+.Lmcn:	.long	0x000a0000,0x00000000+.Lmcn
+.Lion:	.long	0x00080000,0x80000000+.Lionewaddr
+
+	.org	0xe0
+.Llstad:.long	0x00000000,0x00000000	# sectorno + ct of bootlist
+
+	.org	0xf0			# Lets start now...
+_start:	.globl	_start
+	l	%r1,__LC_SUBCHANNEL_ID	# get IPL-subchannel from lowcore
+	st	%r1,__LC_IPLDEV		# keep it for reipl
+	stsch	.Lrdcdata
+	oi	.Lrdcdata+5,0x84	# enable ssch and multipath mode
+.Lecs:	xi	.Lrdcdata+27,0x01	# enable concurrent sense
+	msch	.Lrdcdata
+	xi	.Lprgn,6		# restore Wait and d/a bit in PCnew PSW
+	l	%r2,.Lparm
+	mvc	0x0(8,%r2),.Lnull	# set parmarea to null
+	lctl	%c6,%c6,.Lc6		# enable all interrupts
+.Lrdc:					# read device characteristics
+	la	%r6,.Lrdcccw
+	st	%r6,.Lorb+8		# store cp-address to orb
+	bras	%r15,.Lssch		# start I/O
+	oi	.Llodata+1,0x80
+	lh	%r5,.Lcountarea+6	# init r5 from countarea
+	stcm	%r5,3,.Lrdccw+2		# and store into rd template *FIXME*
+	stcm	%r5,3,.Llodata+14	# and store into lodata *FIXME*
+.Lbootlist:
+	l	%r2,.Llstad
+	l	%r3,.Lblklst
+	lhi	%r4,1
+	bras	%r14,.Lreadblks
+.Lloader:
+	l	%r10,.Lblklst		# r10 is index to bootlist
+	lhi	%r5,4			# r5: skip 4 blocks = firstpage....
+.Lkloop:
+	clc	.Lnull(8),0(%r10)	# test blocklist
+	jz	.Lchkparm		# end of list?
+	l	%r2,0(%r10)		# get startblock to r2
+	slr	%r4,%r4			# erase r4
+	icm	%r4,1,7(%r10)		# get blockcount
+	slr	%r3,%r3			# get address to r3
+	icm	%r3,0xe,4(%r10)
+	chi	%r5,0			# still blocks to skip?
+	jz	.Ldoread		# no: start reading
+	cr	%r5,%r4			# #skipblocks >= blockct?
+	jm	.L007			# no: skip the blocks one by one
+.L006:
+	sr	%r5,%r4			# decrease number of blocks to skip
+	j	.Lkcont			# advance to next entry
+.L007:
+	ahi	%r2,1			# skip 1 block...
+	bctr	%r4,0			# update blockct
+	ah	%r3,.Lcountarea+6	# increment address
+	bct	%r5,.L007		# 4 blocks skipped?
+.Ldoread:
+	ltr	%r2,%r2			# test startblock
+	jz	.Lzeroes		# startblocks is zero (hole)
+.Ldiskread:
+	bras	%r14,.Lreadblks
+	j	.Lkcont
+.Lzeroes:
+	lr	%r2,%r3
+.L001:	slr	%r3,%r3
+	icm	%r3,3,.Lcountarea+6	# get blocksize
+	slr	%r5,%r5			# no bytes to move
+.L008:	mvcle	%r2,%r4,0		# fill zeroes to storage
+	jo	.L008			# until block is filled
+	brct	%r4,.L001		# skip to next block
+.Lkcont:
+	ahi	%r10,8
+	j	.Lkloop
+.Lchkparm:
+	lm	%r3,%r4,.Lstart		# load .Lstart and .Lparm
+	clc	0x0(4,%r4),.Lnull
+	je	.Lrunkern
+	mvc	0x480(128,%r3),0(%r4)	# move 1k-0x80 to parmarea
+	mvc	0x500(256,%r3),0x80(%r4)
+	mvc	0x600(256,%r3),0x180(%r4)
+	mvc	0x700(256,%r3),0x280(%r4)
+.Lrunkern:
+#	lhi	%r2,17
+#	sll	%r2,12
+#	st	%r1,0xc6c(%r2)		# store iplsubchannel to lowcore
+#	st	%r1,0xc6c		# store iplsubchannel to lowcore
+	br	%r3
+# This function does the start IO
+# r2: number of first block to read ( input by caller )
+# r3: address to read data to ( input by caller )
+# r4: number of blocks to read ( input by caller )
+# r5: destroyed
+# r6: blocks per track ( input by caller )
+# r7: number of heads
+# r8:
+# r9:
+# r10:
+# r11: temporary register
+# r12: local use for base address
+# r13: base address for module
+# r14: address of caller for subroutine
+# r15: temporary save register (since we have no stack)
+.Lreadblks:
+	la	%r12,.Ldeccw
+	st	%r12,8+.Lorb		# store cpaddr to orb
+	ahi	%r12,0x10		# increment r12 to point to rdccw
+	oi	1(%r12),0x40		# set CC in rd template
+	# first setup the read CCWs
+	lr	%r15,%r4		# save number of blocks
+	slr	%r7,%r7
+	icm	%r7,3,.Lrdcdata+14	# load heads to r7
+	lhi	%r6,9
+	clc	.Lrdcdata+3(2),.L9345
+	je	.L011
+	lhi	%r6,10
+	clc	.Lrdcdata+3(2),.L3380
+	je	.L011
+	lhi	%r6,12
+	clc	.Lrdcdata+3(2),.L3390
+	je	.L011
+	bras	%r14,.Ldisab
+.L011:
+	# loop for nbl times
+.Lrdloop:
+	mvc	0(8,%r12),.Lrdccw	# copy template to this ccw
+	st	%r3,4(%r12)		# store target address to this ccw
+	bct	%r4,.L005		# decrement no of blks still to do
+	ni	1(%r12),0x3f		# delete CC from last ccw
+	lr	%r4,%r15		# restore number of blocks
+	# read CCWs are setup now
+	stcm	%r4,3,.Llodata+2	# store blockno to lodata clears r4
+	ar	%r4,%r2			# r4 (clear): ebl = blk + nbl
+	bctr	%r4,0			# decrement r4 (last blk touched)
+	srda	%r2,32			# trk = blk / bpt, bot = blk % bpt
+	dr	%r2,%r6			# r3: trk, r2: bot
+	ahi	%r2,1			# bot++ ( we start counting at 1 )
+	stcm	%r2,1,.Llodata+12	# store bot to lodata
+	xr	%r2,%r2			# cy = trk / heads, hd = trk % heads
+	dr	%r2,%r7			# r3: cy, r2: hd
+	sll	%r3,16			# combine to CCHH in r3
+	or	%r3,%r2
+	st	%r3,.Ldedata+8		# store cchh to dedata
+	st	%r3,.Llodata+4		# store cchh to lodata
+	st	%r3,.Llodata+8		# store cchh to lodata
+	lr	%r15,%r5		# save r5
+	srda	%r4,32			# tr2 = ebl / bpt
+	dr	%r4,%r6			# r5: tr2, r4: bot2
+	xr	%r4,%r4			# cy2 = tr2 / heads, hd2 = tr2 % heads
+	dr	%r4,%r7			# r5: cy2, r4: hd2
+	stcm	%r5,3,.Ldedata+12	# store cy2,hd2 to dedata
+	stcm	%r4,3,.Ldedata+14	# store cy2,hd2 to dedata
+	lr	%r5,%r15		# restore r5
+	# CCWs are setup now, aren't they?
+	bras	%r15,.Lssch		# start I/O
+	br	%r14			# return to caller
+.L005:
+	ah	%r3,.Lcountarea+6	# add blocksize to target address
+	ahi	%r12,8			# add sizeof(ccw) to base address
+	j	.Lrdloop
+# end of function
+# This function does the start IO
+# r1: Subchannel number
+# r8: ORB address
+# r9: IRB address
+.Lssch:
+	lhi	%r13,10			# initialize retries
+.L012:
+	ssch	.Lorb			# start I/O
+	jz	.Ltpi			# ok?
+	bras	%r14,.Ldisab		# error
+.Ltpi:
+	lpsw	.Lwaitpsw		# load wait-PSW
+.Lionewaddr:
+	c	%r1,0xb8		# compare to ipl subchannel
+	jnz	.Ltpi			# not equal: loop
+	clc	0xbc(4),.Lorb		# cross check the intparm
+	jnz	.Ltpi			# not equal: loop
+	tsch	.Lirb			# get status
+	tm	.Lirb+9,0xff		# channel status ?
+	jz	.L003			# CS == 0x00
+	bras	%r14,.Ldisab		# error
+.L003:
+	tm	.Lirb+8,0xf3		# DS different from CE/DE
+	jz	.L004			# ok ?
+	bct	%r13,.L012		# retries <= 5 ?
+	bras	%r14,.Ldisab		# error
+.L004:
+	tm	.Lirb+8,0x04		# DE set?
+	jz	.Ltpi			# DE not set, loop
+.Lsschend:
+	br	%r15			# return to caller
+# end of function
+# In case of error go to disabled wait with %r14 containing the caller
+.Ldisab:
+	st	%r14,.Ldisabpsw+4
+	lpsw	.Ldisabpsw
+
+# FIXME pre-initialized data should be listed first
+# NULLed storage can be taken from anywhere ;)
+.Lblklst:
+	.long	0x00002000
+	.align	8
+.Ldisabpsw:
+	.long	0x000a0000,0x00000000
+.Lwaitpsw:
+	.long	0x020a0000,0x00000000+.Ltpi
+.Lorb:
+	.long	0x0049504c,0x0080ff00	# intparm is " IPL"
+.Lc6:	.long	0xff000000
+.Lstart:
+	.long	0x00010000		# do not separate .Lstart and .Lparm
+.Lparm:
+	.long	0x00008000		# they are loaded with a LM
+.L3390:
+	.word	0x3390
+.L9345:
+	.word	0x9345
+.L3380:
+	.word	0x3380
+.Lnull:
+	.long	0x00000000,0x00000000
+	.align	4
+.Lrdcdata:
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+.Lirb:
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+	.long	0x00000000,0x00000000
+.Lcountarea:
+	.word	0x0000			# cyl;
+	.word	0x0000			# head;
+	.byte	0x00			# record;
+	.byte	0x00			# key length;
+	.word	0x0000			# data length == blocksize;
+.Ldedata:
+	.long	0x40c00000,0x00000000
+	.long	0x00000000,0x00000000
+.Llodata:
+	.long	0x06000001,0x00000000
+	.long	0x00000000,0x01000000
+	.long	0x12345678
+	.org	0x7c8
+.Lrdcccw:				# CCW read device characteristics
+	.long	0x64400040,0x00000000+.Lrdcdata
+	.long	0x63400010,0x00000000+.Ldedata
+	.long	0x47400010,0x00000000+.Llodata
+	.long	0x12000008,0x00000000+.Lcountarea
+.Ldeccw:
+	.long	0x63400010,0x00000000+.Ldedata
+.Lloccw:
+	.long	0x47400010,0x00000000+.Llodata
+.Lrdccw:
+	.long	0x86400000,0x00000000
+	.org	0x800
+# end of pre initialized data is here CCWarea follows
+# from here we load 1k blocklist
+# end of function
+
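The block-number arithmetic in .Lreadblks above is easier to follow in C. Below is a minimal sketch of the same geometry math, transcribed from the register comments (trk = blk / bpt, bot = blk % bpt, then cy = trk / heads, hd = trk % heads, with the record number counted from 1); the function and struct names are illustrative only, not part of the patch:

    /* Illustration of the CHS arithmetic done by .Lreadblks. */
    #include <stdio.h>

    struct chs { unsigned int cyl, head, rec; };

    /* blk: linear block number; bpt: blocks per track (9, 10 or 12 for
     * 9345, 3380 and 3390 respectively, as selected above); heads:
     * tracks per cylinder taken from the RDC data. */
    static struct chs blk_to_chs(unsigned int blk, unsigned int bpt,
                                 unsigned int heads)
    {
            struct chs r;
            unsigned int trk = blk / bpt;   /* trk = blk / bpt        */
            r.rec  = blk % bpt + 1;         /* bot++, counting from 1 */
            r.cyl  = trk / heads;           /* cy = trk / heads       */
            r.head = trk % heads;           /* hd = trk % heads       */
            return r;
    }

    int main(void)
    {
            /* example 3390-style geometry: 12 blocks/track, 15 heads */
            struct chs r = blk_to_chs(4242, 12, 15);
            printf("cyl=%u head=%u rec=%u\n", r.cyl, r.head, r.rec);
            return 0;
    }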
diff --git a/arch/s390x/boot/iplfba.S b/arch/s390x/boot/iplfba.S
new file mode 100644
index 000000000..732a9848c
--- /dev/null
+++ b/arch/s390x/boot/iplfba.S
@@ -0,0 +1,131 @@
+#
+#  Ipl block for fba devices
+#  Copyright (C) 1998 IBM Corporation
+#  Author(s): Martin Schwidefsky
+#
+# startup for ipl at address 0
+# start with restart
+
+# The first 24 bytes are loaded by ipl to addresses 0-23 (a PSW and two CCWs).
+# The CCWs on 8-23 are used as a continuation of the implicit ipl channel
+# program. The fba ipl loader only uses the CCW on 8-15 to load the first 512
+# byte block to location 0-511 (the reading starts again at block 0, byte 0).
+# The second CCW is used to store the location of the load list.
+	.org	0
+	.long	0x00080000,0x80000000+_start # The first 24 bytes are loaded
+	.long	0x02000000,0x20000200	# by ipl to addresses 0-23.
+	.long	0x00000001,0x00000001	# (PSW, one CCW & loadlist info).
+
+	.globl	_start
+_start:
+	basr	%r13,0
+.LPG0:
+	l	%r1,0xb8		# load ipl subchannel number
+	lhi	%r2,0x200		# location for the loadlist
+	lm	%r3,%r4,0x10		# blocknr and length of loadlist
+	bras	%r14,.Lloader		# load loadlist
+
+	lhi	%r11,0x400
+	lhi	%r12,0x200		# load address of loadlist
+	l	%r3,0(%r12)		# get first block number
+	l	%r4,4(%r12)		# get first block count
+	la	%r12,8(%r12)
+	j	.Llistloop
+	.org	0x50
+.Llistloop:
+	lr	%r2,%r11		# load address
+	lr	%r5,%r4			# block count
+	mhi	%r5,512
+	la	%r11,0(%r5,%r11)	# update load address
+	bras	%r14,.Lloader		# load chunk of the image
+	l	%r3,0(%r12)		# get next block number
+	icm	%r4,15,4(%r12)		# get next block count
+	la	%r12,8(%r12)
+	jnz	.Llistloop
+
+#
+# everything loaded, go for it
+#
+	l	%r1,.Lstart-.LPG0(%r13)
+	br	%r1
+
+#
+# subroutine for loading a sequence of blocks from fba
+#  %r2: load address (24 bit address)
+#  %r3: number of first block (unsigned long)
+#  %r4: number of blocks to load (unsigned short)
+#
+	.org	0xC0
+.Lloader:
+	la	%r5,.Llo-.LPG0(%r13)
+	sth	%r4,2(%r5)		# initialize block count
+	st	%r3,4(%r5)		# initialize block number
+	la	%r5,.Lccws-.LPG0(%r13)
+	mhi	%r4,512
+	sth	%r4,22(%r5)		# initialize byte count
+	icm	%r2,8,16(%r5)
+	st	%r2,16(%r5)		# initialize CCW data address
+
+	slr	%r2,%r2
+	la	%r3,.Lorb-.LPG0(%r13)	# r3 = address of orb
+	la	%r4,.Ltinfo-.LPG0(%r13)	# r4 = address of tpi info block
+	la	%r5,.Lirb-.LPG0(%r13)	# r5 = address of irb
+
+	lctl	%c6,%c6,.Lc6-.LPG0(%r13)
+.Lldlp:
+	ssch	0(%r3)			# read blocks
+.Ltpi:
+	tpi	0(%r4)			# test pending interrupt
+	jz	.Ltpi
+	c	%r1,0(%r4)		# compare subchannel number
+	jne	.Ltpi
+	tsch	0(%r5)
+	slr	%r0,%r0
+	tm	8(%r5),0x82		# do we have a problem ?
+	jnz	.Ldwpsw
+	tm	8(%r5),0x04		# got device end ?
+	jz	.Ltpi
+.Lexit:
+	br	%r14
+
+	.align	8
+.Ldwpsw:.long	0x000a0000,0x00000000
+.Lorb:	.long	0x00000000,0x0000ff00,.Lccws
+.Ltinfo:.long	0
+.Lirb:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lc6:	.long	0xff000000
+.Lloadp:.long	0,0
+.Lparm:	.long	0x10400
+.Lstart:.long	0x10000
+	.align	8
+.Lccws:	.long	0x63000000+.Lde,0x60000010 # define extent
+	.long	0x43000000+.Llo,0x60000008 # locate
+# offset 1 in read CCW: data address (24 bit)
+# offset 6 in read CCW: number of bytes (16 bit)
+	.long	0x42000000,0x20000000	# read
+.Lde:	.long	0x40000200,0x00000000
+	.long	0x00000000,0x00001000
+# offset 2 in .Llo: block count (unsigned short)
+# offset 4 in .Llo: block number (unsigned long)
+.Llo:	.long	0x06000000,0x00000000
+
+	.org	0x200
+	.long	0x00000002,0x0000007f
+	.long	0x00000081,0x0000007f
+	.long	0x00000100,0x0000007f
+	.long	0x0000017f,0x0000007f
+	.long	0x000001fe,0x0000007f
+	.long	0x0000027d,0x0000007f
+	.long	0x000002fc,0x0000007f
+	.long	0x0000037b,0x0000007f
+	.long	0x000003fa,0x0000007f
+	.long	0x00000479,0x0000007f
+	.long	0x000004f8,0x0000007f
+	.long	0x00000577,0x0000007f
+	.long	0x000005f6,0x0000007f
+	.long	0x00000675,0x0000007f
+	.long	0x000006f4,0x0000007f
+	.long	0x00000773,0x0000003f
+	.long	0x00000000,0x00000000
+	.org	0x400
+
diff --git a/arch/s390x/config.in b/arch/s390x/config.in
new file mode 100644
index 000000000..a94f4b7ba
--- /dev/null
+++ b/arch/s390x/config.in
@@ -0,0 +1,73 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+
+define_bool CONFIG_ISA n
+define_bool CONFIG_EISA n
+define_bool CONFIG_MCA n
+
+mainmenu_name "Linux Kernel Configuration"
+define_bool CONFIG_ARCH_S390 y
+define_bool CONFIG_ARCH_S390X y
+
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
+
+mainmenu_option next_comment
+comment 'Processor type and features'
+bool 'Symmetric multi-processing support' CONFIG_SMP
+bool 'Kernel support for 31 bit emulation' CONFIG_S390_SUPPORT
+if [ "$CONFIG_S390_SUPPORT" = "y" ]; then
+  tristate 'Kernel support for 31 bit ELF binaries' CONFIG_BINFMT_ELF32
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+  bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
+  bool 'Kernel module loader' CONFIG_KMOD
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'General setup'
+bool 'Fast IRQ handling' CONFIG_FAST_IRQ
+bool 'Builtin IPL record support' CONFIG_IPL
+if [ "$CONFIG_IPL" = "y" ]; then
+  choice 'IPL method generated into head.S' \
+	"tape      CONFIG_IPL_TAPE \
+	 vm_reader CONFIG_IPL_VM" tape
+fi
+bool 'Networking support' CONFIG_NET
+bool 'System V IPC' CONFIG_SYSVIPC
+bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
+bool 'Sysctl support' CONFIG_SYSCTL
+define CONFIG_KCORE ELF
+tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
+bool 'Show crashed user process info' CONFIG_PROCESS_DEBUG
+endmenu
+
+source drivers/s390/Config.in
+
+if [ "$CONFIG_NET" = "y" ]; then
+  source net/Config.in
+fi
+
+source fs/Config.in
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
+if [ "$CONFIG_CTC" = "y" ]; then
+  bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG
+fi
+# this does not work.
+bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+endmenu
+
diff --git a/arch/s390x/defconfig b/arch/s390x/defconfig
new file mode 100644
index 000000000..391f42823
--- /dev/null
+++ b/arch/s390x/defconfig
@@ -0,0 +1,219 @@
+#
+# Automatically generated by make menuconfig: don't edit
+#
+# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+CONFIG_ARCH_S390=y
+CONFIG_ARCH_S390X=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Processor type and features
+#
+CONFIG_SMP=y
+CONFIG_S390_SUPPORT=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# General setup
+#
+CONFIG_FAST_IRQ=y
+CONFIG_IPL=y
+# CONFIG_IPL_TAPE is not set
+CONFIG_IPL_VM=y
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_PROCESS_DEBUG is not set
+
+#
+# Block device drivers
+#
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=24576
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_XPRAM=m
+CONFIG_DASD=y
+CONFIG_DASD_ECKD=y
+CONFIG_DASD_FBA=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+# CONFIG_MD_LINEAR is not set
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_BLK_DEV_LVM=m
+CONFIG_LVM_PROC_FS=y
+
+#
+# Character device drivers
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+CONFIG_3215=y
+CONFIG_3215_CONSOLE=y
+CONFIG_HWC=y
+CONFIG_HWC_CONSOLE=y
+CONFIG_S390_TAPE=m
+CONFIG_S390_TAPE_CHAR=y
+CONFIG_S390_TAPE_BLOCK=y
+CONFIG_S390_TAPE_3490=y
+CONFIG_S390_TAPE_3480=y
+
+#
+# Network device drivers
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_TR=y
+# CONFIG_FDDI is not set
+# CONFIG_CHANDEV is not set
+CONFIG_CTC=m
+CONFIG_IUCV=m
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK=y
+# CONFIG_RTNETLINK is not set
+# CONFIG_NETLINK_DEV is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_FILTER is not set
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_INET_ECN is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_IPV6 is not set
+# CONFIG_KHTTPD is not set
+# CONFIG_ATM is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_FAT_FS is not set
+# CONFIG_MSDOS_FS is not set
+# CONFIG_UMSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_RAMFS is not set
+# CONFIG_ISO9660_FS is not set
+# CONFIG_JOLIET is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYSV_FS_WRITE is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFSD_V3 is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+# CONFIG_SMB_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_MOUNT_SUBDIR is not set
+# CONFIG_NCPFS_NDS_DOMAINS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_IBM_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
diff --git a/arch/s390x/kernel/Makefile b/arch/s390x/kernel/Makefile
new file mode 100644
index 000000000..21f353f67
--- /dev/null
+++ b/arch/s390x/kernel/Makefile
@@ -0,0 +1,38 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+.S.o:
+	$(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
+
+all: kernel.o head.o init_task.o
+
+O_TARGET := kernel.o
+
+export-objs := s390_ksyms.o
+obj-y := lowcore.o entry.o bitmap.o traps.o time.o process.o irq.o \
+         setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
+         semaphore.o s390fpu.o reipl.o s390_ext.o debug.o
+
+obj-$(CONFIG_MODULES) += s390_ksyms.o
+obj-$(CONFIG_SMP) += smp.o
+
+#
+# Kernel debugging
+#
+obj-$(CONFIG_REMOTE_DEBUG) += gdb-stub.o #gdb-low.o
+
+obj-$(CONFIG_S390_SUPPORT) += linux32.o signal32.o ioctl32.o wrapper32.o exec32.o
+obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
+
+#
+# This is just to get the dependencies...
+#
+binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/s390x/kernel/binfmt_elf32.c b/arch/s390x/kernel/binfmt_elf32.c
new file mode 100644
index 000000000..b08f0f686
--- /dev/null
+++ b/arch/s390x/kernel/binfmt_elf32.c
@@ -0,0 +1,203 @@
+/*
+ * Support for 32-bit Linux for S390 ELF binaries.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+
+#define __ASMS390_ELF_H
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2MSB
+#define ELF_ARCH	EM_S390
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	((x)->e_machine == ELF_ARCH && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+
+/* ELF register definitions */
+#define NUM_GPRS	16
+#define NUM_FPRS	16
+#define NUM_ACRS	16
+
+#define TASK31_SIZE	(0x80000000UL)
+
+/* For SVR4/S390 the function pointer to be registered with `atexit` is
+   passed in R14. */
+#define ELF_PLAT_INIT(_r) \
+	do { \
+		_r->gprs[14] = 0; \
+		current->thread.flags |= S390_FLAG_31BIT; \
+	} while(0)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader. We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE		((TASK31_SIZE & 0x80000000) \
+				 ? TASK31_SIZE / 3 * 2 \
+				 : 2 * TASK31_SIZE / 3)
+
+/* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+   now struct_user_regs, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+	{ \
+	int i; \
+	memcpy(&pr_reg.psw.mask, &regs->psw.mask, 4); \
+	memcpy(&pr_reg.psw.addr, ((char*)&regs->psw.addr)+4, 4); \
+	for(i=0; i<NUM_GPRS; i++) pr_reg.gprs[i] = regs->gprs[i]; \
+	for(i=0; i<NUM_ACRS; i++) pr_reg.acrs[i] = regs->acrs[i]; \
+	pr_reg.orig_gpr2 = regs->orig_gpr2; \
+	}
+
+
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization. This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#endif
+
+#include "linux32.h"
+
+typedef _s390_fp_regs32 elf_fpregset_t;
+
+typedef struct
+{
+
+	_psw_t32	psw;
+	__u32		gprs[__NUM_GPRS];
+	__u32		acrs[__NUM_ACRS];
+	__u32		orig_gpr2;
+} s390_regs32;
+typedef s390_regs32 elf_gregset_t;
+
+#include
+#include
+#include
+#include
+
+int setup_arg_pages32(struct linux_binprm *bprm);
+
+struct timeval32
+{
+	int tv_sec, tv_usec;
+};
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	u32	pr_sigpend;		/* Set of pending signals */
+	u32	pr_sighold;		/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct timeval32 pr_utime;	/* User time */
+	struct timeval32 pr_stime;	/* System time */
+	struct timeval32 pr_cutime;	/* Cumulative user time */
+	struct timeval32 pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;		/* GP registers */
+	int pr_fpvalid;			/* True if math co-processor being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	u32	pr_flag;	/* flags */
+	u16	pr_uid;
+	u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+#include
+
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
+
+#define elf_addr_t	u32
+#define elf_caddr_t	u32
+/*
+#define init_elf_binfmt init_elf32_binfmt
+*/
+#undef CONFIG_BINFMT_ELF
+#ifdef CONFIG_BINFMT_ELF32
+#define CONFIG_BINFMT_ELF CONFIG_BINFMT_ELF32
+#endif
+#undef CONFIG_BINFMT_ELF_MODULE
+#ifdef CONFIG_BINFMT_ELF32_MODULE
+#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
+#endif
+
+#undef start_thread
+#define start_thread			start_thread31
+#define setup_arg_pages(bprm)		setup_arg_pages32(bprm)
+#define elf_map				elf_map32
+
+MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
+		   " Copyright 2000 IBM Corporation");
+MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
+
+#undef MODULE_DESCRIPTION
+#undef MODULE_AUTHOR
+
+#include "../../../fs/binfmt_elf.c"
+
+static unsigned long
+elf_map32 (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+{
+	unsigned long map_addr;
+
+	if(!addr)
+		addr = 0x40000000;
+
+	down(&current->mm->mmap_sem);
+	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
+			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
+			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+	up(&current->mm->mmap_sem);
+	return(map_addr);
+}
+
diff --git a/arch/s390x/kernel/bitmap.S b/arch/s390x/kernel/bitmap.S
new file mode 100644
index 000000000..a212ba450
--- /dev/null
+++ b/arch/s390x/kernel/bitmap.S
@@ -0,0 +1,37 @@
+/*
+ *  arch/s390/kernel/bitmap.S
+ *    Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
+ *    See include/asm-s390/{bitops.h|posix_types.h} for details
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+          .globl _oi_bitmap
+_oi_bitmap:
+          .byte  0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
+
+          .globl _ni_bitmap
+_ni_bitmap:
+          .byte  0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
+
+          .globl _zb_findmap
+_zb_findmap:
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+          .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+
diff --git a/arch/s390x/kernel/cpcmd.c b/arch/s390x/kernel/cpcmd.c
new file mode 100644
index 000000000..7329b9b6c
--- /dev/null
+++ b/arch/s390x/kernel/cpcmd.c
@@ -0,0 +1,49 @@
+/*
+ *  arch/s390/kernel/cpcmd.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include
+#include
+#include
+#include
+
+void cpcmd(char *cmd, char *response, int rlen)
+{
+        const int mask = 0x40000000L;
+        char obuffer[128];
+        int olen;
+
+        olen = strlen(cmd);
+        strcpy(obuffer, cmd);
+        ASCEBC(obuffer,olen);
+
+        if (response != NULL && rlen > 0) {
+                asm volatile ("   lrag 2,0(%0)\n"
+                              "   lgr 4,%1\n"
+                              "   o 4,%4\n"
+                              "   lrag 3,0(%2)\n"
+                              "   lgr 5,%3\n"
+                              "   sam31\n"
+                              "   .long 0x83240008 # Diagnose 83\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (obuffer), "d" (olen),
+                                "a" (response), "d" (rlen), "m" (mask)
+                              : "2", "3", "4", "5" );
+                EBCASC(response, rlen);
+        } else {
+                asm volatile ("   lrag 2,0(%0)\n"
+                              "   lgr 3,%1\n"
+                              "   sam31\n"
+                              "   .long 0x83230008 # Diagnose 83\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (obuffer), "d" (olen)
+                              : "2", "3" );
+        }
+}
+
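For reference, a hedged sketch of how a caller would use cpcmd() as defined above: the routine EBCDIC-converts the command, drives Diagnose 0x83 under VM, and (when rlen > 0) converts the response back to ASCII. The command string and function name here are only examples:

    /* Kernel-side usage sketch; assumes it is linked next to cpcmd.c. */
    #include "cpcmd.h"

    static void example_query_userid(void)
    {
            char response[128];

            /* cpcmd copies cmd into a 128-byte buffer, so commands
             * must be shorter than that. */
            cpcmd("QUERY USERID", response, sizeof(response));
            /* response now holds the ASCII-converted CP reply */
    }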
diff --git a/arch/s390x/kernel/cpcmd.h b/arch/s390x/kernel/cpcmd.h
new file mode 100644
index 000000000..38b88d9ea
--- /dev/null
+++ b/arch/s390x/kernel/cpcmd.h
@@ -0,0 +1,14 @@
+/*
+ *  arch/s390/kernel/cpcmd.h
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __CPCMD__
+#define __CPCMD__
+
+extern void cpcmd(char *cmd, char *response, int rlen);
+
+#endif
diff --git a/arch/s390x/kernel/cpprintk.c b/arch/s390x/kernel/cpprintk.c
new file mode 100644
index 000000000..aa6d3b41e
--- /dev/null
+++ b/arch/s390x/kernel/cpprintk.c
@@ -0,0 +1,25 @@
+#include "cpcmd.h"
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+static char buf[1024];
+
+asmlinkage int s390printk(const char *fmt, ...)
+{
+        va_list args;
+        int i;
+        unsigned long flags;
+        spin_lock_irqsave(&console_lock, flags);
+        va_start(args, fmt);
+        i = vsprintf(&buf[0],"MSG * ",args);
+        i = vsprintf(&buf[i], fmt, args);
+        va_end(args);
+        cpcmd(buf,0,0);
+        spin_unlock_irqrestore(&console_lock, flags);
+        return i;
+}
diff --git a/arch/s390x/kernel/debug.c b/arch/s390x/kernel/debug.c
new file mode 100644
index 000000000..bb3dfe5de
--- /dev/null
+++ b/arch/s390x/kernel/debug.c
@@ -0,0 +1,1167 @@
+/*
+ *  arch/s390/kernel/debug.c
+ *   S/390 debug facility
+ *
+ *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
+ *                             IBM Corporation
+ *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
+ *               Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ *
+ *    Bugreports to:
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef MODULE
+#include
+#endif
+
+#include
+
+#define MIN(a,b) (((a)<(b))?(a):(b))
+
+#if defined(CONFIG_ARCH_S390X)
+#define DEBUG_PROC_HEADER_SIZE 46
+#else
+#define DEBUG_PROC_HEADER_SIZE 38
+#endif
+
+#define ADD_BUFFER 1000
+
+/* typedefs */
+
+typedef struct file_private_info {
+	loff_t len;			/* length of output in byte */
+	int size;			/* size of buffer for output */
+	char *data;			/* buffer for output */
+	debug_info_t *debug_info;	/* the debug information struct */
+	struct debug_view *view;	/* used view of debug info */
+} file_private_info_t;
+
+extern void tod_to_timeval(uint64_t todval, struct timeval *xtime);
+
+/* internal function prototypes */
+
+static int debug_init(void);
+static int debug_format_output(debug_info_t * debug_area, char *buf,
+			       int size, struct debug_view *view);
+static ssize_t debug_output(struct file *file, char *user_buf,
+			    size_t user_len, loff_t * offset);
+static ssize_t debug_input(struct file *file, const char *user_buf,
+			   size_t user_len, loff_t * offset);
+static int debug_open(struct inode *inode, struct file *file);
+static int debug_close(struct inode *inode, struct file *file);
+static struct proc_dir_entry
+*debug_create_proc_dir_entry(struct proc_dir_entry *root,
+			     const char *name, mode_t mode,
+			     struct inode_operations *iops,
+			     struct file_operations *fops);
+static void debug_delete_proc_dir_entry(struct proc_dir_entry *root,
+					struct proc_dir_entry *entry);
+static debug_info_t* debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
+static void debug_info_get(debug_info_t *);
+static void debug_info_put(debug_info_t *);
+static int debug_prolog_level_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf);
+static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
+				struct file *file, const char *user_buf,
+				size_t user_buf_size, loff_t * offset);
+static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
+				     char *out_buf, const char *in_buf);
+static int debug_raw_format_fn(debug_info_t * id,
+			       struct debug_view *view, char *out_buf,
+			       const char *in_buf);
+static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
+			       int area, debug_entry_t * entry, char *out_buf);
+
+/* globals */
+
+struct debug_view debug_raw_view = {
+	"raw",
+	NULL,
+	&debug_raw_header_fn,
+	&debug_raw_format_fn,
+	NULL
+};
+
+struct debug_view debug_hex_ascii_view = {
+	"hex_ascii",
+	NULL,
+	&debug_dflt_header_fn,
+	&debug_hex_ascii_format_fn,
+	NULL
+};
+
+struct debug_view debug_level_view = {
+	"level",
+	&debug_prolog_level_fn,
+	NULL,
+	NULL,
+	&debug_input_level_fn
+};
+
+/* static globals */
+
+static debug_info_t *debug_area_first = NULL;
+static debug_info_t *debug_area_last = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98))
+static struct semaphore debug_lock = MUTEX;
+#else
+DECLARE_MUTEX(debug_lock);
+#endif
+
+static int initialized = 0;
+
+static struct file_operations debug_file_ops = {
+	read:    debug_output,
+	write:   debug_input,
+	open:    debug_open,
+	release: debug_close,
+};
+
+static struct inode_operations debug_inode_ops = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98))
+	default_file_ops: &debug_file_ops,	/* file ops */
+#endif
+};
+
+
+static struct proc_dir_entry *debug_proc_root_entry;
+
+
+/* functions */
+
+/*
+ * debug_info_create
+ * - create new debug-info
+ */
+
+static debug_info_t* debug_info_create(char *name, int page_order,
+				       int nr_areas, int buf_size)
+{
+	debug_info_t* rc;
+	int i;
+
+	/* alloc everything */
+
+	rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_ATOMIC);
+	if(!rc)
+		goto fail_malloc_rc;
+	rc->active_entry = (int*)kmalloc(nr_areas * sizeof(int), GFP_ATOMIC);
+	if(!rc->active_entry)
+		goto fail_malloc_active_entry;
+	memset(rc->active_entry, 0, nr_areas * sizeof(int));
+	rc->areas = (debug_entry_t **) kmalloc(nr_areas *
+					       sizeof(debug_entry_t *),
+					       GFP_ATOMIC);
+	if (!rc->areas)
+		goto fail_malloc_areas;
+	for (i = 0; i < nr_areas; i++) {
+		rc->areas[i] =
+		    (debug_entry_t *) __get_free_pages(GFP_ATOMIC,
+						       page_order);
+		if (!rc->areas[i]) {
+			for (i--; i >= 0; i--) {
+				free_pages((unsigned long) rc->areas[i],
+					   page_order);
+			}
+			goto fail_malloc_areas2;
+		} else {
+			memset(rc->areas[i], 0, PAGE_SIZE << page_order);
+		}
+	}
+
+	/* initialize members */
+
+	spin_lock_init(&rc->lock);
+	rc->page_order = page_order;
+	rc->nr_areas = nr_areas;
+	rc->active_area = 0;
+	rc->level = DEBUG_DEFAULT_LEVEL;
+	rc->buf_size = buf_size;
+	rc->entry_size = sizeof(debug_entry_t) + buf_size;
+	strncpy(rc->name, name, MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1)));
+	rc->name[MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1))] = 0;
+	memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
+	memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
+	       sizeof(struct proc_dir_entry*));
+	atomic_set(&(rc->ref_count), 0);
+	rc->proc_root_entry =
+	    debug_create_proc_dir_entry(debug_proc_root_entry, rc->name,
+					S_IFDIR | S_IRUGO | S_IXUGO |
+					S_IWUSR | S_IWGRP, NULL, NULL);
+
+	/* append new element to linked list */
+
+	if(debug_area_first == NULL){
+		/* first element in list */
+		debug_area_first = rc;
+		rc->prev = NULL;
+	}
+	else{
+		/* append element to end of list */
+		debug_area_last->next = rc;
+		rc->prev = debug_area_last;
+	}
+	debug_area_last = rc;
+	rc->next = NULL;
+
+	debug_info_get(rc);
+	return rc;
+
+fail_malloc_areas2:
+	kfree(rc->areas);
+fail_malloc_areas:
+	kfree(rc->active_entry);
+fail_malloc_active_entry:
+	kfree(rc);
+fail_malloc_rc:
+	return NULL;
+}
+
+/*
+ * debug_info_get
+ * - increments reference count for debug-info
+ */
+
+static void debug_info_get(debug_info_t * db_info)
+{
+	if (db_info)
+		atomic_inc(&db_info->ref_count);
+}
+
+/*
+ * debug_info_put:
+ * - decreases reference count for debug-info and frees it if necessary
+ */
+
+static void debug_info_put(debug_info_t *db_info)
+{
+	int i;
+
+	if (!db_info)
+		return;
+	if (atomic_dec_and_test(&db_info->ref_count)) {
+		printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
+		       db_info, db_info->name);
+		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+			if (db_info->views[i] != NULL)
+				debug_delete_proc_dir_entry
+				    (db_info->proc_root_entry,
+				     db_info->proc_entries[i]);
+		}
debug_delete_proc_dir_entry(debug_proc_root_entry, + db_info->proc_root_entry); + for (i = 0; i < db_info->nr_areas; i++) { + free_pages((unsigned long) db_info->areas[i], + db_info->page_order); + } + kfree(db_info->areas); + kfree(db_info->active_entry); + if(db_info == debug_area_first) + debug_area_first = db_info->next; + if(db_info == debug_area_last) + debug_area_last = db_info->prev; + if(db_info->prev) db_info->prev->next = db_info->next; + if(db_info->next) db_info->next->prev = db_info->prev; + kfree(db_info); + } +} + + +/* + * debug_output: + * - called for user read() + * - copies formated output form private_data of the file + * handle to the user buffer + */ + +static ssize_t debug_output(struct file *file, /* file descriptor */ + char *user_buf, /* user buffer */ + size_t user_len, /* length of buffer */ + loff_t *offset /* offset in the file */ ) +{ + loff_t len; + int rc; + file_private_info_t *p_info; + + p_info = ((file_private_info_t *) file->private_data); + if (*offset >= p_info->len) { + return 0; /* EOF */ + } else { + len = MIN(user_len, (p_info->len - *offset)); + if ((rc = copy_to_user(user_buf, &(p_info->data[*offset]),len))) + return rc;; + (*offset) += len; + return len; /* number of bytes "read" */ + } +} + +/* + * debug_input: + * - called for user write() + * - calls input function of view + */ + +static ssize_t debug_input(struct file *file, + const char *user_buf, size_t length, + loff_t *offset) +{ + int rc = 0; + file_private_info_t *p_info; + + down(&debug_lock); + p_info = ((file_private_info_t *) file->private_data); + if (p_info->view->input_proc) + rc = p_info->view->input_proc(p_info->debug_info, + p_info->view, file, user_buf, + length, offset); + up(&debug_lock); + return rc; /* number of input characters */ +} + +/* + * debug_format_output: + * - calls prolog, header and format functions of view to format output + */ + +static int debug_format_output(debug_info_t * debug_area, char *buf, + int size, struct debug_view *view) +{ + int len = 0; + int i, j; + int nr_of_entries; + debug_entry_t *act_entry; + + /* print prolog */ + if (view->prolog_proc) + len += view->prolog_proc(debug_area, view, buf); + /* print debug records */ + if (!(view->format_proc) && !(view->header_proc)) + goto out; + nr_of_entries = PAGE_SIZE / debug_area->entry_size + << debug_area->page_order; + for (i = 0; i < debug_area->nr_areas; i++) { + act_entry = debug_area->areas[i]; + for (j = 0; j < nr_of_entries; j++) { + if (act_entry->id.fields.used == 0) + break; /* empty entry */ + if (view->header_proc) + len += view->header_proc(debug_area, view, i, + act_entry, buf + len); + if (view->format_proc) + len += view->format_proc(debug_area, view, + buf + len, + DEBUG_DATA(act_entry)); + if (len > size) { + printk(KERN_ERR + "debug: error -- memory exceeded for (%s/%s)\n", + debug_area->name, view->name); + printk(KERN_ERR "debug: fix view %s!!\n", + view->name); + printk(KERN_ERR + "debug: area: %i (0 - %i) entry: %i (0 - %i)\n", + i, debug_area->nr_areas - 1, j, + nr_of_entries - 1); + goto out; + } + act_entry = (debug_entry_t *) (((char *) act_entry) + + debug_area->entry_size); + } + } + out: + return len; +} + + +/* + * debug_open: + * - called for user open() + * - copies formated output to private_data area of the file + * handle + */ + +static int debug_open(struct inode *inode, struct file *file) +{ + int i = 0, size = 0, rc = 0, f_entry_size = 0; + file_private_info_t *p_info; + debug_info_t* debug_info; + +#ifdef DEBUG + printk("debug_open\n"); +#endif + 
+#ifdef MODULE + MOD_INC_USE_COUNT; +#endif + down(&debug_lock); + + /* find debug log and view */ + + debug_info = debug_area_first; + while(debug_info != NULL){ + for (i = 0; i < DEBUG_MAX_VIEWS; i++) { + if (debug_info->views[i] == NULL) + continue; + else if (debug_info->proc_entries[i]->low_ino == + file->f_dentry->d_inode->i_ino) { + goto found; /* found view ! */ + } + } + debug_info = debug_info->next; + } + /* no entry found */ + rc = -EINVAL; + goto out; + found: + if ((file->private_data = + kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) { + printk(KERN_ERR "debug_open: kmalloc failed\n"); + rc = -ENOMEM; + goto out; + } + p_info = (file_private_info_t *) file->private_data; + + /* + * the size for the formated output is calculated + * with the following formula: + * + * prolog-size + * + + * (record header size + record data field size) + * * number of entries per page + * * number of pages per area + * * number of areas + */ + + if (debug_info->views[i]->prolog_proc) + size += + debug_info->views[i]->prolog_proc(debug_info, + debug_info-> + views[i], NULL); + + if (debug_info->views[i]->header_proc) + f_entry_size = + debug_info->views[i]->header_proc(debug_info, + debug_info-> + views[i], 0, NULL, + NULL); + if (debug_info->views[i]->format_proc) + f_entry_size += + debug_info->views[i]->format_proc(debug_info, + debug_info-> + views[i], NULL, + NULL); + + size += f_entry_size + * (PAGE_SIZE / debug_info->entry_size + << debug_info->page_order) + * debug_info->nr_areas + 1; /* terminating \0 */ +#ifdef DEBUG + printk("debug_open: size: %i\n", size); +#endif + + /* alloc some bytes more to be safe against bad views */ + if ((p_info->data = vmalloc(size + ADD_BUFFER)) == 0) { + printk(KERN_ERR "debug_open: vmalloc failed\n"); + vfree(file->private_data); + rc = -ENOMEM; + goto out; + } + + p_info->size = size; + p_info->debug_info = debug_info; + p_info->view = debug_info->views[i]; + + spin_lock_irq(&debug_info->lock); + + p_info->len = + debug_format_output(debug_info, p_info->data, size, + debug_info->views[i]); +#ifdef DEBUG + { + int ilen = p_info->len; + printk("debug_open: len: %i\n", ilen); + } +#endif + + spin_unlock_irq(&debug_info->lock); + debug_info_get(debug_info); + + out: + up(&debug_lock); +#ifdef MODULE + if (rc != 0) + MOD_DEC_USE_COUNT; +#endif + return rc; +} + +/* + * debug_close: + * - called for user close() + * - deletes private_data area of the file handle + */ + +static int debug_close(struct inode *inode, struct file *file) +{ + file_private_info_t *p_info; +#ifdef DEBUG + printk("debug_close\n"); +#endif + down(&debug_lock); + p_info = (file_private_info_t *) file->private_data; + debug_info_put(p_info->debug_info); + if (p_info->data) { + vfree(p_info->data); + kfree(file->private_data); + } + up(&debug_lock); + +#ifdef MODULE + MOD_DEC_USE_COUNT; +#endif + return 0; /* success */ +} + +/* + * debug_create_proc_dir_entry: + * - initializes proc-dir-entry and registers it + */ + +static struct proc_dir_entry *debug_create_proc_dir_entry + (struct proc_dir_entry *root, const char *name, mode_t mode, + struct inode_operations *iops, struct file_operations *fops) +{ + struct proc_dir_entry *rc = NULL; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) + const char *fn = name; + int len; + len = strlen(fn); + + rc = (struct proc_dir_entry *) kmalloc(sizeof(struct proc_dir_entry) + + len + 1, GFP_ATOMIC); + if (!rc) + goto out; + + memset(rc, 0, sizeof(struct proc_dir_entry)); + memcpy(((char *) rc) + sizeof(*rc), fn, len + 1); + rc->name = 
((char *) rc) + sizeof(*rc); + rc->namelen = len; + rc->low_ino = 0, rc->mode = mode; + rc->nlink = 1; + rc->uid = 0; + rc->gid = 0; + rc->size = 0; + rc->get_info = NULL; + rc->ops = iops; + + proc_register(root, rc); +#else + rc = create_proc_entry(name, mode, root); + if (!rc) + goto out; + if (fops) + rc->proc_fops = fops; +#endif + + out: + return rc; +} + + +/* + * delete_proc_dir_entry: + */ + +static void debug_delete_proc_dir_entry + (struct proc_dir_entry *root, struct proc_dir_entry *proc_entry) +{ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) + proc_unregister(root, proc_entry->low_ino); + kfree(proc_entry); +#else + remove_proc_entry(proc_entry->name, root); +#endif +} + +/* + * debug_register: + * - creates and initializes debug area for the caller + * - returns handle for debug area + */ + +debug_info_t *debug_register + (char *name, int page_order, int nr_areas, int buf_size) +{ + debug_info_t *rc = NULL; + +#ifdef MODULE + MOD_INC_USE_COUNT; +#endif + if (!initialized) + debug_init(); + down(&debug_lock); + + /* create new debug_info */ + + rc = debug_info_create(name, page_order, nr_areas, buf_size); + if(!rc) + goto out; + debug_register_view(rc, &debug_level_view); + printk(KERN_INFO + "debug: reserved %d areas of %d pages for debugging %s\n", + nr_areas, 1 << page_order, rc->name); + out: + if (rc == NULL){ + printk(KERN_ERR "debug: debug_register failed for %s\n",name); +#ifdef MODULE + MOD_DEC_USE_COUNT; +#endif + } + up(&debug_lock); + return rc; +} + +/* + * debug_unregister: + * - give back debug area + */ + +void debug_unregister(debug_info_t * id) +{ + if (!id) + goto out; + down(&debug_lock); + printk(KERN_INFO "debug: unregistering %s\n", id->name); + debug_info_put(id); + up(&debug_lock); + +#ifdef MODULE + MOD_DEC_USE_COUNT; +#endif + out: + return; +} + +/* + * debug_set_level: + * - set actual debug level + */ + +void debug_set_level(debug_info_t* id, int new_level) +{ + long flags; + if(!id) + return; + spin_lock_irqsave(&id->lock,flags); + if(new_level == DEBUG_OFF_LEVEL){ + id->level = DEBUG_OFF_LEVEL; + printk(KERN_INFO "debug: %s: switched off\n",id->name); + } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { + printk(KERN_INFO + "debug: %s: level %i is out of range (%i - %i)\n", + id->name, new_level, 0, DEBUG_MAX_LEVEL); + } else { + id->level = new_level; + printk(KERN_INFO + "debug: %s: new level %i\n",id->name,id->level); + } + spin_unlock_irqrestore(&id->lock,flags); +} + + +/* + * proceed_active_entry: + * - set active entry to next in the ring buffer + */ + +static inline void proceed_active_entry(debug_info_t * id) +{ + if ((id->active_entry[id->active_area] += id->entry_size) + > ((PAGE_SIZE << (id->page_order)) - id->entry_size)) + id->active_entry[id->active_area] = 0; +} + +/* + * proceed_active_area: + * - set active area to next in the ring buffer + */ + +static inline void proceed_active_area(debug_info_t * id) +{ + id->active_area++; + id->active_area = id->active_area % id->nr_areas; +} + +/* + * get_active_entry: + */ + +static inline debug_entry_t *get_active_entry(debug_info_t * id) +{ + return (debug_entry_t *) ((char *) id->areas[id->active_area] + + id->active_entry[id->active_area]); +} + +/* + * debug_common: + * - set timestamp, caller address, cpu number etc. 
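+ * - the timestamp comes from STCK (store TOD clock), so entries can
+ *   be ordered consistently across CPUs
+ *
+ * A minimal sketch of how the event interface built on debug_common
+ * is used by a component (illustrative only; "xyz" is a made-up name):
+ *
+ *   debug_info_t *id = debug_register("xyz", 2, 4, 16);
+ *   debug_set_level(id, 3);
+ *   debug_text_event(id, 1, "connection established");
+ *   ...
+ *   debug_unregister(id);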
+ */ + +static inline debug_entry_t *debug_common(debug_info_t * id) +{ + debug_entry_t *active; + + active = get_active_entry(id); + STCK(active->id.stck); + active->id.fields.cpuid = smp_processor_id(); + active->id.fields.used = 1; + active->caller = __builtin_return_address(0); + return active; +} + +/* + * debug_event: + */ + +debug_entry_t *debug_event(debug_info_t * id, int level, void *buf, + int len) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + active->id.fields.exception = 0; + memset(DEBUG_DATA(active), 0, id->buf_size); + memcpy(DEBUG_DATA(active), buf, MIN(len, id->buf_size)); + proceed_active_entry(id); + spin_unlock_irqrestore(&id->lock, flags); + out: + return active; +} + +/* + * debug_int_event: + */ + +debug_entry_t *debug_int_event(debug_info_t * id, int level, + unsigned int tag) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + active->id.fields.exception = 0; + memset(DEBUG_DATA(active), 0, id->buf_size); + memcpy(DEBUG_DATA(active), &tag, MIN(sizeof(unsigned int), id->buf_size)); + proceed_active_entry(id); + spin_unlock_irqrestore(&id->lock, flags); + out: + return active; +} + +/* + * debug_text_event: + */ + +debug_entry_t *debug_text_event(debug_info_t * id, int level, + const char *txt) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + memset(DEBUG_DATA(active), 0, id->buf_size); + strncpy(DEBUG_DATA(active), txt, MIN(strlen(txt), id->buf_size)); + active->id.fields.exception = 0; + proceed_active_entry(id); + spin_unlock_irqrestore(&id->lock, flags); + out: + return active; + +} + +/* + * debug_exception: + */ + +debug_entry_t *debug_exception(debug_info_t * id, int level, void *buf, + int len) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + active->id.fields.exception = 1; + memset(DEBUG_DATA(active), 0, id->buf_size); + memcpy(DEBUG_DATA(active), buf, MIN(len, id->buf_size)); + proceed_active_entry(id); + proceed_active_area(id); + spin_unlock_irqrestore(&id->lock, flags); + out: + return active; +} + +/* + * debug_int_exception: + */ + +debug_entry_t *debug_int_exception(debug_info_t * id, int level, + unsigned int tag) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + active->id.fields.exception = 1; + memset(DEBUG_DATA(active), 0, id->buf_size); + memcpy(DEBUG_DATA(active), &tag, + MIN(sizeof(unsigned int), id->buf_size)); + proceed_active_entry(id); + proceed_active_area(id); + spin_unlock_irqrestore(&id->lock, flags); + out: + return active; +} + +/* + * debug_text_exception: + */ + +debug_entry_t *debug_text_exception(debug_info_t * id, int level, + const char *txt) +{ + long flags; + debug_entry_t *active = NULL; + + if ((!id) || (level > id->level)) + goto out; + spin_lock_irqsave(&id->lock, flags); + active = debug_common(id); + memset(DEBUG_DATA(active), 0, id->buf_size); + strncpy(DEBUG_DATA(active), txt, MIN(strlen(txt), id->buf_size)); + active->id.fields.exception = 1; + proceed_active_entry(id); + proceed_active_area(id); + 
spin_unlock_irqrestore(&id->lock, flags); + out: + return active; + +} + +/* + * debug_init: + * - is called exactly once to initialize the debug feature + */ + +int debug_init(void) +{ + int rc = 0; + + down(&debug_lock); + if (!initialized) { + debug_proc_root_entry = + debug_create_proc_dir_entry(&proc_root, DEBUG_DIR_ROOT, + S_IFDIR | S_IRUGO | S_IXUGO + | S_IWUSR | S_IWGRP, NULL, + NULL); + printk(KERN_INFO "debug: Initialization complete\n"); + initialized = 1; + } + up(&debug_lock); + + return rc; +} + +/* + * debug_register_view: + */ + +int debug_register_view(debug_info_t * id, struct debug_view *view) +{ + int rc = 0; + int i; + long flags; + mode_t mode = S_IFREG; + + if (!id) + goto out; + spin_lock_irqsave(&id->lock, flags); + for (i = 0; i < DEBUG_MAX_VIEWS; i++) { + if (id->views[i] == NULL) + break; + } + if (i == DEBUG_MAX_VIEWS) { + printk(KERN_WARNING "debug: cannot register view %s/%s\n", + id->name,view->name); + printk(KERN_WARNING + "debug: maximum number of views reached (%i)!\n", i); + rc = -1; + } + else { + id->views[i] = view; + if (view->prolog_proc || view->format_proc || view->header_proc) + mode |= S_IRUSR; + if (view->input_proc) + mode |= S_IWUSR; + id->proc_entries[i] = + debug_create_proc_dir_entry(id->proc_root_entry, + view->name, mode, + &debug_inode_ops, + &debug_file_ops); + rc = 0; + } + spin_unlock_irqrestore(&id->lock, flags); + out: + return rc; +} + +/* + * debug_unregister_view: + */ + +int debug_unregister_view(debug_info_t * id, struct debug_view *view) +{ + int rc = 0; + int i; + long flags; + + if (!id) + goto out; + spin_lock_irqsave(&id->lock, flags); + for (i = 0; i < DEBUG_MAX_VIEWS; i++) { + if (id->views[i] == view) + break; + } + if (i == DEBUG_MAX_VIEWS) + rc = -1; + else { + debug_delete_proc_dir_entry(id->proc_root_entry, + id->proc_entries[i]); + id->views[i] = NULL; + rc = 0; + } + spin_unlock_irqrestore(&id->lock, flags); + out: + return rc; +} + +/* + * functions for debug-views + *********************************** +*/ + +/* + * prints out actual debug level + */ + +static int debug_prolog_level_fn(debug_info_t * id, + struct debug_view *view, char *out_buf) +{ + int rc = 0; + + if (out_buf == NULL) { + rc = 2; + goto out; + } + if(id->level == -1) rc = sprintf(out_buf,"-\n"); + else rc = sprintf(out_buf, "%i\n", id->level); + out: + return rc; +} + +/* + * reads new debug level + */ + +static int debug_input_level_fn(debug_info_t * id, struct debug_view *view, + struct file *file, const char *user_buf, + size_t in_buf_size, loff_t * offset) +{ + char input_buf[1]; + int rc = in_buf_size; + + if (*offset != 0) + goto out; + if ((rc = copy_from_user(input_buf, user_buf, 1))) + goto out; + if (isdigit(input_buf[0])) { + int new_level = ((int) input_buf[0] - (int) '0'); + debug_set_level(id, new_level); + } else if(input_buf[0] == '-') { + debug_set_level(id, DEBUG_OFF_LEVEL); + } else { + printk(KERN_INFO "debug: level `%c` is not valid\n", + input_buf[0]); + } + out: + *offset += in_buf_size; + return rc; /* number of input characters */ +} + +/* + * prints debug header in raw format + */ + +int debug_raw_header_fn(debug_info_t * id, struct debug_view *view, + int area, debug_entry_t * entry, char *out_buf) +{ + int rc; + + rc = sizeof(debug_entry_t); + if (out_buf == NULL) + goto out; + memcpy(out_buf,entry,sizeof(debug_entry_t)); + out: + return rc; +} + +/* + * prints debug data in raw format + */ + +static int debug_raw_format_fn(debug_info_t * id, struct debug_view *view, + char *out_buf, const char *in_buf) +{ + int 
rc; + + rc = id->buf_size; + if (out_buf == NULL || in_buf == NULL) + goto out; + memcpy(out_buf, in_buf, id->buf_size); + out: + return rc; +} + +/* + * prints debug data in hex/ascii format + */ + +static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view, + char *out_buf, const char *in_buf) +{ + int i, rc = 0; + + if (out_buf == NULL || in_buf == NULL) { + rc = id->buf_size * 4 + 3; + goto out; + } + for (i = 0; i < id->buf_size; i++) { + rc += sprintf(out_buf + rc, "%02x ", + ((unsigned char *) in_buf)[i]); + } + rc += sprintf(out_buf + rc, "| "); + for (i = 0; i < id->buf_size; i++) { + unsigned char c = in_buf[i]; + if (!isprint(c)) + rc += sprintf(out_buf + rc, "."); + else + rc += sprintf(out_buf + rc, "%c", c); + } + rc += sprintf(out_buf + rc, "\n"); + out: + return rc; +} + +/* + * prints header for debug entry + */ + +int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, + int area, debug_entry_t * entry, char *out_buf) +{ + struct timeval time_val; + unsigned long long time; + char *except_str; + unsigned long caller; + int rc = 0; + + if (out_buf == NULL) { + rc = DEBUG_PROC_HEADER_SIZE; + goto out; + } + + time = entry->id.stck; + /* adjust todclock to 1970 */ + time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); + tod_to_timeval(time, &time_val); + + if (entry->id.fields.exception) + except_str = "*"; + else + except_str = "-"; + caller = (unsigned long) entry->caller; +#if defined(CONFIG_ARCH_S390X) + rc += sprintf(out_buf, "%02i %011lu:%06lu %1s %02i %016lx ", + area, time_val.tv_sec, + time_val.tv_usec, except_str, + entry->id.fields.cpuid, caller); +#else + caller &= 0x7fffffff; + rc += sprintf(out_buf, "%02i %011lu:%06lu %1s %02i %08lx ", + area, time_val.tv_sec, + time_val.tv_usec, except_str, + entry->id.fields.cpuid, caller); +#endif + out: + return rc; +} + +/* + * init_module: + */ + +#ifdef MODULE +int init_module(void) +{ + int rc = 0; +#ifdef DEBUG + printk("debug_module_init: \n"); +#endif + rc = debug_init(); + if (rc) + printk(KERN_INFO "debug: an error occurred with debug_init\n"); + return rc; +} + +/* + * cleanup_module: + */ + +void cleanup_module(void) +{ +#ifdef DEBUG + printk("debug_cleanup_module: \n"); +#endif + debug_delete_proc_dir_entry(&proc_root, debug_proc_root_entry); + return; +} + +#endif /* MODULE */ diff --git a/arch/s390x/kernel/ebcdic.c b/arch/s390x/kernel/ebcdic.c new file mode 100644 index 000000000..fc7740649 --- /dev/null +++ b/arch/s390x/kernel/ebcdic.c @@ -0,0 +1,391 @@ +/* + * arch/s390/kernel/ebcdic.c + * ECBDIC -> ASCII, ASCII -> ECBDIC, + * upper to lower case (EBCDIC) conversion tables. + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky + * Martin Peschke + */ + +#include + +/* + * ASCII (IBM PC 437) -> EBCDIC 037 + */ +__u8 _ascebc[256] = +{ + /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ + 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, + /*08 BS HT LF VT FF CR SO SI */ + /* ->NL */ + 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ + 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, + /*18 CAN EM SUB ESC FS GS RS US */ + /* ->IGS ->IRS ->IUS */ + 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, + /*20 SP ! " # $ % & ' */ + 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, + /*28 ( ) * + , - . / */ + 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, + /*30 0 1 2 3 4 5 6 7 */ + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + /*38 8 9 : ; < = > ? 
*/ + 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, + /*40 @ A B C D E F G */ + 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + /*48 H I J K L M N O */ + 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + /*50 P Q R S T U V W */ + 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, + /*58 X Y Z [ \ ] ^ _ */ + 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, + /*60 ` a b c d e f g */ + 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + /*68 h i j k l m n o */ + 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + /*70 p q r s t u v w */ + 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, + /*78 x y z { | } ~ DL */ + 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, + /*80*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*88*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*90*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*98*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E0 sz */ + 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F8*/ + 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF +}; + +/* + * EBCDIC 037 -> ASCII (IBM PC 437) + */ +__u8 _ebcasc[256] = +{ + /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ + 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, + /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ + 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC + -ENP ->LF */ + 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, + /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB + -IUS */ + 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC + -INP */ + 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, + /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL + -SW */ + 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, + /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ + 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, + /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ + 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, + /* 0x40 SP RSP ä ---- */ + 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, + /* 0x48 . < ( + | */ + 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C, + /* 0x50 & ---- */ + 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, + /* 0x58 ß ! $ * ) ; */ + 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA, + /* 0x60 - / ---- Ä ---- ---- ---- */ + 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, + /* 0x68 ---- , % _ > ? 
*/ + 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, + /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ + 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + /* 0x78 * ` : # @ ' = " */ + 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, + /* 0x80 * a b c d e f g */ + 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + /* 0x88 h i ---- ---- ---- */ + 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, + /* 0x90 ° j k l m n o p */ + 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, + /* 0x98 q r ---- ---- */ + 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, + /* 0xA0 ~ s t u v w x */ + 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + /* 0xA8 y z ---- ---- ---- ---- */ + 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, + /* 0xB0 ^ ---- § ---- */ + 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, + /* 0xB8 ---- [ ] ---- ---- ---- ---- */ + 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07, + /* 0xC0 { A B C D E F G */ + 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + /* 0xC8 H I ---- ö ---- */ + 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, + /* 0xD0 } J K L M N O P */ + 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, + /* 0xD8 Q R ---- ü */ + 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, + /* 0xE0 \ S T U V W X */ + 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + /* 0xE8 Y Z ---- Ö ---- ---- ---- */ + 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, + /* 0xF0 0 1 2 3 4 5 6 7 */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ + 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 +}; + + +/* + * ASCII (IBM PC 437) -> EBCDIC 500 + */ +__u8 _ascebc_500[256] = +{ + /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ + 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, + /*08 BS HT LF VT FF CR SO SI */ + /* ->NL */ + 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ + 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, + /*18 CAN EM SUB ESC FS GS RS US */ + /* ->IGS ->IRS ->IUS */ + 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, + /*20 SP ! " # $ % & ' */ + 0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, + /*28 ( ) * + , - . / */ + 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, + /*30 0 1 2 3 4 5 6 7 */ + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + /*38 8 9 : ; < = > ? 
*/ + 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, + /*40 @ A B C D E F G */ + 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + /*48 H I J K L M N O */ + 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + /*50 P Q R S T U V W */ + 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, + /*58 X Y Z [ \ ] ^ _ */ + 0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D, + /*60 ` a b c d e f g */ + 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + /*68 h i j k l m n o */ + 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + /*70 p q r s t u v w */ + 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, + /*78 x y z { | } ~ DL */ + 0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07, + /*80*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*88*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*90*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*98*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E0 sz */ + 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F8*/ + 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF +}; + +/* + * EBCDIC 500 -> ASCII (IBM PC 437) + */ +__u8 _ebcasc_500[256] = +{ + /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ + 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, + /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ + 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC + -ENP ->LF */ + 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, + /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB + -IUS */ + 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC + -INP */ + 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, + /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL + -SW */ + 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, + /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ + 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, + /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ + 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, + /* 0x40 SP RSP ä ---- */ + 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, + /* 0x48 [ . < ( + ! */ + 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21, + /* 0x50 & ---- */ + 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, + /* 0x58 ß ] $ * ) ; ^ */ + 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E, + /* 0x60 - / ---- Ä ---- ---- ---- */ + 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, + /* 0x68 ---- , % _ > ? 
*/ + 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, + /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ + 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + /* 0x78 * ` : # @ ' = " */ + 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, + /* 0x80 * a b c d e f g */ + 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + /* 0x88 h i ---- ---- ---- */ + 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, + /* 0x90 ° j k l m n o p */ + 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, + /* 0x98 q r ---- ---- */ + 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, + /* 0xA0 ~ s t u v w x */ + 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + /* 0xA8 y z ---- ---- ---- ---- */ + 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, + /* 0xB0 ---- § ---- */ + 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, + /* 0xB8 ---- | ---- ---- ---- ---- */ + 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07, + /* 0xC0 { A B C D E F G */ + 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + /* 0xC8 H I ---- ö ---- */ + 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, + /* 0xD0 } J K L M N O P */ + 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, + /* 0xD8 Q R ---- ü */ + 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, + /* 0xE0 \ S T U V W X */ + 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + /* 0xE8 Y Z ---- Ö ---- ---- ---- */ + 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, + /* 0xF0 0 1 2 3 4 5 6 7 */ + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ + 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 +}; + + +/* + * EBCDIC 037/500 conversion table: + * from upper to lower case + */ +__u8 _ebc_tolower[256] = +{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, + 0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, + 0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F, + 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF, + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, + 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, + 0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, + 0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF, + 0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + 0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF +}; + + +/* + * EBCDIC 037/500 conversion table: + * from lower to upper case + */ +__u8 _ebc_toupper[256] = +{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + 
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, + 0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, + 0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, + 0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F, + 0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, + 0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F, + 0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, + 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, + 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, + 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, + 0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, + 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF +}; diff --git a/arch/s390x/kernel/entry.S b/arch/s390x/kernel/entry.S new file mode 100644 index 000000000..c5fbdd61f --- /dev/null +++ b/arch/s390x/kernel/entry.S @@ -0,0 +1,868 @@ +/* + * arch/s390/kernel/entry.S + * S390 low-level entry points. + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Hartmut Penner (hp@de.ibm.com), + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + */ + +#include +#include +#include +#include +#include +#define ASSEMBLY +#include +#include + + +/* + * stack layout for the system_call stack entry + * Martin please don't modify these back to hard coded values + * You know how bad I'm at mental arithmetic DJB & it gives + * me grief when I modify the pt_regs + */ +SP_PTREGS = STACK_FRAME_OVERHEAD +SP_PSW = SP_PTREGS +SP_R0 = (SP_PSW+PSW_MASK_SIZE+PSW_ADDR_SIZE) +SP_R1 = (SP_R0+GPR_SIZE) +SP_R2 = (SP_R1+GPR_SIZE) +SP_R3 = (SP_R2+GPR_SIZE) +SP_R4 = (SP_R3+GPR_SIZE) +SP_R5 = (SP_R4+GPR_SIZE) +SP_R6 = (SP_R5+GPR_SIZE) +SP_R7 = (SP_R6+GPR_SIZE) +SP_R8 = (SP_R7+GPR_SIZE) +SP_R9 = (SP_R8+GPR_SIZE) +SP_RA = (SP_R9+GPR_SIZE) +SP_RB = (SP_RA+GPR_SIZE) +SP_RC = (SP_RB+GPR_SIZE) +SP_RD = (SP_RC+GPR_SIZE) +SP_RE = (SP_RD+GPR_SIZE) +SP_RF = (SP_RE+GPR_SIZE) +SP_AREGS = (SP_RF+GPR_SIZE) +SP_ORIG_R2 = (SP_AREGS+(NUM_ACRS*ACR_SIZE)) +SP_TRAP = (SP_ORIG_R2+GPR_SIZE) +#if CONFIG_REMOTE_DEBUG +SP_CRREGS = (SP_TRAP+4) +/* fpu registers are saved & restored by the gdb stub itself */ +SP_FPC = (SP_CRREGS+(NUM_CRS*CR_SIZE)) +SP_FPRS = (SP_FPC+FPC_SIZE+FPC_PAD_SIZE) +/* SP_PGM_OLD_ILC etc are not part of pt_regs & they are not + defined in ptrace.h but space is needed for this too */ +SP_PGM_OLD_ILC= (SP_FPRS+(NUM_FPRS*FPR_SIZE)) +#else +SP_PGM_OLD_ILC= (SP_TRAP+4) +#endif +SP_SVC_STEP = (SP_PGM_OLD_ILC+4) +SP_SIZE = (SP_SVC_STEP+4) +/* + * these defines are offsets into the thread_struct + */ +_TSS_PTREGS = 0 +_TSS_FPRS = (_TSS_PTREGS+8) +_TSS_AR2 = (_TSS_FPRS+136) +_TSS_AR4 = (_TSS_AR2+4) +_TSS_KSP = (_TSS_AR4+4) +_TSS_USERSEG = (_TSS_KSP+8) +_TSS_PROT = (_TSS_USERSEG+8) +_TSS_ERROR = 
(_TSS_PROT+8) +_TSS_TRAP = (_TSS_ERROR+4) +_TSS_PER = (_TSS_TRAP+4) + +/* + * these are offsets into the task-struct. + */ +state = 0 +flags = 8 +sigpending = 16 +need_resched = 32 +tsk_ptrace = 40 +processor = 100 + +/* PSW related defines */ +disable = 0xFC +enable = 0x03 +daton = 0x04 + + +#if 0 +/* some code left lying around in case we need a + * printk for debugging purposes + */ + sysc_printk: .long printk + sysc_msg: .string "<2>r15 %X\n" + .align 4 + +# basr %r13,0 + lg %r0,SP_PSW+8(%r15) + sllg %r0,%r0,1 + chi %r0,0 + jnz sysc_dn + la %r2,sysc_msg-sysc_lit(%r13) + lgr %r3,%r15 + brasl %r14,sysc_printk +sysc_dn: +#endif + +/* + * Register usage in interrupt handlers: + * R9 - pointer to current task structure + * R13 - pointer to literal pool + * R14 - return register for function calls + * R15 - kernel stack pointer + */ + +#define SAVE_ALL(psworg) \ + stmg %r14,%r15,__LC_SAVE_AREA ; \ + stam %a2,%a4,__LC_SAVE_AREA+16 ; \ + tm psworg+1,0x01 ; /* test problem state bit */ \ + jz 0f ; /* skip stack setup save */ \ + lg %r15,__LC_KERNEL_STACK ; /* problem state -> load ksp */ \ + slr %r14,%r14 ; \ + sar %a2,%r14 ; /* set ac.reg. 2 to primary space */ \ + lhi %r14,1 ; \ + sar %a4,%r14 ; /* set access reg. 4 to home space */ \ +0: aghi %r15,-SP_SIZE ; /* make room for registers & psw */ \ + nill %r15,0xfff8 ; /* align stack pointer to 8 */ \ + stmg %r0,%r14,SP_R0(%r15) ; /* store gprs 0-14 to kernel stack */ \ + stg %r2,SP_ORIG_R2(%r15) ; /* store original content of gpr 2 */ \ + mvc SP_RE(16,%r15),__LC_SAVE_AREA ; /* move R15 to stack */ \ + stam %a0,%a15,SP_AREGS(%r15) ; /* store access registers to kst. */\ + mvc SP_AREGS+8(12,%r15),__LC_SAVE_AREA+16 ; /* store ac. regs */ \ + mvc SP_PSW(16,%r15),psworg; /* move user PSW to stack */ \ + lhi %r0,psworg ; /* store trap indication */ \ + st %r0,SP_TRAP(%r15) ; \ + xc 0(8,%r15),0(%r15) ; /* clear back chain */ + +#define RESTORE_ALL \ + mvc __LC_RETURN_PSW(16),SP_PSW(%r15) ; /* move user PSW to lowcore */ \ + lam %a0,%a15,SP_AREGS(%r15) ; /* load the access registers */ \ + lmg %r0,%r15,SP_R0(%r15) ; /* load gprs 0-15 of user */ \ + ni __LC_RETURN_PSW+1,0xfd ; /* clear wait state bit */ \ + lpswe __LC_RETURN_PSW /* back to caller */ + +#define GET_CURRENT /* load pointer to task_struct to R9 */ \ + lghi %r9,-16384 ; \ + ngr %r9,15 + + +/* + * Scheduler resume function, called by switch_to + * grp2 = (thread_struct *) prev->tss + * grp3 = (thread_struct *) next->tss + * Returns: + * gpr2 = prev + */ + .globl resume +resume: + lg %r4,_TSS_PTREGS(%r3) + tm SP_PSW-SP_PTREGS(%r4),0x40 # is the new process using per ? + jz resume_noper # if not we're fine + stctg %r9,%r11,48(%r15) # We are using per stuff + clc _TSS_PER(24,%r3),48(%r15) + je resume_noper # we got away without bashing TLB's + lctlg %c9,%c11,_TSS_PER(%r3) # Nope we didn't +resume_noper: + stmg %r6,%r15,48(%r15) # store resume registers of prev task + stg %r15,_TSS_KSP(%r2) # store kernel stack ptr to prev->tss.ksp + lghi %r0,-16384 + ngr %r0,%r15 + lg %r15,_TSS_KSP(%r3) # load kernel stack ptr from next->tss.ksp + lghi %r1,16383 + ogr %r1,%r15 + aghi %r1,1 + stg %r1,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack + stam %a2,%a2,_TSS_AR2(%r2) # store kernel access reg. 2 + stam %a4,%a4,_TSS_AR4(%r2) # store kernel access reg. 4 + lam %a2,%a2,_TSS_AR2(%r3) # load kernel access reg. 2 + lam %a4,%a4,_TSS_AR4(%r3) # load kernel access reg. 
4 + lgr %r2,%r0 # return task_struct of last task + lmg %r6,%r15,48(%r15) # load resume registers of next task + br %r14 + +/* + * SVC interrupt handler routine. System calls are synchronous events and + * are executed with interrupts enabled. + */ + + .globl system_call +system_call: + SAVE_ALL(__LC_SVC_OLD_PSW) + xc SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15) +pgm_system_call: + larl %r1,sys_call_table + llgc %r8,__LC_SVC_INT_CODE+1 # get svc number from lowcore + stosm 48(%r15),0x03 # reenable interrupts + sll %r8,3 + tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ? + jo sysc_noemu + la %r8,4(%r8) # use 31 bit emulation system calls +sysc_noemu: + GET_CURRENT # load pointer to task_struct to R9 + lgf %r8,0(%r8,%r1) # load address of system call routine + tm tsk_ptrace+7(%r9),0x02 # PT_TRACESYS + jnz sysc_tracesys + basr %r14,%r8 # call sys_xxxx + stg %r2,SP_R2(%r15) # store return value (change R2 on stack) + # ATTENTION: check sys_execve_glue before + # changing anything here !! + +sysc_return: + GET_CURRENT # load pointer to task_struct to R9 + tm SP_PSW+1(%r15),0x01 # returning to user ? + jno sysc_leave # no-> skip bottom half, resched & signal +# +# check, if bottom-half has to be done +# + l %r0,__LC_IRQ_STAT # get softirq_active + n %r0,__LC_IRQ_STAT+4 # and it with softirq_mask + jnz sysc_handle_bottom_half +# +# check, if reschedule is needed +# +sysc_return_bh: + lg %r0,need_resched(%r9) # get need_resched from task_struct + ltgr %r0,%r0 + jnz sysc_reschedule + icm %r0,15,sigpending(%r9) # get sigpending from task_struct + jnz sysc_signal_return +sysc_leave: + icm %r0,15,SP_SVC_STEP(%r15) # get sigpending from task_struct + jnz pgm_svcret + stnsm 48(%r15),disable # disable I/O and ext. interrupts + RESTORE_ALL + +# +# call do_signal before return +# +sysc_signal_return: + la %r2,SP_PTREGS(%r15) # load pt_regs + sgr %r3,%r3 # clear *oldset + larl %r14,sysc_leave + jg do_signal # return point is sysc_leave + +# +# call trace before and after sys_call +# +sysc_tracesys: + lghi %r2,-ENOSYS + stg %r2,SP_R2(%r15) # give sysc_trace an -ENOSYS retval + brasl %r14,syscall_trace + lmg %r3,%r6,SP_R3(%r15) + lg %r2,SP_ORIG_R2(%r15) + basr %r14,%r8 # call sys_xxx + stg %r2,SP_R2(%r15) # store return value + larl %r14,sysc_return + jg syscall_trace # return point is sysc_return + + +# +# call do_softirq and return from syscall, if interrupt-level +# is zero +# +sysc_handle_bottom_half: + larl %r14,sysc_return_bh + jg do_softirq # return point is sysc_return_bh + +# +# call schedule with sysc_return as return-address +# +sysc_reschedule: + larl %r14,sysc_return + jg schedule # return point is sysc_return + +# +# a new process exits the kernel with ret_from_fork +# + .globl ret_from_fork +ret_from_fork: + GET_CURRENT # load pointer to task_struct to R9 + stosm 48(%r15),0x03 # reenable interrupts + xc SP_R2(8,%r15),SP_R2(%r15) # child returns 0 +#ifdef CONFIG_SMP + larl %r14,sysc_return + jg schedule_tail # return to sysc_return +#else + j sysc_return +#endif + +# +# clone, fork, vfork, exec and sigreturn need glue, +# because they all expect pt_regs as parameter, +# but are called with different parameter. 
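+# Each glue routine therefore reloads %r2 (the first parameter
+# register) with the address of the pt_regs area on the stack
+# before branching on to the real system call.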
+# return-address is set up above +# +sys_clone_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs + jg sys_clone # branch to sys_clone + +sys_fork_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs + jg sys_fork # branch to sys_fork + +sys_vfork_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs + jg sys_vfork # branch to sys_vfork + +sys_execve_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs + lgr %r12,%r14 # save return address + brasl %r14,sys_execve # call sys_execve + ltgr %r2,%r2 # check if execve failed + bnz 0(%r12) # it did fail -> store result in gpr2 + b 6(%r12) # SKIP STG 2,SP_R2(15) in + # system_call/sysc_tracesys +#ifdef CONFIG_S390_SUPPORT +sys32_execve_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs + lgr %r12,%r14 # save return address + brasl %r14,sys32_execve # call sys32_execve + ltgr %r2,%r2 # check if execve failed + bnz 0(%r12) # it did fail -> store result in gpr2 + b 6(%r12) # SKIP STG 2,SP_R2(15) in + # system_call/sysc_tracesys +#endif + +sys_sigreturn_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys_sigreturn # branch to sys_sigreturn + +#ifdef CONFIG_S390_SUPPORT +sys32_sigreturn_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys32_sigreturn # branch to sys32_sigreturn +#endif + +sys_rt_sigreturn_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys_rt_sigreturn # branch to sys_sigreturn + +#ifdef CONFIG_S390_SUPPORT +sys32_rt_sigreturn_glue: + la %r2,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys32_rt_sigreturn # branch to sys32_sigreturn +#endif + +# +# sigsuspend and rt_sigsuspend need pt_regs as an additional +# parameter and they have to skip the store of %r2 into the +# user register %r2 because the return value was set in +# sigsuspend and rt_sigsuspend already and must not be overwritten! 
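+# The skip works because the "stg %r2,SP_R2(%r15)" following the
+# "basr %r14,%r8" in the system call path is a 6-byte instruction:
+# "la %r14,6(%r14)" advances the return address just past it
+# (sys_execve_glue does the same with "b 6(%r12)").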
+# + +sys_sigsuspend_glue: + lgr %r5,%r4 # move mask back + lgr %r4,%r3 # move history1 parameter + lgr %r3,%r2 # move history0 parameter + la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter + la %r14,6(%r14) # skip store of return value + jg sys_sigsuspend # branch to sys_sigsuspend + +#ifdef CONFIG_S390_SUPPORT +sys32_sigsuspend_glue: + llgfr %r4,%r4 # unsigned long + lgr %r5,%r4 # move mask back + lgfr %r3,%r3 # int + lgr %r4,%r3 # move history1 parameter + lgfr %r2,%r2 # int + lgr %r3,%r2 # move history0 parameter + la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter + la %r14,6(%r14) # skip store of return value + jg sys32_sigsuspend # branch to sys32_sigsuspend +#endif + +sys_rt_sigsuspend_glue: + lgr %r4,%r3 # move sigsetsize parameter + lgr %r3,%r2 # move unewset parameter + la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter + la %r14,6(%r14) # skip store of return value + jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend + +#ifdef CONFIG_S390_SUPPORT +sys32_rt_sigsuspend_glue: + llgfr %r3,%r3 # size_t + lgr %r4,%r3 # move sigsetsize parameter + llgtr %r2,%r2 # sigset_emu31_t * + lgr %r3,%r2 # move unewset parameter + la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter + la %r14,6(%r14) # skip store of return value + jg sys32_rt_sigsuspend # branch to sys32_rt_sigsuspend +#endif + +sys_sigaltstack_glue: + la %r4,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys_sigaltstack # branch to sys_sigreturn + +#ifdef CONFIG_S390_SUPPORT +sys32_sigaltstack_glue: + la %r4,SP_PTREGS(%r15) # load pt_regs as parameter + jg sys32_sigaltstack_wrapper # branch to sys_sigreturn +#endif + +#ifdef CONFIG_S390_SUPPORT +#define SYSCALL(esame,esa) esame,esa +#else +#define SYSCALL(esame,esa) esame,sys_ni_syscall +#endif + + .globl sys_call_table +sys_call_table: + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 0 */ + .long SYSCALL(sys_exit,sys32_exit_wrapper) + .long SYSCALL(sys_fork_glue,sys_fork_glue) + .long SYSCALL(sys_read,sys32_read_wrapper) + .long SYSCALL(sys_write,sys32_write_wrapper) + .long SYSCALL(sys_open,sys32_open_wrapper) /* 5 */ + .long SYSCALL(sys_close,sys32_close_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old waitpid syscall */ + .long SYSCALL(sys_creat,sys32_creat_wrapper) + .long SYSCALL(sys_link,sys32_link_wrapper) + .long SYSCALL(sys_unlink,sys32_unlink_wrapper) /* 10 */ + .long SYSCALL(sys_execve_glue,sys32_execve_glue) + .long SYSCALL(sys_chdir,sys32_chdir_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ + .long SYSCALL(sys_mknod,sys32_mknod_wrapper) + .long SYSCALL(sys_chmod,sys32_chmod_wrapper) /* 15 */ + .long SYSCALL(sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old break syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old stat syscall */ + .long SYSCALL(sys_lseek,sys32_lseek_wrapper) + .long SYSCALL(sys_getpid,sys_getpid) /* 20 */ + .long SYSCALL(sys_mount,sys32_mount_wrapper) + .long SYSCALL(sys_oldumount,sys32_oldumount_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/ + .long SYSCALL(sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 25 old stime syscall */ + .long SYSCALL(sys_ptrace,sys32_ptrace_wrapper) + .long SYSCALL(sys_alarm,sys32_alarm_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old fstat syscall */ + .long SYSCALL(sys_pause,sys32_pause) + .long SYSCALL(sys_utime,sys32_utime_wrapper) /* 30 */ + 
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old stty syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old gtty syscall */ + .long SYSCALL(sys_access,sys32_access_wrapper) + .long SYSCALL(sys_nice,sys32_nice_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old ftime syscall */ + .long SYSCALL(sys_sync,sys_sync) + .long SYSCALL(sys_kill,sys32_kill_wrapper) + .long SYSCALL(sys_rename,sys32_rename_wrapper) + .long SYSCALL(sys_mkdir,sys32_mkdir_wrapper) + .long SYSCALL(sys_rmdir,sys32_rmdir_wrapper) /* 40 */ + .long SYSCALL(sys_dup,sys32_dup_wrapper) + .long SYSCALL(sys_pipe,sys32_pipe_wrapper) + .long SYSCALL(sys_times,sys32_times_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old prof syscall */ + .long SYSCALL(sys_brk,sys32_brk_wrapper) /* 45 */ + .long SYSCALL(sys_ni_syscall,sys32_setgid16) /* old setgid16 syscall*/ + .long SYSCALL(sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/ + .long SYSCALL(sys_signal,sys32_signal_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_getegid16) /* old getegid16 syscall */ + .long SYSCALL(sys_acct,sys32_acct_wrapper) + .long SYSCALL(sys_umount,sys32_umount_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old lock syscall */ + .long SYSCALL(sys_ioctl,sys32_ioctl_wrapper) + .long SYSCALL(sys_fcntl,sys32_fcntl_wrapper) /* 55 */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* intel mpx syscall */ + .long SYSCALL(sys_setpgid,sys32_setpgid_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old ulimit syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old uname syscall */ + .long SYSCALL(sys_umask,sys32_umask_wrapper) /* 60 */ + .long SYSCALL(sys_chroot,sys32_chroot_wrapper) + .long SYSCALL(sys_ustat,sys32_ustat_wrapper) + .long SYSCALL(sys_dup2,sys32_dup2_wrapper) + .long SYSCALL(sys_getppid,sys_getppid) + .long SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ + .long SYSCALL(sys_setsid,sys_setsid) + .long SYSCALL(sys_sigaction,sys32_sigaction_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old sgetmask syscall*/ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old ssetmask syscall*/ + .long SYSCALL(sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */ + .long SYSCALL(sys_sigsuspend_glue,sys32_sigsuspend_glue) + .long SYSCALL(sys_sigpending,sys32_sigpending_wrapper) + .long SYSCALL(sys_sethostname,sys32_sethostname_wrapper) + .long SYSCALL(sys_setrlimit,sys32_setrlimit_wrapper) /* 75 */ + .long SYSCALL(sys_getrlimit,sys32_old_getrlimit_wrapper) + .long SYSCALL(sys_getrusage,sys32_getrusage_wrapper) + .long SYSCALL(sys_gettimeofday,sys32_gettimeofday_wrapper) + .long SYSCALL(sys_settimeofday,sys32_settimeofday_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_getgroups16_wrapper) /* old getgroups16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old select syscall */ + .long SYSCALL(sys_symlink,sys32_symlink_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old lstat syscall */ + .long SYSCALL(sys_readlink,sys32_readlink_wrapper) /* 85 */ + .long SYSCALL(sys_uselib,sys32_uselib_wrapper) + .long SYSCALL(sys_swapon,sys32_swapon_wrapper) + .long SYSCALL(sys_reboot,sys32_reboot_wrapper) + .long SYSCALL(sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */ + .long SYSCALL(old_mmap,old32_mmap_wrapper) /* 
90 */ + .long SYSCALL(sys_munmap,sys32_munmap_wrapper) + .long SYSCALL(sys_truncate,sys32_truncate_wrapper) + .long SYSCALL(sys_ftruncate,sys32_ftruncate_wrapper) + .long SYSCALL(sys_fchmod,sys32_fchmod_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_fchown16_wrapper) /* old fchown16 syscall*/ + .long SYSCALL(sys_getpriority,sys32_getpriority_wrapper) + .long SYSCALL(sys_setpriority,sys32_setpriority_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old profil syscall */ + .long SYSCALL(sys_statfs,sys32_statfs_wrapper) + .long SYSCALL(sys_fstatfs,sys32_fstatfs_wrapper) /* 100 */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) + .long SYSCALL(sys_socketcall,sys32_socketcall_wrapper) + .long SYSCALL(sys_syslog,sys32_syslog_wrapper) + .long SYSCALL(sys_setitimer,sys32_setitimer_wrapper) + .long SYSCALL(sys_getitimer,sys32_getitimer_wrapper) /* 105 */ + .long SYSCALL(sys_newstat,sys32_newstat_wrapper) + .long SYSCALL(sys_newlstat,sys32_newlstat_wrapper) + .long SYSCALL(sys_newfstat,sys32_newfstat_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old uname syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* iopl for i386 */ + .long SYSCALL(sys_vhangup,sys_vhangup) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old "idle" system call */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* vm86old for i386 */ + .long SYSCALL(sys_wait4,sys32_wait4_wrapper) + .long SYSCALL(sys_swapoff,sys32_swapoff_wrapper) /* 115 */ + .long SYSCALL(sys_sysinfo,sys32_sysinfo_wrapper) + .long SYSCALL(sys_ipc,sys32_ipc_wrapper) + .long SYSCALL(sys_fsync,sys32_fsync_wrapper) + .long SYSCALL(sys_sigreturn_glue,sys32_sigreturn_glue) + .long SYSCALL(sys_clone_glue,sys_clone_glue) /* 120 */ + .long SYSCALL(sys_setdomainname,sys32_setdomainname_wrapper) + .long SYSCALL(sys_newuname,sys32_newuname_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* modify_ldt for i386 */ + .long SYSCALL(sys_adjtimex,sys32_adjtimex_wrapper) + .long SYSCALL(sys_mprotect,sys32_mprotect_wrapper) /* 125 */ + .long SYSCALL(sys_sigprocmask,sys32_sigprocmask_wrapper) + .long SYSCALL(sys_create_module,sys32_create_module_wrapper) + .long SYSCALL(sys_init_module,sys32_init_module_wrapper) + .long SYSCALL(sys_delete_module,sys32_delete_module_wrapper) + .long SYSCALL(sys_get_kernel_syms,sys32_get_kernel_syms_wrapper) /* 130 */ + .long SYSCALL(sys_quotactl,sys32_quotactl_wrapper) + .long SYSCALL(sys_getpgid,sys32_getpgid_wrapper) + .long SYSCALL(sys_fchdir,sys32_fchdir_wrapper) + .long SYSCALL(sys_bdflush,sys32_bdflush_wrapper) + .long SYSCALL(sys_sysfs,sys32_sysfs_wrapper) /* 135 */ + .long SYSCALL(sys_personality,sys32_personality_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* for afs_syscall */ + .long SYSCALL(sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */ + .long SYSCALL(sys_llseek,sys32_llseek_wrapper) /* 140 */ + .long SYSCALL(sys_getdents,sys32_getdents_wrapper) + .long SYSCALL(sys_select,sys32_select_wrapper) + .long SYSCALL(sys_flock,sys32_flock_wrapper) + .long SYSCALL(sys_msync,sys32_msync_wrapper) + .long SYSCALL(sys_readv,sys32_readv_wrapper) /* 145 */ + .long SYSCALL(sys_writev,sys32_writev_wrapper) + .long SYSCALL(sys_getsid,sys32_getsid_wrapper) + .long SYSCALL(sys_fdatasync,sys32_fdatasync_wrapper) + .long SYSCALL(sys_sysctl,sys_ni_syscall) + .long SYSCALL(sys_mlock,sys32_mlock_wrapper) /* 150 */ + .long SYSCALL(sys_munlock,sys32_munlock_wrapper) + .long 
SYSCALL(sys_mlockall,sys32_mlockall_wrapper) + .long SYSCALL(sys_munlockall,sys_munlockall) + .long SYSCALL(sys_sched_setparam,sys32_sched_setparam_wrapper) + .long SYSCALL(sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */ + .long SYSCALL(sys_sched_setscheduler,sys32_sched_setscheduler_wrapper) + .long SYSCALL(sys_sched_getscheduler,sys32_sched_getscheduler_wrapper) + .long SYSCALL(sys_sched_yield,sys_sched_yield) + .long SYSCALL(sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper) + .long SYSCALL(sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) + .long SYSCALL(sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper) + .long SYSCALL(sys_nanosleep,sys32_nanosleep_wrapper) + .long SYSCALL(sys_mremap,sys32_mremap_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_getresuid16_wrapper) /* old getresuid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* for vm86 */ + .long SYSCALL(sys_query_module,sys32_query_module_wrapper) + .long SYSCALL(sys_poll,sys32_poll_wrapper) + .long SYSCALL(sys_nfsservctl,sys32_nfsservctl_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_setresgid16_wrapper) /* old setresgid16 syscall */ + .long SYSCALL(sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ + .long SYSCALL(sys_prctl,sys32_prctl_wrapper) + .long SYSCALL(sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) + .long SYSCALL(sys_rt_sigaction,sys32_rt_sigaction_wrapper) + .long SYSCALL(sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ + .long SYSCALL(sys_rt_sigpending,sys32_rt_sigpending_wrapper) + .long SYSCALL(sys_rt_sigtimedwait,sys32_rt_sigtimedwait_wrapper) + .long SYSCALL(sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper) + .long SYSCALL(sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue) + .long SYSCALL(sys_pread,sys32_pread_wrapper) /* 180 */ + .long SYSCALL(sys_pwrite,sys32_pwrite_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */ + .long SYSCALL(sys_getcwd,sys32_getcwd_wrapper) + .long SYSCALL(sys_capget,sys32_capget_wrapper) + .long SYSCALL(sys_capset,sys32_capset_wrapper) /* 185 */ + .long SYSCALL(sys_sigaltstack_glue,sys32_sigaltstack_glue) + .long SYSCALL(sys_sendfile,sys32_sendfile_wrapper) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams1 */ + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams2 */ + .long SYSCALL(sys_vfork_glue,sys_vfork_glue) /* 190 */ + .long SYSCALL(sys_getrlimit,sys32_old_getrlimit_wrapper) + .long SYSCALL(sys_mmap2,sys32_mmap2_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_truncate64_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_ftruncate64_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_stat64) /* 195 */ + .long SYSCALL(sys_ni_syscall,sys32_lstat64) + .long SYSCALL(sys_ni_syscall,sys32_fstat64) + .long SYSCALL(sys_lchown,sys32_lchown_wrapper) + .long SYSCALL(sys_getuid,sys_getuid) + .long SYSCALL(sys_getgid,sys_getgid) /* 200 */ + .long SYSCALL(sys_geteuid,sys_geteuid) + .long SYSCALL(sys_getegid,sys_getegid) + .long SYSCALL(sys_setreuid,sys32_setreuid_wrapper) + .long SYSCALL(sys_setregid,sys32_setregid_wrapper) + .long SYSCALL(sys_getgroups,sys32_getgroups_wrapper) /* 205 */ + .long SYSCALL(sys_setgroups,sys32_setgroups_wrapper) + .long SYSCALL(sys_fchown,sys32_fchown_wrapper) + .long SYSCALL(sys_setresuid,sys32_setresuid_wrapper) + .long SYSCALL(sys_getresuid,sys32_getresuid_wrapper) + .long SYSCALL(sys_setresgid,sys32_setresgid_wrapper) /* 210 */ + .long 
SYSCALL(sys_getresgid,sys32_getresgid_wrapper) + .long SYSCALL(sys_chown,sys32_chown_wrapper) + .long SYSCALL(sys_setuid,sys32_setuid_wrapper) + .long SYSCALL(sys_setgid,sys32_setgid_wrapper) + .long SYSCALL(sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */ + .long SYSCALL(sys_setfsgid,sys32_setfsgid_wrapper) + .long SYSCALL(sys_pivot_root,sys32_pivot_root_wrapper) + .long SYSCALL(sys_mincore,sys32_mincore_wrapper) + .long SYSCALL(sys_madvise,sys32_madvise_wrapper) + .long SYSCALL(sys_ni_syscall,sys32_getdents64_wrapper)/* 220 */ + .long SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper) + .rept 255-221 + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) + .endr + +/* + * Program check handler routine + */ + + .globl pgm_check_handler +pgm_check_handler: +/* + * First we need to check for a special case: + * Single stepping an instruction that disables the PER event mask will + * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. + * For a single stepped SVC the program check handler gets control after + * the SVC new PSW has been loaded. But we want to execute the SVC first and + * then handle the PER event. Therefore we update the SVC old PSW to point + * to the pgm_check_handler and branch to the SVC handler after we checked + * if we have to load the kernel stack register. + * For every other possible cause for PER event without the PER mask set + * we just ignore the PER event (FIXME: is there anything we have to do + * for LPSW?). + */ + stmg %r14,%r15,__LC_SAVE_AREA + stam %a2,%a4,__LC_SAVE_AREA+16 + tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception + jz pgm_sv # skip if not + tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on + jnz pgm_sv # skip if it is +# ok its one of the special cases, now we need to find out which one + clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW + je pgm_svcper +# no interesting special case, ignore PER event + lm %r13,%r15,__LC_SAVE_AREA + lpsw __LC_PGM_OLD_PSW +# it was a single stepped SVC that is causing all the trouble +pgm_svcper: + tm __LC_SVC_OLD_PSW+1,0x01 # test problem state bit + jz 0f # skip stack & access regs setup + lg %r15,__LC_KERNEL_STACK # problem state -> load ksp + slr %r14,%r14 + sar %a2,%r14 # set ac.reg. 2 to primary space + lhi %r14,1 + sar %a4,%r14 # and access reg. 4 to home space +0: aghi %r15,-SP_SIZE # make room for registers & psw + nill %r15,0xfff8 # align stack pointer to 8 + stmg %r0,%r14,SP_R0(%r15) # store gprs 0-14 to kernel stack + stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 + mvc SP_RE(16,%r15),__LC_SAVE_AREA # move R14-R15 to stack + stam %a0,%a15,SP_AREGS(%r15) # store access registers to kst. + mvc SP_AREGS+8(12,%r15),__LC_SAVE_AREA+16 # store ac. regs + mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW # move user PSW to stack + lhi %r0,__LC_SVC_OLD_PSW # store trap indication + st %r0,SP_TRAP(%r15) + xc 0(8,%r15),0(%r15) # clear back chain + + mvi SP_SVC_STEP(%r15),1 # make SP_SVC_STEP nonzero + mvc SP_PGM_OLD_ILC(4,%r15),__LC_PGM_ILC # save program check information + j pgm_system_call # now do the svc +pgm_svcret: + lhi %r0,__LC_PGM_OLD_PSW # set trap indication back to pgm_chk + st %r0,SP_TRAP(%r15) + llgh %r7,SP_PGM_OLD_ILC(%r15) # get ilc from stack + xc SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15) + j pgm_no_sv +pgm_sv: + tm __LC_PGM_OLD_PSW+1,0x01 # test problem state bit + jz 1f # skip stack setup save + lg %r15,__LC_KERNEL_STACK # problem state -> load ksp + slr %r14,%r14 + sar %a2,%r14 # set ac.reg. 2 to primary space + lhi %r14,1 + sar %a4,%r14 # set access reg. 
4 to home space +1: aghi %r15,-SP_SIZE # make room for registers & psw + nill %r15,0xfff8 # align stack pointer to 8 + stmg %r0,%r14,SP_R0(%r15) # store gprs 0-14 to kernel stack + stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 + mvc SP_RE(16,%r15),__LC_SAVE_AREA # move R14-R15 to stack + stam %a0,%a15,SP_AREGS(%r15) # store access registers to kst. + mvc SP_AREGS+8(12,%r15),__LC_SAVE_AREA+16 # store ac. regs + mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW # move user PSW to stack + lhi %r0,__LC_PGM_OLD_PSW # store trap indication + st %r0,SP_TRAP(%r15) + xc 0(8,%r15),0(%r15) # clear back chain + xc SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15) + llgh %r7,__LC_PGM_ILC # load instruction length +pgm_no_sv: + llgh %r8,__LC_PGM_INT_CODE # N.B. saved int code used later KEEP it + stosm 48(%r15),0x03 # reenable interrupts + lghi %r3,0x7f + nr %r3,%r8 # clear per-event-bit & move to r3 + je pgm_dn # none of Martins exceptions occured bypass + sll %r3,3 + larl %r9,pgm_check_table + lg %r9,0(%r3,%r9) # load address of handler routine + srl %r3,3 + la %r2,SP_PTREGS(%r15) # address of register-save area + chi %r3,0x4 # protection-exception ? + jne pgm_go # if not, + lg %r5,SP_PSW+8(15) # load psw addr + slgr %r5,%r7 # substract ilc from psw + stg %r5,SP_PSW+8(15) # store corrected psw addr +pgm_go: basr %r14,%r9 # branch to interrupt-handler +pgm_dn: nill %r8,0x80 # check for per exception + je sysc_return + la %r2,SP_PTREGS(15) # address of register-save area + larl %r14,sysc_return # load adr. of system return + jg handle_per_exception + +/* + * IO interrupt handler routine + */ + .globl io_int_handler +io_int_handler: + SAVE_ALL(__LC_IO_OLD_PSW) + la %r2,SP_PTREGS(%r15) # address of register-save area + llgh %r3,__LC_SUBCHANNEL_NR # load subchannel number + llgf %r4,__LC_IO_INT_PARM # load interruption parm + llgf %r5,__LC_IO_INT_WORD # load interruption word + brasl %r14,do_IRQ # call standard irq handler + +io_return: + GET_CURRENT # load pointer to task_struct to R9 + tm SP_PSW+1(%r15),0x01 # returning to user ? + jz io_leave # no-> skip resched & signal + stosm 48(%r15),0x03 # reenable interrupts +# +# check, if bottom-half has to be done +# + l %r0,__LC_IRQ_STAT # get softirq_active + n %r0,__LC_IRQ_STAT+4 # and it with softirq_mask + jnz io_handle_bottom_half +io_return_bh: +# +# check, if reschedule is needed +# + lg %r0,need_resched(%r9) # get need_resched from task_struct + ltgr %r0,%r0 + jnz io_reschedule + icm %r0,15,sigpending(%r9) # get sigpending from task_struct + jnz io_signal_return +io_leave: + stnsm 48(%r15),disable # disable I/O and ext. interrupts + RESTORE_ALL + +# +# call do_softirq and return from syscall, if interrupt-level +# is zero +# +io_handle_bottom_half: + larl %r14,io_return_bh + jg do_softirq # return point is io_return_bh + +# +# call schedule with io_return as return-address +# +io_reschedule: + larl %r14,io_return + jg schedule # call scheduler, return to io_return + +# +# call do_signal before return +# +io_signal_return: + la %r2,SP_PTREGS(%r15) # load pt_regs + slgr %r3,%r3 # clear *oldset + larl %r14,io_leave + jg do_signal # return point is io_leave + +/* + * External interrupt handler routine + */ + .globl ext_int_handler +ext_int_handler: + SAVE_ALL(__LC_EXT_OLD_PSW) + la %r2,SP_PTREGS(%r15) # address of register-save area + llgh %r3,__LC_EXT_INT_CODE # error code + lgr %r1,%r3 # calculate index = code & 0xff + nill %r1,0xff + sll %r1,3 + larl %r9,ext_int_hash + lg %r9,0(%r1,%r9) # get first list entry for hash value + ltgr %r9,%r9 # == NULL ? 
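+# ext_int_hash is indexed by the low byte of the interrupt code;
+# entries are chained through their first doubleword and hold the
+# handler address at offset 8 and the interrupt code at offset 16,
+# as used by ext_int_loop below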
+ jz io_return # yes, nothing to do, exit +ext_int_loop: + ch %r3,16(%r9) # compare external interrupt code + je ext_int_found + lg %r9,0(%r9) # next list entry + ltgr %r9,%r9 + jnz ext_int_loop + j io_return +ext_int_found: + lg %r9,8(%r9) # get handler address + larl %r14,io_return + br %r9 # branch to ext call handler + +/* + * Machine check handler routines + */ + .globl mcck_int_handler +mcck_int_handler: + SAVE_ALL(__LC_MCK_OLD_PSW) + brasl %r14,s390_do_machine_check +mcck_return: + RESTORE_ALL + +#ifdef CONFIG_SMP +/* + * Restart interruption handler, kick starter for additional CPUs + */ + .globl restart_int_handler +restart_int_handler: + lg %r15,__LC_KERNEL_STACK # load ksp + lctlg %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs + lam %a0,%a15,__LC_AREGS_SAVE_AREA + stosm 0(%r15),daton # now we can turn dat on + lmg %r6,%r15,48(%r15) # load registers from clone + jg start_secondary +#else +/* + * If we do not run with SMP enabled, let the new CPU crash ... + */ + .globl restart_int_handler +restart_int_handler: + basr %r1,0 +restart_base: + lpswe restart_crash-restart_base(%r1) + .align 8 +restart_crash: + .long 0x000a0000,0x00000000,0x00000000,0x00000000 +restart_go: +#endif + diff --git a/arch/s390x/kernel/exec32.c b/arch/s390x/kernel/exec32.c new file mode 100644 index 000000000..3e6f44558 --- /dev/null +++ b/arch/s390x/kernel/exec32.c @@ -0,0 +1,85 @@ +/* + * Support for 32-bit Linux for S390 ELF binaries. + * + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Gerhard Tonn (ton@de.ibm.com) + * + * Seperated from binfmt_elf32.c to reduce exports for module enablement. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define __NO_VERSION__ +#include + +#include +#include +#include + +#ifdef CONFIG_KMOD +#include +#endif + + +extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address); + +#undef STACK_TOP +#define STACK_TOP TASK31_SIZE + +int setup_arg_pages32(struct linux_binprm *bprm) +{ + unsigned long stack_base; + struct vm_area_struct *mpnt; + int i; + + stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE; + + bprm->p += stack_base; + if (bprm->loader) + bprm->loader += stack_base; + bprm->exec += stack_base; + + mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (!mpnt) + return -ENOMEM; + + down(¤t->mm->mmap_sem); + { + mpnt->vm_mm = current->mm; + mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; + mpnt->vm_end = STACK_TOP; + mpnt->vm_page_prot = PAGE_COPY; + mpnt->vm_flags = VM_STACK_FLAGS; + mpnt->vm_ops = NULL; + mpnt->vm_pgoff = 0; + mpnt->vm_file = NULL; + mpnt->vm_private_data = (void *) 0; + insert_vm_struct(current->mm, mpnt); + current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + } + + for (i = 0 ; i < MAX_ARG_PAGES ; i++) { + struct page *page = bprm->page[i]; + if (page) { + bprm->page[i] = NULL; + current->mm->rss++; + put_dirty_page(current,page,stack_base); + } + stack_base += PAGE_SIZE; + } + up(¤t->mm->mmap_sem); + + return 0; +} + diff --git a/arch/s390x/kernel/gdb-stub.c b/arch/s390x/kernel/gdb-stub.c new file mode 100644 index 000000000..06e3adbb0 --- /dev/null +++ b/arch/s390x/kernel/gdb-stub.c @@ -0,0 +1,575 @@ +/* + * arch/s390/kernel/gdb-stub.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * Originally written by Glenn Engel, Lake 
Stevens Instrument Division + * + * Contributed by HP Systems + * + * Modified for SPARC by Stu Grossman, Cygnus Support. + * + * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse + * Send complaints, suggestions etc. to + * + * Copyright (C) 1995 Andreas Busse + */ + +/* + * To enable debugger support, two things need to happen. One, a + * call to set_debug_traps() is necessary in order to allow any breakpoints + * or error conditions to be properly intercepted and reported to gdb. + * Two, a breakpoint needs to be generated to begin communication. This + * is most easily accomplished by a call to breakpoint(). Breakpoint() + * simulates a breakpoint by executing a BREAK instruction. + * + * + * The following gdb commands are supported: + * + * command function Return value + * + * g return the value of the CPU registers hex data or ENN + * G set the value of the CPU registers OK or ENN + * + * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN + * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN + * + * c Resume at current address SNN ( signal NN) + * cAA..AA Continue at address AA..AA SNN + * + * s Step one instruction SNN + * sAA..AA Step one instruction from AA..AA SNN + * + * k kill + * + * ? What was the last sigval ? SNN (signal NN) + * + * + * All commands and responses are sent with a packet which includes a + * checksum. A packet consists of + * + * $#. + * + * where + * :: + * :: < two hex digits computed as modulo 256 sum of > + * + * When a packet is received, it is first acknowledged with either '+' or '-'. + * '+' indicates a successful transfer. '-' indicates a failed transfer. + * + * Example: + * + * Host: Reply: + * $m0,10#2a +$00010203040506070809101112131415#42 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * external low-level support routines + */ + +extern int putDebugChar(char c); /* write a single character */ +extern char getDebugChar(void); /* read and return a single char */ +extern void fltr_set_mem_err(void); +extern void trap_low(void); + +/* + * breakpoint and test functions + */ +extern void breakpoint(void); +extern void breakinst(void); + +/* + * local prototypes + */ + +static void getpacket(char *buffer); +static void putpacket(char *buffer); +static int hex(unsigned char ch); +static int hexToInt(char **ptr, int *intValue); +static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault); + + +/* + * BUFMAX defines the maximum number of characters in inbound/outbound buffers + * at least NUMREGBYTES*2 are needed for register packets + */ +#define BUFMAX 2048 + +static char input_buffer[BUFMAX]; +static char output_buffer[BUFMAX]; +int gdb_stub_initialised = FALSE; +static const char hexchars[]="0123456789abcdef"; + + +/* + * Convert ch from a hex digit to an int + */ +static int hex(unsigned char ch) +{ + if (ch >= 'a' && ch <= 'f') + return ch-'a'+10; + if (ch >= '0' && ch <= '9') + return ch-'0'; + if (ch >= 'A' && ch <= 'F') + return ch-'A'+10; + return -1; +} + +/* + * scan for the sequence $# + */ +static void getpacket(char *buffer) +{ + unsigned char checksum; + unsigned char xmitcsum; + int i; + int count; + unsigned char ch; + + do { + /* + * wait around for the start character, + * ignore all other characters + */ + while ((ch = (getDebugChar() & 0x7f)) != '$') ; + + checksum = 0; + xmitcsum = -1; + count = 0; + + /* + * now, read until a # or end of buffer is found + */ + while (count < BUFMAX) { + ch = getDebugChar() & 0x7f; + if (ch == 
'#') + break; + checksum = checksum + ch; + buffer[count] = ch; + count = count + 1; + } + + if (count >= BUFMAX) + continue; + + buffer[count] = 0; + + if (ch == '#') { + xmitcsum = hex(getDebugChar() & 0x7f) << 4; + xmitcsum |= hex(getDebugChar() & 0x7f); + + if (checksum != xmitcsum) + putDebugChar('-'); /* failed checksum */ + else { + putDebugChar('+'); /* successful transfer */ + + /* + * if a sequence char is present, + * reply the sequence ID + */ + if (buffer[2] == ':') { + putDebugChar(buffer[0]); + putDebugChar(buffer[1]); + + /* + * remove sequence chars from buffer + */ + count = strlen(buffer); + for (i=3; i <= count; i++) + buffer[i-3] = buffer[i]; + } + } + } + } + while (checksum != xmitcsum); +} + +/* + * send the packet in buffer. + */ +static void putpacket(char *buffer) +{ + unsigned char checksum; + int count; + unsigned char ch; + + /* + * $#. + */ + + do { + putDebugChar('$'); + checksum = 0; + count = 0; + + while ((ch = buffer[count]) != 0) { + if (!(putDebugChar(ch))) + return; + checksum += ch; + count += 1; + } + + putDebugChar('#'); + putDebugChar(hexchars[checksum >> 4]); + putDebugChar(hexchars[checksum & 0xf]); + + } + while ((getDebugChar() & 0x7f) != '+'); +} + + + +/* + * Convert the memory pointed to by mem into hex, placing result in buf. + * Return a pointer to the last char put in buf (null), in case of mem fault, + * return 0. + * If MAY_FAULT is non-zero, then we will handle memory faults by returning + * a 0, else treat a fault like any other fault in the stub. + */ +static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault) +{ + unsigned char ch; + +/* set_mem_fault_trap(may_fault); */ + + while (count-- > 0) { + ch = *(mem++); + if (mem_err) + return 0; + *buf++ = hexchars[ch >> 4]; + *buf++ = hexchars[ch & 0xf]; + } + + *buf = 0; + +/* set_mem_fault_trap(0); */ + + return buf; +} + +/* + * convert the hex array pointed to by buf into binary to be placed in mem + * return a pointer to the character AFTER the last byte written + */ +static char *hex2mem(char *buf, char *mem, int count, int may_fault) +{ + int i; + unsigned char ch; + +/* set_mem_fault_trap(may_fault); */ + + for (i=0; ifp_regs; + int has_ieee=save_fp_regs1(fpregs); + + if(!has_ieee) + { + fpregs->fpc=0; + fpregs->fprs[1].d= + fpregs->fprs[3].d= + fpregs->fprs[5].d= + fpregs->fprs[7].d=0; + memset(&fpregs->fprs[8].d,0,sizeof(freg_t)*8); + } +} + +void gdb_stub_set_non_pt_regs(gdb_pt_regs *regs) +{ + restore_fp_regs1(®s->fp_regs); +} + +void gdb_stub_send_signal(int sigval) +{ + char *ptr; + ptr = output_buffer; + + /* + * Send trap type (converted to signal) + */ + *ptr++ = 'S'; + *ptr++ = hexchars[sigval >> 4]; + *ptr++ = hexchars[sigval & 0xf]; + *ptr++ = 0; + putpacket(output_buffer); /* send it off... */ +} + +/* + * This function does all command processing for interfacing to gdb. It + * returns 1 if you should skip the instruction at the trap address, 0 + * otherwise. 
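+ *
+ * (Editor's note: in this version the function below is declared void,
+ * so the return-value description above appears to be inherited from
+ * the MIPS stub this file was derived from; likewise it calls
+ * send_signal() where this file itself defines gdb_stub_send_signal().)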
+ */ +void gdb_stub_handle_exception(gdb_pt_regs *regs,int sigval) +{ + int trap; /* Trap type */ + int addr; + int length; + char *ptr; + unsigned long *stack; + + + /* + * reply to host that an exception has occurred + */ + send_signal(sigval); + + /* + * Wait for input from remote GDB + */ + while (1) { + output_buffer[0] = 0; + getpacket(input_buffer); + + switch (input_buffer[0]) + { + case '?': + send_signal(sigval); + continue; + + case 'd': + /* toggle debug flag */ + break; + + /* + * Return the value of the CPU registers + */ + case 'g': + gdb_stub_get_non_pt_regs(regs); + ptr = output_buffer; + ptr= mem2hex((char *)regs,ptr,sizeof(s390_regs_common),FALSE); + ptr= mem2hex((char *)®s->crs[0],ptr,NUM_CRS*CR_SIZE,FALSE); + ptr = mem2hex((char *)®s->fp_regs, ptr,sizeof(s390_fp_regs)); + break; + + /* + * set the value of the CPU registers - return OK + * FIXME: Needs to be written + */ + case 'G': + ptr=input_buffer; + hex2mem (ptr, (char *)regs,sizeof(s390_regs_common), FALSE); + ptr+=sizeof(s390_regs_common)*2; + hex2mem (ptr, (char *)regs->crs[0],NUM_CRS*CR_SIZE, FALSE); + ptr+=NUM_CRS*CR_SIZE*2; + hex2mem (ptr, (char *)regs->fp_regs,sizeof(s390_fp_regs), FALSE); + gdb_stub_set_non_pt_regs(regs); + strcpy(output_buffer,"OK"); + break; + + /* + * mAA..AA,LLLL Read LLLL bytes at address AA..AA + */ + case 'm': + ptr = &input_buffer[1]; + + if (hexToInt(&ptr, &addr) + && *ptr++ == ',' + && hexToInt(&ptr, &length)) { + if (mem2hex((char *)addr, output_buffer, length, 1)) + break; + strcpy (output_buffer, "E03"); + } else + strcpy(output_buffer,"E01"); + break; + + /* + * MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK + */ + case 'M': + ptr = &input_buffer[1]; + + if (hexToInt(&ptr, &addr) + && *ptr++ == ',' + && hexToInt(&ptr, &length) + && *ptr++ == ':') { + if (hex2mem(ptr, (char *)addr, length, 1)) + strcpy(output_buffer, "OK"); + else + strcpy(output_buffer, "E03"); + } + else + strcpy(output_buffer, "E02"); + break; + + /* + * cAA..AA Continue at address AA..AA(optional) + */ + case 'c': + /* try to read optional parameter, pc unchanged if no parm */ + + ptr = &input_buffer[1]; + if (hexToInt(&ptr, &addr)) + regs->cp0_epc = addr; + + /* + * Need to flush the instruction cache here, as we may + * have deposited a breakpoint, and the icache probably + * has no way of knowing that a data ref to some location + * may have changed something that is in the instruction + * cache. + * NB: We flush both caches, just to be sure... + */ + + flush_cache_all(); + return; + /* NOTREACHED */ + break; + + + /* + * kill the program + */ + case 'k' : + break; /* do nothing */ + + + /* + * Reset the whole machine (FIXME: system dependent) + */ + case 'r': + break; + + + /* + * Step to next instruction + */ + case 's': + /* + * There is no single step insn in the MIPS ISA, so we + * use breakpoints and continue, instead. + */ + single_step(regs); + flush_cache_all(); + return; + /* NOTREACHED */ + + } + break; + + } /* switch */ + + /* + * reply to the request + */ + + putpacket(output_buffer); + + } /* while */ +} + +/* + * This function will generate a breakpoint exception. It is used at the + * beginning of a program to sync up with a debugger and can be used + * otherwise as a quick means to stop program execution and "break" into + * the debugger. 
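+ *
+ * A minimal usage sketch (editor's addition; per the file header this is
+ * the documented protocol, the call site is not code taken from this
+ * file):
+ *
+ *	set_debug_traps();	- install the stub's trap handlers first
+ *	breakpoint();		- stop here and wait for gdb to attach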
+ */ +void breakpoint(void) +{ + if (!gdb_stub_initialised) + return; + __asm__ __volatile__( + ".globl breakinst\n" + "breakinst:\t.word %0\n\t" + : + : "i" (S390_BREAKPOINT_U16) + : + ); +} + + + + + + + diff --git a/arch/s390x/kernel/head.S b/arch/s390x/kernel/head.S new file mode 100644 index 000000000..2b72a62c8 --- /dev/null +++ b/arch/s390x/kernel/head.S @@ -0,0 +1,598 @@ +/* + * arch/s390/kernel/head.S + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com), + * Rob van der Heij (rvdhei@iae.nl) + * + * There are 5 different IPL methods + * 1) load the image directly into ram at address 0 and do an PSW restart + * 2) linload will load the image from address 0x10000 to memory 0x10000 + * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) + * 3) generate the tape ipl header, store the generated image on a tape + * and ipl from it + * In case of SL tape you need to IPL 5 times to get past VOL1 etc + * 4) generate the vm reader ipl header, move the generated image to the + * VM reader (use option NOH!) and do a ipl from reader (VM only) + * 5) direct call of start by the SALIPL loader + * We use the cpuid to distinguish between VM and native ipl + * params for kernel are pushed to 0x10400 (see setup.h) + + Changes: + Okt 25 2000 + added code to skip HDR and EOF to allow SL tape IPL (5 retries) + changed first CCW from rewind to backspace block + + */ + +#include +#include +#include + +#ifndef CONFIG_IPL + .org 0 + .long 0x00080000,0x80000000+startup # Just a restart PSW +#else +#ifdef CONFIG_IPL_TAPE +#define IPL_BS 1024 + .org 0 + .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded + .long 0x27000000,0x60000001 # by ipl to addresses 0-23. + .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). + .long 0x00000000,0x00000000 # external old psw + .long 0x00000000,0x00000000 # svc old psw + .long 0x00000000,0x00000000 # program check old psw + .long 0x00000000,0x00000000 # machine check old psw + .long 0x00000000,0x00000000 # io old psw + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x000a0000,0x00000058 # external new psw + .long 0x000a0000,0x00000060 # svc new psw + .long 0x000a0000,0x00000068 # program check new psw + .long 0x000a0000,0x00000070 # machine check new psw + .long 0x00080000,0x80000000+.Lioint # io new psw + + .org 0x100 +# +# subroutine for loading from tape +# Paramters: +# R1 = device number +# R2 = load address +.Lloader: + st %r14,.Lldret + la %r3,.Lorbread # r3 = address of orb + la %r5,.Lirb # r5 = address of irb + st %r2,.Lccwread+4 # initialize CCW data addresses + lctl %c6,%c6,.Lcr6 + slr %r2,%r2 +.Lldlp: + la %r6,3 # 3 retries +.Lssch: + ssch 0(%r3) # load chunk of IPL_BS bytes + bnz .Llderr +.Lw4end: + bas %r14,.Lwait4io + tm 8(%r5),0x82 # do we have a problem ? + bnz .Lrecov + slr %r7,%r7 + icm %r7,3,10(%r5) # get residual count + lcr %r7,%r7 + la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read + ar %r2,%r7 # add to total size + tm 8(%r5),0x01 # found a tape mark ? + bnz .Ldone + l %r0,.Lccwread+4 # update CCW data addresses + ar %r0,%r7 + st %r0,.Lccwread+4 + b .Lldlp +.Ldone: + l %r14,.Lldret + br %r14 # r2 contains the total size +.Lrecov: + bas %r14,.Lsense # do the sensing + bct %r6,.Lssch # dec. 
retry count & branch + b .Llderr +# +# Sense subroutine +# +.Lsense: + st %r14,.Lsnsret + la %r7,.Lorbsense + ssch 0(%r7) # start sense command + bnz .Llderr + bas %r14,.Lwait4io + l %r14,.Lsnsret + tm 8(%r5),0x82 # do we have a problem ? + bnz .Llderr + br %r14 +# +# Wait for interrupt subroutine +# +.Lwait4io: + lpsw .Lwaitpsw +.Lioint: + c %r1,0xb8 # compare subchannel number + bne .Lwait4io + tsch 0(%r5) + slr %r0,%r0 + tm 8(%r5),0x82 # do we have a problem ? + bnz .Lwtexit + tm 8(%r5),0x04 # got device end ? + bz .Lwait4io +.Lwtexit: + br %r14 +.Llderr: + lpsw .Lcrash + + .align 8 +.Lorbread: + .long 0x00000000,0x0080ff00,.Lccwread + .align 8 +.Lorbsense: + .long 0x00000000,0x0080ff00,.Lccwsense + .align 8 +.Lccwread: + .long 0x02200000+IPL_BS,0x00000000 +.Lccwsense: + .long 0x04200001,0x00000000 +.Lwaitpsw: + .long 0x020a0000,0x80000000+.Lioint + +.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +.Lcr6: .long 0xff000000 + .align 8 +.Lcrash:.long 0x000a0000,0x00000000 +.Lldret:.long 0 +.Lsnsret: .long 0 +#endif /* CONFIG_IPL_TAPE */ + +#ifdef CONFIG_IPL_VM +#define IPL_BS 0x730 + .org 0 + .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded + .long 0x02000018,0x60000050 # by ipl to addresses 0-23. + .long 0x02000068,0x60000050 # (a PSW and two CCWs). + .fill 80-24,1,0x40 # bytes 24-79 are discarded !! + .long 0x020000f0,0x60000050 # The next 160 byte are loaded + .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 + .long 0x02000190,0x60000050 # They form the continuation + .long 0x020001e0,0x60000050 # of the CCW program started + .long 0x02000230,0x60000050 # by ipl and load the range + .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image + .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 + .long 0x02000320,0x60000050 # in memory. At the end of + .long 0x02000370,0x60000050 # the channel program the PSW + .long 0x020003c0,0x60000050 # at location 0 is loaded. + .long 0x02000410,0x60000050 # Initial processing starts + .long 0x02000460,0x60000050 # at 0xf0 = iplstart. + .long 0x020004b0,0x60000050 + .long 0x02000500,0x60000050 + .long 0x02000550,0x60000050 + .long 0x020005a0,0x60000050 + .long 0x020005f0,0x60000050 + .long 0x02000640,0x60000050 + .long 0x02000690,0x60000050 + .long 0x020006e0,0x20000050 + + .org 0xf0 +# +# subroutine for loading cards from the reader +# +.Lloader: + la %r3,.Lorb # r2 = address of orb into r2 + la %r5,.Lirb # r4 = address of irb + la %r6,.Lccws + la %r7,20 +.Linit: + st %r2,4(%r6) # initialize CCW data addresses + la %r2,0x50(%r2) + la %r6,8(%r6) + bct 7,.Linit + + lctl %c6,%c6,.Lcr6 # set IO subclass mask + slr %r2,%r2 +.Lldlp: + ssch 0(%r3) # load chunk of 1600 bytes + bnz .Llderr +.Lwait4irq: + mvc 0x78(8),.Lnewpsw # set up IO interrupt psw + lpsw .Lwaitpsw +.Lioint: + c %r1,0xb8 # compare subchannel number + bne .Lwait4irq + tsch 0(%r5) + + slr %r0,%r0 + ic %r0,8(%r5) # get device status + chi %r0,8 # channel end ? + be .Lcont + chi %r0,12 # channel end + device end ? 
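+# Editor's note: status 8 (channel end only) and status 12 (channel end
+# plus device end) both mean the chunk was read and the next one can be
+# started, so both branch to .Lcont; any other status drops through to
+# the residual-count arithmetic below, which computes the total number
+# of bytes read and returns to the caller in %r14.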
+ be .Lcont + + l %r0,4(%r5) + s %r0,8(%r3) # r0/8 = number of ccws executed + mhi %r0,10 # *10 = number of bytes in ccws + lh %r3,10(%r5) # get residual count + sr %r0,%r3 # #ccws*80-residual=#bytes read + ar %r2,%r0 + + br %r14 # r2 contains the total size + +.Lcont: + ahi %r2,0x640 # add 0x640 to total size + la %r6,.Lccws + la %r7,20 +.Lincr: + l %r0,4(%r6) # update CCW data addresses + ahi %r0,0x640 + st %r0,4(%r6) + ahi %r6,8 + bct 7,.Lincr + + b .Lldlp +.Llderr: + lpsw .Lcrash + + .align 8 +.Lorb: .long 0x00000000,0x0080ff00,.Lccws +.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +.Lcr6: .long 0xff000000 +.Lloadp:.long 0,0 + .align 8 +.Lcrash:.long 0x000a0000,0x00000000 +.Lnewpsw: + .long 0x00080000,0x80000000+.Lioint +.Lwaitpsw: + .long 0x020a0000,0x80000000+.Lioint + + .align 8 +.Lccws: .rept 19 + .long 0x02600050,0x00000000 + .endr + .long 0x02200050,0x00000000 +#endif /* CONFIG_IPL_VM */ + +iplstart: + lh %r1,0xb8 # test if subchannel number + bct %r1,.Lnoload # is valid + l %r1,0xb8 # load ipl subchannel number + la %r2,IPL_BS # load start address + bas %r14,.Lloader # load rest of ipl image + st %r1,__LC_IPLDEV # store ipl device number + l %r12,.Lparm # pointer to parameter area + +# +# load parameter file from ipl device +# +.Lagain1: + l %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp + bas %r14,.Lloader # load parameter file + ltr %r2,%r2 # got anything ? + bz .Lnopf + chi %r2,895 + bnh .Lnotrunc + la %r2,895 +.Lnotrunc: + l %r4,INITRD_START+4-PARMAREA(%r12) + clc 0(3,%r4),.L_hdr # if it is HDRx + bz .Lagain1 # skip dataset header + clc 0(3,%r4),.L_eof # if it is EOFx + bz .Lagain1 # skip dateset trailer + la %r5,0(%r4,%r2) + lr %r3,%r2 +.Lidebc: + tm 0(%r5),0x80 # high order bit set ? + bo .Ldocv # yes -> convert from EBCDIC + ahi %r5,-1 + bct %r3,.Lidebc + b .Lnocv +.Ldocv: + l %r3,.Lcvtab + tr 0(256,%r4),0(%r3) # convert parameters to ascii + tr 256(256,%r4),0(%r3) + tr 512(256,%r4),0(%r3) + tr 768(122,%r4),0(%r3) +.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line + mvc 0(256,%r3),0(%r4) + mvc 256(256,%r3),256(%r4) + mvc 512(256,%r3),512(%r4) + mvc 768(122,%r3),768(%r4) + slr %r0,%r0 + b .Lcntlp +.Ldelspc: + ic %r0,0(%r3) + ic %r0,0(%r2,%r3) + chi %r0,0x20 # is it a space ? + be .Lcntlp + ahi %r2,1 + b .Leolp +.Lcntlp: + brct %r2,.Ldelspc +.Leolp: + slr %r0,%r0 + stc %r0,0(%r2,%r3) # terminate buffer +.Lnopf: + +# +# load ramdisk from ipl device +# +.Lagain2: + l %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk + bas %r14,.Lloader # load ramdisk + st %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk + ltr %r2,%r2 + bnz .Lrdcont + st %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it +.Lrdcont: + l %r2,INITRD_START-PARMAREA(%r12) + clc 0(3,%r2),.L_hdr # skip HDRx and EOFx + bz .Lagain2 + clc 0(3,%r2),.L_eof + bz .Lagain2 + +#ifdef CONFIG_IPL_VM +# +# reset files in VM reader +# + stidp __LC_CPUID # store cpuid + lh %r0,__LC_CPUID+4 # get cpu version + chi %r0,0x7490 # running on P/390 ? 
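+# Editor's note: the branch below is taken when the compare above found
+# the P/390 version code 0x7490, i.e. a P/390 skips the reader reset;
+# the "no ->" wording in its comment looks inverted relative to the
+# compare and may be a leftover from an earlier revision.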
+ be start # no -> skip reset + la %r2,.Lreset + lhi %r3,26 + .long 0x83230008 +#endif + +# +# everything loaded, go for it +# +.Lnoload: + l %r1,.Lstartup + br %r1 + +.Lparm: .long PARMAREA +.Lstartup: .long startup +.Lcvtab:.long _ebcasc # ebcdic to ascii table +.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 + .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 + .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" +.L_eof: .long 0xc5d6c600 /* C'EOF' */ +.L_hdr: .long 0xc8c4d900 /* C'HDR' */ +#endif /* CONFIG_IPL */ + +# +# SALIPL loader support. Based on a patch by Rob van der Heij. +# This entry point is called directly from the SALIPL loader and +# doesn't need a builtin ipl record. +# + .org 0x800 + .globl start +start: + stm %r0,%r15,0x07b0 # store registers + basr %r12,%r0 +.base: + l %r11,.parm + l %r8,.cmd # pointer to command buffer + + ltr %r9,%r9 # do we have SALIPL parameters? + bp .sk8x8 + + mvc 0(64,%r8),0x00b0 # copy saved registers + xc 64(240-64,%r8),0(%r8) # remainder of buffer + tr 0(64,%r8),.lowcase + b .gotr +.sk8x8: + mvc 0(240,%r8),0(%r9) # copy iplparms into buffer +.gotr: + l %r10,.tbl # EBCDIC to ASCII table + tr 0(240,%r8),0(%r10) + stidp __LC_CPUID # Are we running on VM maybe + cli __LC_CPUID,0xff + bnz .test + .long 0x83300060 # diag 3,0,x'0060' - storage size + b .done +.test: + mvc 0x68(8),.pgmnw # set up pgm check handler + l %r2,.fourmeg + lr %r3,%r2 + bctr %r3,%r0 # 4M-1 +.loop: iske %r0,%r3 + ar %r3,%r2 +.pgmx: + sr %r3,%r2 + la %r3,1(%r3) +.done: + st %r3,MEMORY_SIZE-PARMAREA(%r11) + slr %r0,%r0 + st %r0,INITRD_SIZE-PARMAREA(%r11) + st %r0,INITRD_START-PARMAREA(%r11) + j startup # continue with startup +.tbl: .long _ebcasc # translate table +.cmd: .long COMMAND_LINE # address of command line buffer +.parm: .long PARMAREA +.fourmeg: .long 0x00400000 # 4M +.pgmnw: .long 0x00080000,.pgmx +.lowcase: + .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 + .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f + .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 + .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f + .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 + .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f + .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 + .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f + .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 + .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f + .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 + .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f + .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 + .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f + .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 + .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f + + .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 + .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f + .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 + .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f + .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 + .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf + .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 + .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf + .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg + .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi + .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop + .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr + .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx + .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz + .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 + .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff + +# +# startup-code at 0x10000, running in real mode +# this is 
called either by the ipl loader or directly by PSW restart +# or linload or SALIPL +# + .org 0x10000 +startup:basr %r13,0 # get base +.LPG1: n %r13,.Lhighoff-.LPG1(%r13) # remove high order bit + lhi %r1,1 # mode 1 = esame + slr %r0,%r0 # set cpuid to zero + sigp %r1,%r0,0x12 # switch to esame mode + sam64 # switch to 64 bit mode + lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers + lg %r12,.Lparm1-.LPG1(%r13) # pointer to parameter area + +# +# find out memory size. +# + mvc 0x1d0(16),.Lpcmem-.LPG1(%r13) # setup program check handler + lghi %r2,1 + sllg %r2,%r2,17 # test in increments of 128KB + lgr %r1,%r2 + aghi %r1,-8 # test last word in the segment +.Lloop: + lg %r0,0(%r1) # test 128KB segment + stg %r0,0(%r1) + agr %r1,%r2 # add 128KB + bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop +.Lchkmem: + ng %r1,.L4malign-.LPG1(%r13) # align to multiples of 4M + stg %r1,MEMORY_SIZE-PARMAREA(%r12) # store memory size +# +# find out if we are running under VM +# + stidp __LC_CPUID # store cpuid + tm __LC_CPUID,0xff # running under VM ? + bno .Lnovm-.LPG1(%r13) + oi MACHINE_FLAGS+7-PARMAREA(%r12),1 # set VM flag +.Lnovm: + lh %r0,__LC_CPUID+4 # get cpu version + chi %r0,0x7490 # running on a P/390 ? + bne .Lnop390-.LPG1(%r13) + oi MACHINE_FLAGS+7-PARMAREA(%r12),4 # set P/390 flag +.Lnop390: + + lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, + # virtual and never return ... + .align 16 +.Lentry:.quad 0x0000000180000000,_stext +.Lctl: .quad 0x04b50002 # cr0: various things + .quad 0 # cr1: primary space segment table + .quad 0 # cr2: access register translation + .quad 0 # cr3: instruction authorization + .quad 0 # cr4: instruction authorization + .quad 0 # cr5: various things + .quad 0 # cr6: I/O interrupts + .quad 0 # cr7: secondary space segment table + .quad 0 # cr8: access registers translation + .quad 0 # cr9: tracing off + .quad 0 # cr10: tracing off + .quad 0 # cr11: tracing off + .quad 0 # cr12: tracing off + .quad 0 # cr13: home space segment table + .quad 0xc0000000 # cr14: machine check handling off + .quad 0 # cr15: linkage stack operations +.Lpcmem:.quad 0x0000000180000000,.Lchkmem +.Lflt0: .double 0 +.Lparm1:.quad PARMAREA +.Lhighoff:.long 0x7fffffff +.L4malign:.quad 0xffffffffffc00000 +.Lbigmem:.quad 0x04000000 +.Lmaxchunk:.quad 0x00ffffff + +# +# params at 10400 (setup.h) +# + .org PARMAREA + .quad 0x0100 # ORIG_ROOT_DEV: ramdisk major/minor + .word 0 # MOUNT_ROOT_RDONLY: no + .quad 0 # MEMORY_SIZE + .quad 0 # MACHINE_FLAGS (bit 0:VM) + .quad RAMDISK_ORIGIN # INITRD_START + .quad RAMDISK_SIZE # INITRD_SIZE + .word 0 # RAMDISK_FLAGS + + .org COMMAND_LINE + .byte "root=/dev/ram0 ro" + .byte 0 + +# +# startup-code, running in virtual mode +# + .org 0x10800 + .globl _stext +_stext: basr %r13,0 # get base +.LPG2: +# +# Setup lowcore +# + l %r1,__LC_IPLDEV # load ipl device number + spx .Lprefix-.LPG2(%r13) # set prefix to linux lowcore + st %r1,__LC_IPLDEV # store ipl device number + lg %r15,.Linittu-.LPG2(%r13) + aghi %r15,16384 # init_task_union + 16384 + stg %r15,__LC_KERNEL_STACK # set end of kernel stack + aghi %r15,-160 + xc 0(8,%r15),0(%r15) # set backchain to zero + lghi %r0,-1 + stg %r0,__LC_KERNEL_LEVEL # set interrupt count to -1 +# +# clear bss memory +# + lg %r2,.Lbss_bgn-.LPG2(%r13) # start of bss + lg %r3,.Lbss_end-.LPG2(%r13) # end of bss + sgr %r3,%r2 # length of bss + sgr %r4,%r4 # + sgr %r5,%r5 # set src,length and pad to zero + mvcle %r2,%r4,0 # clear mem + jo .-4 # branch back, if not finish +# check control registers + stctg %c0,%c15,0(%r15) + 
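# Editor's note: stctg above dumped %c0-%c15 to the stack, 8 bytes per
+# register, so the oc instructions below patch single bytes of the
+# saved %c0 image; .Locbits (0x0102040810204080) supplies one set bit
+# per byte, and the modified set is then reloaded by lctlg.
+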
oc	6(1,%r15),.Locbits+5-.LPG2(%r13) # enable sigp external ints.
+	oc	4(1,%r15),.Locbits+4-.LPG2(%r13) # low address protection
+	lctlg	%c0,%c15,0(%r15)
+
+#
+	lam	0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
+	brasl	%r14,start_kernel	# go to C code
+#
+# We returned from start_kernel ?!? PANIC
+#
+	basr	%r13,0
+	lpswe	.Ldw-.(%r13)		# load disabled wait psw
+#
+.Lstart:   .quad start_kernel
+	.align	8
+.Lprefix:  .long init_S390_lowcore
+.Linittu:  .quad init_task_union
+.Lbss_bgn: .quad __bss_start
+.Lbss_end: .quad _end
+.Locbits:  .quad 0x0102040810204080
+	.align	4
+.Laregs:   .long 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0
+	.align	8
+.Ldw:	.quad 0x0002000180000000,0x0000000000000000
+
diff --git a/arch/s390x/kernel/ieee.h b/arch/s390x/kernel/ieee.h
new file mode 100644
index 000000000..ef7cc29de
--- /dev/null
+++ b/arch/s390x/kernel/ieee.h
@@ -0,0 +1,90 @@
+/*
+ *  arch/s390/kernel/ieee.h
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include
+
+static inline void _adddf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = current->tss.fprs[R1].fd +
+				   current->tss.fprs[R2].fd;
+}
+
+static inline void _subdf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = current->tss.fprs[R1].fd -
+				   current->tss.fprs[R2].fd;
+}
+
+static inline void _muldf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = current->tss.fprs[R1].fd *
+				   current->tss.fprs[R2].fd;
+}
+
+static inline void _divdf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = current->tss.fprs[R1].fd /
+				   current->tss.fprs[R2].fd;
+}
+
+static inline void _negdf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = -current->tss.fprs[R1].fd;
+}
+
+static inline void _fixdfsi(int R1,int R2)
+{
+	current->tss.regs->gprs[R1] = (__u32) current->tss.fprs[R2].fd;
+}
+
+static inline void _extendsidf(int R1,int R2)
+{
+	current->tss.fprs[R1].fd = (double) current->tss.regs->gprs[R2];
+}
+
+
+static inline void _addsf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = current->tss.fprs[R1].ff +
+				   current->tss.fprs[R2].ff;
+}
+
+static inline void _subsf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = current->tss.fprs[R1].ff -
+				   current->tss.fprs[R2].ff;
+}
+
+static inline void _mulsf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = current->tss.fprs[R1].ff *
+				   current->tss.fprs[R2].ff;
+}
+
+static inline void _divsf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = current->tss.fprs[R1].ff /
+				   current->tss.fprs[R2].ff;
+}
+
+static inline void _negsf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = -current->tss.fprs[R1].ff;
+}
+
+static inline void _fixsfsi(int R1,int R2)
+{
+	current->tss.regs->gprs[R1] = (__u32) current->tss.fprs[R2].ff;
+}
+
+static inline void _extendsisf(int R1,int R2)
+{
+	current->tss.fprs[R1].ff = (double) current->tss.regs->gprs[R2];
+}
+
+
diff --git a/arch/s390x/kernel/init_task.c b/arch/s390x/kernel/init_task.c
new file mode 100644
index 000000000..74cf730b0
--- /dev/null
+++ b/arch/s390x/kernel/init_task.c
@@ -0,0 +1,32 @@
+/*
+ *  arch/s390/kernel/init_task.c
+ *
+ *  S390 version
+ *
+ *  Derived from "arch/i386/kernel/init_task.c"
+ */
+
+#include
+#include
+
+#include
+#include
+
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+/*
+ * Initial task structure.
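+ * (Editor's note: the 16384-byte figure matters because, on this port,
+ * the running task is recovered from the kernel stack pointer by
+ * masking, roughly
+ *	current = (struct task_struct *)(sp & ~16383UL);
+ * so the union holding the task_struct and its stack must be
+ * size-aligned for the mask to land on the structure.)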
+ * + * We need to make sure that this is 16384-byte aligned due to the + * way process stacks are handled. This is done by making sure + * the linker maps this in the .text segment right after head.S, + * and making head.S ensure the proper alignment. + * + * The things we do for performance.. + */ +union task_union init_task_union __attribute__((aligned(16384))) = + { INIT_TASK(init_task_union.task) }; diff --git a/arch/s390x/kernel/ioctl32.c b/arch/s390x/kernel/ioctl32.c new file mode 100644 index 000000000..fbc63aac2 --- /dev/null +++ b/arch/s390x/kernel/ioctl32.c @@ -0,0 +1,563 @@ +/* + * ioctl32.c: Conversion between 32bit and 64bit native ioctls. + * + * S390 version + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Gerhard Tonn (ton@de.ibm.com) + * + * Heavily inspired by the 32-bit Sparc compat code which is + * Copyright (C) 2000 Silicon Graphics, Inc. + * Written by Ulf Carlsson (ulfc@engr.sgi.com) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "linux32.h" + +long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); + +struct hd_geometry32 { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + __u32 start; +}; + +static inline int hd_geometry_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + struct hd_geometry32 *hg32 = (struct hd_geometry32 *) A(arg); + struct hd_geometry hg; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_ioctl (fd, cmd, (long)&hg); + set_fs (old_fs); + + if (ret) + return ret; + + ret = put_user (hg.heads, &(hg32->heads)); + ret |= __put_user (hg.sectors, &(hg32->sectors)); + ret |= __put_user (hg.cylinders, &(hg32->cylinders)); + ret |= __put_user (hg.start, &(hg32->start)); + + return ret; +} + +struct timeval32 { + int tv_sec; + int tv_usec; +}; + +#define EXT2_IOC32_GETFLAGS _IOR('f', 1, int) +#define EXT2_IOC32_SETFLAGS _IOW('f', 2, int) +#define EXT2_IOC32_GETVERSION _IOR('v', 1, int) +#define EXT2_IOC32_SETVERSION _IOW('v', 2, int) + +struct ifmap32 { + unsigned int mem_start; + unsigned int mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct ifreq32 { +#define IFHWADDRLEN 6 +#define IFNAMSIZ 16 + union { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap32 ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + char ifru_newname[IFNAMSIZ]; + __u32 ifru_data; + } ifr_ifru; +}; + +struct ifconf32 { + int ifc_len; /* size of buffer */ + __u32 ifcbuf; +}; + +static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + struct ireq32 *uir32 = (struct ireq32 *) A(arg); + struct net_device *dev; + struct ifreq32 ifr32; + + if (copy_from_user(&ifr32, uir32, sizeof(struct ifreq32))) + return -EFAULT; + + read_lock(&dev_base_lock); + dev = __dev_get_by_index(ifr32.ifr_ifindex); + if (!dev) { + read_unlock(&dev_base_lock); + return -ENODEV; + } + + strcpy(ifr32.ifr_name, dev->name); + read_unlock(&dev_base_lock); + + if (copy_to_user(uir32, &ifr32, sizeof(struct ifreq32))) + return -EFAULT; + + return 0; +} + +static inline int dev_ifconf(unsigned int fd, unsigned int cmd, + unsigned long arg) +{ + struct ioconf32 *uifc32 = (struct ioconf32 *) A(arg); + struct ifconf32 ifc32; + struct ifconf ifc; + struct ifreq32 *ifr32; + struct ifreq *ifr; + mm_segment_t old_fs; + int len; + int err; + + if (copy_from_user(&ifc32, uifc32, sizeof(struct ifconf32))) + return -EFAULT; + + if(ifc32.ifcbuf == 0) { + ifc32.ifc_len = 0; + ifc.ifc_len = 0; + ifc.ifc_buf = NULL; + } else { + ifc.ifc_len = ((ifc32.ifc_len / sizeof (struct ifreq32))) * + sizeof (struct ifreq); + ifc.ifc_buf = kmalloc (ifc.ifc_len, GFP_KERNEL); + if (!ifc.ifc_buf) + return -ENOMEM; + } + ifr = ifc.ifc_req; + ifr32 = (struct ifreq32 *) A(ifc32.ifcbuf); + len = ifc32.ifc_len / sizeof (struct ifreq32); + while (len--) { + if (copy_from_user(ifr++, ifr32++, sizeof (struct ifreq32))) { + err = -EFAULT; + goto out; + } + } + + old_fs = get_fs(); + set_fs (KERNEL_DS); + err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)&ifc); + set_fs (old_fs); + if (err) + goto out; + + ifr = ifc.ifc_req; + ifr32 = (struct ifreq32 *) A(ifc32.ifcbuf); + len = ifc.ifc_len / sizeof (struct ifreq); + ifc32.ifc_len = len * sizeof (struct ifreq32); + + while (len--) { + if (copy_to_user(ifr32++, ifr++, sizeof (struct ifreq32))) { + err = -EFAULT; + goto out; + } + } + + if (copy_to_user(uifc32, &ifc32, sizeof(struct ifconf32))) { + err = -EFAULT; + goto out; + } +out: + if(ifc.ifc_buf != NULL) + kfree (ifc.ifc_buf); + return err; +} + +static inline int dev_ifsioc(unsigned int fd, unsigned int cmd, + unsigned long arg) +{ + struct ifreq32 *uifr = (struct ifreq32 *) A(arg); + struct ifreq ifr; + mm_segment_t old_fs; + int err; + + switch (cmd) { + case SIOCSIFMAP: + err = copy_from_user(&ifr, uifr, sizeof(ifr.ifr_name)); + err |= __get_user(ifr.ifr_map.mem_start, &(uifr->ifr_ifru.ifru_map.mem_start)); + err |= __get_user(ifr.ifr_map.mem_end, &(uifr->ifr_ifru.ifru_map.mem_end)); + err |= __get_user(ifr.ifr_map.base_addr, &(uifr->ifr_ifru.ifru_map.base_addr)); + err |= __get_user(ifr.ifr_map.irq, &(uifr->ifr_ifru.ifru_map.irq)); + err |= __get_user(ifr.ifr_map.dma, &(uifr->ifr_ifru.ifru_map.dma)); + err |= __get_user(ifr.ifr_map.port, &(uifr->ifr_ifru.ifru_map.port)); + if (err) + return -EFAULT; + break; + default: + if (copy_from_user(&ifr, uifr, sizeof(struct ifreq32))) + return -EFAULT; + break; + } + old_fs = get_fs(); + set_fs (KERNEL_DS); + err = sys_ioctl (fd, cmd, (unsigned long)&ifr); + set_fs (old_fs); + if (!err) { + switch 
(cmd) { + case SIOCGIFFLAGS: + case SIOCGIFMETRIC: + case SIOCGIFMTU: + case SIOCGIFMEM: + case SIOCGIFHWADDR: + case SIOCGIFINDEX: + case SIOCGIFADDR: + case SIOCGIFBRDADDR: + case SIOCGIFDSTADDR: + case SIOCGIFNETMASK: + case SIOCGIFTXQLEN: + if (copy_to_user(uifr, &ifr, sizeof(struct ifreq32))) + return -EFAULT; + break; + case SIOCGIFMAP: + err = copy_to_user(uifr, &ifr, sizeof(ifr.ifr_name)); + err |= __put_user(ifr.ifr_map.mem_start, &(uifr->ifr_ifru.ifru_map.mem_start)); + err |= __put_user(ifr.ifr_map.mem_end, &(uifr->ifr_ifru.ifru_map.mem_end)); + err |= __put_user(ifr.ifr_map.base_addr, &(uifr->ifr_ifru.ifru_map.base_addr)); + err |= __put_user(ifr.ifr_map.irq, &(uifr->ifr_ifru.ifru_map.irq)); + err |= __put_user(ifr.ifr_map.dma, &(uifr->ifr_ifru.ifru_map.dma)); + err |= __put_user(ifr.ifr_map.port, &(uifr->ifr_ifru.ifru_map.port)); + if (err) + err = -EFAULT; + break; + } + } + return err; +} + +struct rtentry32 +{ + unsigned int rt_pad1; + struct sockaddr rt_dst; /* target address */ + struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ + struct sockaddr rt_genmask; /* target network mask (IP) */ + unsigned short rt_flags; + short rt_pad2; + unsigned int rt_pad3; + unsigned int rt_pad4; + short rt_metric; /* +1 for binary compatibility! */ + unsigned int rt_dev; /* forcing the device at add */ + unsigned int rt_mtu; /* per route MTU/Window */ +#ifndef __KERNEL__ +#define rt_mss rt_mtu /* Compatibility :-( */ +#endif + unsigned int rt_window; /* Window clamping */ + unsigned short rt_irtt; /* Initial RTT */ +}; + +static inline int routing_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + struct rtentry32 *ur = (struct rtentry32 *) A(arg); + struct rtentry r; + char devname[16]; + u32 rtdev; + int ret; + mm_segment_t old_fs = get_fs(); + + ret = copy_from_user (&r.rt_dst, &(ur->rt_dst), 3 * sizeof(struct sockaddr)); + ret |= __get_user (r.rt_flags, &(ur->rt_flags)); + ret |= __get_user (r.rt_metric, &(ur->rt_metric)); + ret |= __get_user (r.rt_mtu, &(ur->rt_mtu)); + ret |= __get_user (r.rt_window, &(ur->rt_window)); + ret |= __get_user (r.rt_irtt, &(ur->rt_irtt)); + ret |= __get_user (rtdev, &(ur->rt_dev)); + if (rtdev) { + ret |= copy_from_user (devname, (char *) A(rtdev), 15); + r.rt_dev = devname; devname[15] = 0; + } else + r.rt_dev = 0; + if (ret) + return -EFAULT; + set_fs (KERNEL_DS); + ret = sys_ioctl (fd, cmd, (long)&r); + set_fs (old_fs); + return ret; +} + +static int do_ext2_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + /* These are just misnamed, they actually get/put from/to user an int */ + switch (cmd) { + case EXT2_IOC32_GETFLAGS: cmd = EXT2_IOC_GETFLAGS; break; + case EXT2_IOC32_SETFLAGS: cmd = EXT2_IOC_SETFLAGS; break; + case EXT2_IOC32_GETVERSION: cmd = EXT2_IOC_GETVERSION; break; + case EXT2_IOC32_SETVERSION: cmd = EXT2_IOC_SETVERSION; break; + } + return sys_ioctl(fd, cmd, arg); +} + +static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + int err; + unsigned long val; + + set_fs (KERNEL_DS); + err = sys_ioctl(fd, cmd, (unsigned long)&val); + set_fs (old_fs); + if (!err && put_user((unsigned int) val, (u32 *)arg)) + return -EFAULT; + return err; +} + +struct ioctl32_handler { + unsigned int cmd; + int (*function)(unsigned int, unsigned int, unsigned long); +}; + +struct ioctl32_list { + struct ioctl32_handler handler; + struct ioctl32_list *next; +}; + +#define IOCTL32_DEFAULT(cmd) { { cmd, (void *) sys_ioctl }, 0 } +#define IOCTL32_HANDLER(cmd, handler) { { 
cmd, (void *) handler }, 0 } + +static struct ioctl32_list ioctl32_handler_table[] = { + IOCTL32_DEFAULT(FIBMAP), + IOCTL32_DEFAULT(FIGETBSZ), + + IOCTL32_DEFAULT(BIODASDDISABLE), + IOCTL32_DEFAULT(BIODASDENABLE), + IOCTL32_DEFAULT(BIODASDRSRV), + IOCTL32_DEFAULT(BIODASDRLSE), + IOCTL32_DEFAULT(BIODASDSLCK), + IOCTL32_DEFAULT(BIODASDRSID), + IOCTL32_DEFAULT(BIODASDFORMAT), + IOCTL32_DEFAULT(BIODASDRWTB), + + IOCTL32_DEFAULT(BLKRRPART), + + IOCTL32_HANDLER(HDIO_GETGEO, hd_geometry_ioctl), + + IOCTL32_DEFAULT(TCGETA), + IOCTL32_DEFAULT(TCSETA), + IOCTL32_DEFAULT(TCSETAW), + IOCTL32_DEFAULT(TCSETAF), + IOCTL32_DEFAULT(TCSBRK), + IOCTL32_DEFAULT(TCXONC), + IOCTL32_DEFAULT(TCFLSH), + IOCTL32_DEFAULT(TCGETS), + IOCTL32_DEFAULT(TCSETS), + IOCTL32_DEFAULT(TCSETSW), + IOCTL32_DEFAULT(TCSETSF), + IOCTL32_DEFAULT(TIOCLINUX), + + IOCTL32_DEFAULT(TIOCGETD), + IOCTL32_DEFAULT(TIOCSETD), + IOCTL32_DEFAULT(TIOCEXCL), + IOCTL32_DEFAULT(TIOCNXCL), + IOCTL32_DEFAULT(TIOCCONS), + IOCTL32_DEFAULT(TIOCGSOFTCAR), + IOCTL32_DEFAULT(TIOCSSOFTCAR), + IOCTL32_DEFAULT(TIOCSWINSZ), + IOCTL32_DEFAULT(TIOCGWINSZ), + IOCTL32_DEFAULT(TIOCMGET), + IOCTL32_DEFAULT(TIOCMBIC), + IOCTL32_DEFAULT(TIOCMBIS), + IOCTL32_DEFAULT(TIOCMSET), + IOCTL32_DEFAULT(TIOCPKT), + IOCTL32_DEFAULT(TIOCNOTTY), + IOCTL32_DEFAULT(TIOCSTI), + IOCTL32_DEFAULT(TIOCOUTQ), + IOCTL32_DEFAULT(TIOCSPGRP), + IOCTL32_DEFAULT(TIOCGPGRP), + IOCTL32_DEFAULT(TIOCSCTTY), + IOCTL32_DEFAULT(TIOCGPTN), + IOCTL32_DEFAULT(TIOCSPTLCK), + IOCTL32_DEFAULT(TIOCGSERIAL), + IOCTL32_DEFAULT(TIOCSSERIAL), + IOCTL32_DEFAULT(TIOCSERGETLSR), + + IOCTL32_DEFAULT(FIOCLEX), + IOCTL32_DEFAULT(FIONCLEX), + IOCTL32_DEFAULT(FIOASYNC), + IOCTL32_DEFAULT(FIONBIO), + IOCTL32_DEFAULT(FIONREAD), + + IOCTL32_DEFAULT(PIO_FONT), + IOCTL32_DEFAULT(GIO_FONT), + IOCTL32_DEFAULT(KDSIGACCEPT), + IOCTL32_DEFAULT(KDGETKEYCODE), + IOCTL32_DEFAULT(KDSETKEYCODE), + IOCTL32_DEFAULT(KIOCSOUND), + IOCTL32_DEFAULT(KDMKTONE), + IOCTL32_DEFAULT(KDGKBTYPE), + IOCTL32_DEFAULT(KDSETMODE), + IOCTL32_DEFAULT(KDGETMODE), + IOCTL32_DEFAULT(KDSKBMODE), + IOCTL32_DEFAULT(KDGKBMODE), + IOCTL32_DEFAULT(KDSKBMETA), + IOCTL32_DEFAULT(KDGKBMETA), + IOCTL32_DEFAULT(KDGKBENT), + IOCTL32_DEFAULT(KDSKBENT), + IOCTL32_DEFAULT(KDGKBSENT), + IOCTL32_DEFAULT(KDSKBSENT), + IOCTL32_DEFAULT(KDGKBDIACR), + IOCTL32_DEFAULT(KDSKBDIACR), + IOCTL32_DEFAULT(KDGKBLED), + IOCTL32_DEFAULT(KDSKBLED), + IOCTL32_DEFAULT(KDGETLED), + IOCTL32_DEFAULT(KDSETLED), + IOCTL32_DEFAULT(GIO_SCRNMAP), + IOCTL32_DEFAULT(PIO_SCRNMAP), + IOCTL32_DEFAULT(GIO_UNISCRNMAP), + IOCTL32_DEFAULT(PIO_UNISCRNMAP), + IOCTL32_DEFAULT(PIO_FONTRESET), + IOCTL32_DEFAULT(PIO_UNIMAPCLR), + + IOCTL32_DEFAULT(VT_SETMODE), + IOCTL32_DEFAULT(VT_GETMODE), + IOCTL32_DEFAULT(VT_GETSTATE), + IOCTL32_DEFAULT(VT_OPENQRY), + IOCTL32_DEFAULT(VT_ACTIVATE), + IOCTL32_DEFAULT(VT_WAITACTIVE), + IOCTL32_DEFAULT(VT_RELDISP), + IOCTL32_DEFAULT(VT_DISALLOCATE), + IOCTL32_DEFAULT(VT_RESIZE), + IOCTL32_DEFAULT(VT_RESIZEX), + IOCTL32_DEFAULT(VT_LOCKSWITCH), + IOCTL32_DEFAULT(VT_UNLOCKSWITCH), + + IOCTL32_HANDLER(SIOCGIFNAME, dev_ifname32), + IOCTL32_HANDLER(SIOCGIFCONF, dev_ifconf), + IOCTL32_HANDLER(SIOCGIFFLAGS, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFFLAGS, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFMETRIC, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFMETRIC, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFMTU, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFMTU, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFMEM, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFMEM, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFHWADDR, dev_ifsioc), + 
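/* Editor's note: the dev_ifsioc() entries in this table widen the
+	   31-bit struct ifreq32 into a native struct ifreq, issue the
+	   ioctl under set_fs(KERNEL_DS), and copy the result back. */
+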
IOCTL32_HANDLER(SIOCSIFHWADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCADDMULTI, dev_ifsioc), + IOCTL32_HANDLER(SIOCDELMULTI, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFINDEX, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFMAP, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFMAP, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFBRDADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFBRDADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFDSTADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFDSTADDR, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFNETMASK, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFNETMASK, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFPFLAGS, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFPFLAGS, dev_ifsioc), + IOCTL32_HANDLER(SIOCGIFTXQLEN, dev_ifsioc), + IOCTL32_HANDLER(SIOCSIFTXQLEN, dev_ifsioc), + IOCTL32_HANDLER(SIOCADDRT, routing_ioctl), + IOCTL32_HANDLER(SIOCDELRT, routing_ioctl), + + IOCTL32_HANDLER(EXT2_IOC32_GETFLAGS, do_ext2_ioctl), + IOCTL32_HANDLER(EXT2_IOC32_SETFLAGS, do_ext2_ioctl), + IOCTL32_HANDLER(EXT2_IOC32_GETVERSION, do_ext2_ioctl), + IOCTL32_HANDLER(EXT2_IOC32_SETVERSION, do_ext2_ioctl), + + IOCTL32_HANDLER(BLKGETSIZE, w_long) + +}; + +#define NR_IOCTL32_HANDLERS (sizeof(ioctl32_handler_table) / \ + sizeof(ioctl32_handler_table[0])) + +static struct ioctl32_list *ioctl32_hash_table[1024]; + +static inline int ioctl32_hash(unsigned int cmd) +{ + return ((cmd >> 6) ^ (cmd >> 4) ^ cmd) & 0x3ff; +} + +int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + int (*handler)(unsigned int, unsigned int, unsigned long, struct file * filp); + struct file *filp; + struct ioctl32_list *l; + int error; + + l = ioctl32_hash_table[ioctl32_hash(cmd)]; + + error = -EBADF; + + filp = fget(fd); + if (!filp) + return error; + + if (!filp->f_op || !filp->f_op->ioctl) { + error = sys_ioctl (fd, cmd, arg); + goto out; + } + + while (l && l->handler.cmd != cmd) + l = l->next; + + if (l) { + handler = (void *)l->handler.function; + error = handler(fd, cmd, arg, filp); + } else { + error = -EINVAL; + printk("unknown ioctl: %08x\n", cmd); + } +out: + fput(filp); + return error; +} + +static void ioctl32_insert(struct ioctl32_list *entry) +{ + int hash = ioctl32_hash(entry->handler.cmd); + if (!ioctl32_hash_table[hash]) + ioctl32_hash_table[hash] = entry; + else { + struct ioctl32_list *l; + l = ioctl32_hash_table[hash]; + while (l->next) + l = l->next; + l->next = entry; + entry->next = 0; + } +} + +static int __init init_ioctl32(void) +{ + int i; + for (i = 0; i < NR_IOCTL32_HANDLERS; i++) + ioctl32_insert(&ioctl32_handler_table[i]); + return 0; +} + +__initcall(init_ioctl32); diff --git a/arch/s390x/kernel/irq.c b/arch/s390x/kernel/irq.c new file mode 100644 index 000000000..5177a5ae8 --- /dev/null +++ b/arch/s390x/kernel/irq.c @@ -0,0 +1,423 @@ +/* + * arch/s390/kernel/irq.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Ingo Adlung (adlung@de.ibm.com) + * + * Derived from "arch/i386/kernel/irq.c" + * Copyright (C) 1992, 1999 Linus Torvalds, Ingo Molnar + * + * S/390 I/O interrupt processing and I/O request processing is + * implemented in arch/s390/kernel/s390io.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +void s390_init_IRQ ( void ); +void s390_free_irq ( unsigned int irq, void *dev_id); +int 
s390_request_irq( unsigned int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char *devname, + void *dev_id); + +#if 0 +/* + * The following vectors are part of the Linux architecture, there + * is no hardware IRQ pin equivalent for them, they are triggered + * through the ICC by us (IPIs), via smp_message_pass(): + */ +BUILD_SMP_INTERRUPT(reschedule_interrupt) +BUILD_SMP_INTERRUPT(invalidate_interrupt) +BUILD_SMP_INTERRUPT(stop_cpu_interrupt) +BUILD_SMP_INTERRUPT(mtrr_interrupt) +BUILD_SMP_INTERRUPT(spurious_interrupt) +#endif + +#if 0 +int get_irq_list(char *buf) +{ + int i, j; + struct irqaction * action; + char *p = buf; + + p += sprintf(p, " "); + + for (j=0; jirq_desc.action; + + if (!action) + continue; + + p += sprintf(p, "%3d: ",i); +#ifndef CONFIG_SMP + p += sprintf(p, "%10u ", kstat_irqs(i)); +#else + for (j=0; jirq_desc.handler->typename); + p += sprintf(p, " %s", action->name); + + for (action=action->next; action; action = action->next) + { + p += sprintf(p, ", %s", action->name); + + } /* endfor */ + + *p++ = '\n'; + + } /* endfor */ + + p += sprintf(p, "NMI: %10u\n", nmi_counter); +#ifdef CONFIG_SMP + p += sprintf(p, "IPI: %10u\n", atomic_read(&ipi_count)); +#endif + + return p - buf; +} +#endif + +/* + * Global interrupt locks for SMP. Allow interrupts to come in on any + * CPU, yet make cli/sti act globally to protect critical regions.. + */ +#ifdef CONFIG_SMP +atomic_t global_irq_holder = ATOMIC_INIT(NO_PROC_ID); +atomic_t global_irq_lock = ATOMIC_INIT(0); +atomic_t global_irq_count = ATOMIC_INIT(0); +atomic_t global_bh_count; + +/* + * "global_cli()" is a special case, in that it can hold the + * interrupts disabled for a longish time, and also because + * we may be doing TLB invalidates when holding the global + * IRQ lock for historical reasons. Thus we may need to check + * SMP invalidate events specially by hand here (but not in + * any normal spinlocks) + * + * Thankfully we don't need this as we can deliver flush tlbs with + * interrupts disabled DJB :-) + */ +#define check_smp_invalidate(cpu) + +static void show(char * str) +{ + int i; + unsigned long *stack; + int cpu = smp_processor_id(); + + printk("\n%s, CPU %d:\n", str, cpu); + printk("irq: %d [%d]\n", + atomic_read(&global_irq_count),local_irq_count(smp_processor_id())); + printk("bh: %d [%d]\n", + atomic_read(&global_bh_count),local_bh_count(smp_processor_id())); + stack = (unsigned long *) &str; + for (i = 40; i ; i--) { + unsigned long x = *++stack; + if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) { + printk("<[%08lx]> ", x); + } + } +} + +#define MAXCOUNT 100000000 + +static inline void wait_on_bh(void) +{ + int count = MAXCOUNT; + do { + if (!--count) { + show("wait_on_bh"); + count = ~0; + } + /* nothing .. wait for the other bh's to go away */ + } while (atomic_read(&global_bh_count) != 0); +} + +static inline void wait_on_irq(int cpu) +{ + int count = MAXCOUNT; + + for (;;) { + + /* + * Wait until all interrupts are gone. Wait + * for bottom half handlers unless we're + * already executing in one.. + */ + if (!atomic_read(&global_irq_count)) { + if (local_bh_count(cpu)|| + !atomic_read(&global_bh_count)) + break; + } + + /* Duh, we have to loop. 
Release the lock to avoid deadlocks */
+		atomic_set(&global_irq_lock, 0);
+
+		for (;;) {
+			if (!--count) {
+				show("wait_on_irq");
+				count = ~0;
+			}
+			__sti();
+			SYNC_OTHER_CORES(cpu);
+			__cli();
+			check_smp_invalidate(cpu);
+			if (atomic_read(&global_irq_count))
+				continue;
+			if (atomic_read(&global_irq_lock))
+				continue;
+			if (!local_bh_count(cpu)
+			    && atomic_read(&global_bh_count))
+				continue;
+			if (!atomic_compare_and_swap(0, 1, &global_irq_lock))
+				break;
+		}
+	}
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+	if (atomic_read(&global_bh_count) && !in_interrupt())
+		wait_on_bh();
+}
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+	if (atomic_read(&global_irq_count)) {
+		/* Stupid approach */
+		cli();
+		sti();
+	}
+}
+
+static inline void get_irqlock(int cpu)
+{
+	if (atomic_compare_and_swap(0,1,&global_irq_lock) != 0) {
+		/* do we already hold the lock? */
+		if ( cpu == atomic_read(&global_irq_holder))
+			return;
+		/* Uhhuh.. Somebody else got it. Wait.. */
+		do {
+			check_smp_invalidate(cpu);
+		} while (atomic_compare_and_swap(0,1,&global_irq_lock) != 0);
+	}
+	/*
+	 * We also need to make sure that nobody else is running
+	 * in an interrupt context.
+	 */
+	wait_on_irq(cpu);
+
+	/*
+	 * Ok, finally..
+	 */
+	atomic_set(&global_irq_holder,cpu);
+}
+
+#define EFLAGS_I_SHIFT 57
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+	unsigned long flags;
+
+	__save_flags(flags);
+	if (flags & (1UL << EFLAGS_I_SHIFT)) {
+		int cpu = smp_processor_id();
+		__cli();
+		if (!in_irq())
+			get_irqlock(cpu);
+	}
+}
+
+void __global_sti(void)
+{
+
+	if (!in_irq())
+		release_irqlock(smp_processor_id());
+	__sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+	int retval;
+	int local_enabled;
+	unsigned long flags;
+
+	__save_flags(flags);
+	local_enabled = (flags >> EFLAGS_I_SHIFT) & 1;
+	/* default to local */
+	retval = 2 + local_enabled;
+
+	/* check for global flags if we're not in an interrupt */
+	if (!in_irq())
+	{
+		if (local_enabled)
+			retval = 1;
+		if (atomic_read(&global_irq_holder) == smp_processor_id())
+			retval = 0;
+	}
+	return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+	switch (flags) {
+	case 0:
+		__global_cli();
+		break;
+	case 1:
+		__global_sti();
+		break;
+	case 2:
+		__cli();
+		break;
+	case 3:
+		__sti();
+		break;
+	default:
+		printk("global_restore_flags: %08lx (%08lx)\n",
+			flags, (&flags)[-1]);
+	}
+}
+
+#endif
+
+/*
+ * Note: This function should be eliminated as it doesn't comply with the
+ *       S/390 irq scheme we have implemented ...
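+ * (Editor's note: as the header of this file says, the real I/O
+ * interrupt processing lives in arch/s390/kernel/s390io.c; interrupt
+ * sources here are subchannels rather than interrupt lines, so the
+ * generic action-list walk below exists only for compatibility.)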
+ */ +int handle_IRQ_event(unsigned int irq, int cpu, struct pt_regs * regs) +{ + struct irqaction * action; + int status; + + status = 0; + + if ( ioinfo[irq] == INVALID_STORAGE_AREA ) + return( -ENODEV); + + action = ioinfo[irq]->irq_desc.action; + + if (action) + { + status |= 1; + + if (!(action->flags & SA_INTERRUPT)) + __sti(); + + do + { + status |= action->flags; + action->handler(irq, action->dev_id, regs); + action = action->next; + } while (action); + + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + __cli(); + + } /* endif */ + + return status; +} + +void enable_nop(int irq) +{ +} + +void __init init_IRQ(void) +{ + s390_init_IRQ(); +} + + +void free_irq(unsigned int irq, void *dev_id) +{ + s390_free_irq( irq, dev_id); +} + + +int request_irq( unsigned int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char *devname, + void *dev_id) +{ + return( s390_request_irq( irq, handler, irqflags, devname, dev_id ) ); + +} + +void init_irq_proc(void) +{ + /* For now, nothing... */ +} + diff --git a/arch/s390x/kernel/irqextras390.c b/arch/s390x/kernel/irqextras390.c new file mode 100644 index 000000000..e1e455813 --- /dev/null +++ b/arch/s390x/kernel/irqextras390.c @@ -0,0 +1,35 @@ +/* + * arch/s390/kernel/irqextras390.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * Some channel code by D.J. Barrow + */ + +/* + +*/ +#include +#include + +#if 0 +// fixchannelprogram is now obselete +void fixchannelprogram(orb_bits_t *orbptr) +{ + __u32 newAddress=orbptr->ccw_program_address; + fixccws(orbptr->ccw_program_address); + orbptr->ccw_program_address=newAddress; + orbptr->ccw_program_address=(ccw1_t *)(((__u32)orbptr->ccw_program_address)); +} +#endif + +void fixccws(ccw1_bits_t *ccwptr) +{ + for(;;ccwptr++) + { // Just hope nobody starts doing prefixing + if(!ccwptr->cc) + break; + } +} diff --git a/arch/s390x/kernel/linux32.c b/arch/s390x/kernel/linux32.c new file mode 100644 index 000000000..668ac46c4 --- /dev/null +++ b/arch/s390x/kernel/linux32.c @@ -0,0 +1,4326 @@ +/* + * arch/s390x/kernel/linux32.c + * + * S390 version + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Gerhard Tonn (ton@de.ibm.com) + * + * Conversion between 31bit and 64bit native syscalls. + * + * Heavily inspired by the 32-bit Sparc compat code which is + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997 David S. 
Miller (davem@caip.rutgers.edu) + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "linux32.h" + +extern asmlinkage long sys_chown(const char *, uid_t,gid_t); +extern asmlinkage long sys_lchown(const char *, uid_t,gid_t); +extern asmlinkage long sys_fchown(unsigned int, uid_t,gid_t); +extern asmlinkage long sys_setregid(gid_t, gid_t); +extern asmlinkage long sys_setgid(gid_t); +extern asmlinkage long sys_setreuid(uid_t, uid_t); +extern asmlinkage long sys_setuid(uid_t); +extern asmlinkage long sys_setresuid(uid_t, uid_t, uid_t); +extern asmlinkage long sys_setresgid(gid_t, gid_t, gid_t); +extern asmlinkage long sys_setfsuid(uid_t); +extern asmlinkage long sys_setfsgid(gid_t); + +/* For this source file, we want overflow handling. */ + +#undef high2lowuid +#undef high2lowgid +#undef low2highuid +#undef low2highgid +#undef SET_UID16 +#undef SET_GID16 +#undef NEW_TO_OLD_UID +#undef NEW_TO_OLD_GID +#undef SET_OLDSTAT_UID +#undef SET_OLDSTAT_GID +#undef SET_STAT_UID +#undef SET_STAT_GID + +#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid) +#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid) +#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid) +#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid) +#define SET_UID16(var, uid) var = high2lowuid(uid) +#define SET_GID16(var, gid) var = high2lowgid(gid) +#define NEW_TO_OLD_UID(uid) high2lowuid(uid) +#define NEW_TO_OLD_GID(gid) high2lowgid(gid) +#define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) +#define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) +#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) +#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) + +asmlinkage long sys32_chown16(const char * filename, u16 user, u16 group) +{ + return sys_chown(filename, low2highuid(user), low2highgid(group)); +} + +asmlinkage long sys32_lchown16(const char * filename, u16 user, u16 group) +{ + return sys_lchown(filename, low2highuid(user), low2highgid(group)); +} + +asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group) +{ + return sys_fchown(fd, low2highuid(user), low2highgid(group)); +} + +asmlinkage long sys32_setregid16(u16 rgid, u16 egid) +{ + return sys_setregid(low2highgid(rgid), low2highgid(egid)); +} + +asmlinkage long sys32_setgid16(u16 gid) +{ + return sys_setgid((gid_t)gid); +} + +asmlinkage long sys32_setreuid16(u16 ruid, u16 euid) +{ + return sys_setreuid(low2highuid(ruid), low2highuid(euid)); +} + +asmlinkage long sys32_setuid16(u16 uid) +{ + return sys_setuid((uid_t)uid); +} + +asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid) +{ + return sys_setresuid(low2highuid(ruid), low2highuid(euid), + low2highuid(suid)); +} + +asmlinkage long sys32_getresuid16(u16 *ruid, u16 *euid, u16 *suid) +{ + int retval; + + if (!(retval = put_user(high2lowuid(current->uid), ruid)) && + !(retval = put_user(high2lowuid(current->euid), euid))) + retval = put_user(high2lowuid(current->suid), suid); + + return retval; +} + +asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, 
u16 sgid) +{ + return sys_setresgid(low2highgid(rgid), low2highgid(egid), + low2highgid(sgid)); +} + +asmlinkage long sys32_getresgid16(u16 *rgid, u16 *egid, u16 *sgid) +{ + int retval; + + if (!(retval = put_user(high2lowgid(current->gid), rgid)) && + !(retval = put_user(high2lowgid(current->egid), egid))) + retval = put_user(high2lowgid(current->sgid), sgid); + + return retval; +} + +asmlinkage long sys32_setfsuid16(u16 uid) +{ + return sys_setfsuid((uid_t)uid); +} + +asmlinkage long sys32_setfsgid16(u16 gid) +{ + return sys_setfsgid((gid_t)gid); +} + +asmlinkage long sys32_getgroups16(int gidsetsize, u16 *grouplist) +{ + u16 groups[NGROUPS]; + int i,j; + + if (gidsetsize < 0) + return -EINVAL; + i = current->ngroups; + if (gidsetsize) { + if (i > gidsetsize) + return -EINVAL; + for(j=0;j<i;j++) + groups[j] = current->groups[j]; + if (copy_to_user(grouplist, groups, sizeof(u16)*i)) + return -EFAULT; + } + return i; +} + +asmlinkage long sys32_setgroups16(int gidsetsize, u16 *grouplist) +{ + u16 groups[NGROUPS]; + int i; + + if (!capable(CAP_SETGID)) + return -EPERM; + if ((unsigned) gidsetsize > NGROUPS) + return -EINVAL; + if (copy_from_user(groups, grouplist, gidsetsize * sizeof(u16))) + return -EFAULT; + for (i = 0 ; i < gidsetsize ; i++) + current->groups[i] = (gid_t)groups[i]; + current->ngroups = gidsetsize; + return 0; +} + +asmlinkage long sys32_getuid16(void) +{ + return high2lowuid(current->uid); +} + +asmlinkage long sys32_geteuid16(void) +{ + return high2lowuid(current->euid); +} + +asmlinkage long sys32_getgid16(void) +{ + return high2lowgid(current->gid); +} + +asmlinkage long sys32_getegid16(void) +{ + return high2lowgid(current->egid); +} + +/* 32-bit timeval and related flotsam. */ + +struct timeval32 +{ + int tv_sec, tv_usec; +}; + +struct itimerval32 +{ + struct timeval32 it_interval; + struct timeval32 it_value; +}; + +static inline long get_tv32(struct timeval *o, struct timeval32 *i) +{ + return (!access_ok(VERIFY_READ, i, sizeof(*i)) || + (__get_user(o->tv_sec, &i->tv_sec) | + __get_user(o->tv_usec, &i->tv_usec))); +} + +static inline long put_tv32(struct timeval32 *o, struct timeval *i) +{ + return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || + (__put_user(i->tv_sec, &o->tv_sec) | + __put_user(i->tv_usec, &o->tv_usec))); +} + +static inline long get_it32(struct itimerval *o, struct itimerval32 *i) +{ + return (!access_ok(VERIFY_READ, i, sizeof(*i)) || + (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | + __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | + __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | + __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); +} + +static inline long put_it32(struct itimerval32 *o, struct itimerval *i) +{ + return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || + (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | + __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | + __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | + __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); +} + +struct msgbuf32 { s32 mtype; char mtext[1]; }; + +struct ipc_perm32 +{ + key_t key; + __kernel_uid_t32 uid; + __kernel_gid_t32 gid; + __kernel_uid_t32 cuid; + __kernel_gid_t32 cgid; + __kernel_mode_t32 mode; + unsigned short seq; +}; + +struct semid_ds32 { + struct ipc_perm32 sem_perm; /* permissions ..
see ipc.h */ + __kernel_time_t32 sem_otime; /* last semop time */ + __kernel_time_t32 sem_ctime; /* last change time */ + u32 sem_base; /* ptr to first semaphore in array */ + u32 sem_pending; /* pending operations to be processed */ + u32 sem_pending_last; /* last pending operation */ + u32 undo; /* undo requests on this array */ + unsigned short sem_nsems; /* no. of semaphores in array */ +}; + +struct semid64_ds32 { + struct ipc64_perm sem_perm; /* this structure is the same on sparc32 and sparc64 */ + unsigned int __pad1; + __kernel_time_t32 sem_otime; + unsigned int __pad2; + __kernel_time_t32 sem_ctime; + u32 sem_nsems; + u32 __unused1; + u32 __unused2; +}; + +struct msqid_ds32 +{ + struct ipc_perm32 msg_perm; + u32 msg_first; + u32 msg_last; + __kernel_time_t32 msg_stime; + __kernel_time_t32 msg_rtime; + __kernel_time_t32 msg_ctime; + u32 wwait; + u32 rwait; + unsigned short msg_cbytes; + unsigned short msg_qnum; + unsigned short msg_qbytes; + __kernel_ipc_pid_t32 msg_lspid; + __kernel_ipc_pid_t32 msg_lrpid; +}; + +struct msqid64_ds32 { + struct ipc64_perm msg_perm; + unsigned int __pad1; + __kernel_time_t32 msg_stime; + unsigned int __pad2; + __kernel_time_t32 msg_rtime; + unsigned int __pad3; + __kernel_time_t32 msg_ctime; + unsigned int msg_cbytes; + unsigned int msg_qnum; + unsigned int msg_qbytes; + __kernel_pid_t32 msg_lspid; + __kernel_pid_t32 msg_lrpid; + unsigned int __unused1; + unsigned int __unused2; +}; + + +struct shmid_ds32 { + struct ipc_perm32 shm_perm; + int shm_segsz; + __kernel_time_t32 shm_atime; + __kernel_time_t32 shm_dtime; + __kernel_time_t32 shm_ctime; + __kernel_ipc_pid_t32 shm_cpid; + __kernel_ipc_pid_t32 shm_lpid; + unsigned short shm_nattch; +}; + +struct shmid64_ds32 { + struct ipc64_perm shm_perm; + unsigned int __pad1; + __kernel_time_t32 shm_atime; + unsigned int __pad2; + __kernel_time_t32 shm_dtime; + unsigned int __pad3; + __kernel_time_t32 shm_ctime; + __kernel_size_t32 shm_segsz; + __kernel_pid_t32 shm_cpid; + __kernel_pid_t32 shm_lpid; + unsigned int shm_nattch; + unsigned int __unused1; + unsigned int __unused2; +}; + + +/* + * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.. + * + * This is really horribly ugly. 
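+ *
+ * Editorially condensed, nearly every handler below follows one
+ * idiom (foo32/foo/sys_foo are placeholders, not real symbols):
+ *
+ *	struct foo k;				// native 64-bit layout
+ *	mm_segment_t old_fs = get_fs();
+ *
+ *	get_user(k.field, &u32p->field);	// widen on the way in
+ *	set_fs(KERNEL_DS);			// so sys_foo() will accept
+ *	err = sys_foo(&k);			// a kernel-space pointer
+ *	set_fs(old_fs);
+ *	put_user(k.field, &u32p->field);	// narrow on the way out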
+ */ +#define IPCOP_MASK(__x) (1UL << (__x)) +static int do_sys32_semctl(int first, int second, int third, void *uptr) +{ + union semun fourth; + u32 pad; + int err = -EINVAL; + + if (!uptr) + goto out; + err = -EFAULT; + if (get_user (pad, (u32 *)uptr)) + goto out; + if(third == SETVAL) + fourth.val = (int)pad; + else + fourth.__pad = (void *)A(pad); + if (IPCOP_MASK (third) & + (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (SEM_INFO) | IPCOP_MASK (GETVAL) | + IPCOP_MASK (GETPID) | IPCOP_MASK (GETNCNT) | IPCOP_MASK (GETZCNT) | + IPCOP_MASK (GETALL) | IPCOP_MASK (SETALL) | IPCOP_MASK (IPC_RMID))) { + err = sys_semctl (first, second, third, fourth); + } else if (third & IPC_64) { + struct semid64_ds s; + struct semid64_ds32 *usp = (struct semid64_ds32 *)A(pad); + mm_segment_t old_fs; + int need_back_translation; + + if (third == (IPC_SET|IPC_64)) { + err = get_user (s.sem_perm.uid, &usp->sem_perm.uid); + err |= __get_user (s.sem_perm.gid, &usp->sem_perm.gid); + err |= __get_user (s.sem_perm.mode, &usp->sem_perm.mode); + if (err) + goto out; + fourth.__pad = &s; + } + need_back_translation = + (IPCOP_MASK (third) & + (IPCOP_MASK (SEM_STAT) | IPCOP_MASK (IPC_STAT))) != 0; + if (need_back_translation) + fourth.__pad = &s; + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_semctl (first, second, third, fourth); + set_fs (old_fs); + if (need_back_translation) { + int err2 = put_user (s.sem_perm.key, &usp->sem_perm.key); + err2 |= __put_user (high2lowuid(s.sem_perm.uid), &usp->sem_perm.uid); + err2 |= __put_user (high2lowgid(s.sem_perm.gid), &usp->sem_perm.gid); + err2 |= __put_user (high2lowuid(s.sem_perm.cuid), &usp->sem_perm.cuid); + err2 |= __put_user (high2lowgid(s.sem_perm.cgid), &usp->sem_perm.cgid); + err2 |= __put_user (s.sem_perm.mode, &usp->sem_perm.mode); + err2 |= __put_user (s.sem_perm.seq, &usp->sem_perm.seq); + err2 |= __put_user (s.sem_otime, &usp->sem_otime); + err2 |= __put_user (s.sem_ctime, &usp->sem_ctime); + err2 |= __put_user (s.sem_nsems, &usp->sem_nsems); + if (err2) err = -EFAULT; + } + } else { + struct semid_ds s; + struct semid_ds32 *usp = (struct semid_ds32 *)A(pad); + mm_segment_t old_fs; + int need_back_translation; + + if (third == IPC_SET) { + err = get_user (s.sem_perm.uid, &usp->sem_perm.uid); + err |= __get_user (s.sem_perm.gid, &usp->sem_perm.gid); + err |= __get_user (s.sem_perm.mode, &usp->sem_perm.mode); + if (err) + goto out; + fourth.__pad = &s; + } + need_back_translation = + (IPCOP_MASK (third) & + (IPCOP_MASK (SEM_STAT) | IPCOP_MASK (IPC_STAT))) != 0; + if (need_back_translation) + fourth.__pad = &s; + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_semctl (first, second, third, fourth); + set_fs (old_fs); + if (need_back_translation) { + int err2 = put_user (s.sem_perm.key, &usp->sem_perm.key); + err2 |= __put_user (high2lowuid(s.sem_perm.uid), &usp->sem_perm.uid); + err2 |= __put_user (high2lowgid(s.sem_perm.gid), &usp->sem_perm.gid); + err2 |= __put_user (high2lowuid(s.sem_perm.cuid), &usp->sem_perm.cuid); + err2 |= __put_user (high2lowgid(s.sem_perm.cgid), &usp->sem_perm.cgid); + err2 |= __put_user (s.sem_perm.mode, &usp->sem_perm.mode); + err2 |= __put_user (s.sem_perm.seq, &usp->sem_perm.seq); + err2 |= __put_user (s.sem_otime, &usp->sem_otime); + err2 |= __put_user (s.sem_ctime, &usp->sem_ctime); + err2 |= __put_user (s.sem_nsems, &usp->sem_nsems); + if (err2) err = -EFAULT; + } + } +out: + return err; +} + +static int do_sys32_msgsnd (int first, int second, int third, void *uptr) +{ + struct msgbuf *p = kmalloc (second + sizeof (struct 
msgbuf) + 4, GFP_USER); + struct msgbuf32 *up = (struct msgbuf32 *)uptr; + mm_segment_t old_fs; + int err; + + if (!p) + return -ENOMEM; + err = get_user (p->mtype, &up->mtype); + err |= __copy_from_user (p->mtext, &up->mtext, second); + if (err) + goto out; + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_msgsnd (first, p, second, third); + set_fs (old_fs); +out: + kfree (p); + return err; +} + +static int do_sys32_msgrcv (int first, int second, int msgtyp, int third, + int version, void *uptr) +{ + struct msgbuf32 *up; + struct msgbuf *p; + mm_segment_t old_fs; + int err; + + if (!version) { + struct ipc_kludge_32 *uipck = (struct ipc_kludge_32 *)uptr; + struct ipc_kludge_32 ipck; + + err = -EINVAL; + if (!uptr) + goto out; + err = -EFAULT; + if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge_32))) + goto out; + uptr = (void *)A(ipck.msgp); + msgtyp = ipck.msgtyp; + } + err = -ENOMEM; + p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER); + if (!p) + goto out; + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_msgrcv (first, p, second + 4, msgtyp, third); + set_fs (old_fs); + if (err < 0) + goto free_then_out; + up = (struct msgbuf32 *)uptr; + if (put_user (p->mtype, &up->mtype) || + __copy_to_user (&up->mtext, p->mtext, err)) + err = -EFAULT; +free_then_out: + kfree (p); +out: + return err; +} + +static int do_sys32_msgctl (int first, int second, void *uptr) +{ + int err; + + if (IPCOP_MASK (second) & + (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (MSG_INFO) | + IPCOP_MASK (IPC_RMID))) { + err = sys_msgctl (first, second, (struct msqid_ds *)uptr); + } else if (second & IPC_64) { + struct msqid64_ds m; + struct msqid64_ds32 *up = (struct msqid64_ds32 *)uptr; + mm_segment_t old_fs; + + if (second == (IPC_SET|IPC_64)) { + err = get_user (m.msg_perm.uid, &up->msg_perm.uid); + err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid); + err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode); + err |= __get_user (m.msg_qbytes, &up->msg_qbytes); + if (err) + goto out; + } + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_msgctl (first, second, (struct msqid_ds *)&m); + set_fs (old_fs); + if (IPCOP_MASK (second) & + (IPCOP_MASK (MSG_STAT) | IPCOP_MASK (IPC_STAT))) { + int err2 = put_user (m.msg_perm.key, &up->msg_perm.key); + err2 |= __put_user (high2lowuid(m.msg_perm.uid), &up->msg_perm.uid); + err2 |= __put_user (high2lowgid(m.msg_perm.gid), &up->msg_perm.gid); + err2 |= __put_user (high2lowuid(m.msg_perm.cuid), &up->msg_perm.cuid); + err2 |= __put_user (high2lowgid(m.msg_perm.cgid), &up->msg_perm.cgid); + err2 |= __put_user (m.msg_perm.mode, &up->msg_perm.mode); + err2 |= __put_user (m.msg_perm.seq, &up->msg_perm.seq); + err2 |= __put_user (m.msg_stime, &up->msg_stime); + err2 |= __put_user (m.msg_rtime, &up->msg_rtime); + err2 |= __put_user (m.msg_ctime, &up->msg_ctime); + err2 |= __put_user (m.msg_cbytes, &up->msg_cbytes); + err2 |= __put_user (m.msg_qnum, &up->msg_qnum); + err2 |= __put_user (m.msg_qbytes, &up->msg_qbytes); + err2 |= __put_user (m.msg_lspid, &up->msg_lspid); + err2 |= __put_user (m.msg_lrpid, &up->msg_lrpid); + if (err2) + err = -EFAULT; + } + } else { + struct msqid_ds m; + struct msqid_ds32 *up = (struct msqid_ds32 *)uptr; + mm_segment_t old_fs; + + if (second == IPC_SET) { + err = get_user (m.msg_perm.uid, &up->msg_perm.uid); + err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid); + err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode); + err |= __get_user (m.msg_qbytes, &up->msg_qbytes); + if (err) + goto out; + } + old_fs = get_fs (); + 
set_fs (KERNEL_DS); + err = sys_msgctl (first, second, &m); + set_fs (old_fs); + if (IPCOP_MASK (second) & + (IPCOP_MASK (MSG_STAT) | IPCOP_MASK (IPC_STAT))) { + int err2 = put_user (m.msg_perm.key, &up->msg_perm.key); + err2 |= __put_user (high2lowuid(m.msg_perm.uid), &up->msg_perm.uid); + err2 |= __put_user (high2lowgid(m.msg_perm.gid), &up->msg_perm.gid); + err2 |= __put_user (high2lowuid(m.msg_perm.cuid), &up->msg_perm.cuid); + err2 |= __put_user (high2lowgid(m.msg_perm.cgid), &up->msg_perm.cgid); + err2 |= __put_user (m.msg_perm.mode, &up->msg_perm.mode); + err2 |= __put_user (m.msg_perm.seq, &up->msg_perm.seq); + err2 |= __put_user (m.msg_stime, &up->msg_stime); + err2 |= __put_user (m.msg_rtime, &up->msg_rtime); + err2 |= __put_user (m.msg_ctime, &up->msg_ctime); + err2 |= __put_user (m.msg_cbytes, &up->msg_cbytes); + err2 |= __put_user (m.msg_qnum, &up->msg_qnum); + err2 |= __put_user (m.msg_qbytes, &up->msg_qbytes); + err2 |= __put_user (m.msg_lspid, &up->msg_lspid); + err2 |= __put_user (m.msg_lrpid, &up->msg_lrpid); + if (err2) + err = -EFAULT; + } + } + +out: + return err; +} + +static int do_sys32_shmat (int first, int second, int third, int version, void *uptr) +{ + unsigned long raddr; + u32 *uaddr = (u32 *)A((u32)third); + int err = -EINVAL; + + if (version == 1) + goto out; + err = sys_shmat (first, uptr, second, &raddr); + if (err) + goto out; + err = put_user (raddr, uaddr); +out: + return err; +} + +static int do_sys32_shmctl (int first, int second, void *uptr) +{ + int err; + + if (IPCOP_MASK (second) & + (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (SHM_LOCK) | IPCOP_MASK (SHM_UNLOCK) | + IPCOP_MASK (IPC_RMID))) { + if (second == (IPC_INFO|IPC_64)) + second = IPC_INFO; /* So that we don't have to translate it */ + err = sys_shmctl (first, second, (struct shmid_ds *)uptr); + } else if ((second & IPC_64) && second != (SHM_INFO|IPC_64)) { + struct shmid64_ds s; + struct shmid64_ds32 *up = (struct shmid64_ds32 *)uptr; + mm_segment_t old_fs; + + if (second == (IPC_SET|IPC_64)) { + err = get_user (s.shm_perm.uid, &up->shm_perm.uid); + err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid); + err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode); + if (err) + goto out; + } + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_shmctl (first, second, (struct shmid_ds *)&s); + set_fs (old_fs); + if (err < 0) + goto out; + + /* Mask it even in this case so it becomes a CSE. 
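+ * With IPCOP_MASK(x) == (1UL << (x)), a whole set of commands is
+ * tested with one AND against a compile-time constant, e.g.
+ *
+ *	if (IPCOP_MASK (second) &
+ *	    (IPCOP_MASK (SHM_STAT) | IPCOP_MASK (IPC_STAT)))
+ *
+ * and the compiler can share (CSE) the shifted mask across branches.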
*/ + if (IPCOP_MASK (second) & + (IPCOP_MASK (SHM_STAT) | IPCOP_MASK (IPC_STAT))) { + int err2 = put_user (s.shm_perm.key, &up->shm_perm.key); + err2 |= __put_user (high2lowuid(s.shm_perm.uid), &up->shm_perm.uid); + err2 |= __put_user (high2lowgid(s.shm_perm.gid), &up->shm_perm.gid); + err2 |= __put_user (high2lowuid(s.shm_perm.cuid), &up->shm_perm.cuid); + err2 |= __put_user (high2lowgid(s.shm_perm.cgid), &up->shm_perm.cgid); + err2 |= __put_user (s.shm_perm.mode, &up->shm_perm.mode); + err2 |= __put_user (s.shm_perm.seq, &up->shm_perm.seq); + err2 |= __put_user (s.shm_atime, &up->shm_atime); + err2 |= __put_user (s.shm_dtime, &up->shm_dtime); + err2 |= __put_user (s.shm_ctime, &up->shm_ctime); + err2 |= __put_user (s.shm_segsz, &up->shm_segsz); + err2 |= __put_user (s.shm_nattch, &up->shm_nattch); + err2 |= __put_user (s.shm_cpid, &up->shm_cpid); + err2 |= __put_user (s.shm_lpid, &up->shm_lpid); + if (err2) + err = -EFAULT; + } + } else { + struct shmid_ds s; + struct shmid_ds32 *up = (struct shmid_ds32 *)uptr; + mm_segment_t old_fs; + + second &= ~IPC_64; + if (second == IPC_SET) { + err = get_user (s.shm_perm.uid, &up->shm_perm.uid); + err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid); + err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode); + if (err) + goto out; + } + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_shmctl (first, second, &s); + set_fs (old_fs); + if (err < 0) + goto out; + + /* Mask it even in this case so it becomes a CSE. */ + if (second == SHM_INFO) { + struct shm_info32 { + int used_ids; + u32 shm_tot, shm_rss, shm_swp; + u32 swap_attempts, swap_successes; + } *uip = (struct shm_info32 *)uptr; + struct shm_info *kp = (struct shm_info *)&s; + int err2 = put_user (kp->used_ids, &uip->used_ids); + err2 |= __put_user (kp->shm_tot, &uip->shm_tot); + err2 |= __put_user (kp->shm_rss, &uip->shm_rss); + err2 |= __put_user (kp->shm_swp, &uip->shm_swp); + err2 |= __put_user (kp->swap_attempts, &uip->swap_attempts); + err2 |= __put_user (kp->swap_successes, &uip->swap_successes); + if (err2) + err = -EFAULT; + } else if (IPCOP_MASK (second) & + (IPCOP_MASK (SHM_STAT) | IPCOP_MASK (IPC_STAT))) { + int err2 = put_user (s.shm_perm.key, &up->shm_perm.key); + err2 |= __put_user (high2lowuid(s.shm_perm.uid), &up->shm_perm.uid); + err2 |= __put_user (high2lowgid(s.shm_perm.gid), &up->shm_perm.gid); + err2 |= __put_user (high2lowuid(s.shm_perm.cuid), &up->shm_perm.cuid); + err2 |= __put_user (high2lowgid(s.shm_perm.cgid), &up->shm_perm.cgid); + err2 |= __put_user (s.shm_perm.mode, &up->shm_perm.mode); + err2 |= __put_user (s.shm_perm.seq, &up->shm_perm.seq); + err2 |= __put_user (s.shm_atime, &up->shm_atime); + err2 |= __put_user (s.shm_dtime, &up->shm_dtime); + err2 |= __put_user (s.shm_ctime, &up->shm_ctime); + err2 |= __put_user (s.shm_segsz, &up->shm_segsz); + err2 |= __put_user (s.shm_nattch, &up->shm_nattch); + err2 |= __put_user (s.shm_cpid, &up->shm_cpid); + err2 |= __put_user (s.shm_lpid, &up->shm_lpid); + if (err2) + err = -EFAULT; + } + } +out: + return err; +} + +asmlinkage int sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) +{ + int version, err; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + if(version) + return -EINVAL; + + if (call <= SEMCTL) + switch (call) { + case SEMOP: + /* struct sembuf is the same on 32 and 64bit :)) */ + err = sys_semop (first, (struct sembuf *)AA(ptr), second); + goto out; + case SEMGET: + err = sys_semget (first, second, third); + goto out; + case SEMCTL: + err = 
do_sys32_semctl (first, second, third, (void *)AA(ptr)); + goto out; + default: + err = -EINVAL; + goto out; + }; + if (call <= MSGCTL) + switch (call) { + case MSGSND: + err = do_sys32_msgsnd (first, second, third, (void *)AA(ptr)); + goto out; + case MSGRCV: + err = do_sys32_msgrcv (first, second, 0, third, + version, (void *)AA(ptr)); + goto out; + case MSGGET: + err = sys_msgget ((key_t) first, second); + goto out; + case MSGCTL: + err = do_sys32_msgctl (first, second, (void *)AA(ptr)); + goto out; + default: + err = -EINVAL; + goto out; + } + if (call <= SHMCTL) + switch (call) { + case SHMAT: + err = do_sys32_shmat (first, second, third, + version, (void *)AA(ptr)); + goto out; + case SHMDT: + err = sys_shmdt ((char *)AA(ptr)); + goto out; + case SHMGET: + err = sys_shmget (first, second, third); + goto out; + case SHMCTL: + err = do_sys32_shmctl (first, second, (void *)AA(ptr)); + goto out; + default: + err = -EINVAL; + goto out; + } + + err = -EINVAL; + +out: + return err; +} + +static inline int get_flock(struct flock *kfl, struct flock32 *ufl) +{ + int err; + + err = get_user(kfl->l_type, &ufl->l_type); + err |= __get_user(kfl->l_whence, &ufl->l_whence); + err |= __get_user(kfl->l_start, &ufl->l_start); + err |= __get_user(kfl->l_len, &ufl->l_len); + err |= __get_user(kfl->l_pid, &ufl->l_pid); + return err; +} + +static inline int put_flock(struct flock *kfl, struct flock32 *ufl) +{ + int err; + + err = __put_user(kfl->l_type, &ufl->l_type); + err |= __put_user(kfl->l_whence, &ufl->l_whence); + err |= __put_user(kfl->l_start, &ufl->l_start); + err |= __put_user(kfl->l_len, &ufl->l_len); + err |= __put_user(kfl->l_pid, &ufl->l_pid); + return err; +} + +extern asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); + +asmlinkage long sys32_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case F_GETLK: + case F_SETLK: + case F_SETLKW: + { + struct flock f; + mm_segment_t old_fs; + long ret; + + if(get_flock(&f, (struct flock32 *)arg)) + return -EFAULT; + old_fs = get_fs(); set_fs (KERNEL_DS); + ret = sys_fcntl(fd, cmd, (unsigned long)&f); + set_fs (old_fs); + if (ret) return ret; + if (f.l_start >= 0x7fffffffUL || + f.l_len >= 0x7fffffffUL || + f.l_start + f.l_len >= 0x7fffffffUL) + return -EOVERFLOW; + if(put_flock(&f, (struct flock32 *)arg)) + return -EFAULT; + return 0; + } + default: + return sys_fcntl(fd, cmd, (unsigned long)arg); + } +} + +asmlinkage long sys32_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + if (cmd >= F_GETLK64 && cmd <= F_SETLKW64) + return sys_fcntl(fd, cmd + F_GETLK - F_GETLK64, arg); + return sys32_fcntl(fd, cmd, arg); +} + +struct dqblk32 { + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u32 dqb_curblocks; + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; + +extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr); + +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) +{ + int cmds = cmd >> SUBCMDSHIFT; + int err; + struct mem_dqblk d; + mm_segment_t old_fs; + char *spec; + + switch (cmds) { + case Q_GETQUOTA: + break; + case Q_SETQUOTA: + case Q_SETUSE: + case Q_SETQLIM: + if (copy_from_user (&d, (struct dqblk32 *)addr, + sizeof (struct dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; + break; + default: + return sys_quotactl(cmd, special, + 
id, (caddr_t)addr); + } + spec = getname (special); + err = PTR_ERR(spec); + if (IS_ERR(spec)) return err; + old_fs = get_fs (); + set_fs (KERNEL_DS); + err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); + set_fs (old_fs); + putname (spec); + if (cmds == Q_GETQUOTA) { + __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; + ((struct dqblk32 *)&d)->dqb_itime = i; + ((struct dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user ((struct dqblk32 *)addr, &d, + sizeof (struct dqblk32))) + return -EFAULT; + } + return err; +} + +static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) +{ + int err; + + err = put_user (kbuf->f_type, &ubuf->f_type); + err |= __put_user (kbuf->f_bsize, &ubuf->f_bsize); + err |= __put_user (kbuf->f_blocks, &ubuf->f_blocks); + err |= __put_user (kbuf->f_bfree, &ubuf->f_bfree); + err |= __put_user (kbuf->f_bavail, &ubuf->f_bavail); + err |= __put_user (kbuf->f_files, &ubuf->f_files); + err |= __put_user (kbuf->f_ffree, &ubuf->f_ffree); + err |= __put_user (kbuf->f_namelen, &ubuf->f_namelen); + err |= __put_user (kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]); + err |= __put_user (kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]); + return err; +} + +extern asmlinkage int sys_statfs(const char * path, struct statfs * buf); + +asmlinkage int sys32_statfs(const char * path, struct statfs32 *buf) +{ + int ret; + struct statfs s; + mm_segment_t old_fs = get_fs(); + char *pth; + + pth = getname (path); + ret = PTR_ERR(pth); + if (!IS_ERR(pth)) { + set_fs (KERNEL_DS); + ret = sys_statfs((const char *)pth, &s); + set_fs (old_fs); + putname (pth); + if (put_statfs(buf, &s)) + return -EFAULT; + } + return ret; +} + +extern asmlinkage int sys_fstatfs(unsigned int fd, struct statfs * buf); + +asmlinkage int sys32_fstatfs(unsigned int fd, struct statfs32 *buf) +{ + int ret; + struct statfs s; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_fstatfs(fd, &s); + set_fs (old_fs); + if (put_statfs(buf, &s)) + return -EFAULT; + return ret; +} + +extern asmlinkage long sys_truncate(const char * path, unsigned long length); +extern asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); + +asmlinkage int sys32_truncate64(const char * path, unsigned long high, unsigned long low) +{ + if ((int)high < 0) + return -EINVAL; + else + return sys_truncate(path, (high << 32) | low); +} + +asmlinkage int sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low) +{ + if ((int)high < 0) + return -EINVAL; + else + return sys_ftruncate(fd, (high << 32) | low); +} + +extern asmlinkage int sys_utime(char * filename, struct utimbuf * times); + +struct utimbuf32 { + __kernel_time_t32 actime, modtime; +}; + +asmlinkage int sys32_utime(char * filename, struct utimbuf32 *times) +{ + struct utimbuf t; + mm_segment_t old_fs; + int ret; + char *filenam; + + if (!times) + return sys_utime(filename, NULL); + if (get_user (t.actime, &times->actime) || + __get_user (t.modtime, &times->modtime)) + return -EFAULT; + filenam = getname (filename); + ret = PTR_ERR(filenam); + if (!IS_ERR(filenam)) { + old_fs = get_fs(); + set_fs (KERNEL_DS); + ret = sys_utime(filenam, &t); + set_fs (old_fs); + putname (filenam); + } + return ret; +} + +struct iovec32 { u32 iov_base; __kernel_size_t32 iov_len; }; + +typedef ssize_t (*io_fn_t)(struct file *, char *, size_t, loff_t *); +typedef ssize_t (*iov_fn_t)(struct file *, const struct iovec *, unsigned long, loff_t *); + +static long do_readv_writev32(int type, struct file *file, + const struct iovec32 *vector, u32 count) +{ +
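+	/* What follows gathers the user's 32-bit iovec array into
+	 * native struct iovec, widening each entry:
+	 *	kiov[n].iov_base = (void *)(unsigned long)uiov32[n].iov_base;
+	 *	kiov[n].iov_len  = uiov32[n].iov_len;
+	 * before handing the result to the regular readv/writev paths. */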
unsigned long tot_len; + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov=iovstack, *ivp; + struct inode *inode; + long retval, i; + io_fn_t fn; + iov_fn_t fnv; + + /* First get the "struct iovec" from user memory and + * verify all the pointers + */ + if (!count) + return 0; + if (verify_area(VERIFY_READ, vector, sizeof(struct iovec32)*count)) + return -EFAULT; + if (count > UIO_MAXIOV) + return -EINVAL; + if (count > UIO_FASTIOV) { + iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL); + if (!iov) + return -ENOMEM; + } + + tot_len = 0; + i = count; + ivp = iov; + while(i > 0) { + u32 len; + u32 buf; + + __get_user(len, &vector->iov_len); + __get_user(buf, &vector->iov_base); + tot_len += len; + ivp->iov_base = (void *)A(buf); + ivp->iov_len = (__kernel_size_t) len; + vector++; + ivp++; + i--; + } + + inode = file->f_dentry->d_inode; + /* VERIFY_WRITE actually means a read, as we write to user space */ + retval = locks_verify_area((type == VERIFY_WRITE + ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE), + inode, file, file->f_pos, tot_len); + if (retval) + goto out; + + /* VERIFY_WRITE actually means a read, as we write to user space */ + fnv = (type == VERIFY_WRITE ? file->f_op->readv : file->f_op->writev); + if (fnv) { + retval = fnv(file, iov, count, &file->f_pos); + goto out; + } + + fn = (type == VERIFY_WRITE ? file->f_op->read : + (io_fn_t) file->f_op->write); + + ivp = iov; + while (count > 0) { + void * base; + int len, nr; + + base = ivp->iov_base; + len = ivp->iov_len; + ivp++; + count--; + nr = fn(file, base, len, &file->f_pos); + if (nr < 0) { + if (!retval) + retval = nr; + break; + } + retval += nr; + if (nr != len) + break; + } +out: + if (iov != iovstack) + kfree(iov); + + return retval; +} + +asmlinkage long sys32_readv(int fd, struct iovec32 *vector, u32 count) +{ + struct file *file; + long ret = -EBADF; + + file = fget(fd); + if(!file) + goto bad_file; + + if (file->f_op && (file->f_mode & FMODE_READ) && + (file->f_op->readv || file->f_op->read)) + ret = do_readv_writev32(VERIFY_WRITE, file, vector, count); + fput(file); + +bad_file: + return ret; +} + +asmlinkage long sys32_writev(int fd, struct iovec32 *vector, u32 count) +{ + struct file *file; + int ret = -EBADF; + + file = fget(fd); + if(!file) + goto bad_file; + if (file->f_op && (file->f_mode & FMODE_WRITE) && + (file->f_op->writev || file->f_op->write)) + ret = do_readv_writev32(VERIFY_READ, file, vector, count); + fput(file); + +bad_file: + return ret; +} + +/* readdir & getdents */ + +#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de))) +#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1)) + +struct old_linux_dirent32 { + u32 d_ino; + u32 d_offset; + unsigned short d_namlen; + char d_name[1]; +}; + +struct readdir_callback32 { + struct old_linux_dirent32 * dirent; + int count; +}; + +static int fillonedir(void * __buf, const char * name, int namlen, + off_t offset, ino_t ino, unsigned int d_type) +{ + struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf; + struct old_linux_dirent32 * dirent; + + if (buf->count) + return -EINVAL; + buf->count++; + dirent = buf->dirent; + put_user(ino, &dirent->d_ino); + put_user(offset, &dirent->d_offset); + put_user(namlen, &dirent->d_namlen); + copy_to_user(dirent->d_name, name, namlen); + put_user(0, dirent->d_name + namlen); + return 0; +} + +asmlinkage int old32_readdir(unsigned int fd, struct old_linux_dirent32 *dirent, unsigned int count) +{ + int error = -EBADF; + struct file * file; + struct readdir_callback32 buf; + + file = 
fget(fd); + if (!file) + goto out; + + buf.count = 0; + buf.dirent = dirent; + + error = vfs_readdir(file, fillonedir, &buf); + if (error < 0) + goto out_putf; + error = buf.count; + +out_putf: + fput(file); +out: + return error; +} + +struct linux_dirent32 { + u32 d_ino; + u32 d_off; + unsigned short d_reclen; + char d_name[1]; +}; + +struct getdents_callback32 { + struct linux_dirent32 * current_dir; + struct linux_dirent32 * previous; + int count; + int error; +}; + +static int filldir(void * __buf, const char * name, int namlen, off_t offset, ino_t ino, + unsigned int d_type) +{ + struct linux_dirent32 * dirent; + struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf; + int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1); + + buf->error = -EINVAL; /* only used if we fail.. */ + if (reclen > buf->count) + return -EINVAL; + dirent = buf->previous; + if (dirent) + put_user(offset, &dirent->d_off); + dirent = buf->current_dir; + buf->previous = dirent; + put_user(ino, &dirent->d_ino); + put_user(reclen, &dirent->d_reclen); + copy_to_user(dirent->d_name, name, namlen); + put_user(0, dirent->d_name + namlen); + ((char *) dirent) += reclen; + buf->current_dir = dirent; + buf->count -= reclen; + return 0; +} + +asmlinkage int sys32_getdents(unsigned int fd, struct linux_dirent32 *dirent, unsigned int count) +{ + struct file * file; + struct linux_dirent32 * lastdirent; + struct getdents_callback32 buf; + int error = -EBADF; + + file = fget(fd); + if (!file) + goto out; + + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(file, filldir, &buf); + if (error < 0) + goto out_putf; + lastdirent = buf.previous; + error = buf.error; + if(lastdirent) { + put_user(file->f_pos, &lastdirent->d_off); + error = count - buf.count; + } +out_putf: + fput(file); +out: + return error; +} + +/* end of readdir & getdents */ + +/* + * Ooo, nasty. We need here to frob 32-bit unsigned longs to + * 64-bit unsigned longs. + */ + +static inline int +get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset) +{ + if (ufdset) { + unsigned long odd; + + if (verify_area(VERIFY_WRITE, ufdset, n*sizeof(u32))) + return -EFAULT; + + odd = n & 1UL; + n &= ~1UL; + while (n) { + unsigned long h, l; + __get_user(l, ufdset); + __get_user(h, ufdset+1); + ufdset += 2; + *fdset++ = h << 32 | l; + n -= 2; + } + if (odd) + __get_user(*fdset, ufdset); + } else { + /* Tricky, must clear full unsigned long in the + * kernel fdset at the end, this makes sure that + * actually happens. 
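+ *
+ * (For the non-NULL case above, the packing is: two user u32
+ *  words per kernel unsigned long, low word first —
+ *	*fdset = ((unsigned long)h << 32) | l; )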
+ */ + memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32)); + } + return 0; +} + +static inline void +set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset) +{ + unsigned long odd; + + if (!ufdset) + return; + + odd = n & 1UL; + n &= ~1UL; + while (n) { + unsigned long h, l; + l = *fdset++; + h = l >> 32; + __put_user(l, ufdset); + __put_user(h, ufdset+1); + ufdset += 2; + n -= 2; + } + if (odd) + __put_user(*fdset, ufdset); +} + +#define MAX_SELECT_SECONDS \ + ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) + +asmlinkage int sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x) +{ + fd_set_bits fds; + struct timeval32 *tvp = (struct timeval32 *)AA(tvp_x); + char *bits; + unsigned long nn; + long timeout; + int ret, size; + + timeout = MAX_SCHEDULE_TIMEOUT; + if (tvp) { + time_t sec, usec; + + if ((ret = verify_area(VERIFY_READ, tvp, sizeof(*tvp))) + || (ret = __get_user(sec, &tvp->tv_sec)) + || (ret = __get_user(usec, &tvp->tv_usec))) + goto out_nofds; + + ret = -EINVAL; + if(sec < 0 || usec < 0) + goto out_nofds; + + if ((unsigned long) sec < MAX_SELECT_SECONDS) { + timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); + timeout += sec * (unsigned long) HZ; + } + } + + ret = -EINVAL; + if (n < 0) + goto out_nofds; + if (n > current->files->max_fdset) + n = current->files->max_fdset; + + /* + * We need 6 bitmaps (in/out/ex for both incoming and outgoing), + * since we used fdset we need to allocate memory in units of + * long-words. + */ + ret = -ENOMEM; + size = FDS_BYTES(n); + bits = kmalloc(6 * size, GFP_KERNEL); + if (!bits) + goto out_nofds; + fds.in = (unsigned long *) bits; + fds.out = (unsigned long *) (bits + size); + fds.ex = (unsigned long *) (bits + 2*size); + fds.res_in = (unsigned long *) (bits + 3*size); + fds.res_out = (unsigned long *) (bits + 4*size); + fds.res_ex = (unsigned long *) (bits + 5*size); + + nn = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32)); + if ((ret = get_fd_set32(nn, fds.in, inp)) || + (ret = get_fd_set32(nn, fds.out, outp)) || + (ret = get_fd_set32(nn, fds.ex, exp))) + goto out; + zero_fd_set(n, fds.res_in); + zero_fd_set(n, fds.res_out); + zero_fd_set(n, fds.res_ex); + + ret = do_select(n, &fds, &timeout); + + if (tvp && !(current->personality & STICKY_TIMEOUTS)) { + time_t sec = 0, usec = 0; + if (timeout) { + sec = timeout / HZ; + usec = timeout % HZ; + usec *= (1000000/HZ); + } + put_user(sec, &tvp->tv_sec); + put_user(usec, &tvp->tv_usec); + } + + if (ret < 0) + goto out; + if (!ret) { + ret = -ERESTARTNOHAND; + if (signal_pending(current)) + goto out; + ret = 0; + } + + set_fd_set32(nn, inp, fds.res_in); + set_fd_set32(nn, outp, fds.res_out); + set_fd_set32(nn, exp, fds.res_ex); + +out: + kfree(bits); +out_nofds: + return ret; +} + +static int cp_new_stat32(struct inode *inode, struct stat32 *statbuf) +{ + unsigned long ino, blksize, blocks; + kdev_t dev, rdev; + umode_t mode; + nlink_t nlink; + uid_t uid; + gid_t gid; + off_t size; + time_t atime, mtime, ctime; + int err; + + /* Stream the loads of inode data into the load buffer, + * then we push it all into the store buffer below. This + * should give optimal cache performance. 
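+ *
+ * (Everything narrows on the way out: i_size is squeezed into a
+ *  32-bit off_t, uid/gid go through high2lowuid()/high2lowgid(),
+ *  and the timestamps become 32-bit time_t values.)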
+ */ + ino = inode->i_ino; + dev = inode->i_dev; + mode = inode->i_mode; + nlink = inode->i_nlink; + uid = inode->i_uid; + gid = inode->i_gid; + rdev = inode->i_rdev; + size = inode->i_size; + atime = inode->i_atime; + mtime = inode->i_mtime; + ctime = inode->i_ctime; + blksize = inode->i_blksize; + blocks = inode->i_blocks; + + err = put_user(kdev_t_to_nr(dev), &statbuf->st_dev); + err |= put_user(ino, &statbuf->st_ino); + err |= put_user(mode, &statbuf->st_mode); + err |= put_user(nlink, &statbuf->st_nlink); + err |= put_user(high2lowuid(uid), &statbuf->st_uid); + err |= put_user(high2lowgid(gid), &statbuf->st_gid); + err |= put_user(kdev_t_to_nr(rdev), &statbuf->st_rdev); + err |= put_user(size, &statbuf->st_size); + err |= put_user(atime, &statbuf->st_atime); + err |= put_user(0, &statbuf->__unused1); + err |= put_user(mtime, &statbuf->st_mtime); + err |= put_user(0, &statbuf->__unused2); + err |= put_user(ctime, &statbuf->st_ctime); + err |= put_user(0, &statbuf->__unused3); + if (blksize) { + err |= put_user(blksize, &statbuf->st_blksize); + err |= put_user(blocks, &statbuf->st_blocks); + } else { + unsigned int tmp_blocks; + +#define D_B 7 +#define I_B (BLOCK_SIZE / sizeof(unsigned short)) + tmp_blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; + if (tmp_blocks > D_B) { + unsigned int indirect; + + indirect = (tmp_blocks - D_B + I_B - 1) / I_B; + tmp_blocks += indirect; + if (indirect > 1) { + indirect = (indirect - 1 + I_B - 1) / I_B; + tmp_blocks += indirect; + if (indirect > 1) + tmp_blocks++; + } + } + err |= put_user(BLOCK_SIZE, &statbuf->st_blksize); + err |= put_user((BLOCK_SIZE / 512) * tmp_blocks, &statbuf->st_blocks); +#undef D_B +#undef I_B + } +/* fixme + err |= put_user(0, &statbuf->__unused4[0]); + err |= put_user(0, &statbuf->__unused4[1]); +*/ + + return err; +} + +/* Perhaps this belongs in fs.h or similar. 
-DaveM */ +static __inline__ int +do_revalidate(struct dentry *dentry) +{ + struct inode * inode = dentry->d_inode; + if (inode->i_op && inode->i_op->revalidate) + return inode->i_op->revalidate(dentry); + return 0; +} + +asmlinkage int sys32_newstat(char * filename, struct stat32 *statbuf) +{ + struct nameidata nd; + int error; + + error = user_path_walk(filename, &nd); + if (!error) { + error = do_revalidate(nd.dentry); + if (!error) + error = cp_new_stat32(nd.dentry->d_inode, statbuf); + path_release(&nd); + } + return error; +} + +asmlinkage int sys32_newlstat(char * filename, struct stat32 *statbuf) +{ + struct nameidata nd; + int error; + + error = user_path_walk_link(filename, &nd); + if (!error) { + error = do_revalidate(nd.dentry); + if (!error) + error = cp_new_stat32(nd.dentry->d_inode, statbuf); + + path_release(&nd); + } + return error; +} + +asmlinkage int sys32_newfstat(unsigned int fd, struct stat32 *statbuf) +{ + struct file *f; + int err = -EBADF; + + f = fget(fd); + if (f) { + struct dentry * dentry = f->f_dentry; + + err = do_revalidate(dentry); + if (!err) + err = cp_new_stat32(dentry->d_inode, statbuf); + fput(f); + } + return err; +} + +extern asmlinkage int sys_sysfs(int option, unsigned long arg1, unsigned long arg2); + +asmlinkage int sys32_sysfs(int option, u32 arg1, u32 arg2) +{ + return sys_sysfs(option, arg1, arg2); +} + +struct ncp_mount_data32 { + int version; + unsigned int ncp_fd; + __kernel_uid_t32 mounted_uid; + __kernel_pid_t32 wdog_pid; + unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; + unsigned int time_out; + unsigned int retry_count; + unsigned int flags; + __kernel_uid_t32 uid; + __kernel_gid_t32 gid; + __kernel_mode_t32 file_mode; + __kernel_mode_t32 dir_mode; +}; + +static void *do_ncp_super_data_conv(void *raw_data) +{ + struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data; + struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data; + + n->dir_mode = n32->dir_mode; + n->file_mode = n32->file_mode; + n->gid = low2highgid(n32->gid); + n->uid = low2highuid(n32->uid); + memmove (n->mounted_vol, n32->mounted_vol, (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int))); + n->wdog_pid = n32->wdog_pid; + n->mounted_uid = low2highuid(n32->mounted_uid); + return raw_data; +} + +struct smb_mount_data32 { + int version; + __kernel_uid_t32 mounted_uid; + __kernel_uid_t32 uid; + __kernel_gid_t32 gid; + __kernel_mode_t32 file_mode; + __kernel_mode_t32 dir_mode; +}; + +static void *do_smb_super_data_conv(void *raw_data) +{ + struct smb_mount_data *s = (struct smb_mount_data *)raw_data; + struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data; + + s->version = s32->version; + s->mounted_uid = low2highuid(s32->mounted_uid); + s->uid = low2highuid(s32->uid); + s->gid = low2highgid(s32->gid); + s->file_mode = s32->file_mode; + s->dir_mode = s32->dir_mode; + return raw_data; +} + +static int copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel) +{ + int i; + unsigned long page; + struct vm_area_struct *vma; + + *kernel = 0; + if(!user) + return 0; + vma = find_vma(current->mm, (unsigned long)user); + if(!vma || (unsigned long)user < vma->vm_start) + return -EFAULT; + if(!(vma->vm_flags & VM_READ)) + return -EFAULT; + i = vma->vm_end - (unsigned long) user; + if(PAGE_SIZE <= (unsigned long) i) + i = PAGE_SIZE - 1; + if(!(page = __get_free_page(GFP_KERNEL))) + return -ENOMEM; + if(copy_from_user((void *) page, user, i)) { + free_page(page); + return -EFAULT; + } + *kernel = page; + return 0; +} + +#define SMBFS_NAME 
"smbfs" +#define NCPFS_NAME "ncpfs" + +asmlinkage int sys32_mount(char *dev_name, char *dir_name, char *type, unsigned long new_flags, u32 data) +{ + unsigned long type_page = 0; + unsigned long data_page = 0; + unsigned long dev_page = 0; + unsigned long dir_page = 0; + int err, is_smb, is_ncp; + + is_smb = is_ncp = 0; + + err = copy_mount_stuff_to_kernel((const void *)type, &type_page); + if (err) + goto out; + + if (!type_page) { + err = -EINVAL; + goto out; + } + + is_smb = !strcmp((char *)type_page, SMBFS_NAME); + is_ncp = !strcmp((char *)type_page, NCPFS_NAME); + + err = copy_mount_stuff_to_kernel((const void *)AA(data), &data_page); + if (err) + goto type_out; + + err = copy_mount_stuff_to_kernel(dev_name, &dev_page); + if (err) + goto data_out; + + err = copy_mount_stuff_to_kernel(dir_name, &dir_page); + if (err) + goto dev_out; + + if (!is_smb && !is_ncp) { + lock_kernel(); + err = do_mount((char*)dev_page, (char*)dir_page, + (char*)type_page, new_flags, (char*)data_page); + unlock_kernel(); + } else { + if (is_ncp) + do_ncp_super_data_conv((void *)data_page); + else + do_smb_super_data_conv((void *)data_page); + + lock_kernel(); + err = do_mount((char*)dev_page, (char*)dir_page, + (char*)type_page, new_flags, (char*)data_page); + unlock_kernel(); + } + free_page(dir_page); + +dev_out: + free_page(dev_page); + +data_out: + free_page(data_page); + +type_out: + free_page(type_page); + +out: + return err; +} + +struct rusage32 { + struct timeval32 ru_utime; + struct timeval32 ru_stime; + s32 ru_maxrss; + s32 ru_ixrss; + s32 ru_idrss; + s32 ru_isrss; + s32 ru_minflt; + s32 ru_majflt; + s32 ru_nswap; + s32 ru_inblock; + s32 ru_oublock; + s32 ru_msgsnd; + s32 ru_msgrcv; + s32 ru_nsignals; + s32 ru_nvcsw; + s32 ru_nivcsw; +}; + +static int put_rusage (struct rusage32 *ru, struct rusage *r) +{ + int err; + + err = put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec); + err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec); + err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec); + err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec); + err |= __put_user (r->ru_maxrss, &ru->ru_maxrss); + err |= __put_user (r->ru_ixrss, &ru->ru_ixrss); + err |= __put_user (r->ru_idrss, &ru->ru_idrss); + err |= __put_user (r->ru_isrss, &ru->ru_isrss); + err |= __put_user (r->ru_minflt, &ru->ru_minflt); + err |= __put_user (r->ru_majflt, &ru->ru_majflt); + err |= __put_user (r->ru_nswap, &ru->ru_nswap); + err |= __put_user (r->ru_inblock, &ru->ru_inblock); + err |= __put_user (r->ru_oublock, &ru->ru_oublock); + err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd); + err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv); + err |= __put_user (r->ru_nsignals, &ru->ru_nsignals); + err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw); + err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw); + return err; +} + +asmlinkage int sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options, struct rusage32 *ru) +{ + if (!ru) + return sys_wait4(pid, stat_addr, options, NULL); + else { + struct rusage r; + int ret; + unsigned int status; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_wait4(pid, stat_addr ? 
&status : NULL, options, &r); + set_fs (old_fs); + if (put_rusage (ru, &r)) return -EFAULT; + if (stat_addr && put_user (status, stat_addr)) + return -EFAULT; + return ret; + } +} + +struct sysinfo32 { + s32 uptime; + u32 loads[3]; + u32 totalram; + u32 freeram; + u32 sharedram; + u32 bufferram; + u32 totalswap; + u32 freeswap; + unsigned short procs; + char _f[22]; +}; + +extern asmlinkage int sys_sysinfo(struct sysinfo *info); + +asmlinkage int sys32_sysinfo(struct sysinfo32 *info) +{ + struct sysinfo s; + int ret, err; + mm_segment_t old_fs = get_fs (); + + set_fs (KERNEL_DS); + ret = sys_sysinfo(&s); + set_fs (old_fs); + err = put_user (s.uptime, &info->uptime); + err |= __put_user (s.loads[0], &info->loads[0]); + err |= __put_user (s.loads[1], &info->loads[1]); + err |= __put_user (s.loads[2], &info->loads[2]); + err |= __put_user (s.totalram, &info->totalram); + err |= __put_user (s.freeram, &info->freeram); + err |= __put_user (s.sharedram, &info->sharedram); + err |= __put_user (s.bufferram, &info->bufferram); + err |= __put_user (s.totalswap, &info->totalswap); + err |= __put_user (s.freeswap, &info->freeswap); + err |= __put_user (s.procs, &info->procs); + if (err) + return -EFAULT; + return ret; +} + +struct timespec32 { + s32 tv_sec; + s32 tv_nsec; +}; + +extern asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval); + +asmlinkage int sys32_sched_rr_get_interval(__kernel_pid_t32 pid, struct timespec32 *interval) +{ + struct timespec t; + int ret; + mm_segment_t old_fs = get_fs (); + + set_fs (KERNEL_DS); + ret = sys_sched_rr_get_interval(pid, &t); + set_fs (old_fs); + if (put_user (t.tv_sec, &interval->tv_sec) || + __put_user (t.tv_nsec, &interval->tv_nsec)) + return -EFAULT; + return ret; +} + +extern asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp); + +asmlinkage int sys32_nanosleep(struct timespec32 *rqtp, struct timespec32 *rmtp) +{ + struct timespec t; + int ret; + mm_segment_t old_fs = get_fs (); + + if (get_user (t.tv_sec, &rqtp->tv_sec) || + __get_user (t.tv_nsec, &rqtp->tv_nsec)) + return -EFAULT; + set_fs (KERNEL_DS); + ret = sys_nanosleep(&t, rmtp ? &t : NULL); + set_fs (old_fs); + if (rmtp && ret == -EINTR) { + if (__put_user (t.tv_sec, &rmtp->tv_sec) || + __put_user (t.tv_nsec, &rmtp->tv_nsec)) + return -EFAULT; + } + return ret; +} + +extern asmlinkage int sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset); + +asmlinkage int sys32_sigprocmask(int how, old_sigset_t32 *set, old_sigset_t32 *oset) +{ + old_sigset_t s; + int ret; + mm_segment_t old_fs = get_fs(); + + if (set && get_user (s, set)) return -EFAULT; + set_fs (KERNEL_DS); + ret = sys_sigprocmask(how, set ? &s : NULL, oset ? 
&s : NULL); + set_fs (old_fs); + if (ret) return ret; + if (oset && put_user (s, oset)) return -EFAULT; + return 0; +} + +extern asmlinkage int sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize); + +asmlinkage int sys32_rt_sigprocmask(int how, sigset_t32 *set, sigset_t32 *oset, __kernel_size_t32 sigsetsize) +{ + sigset_t s; + sigset_t32 s32; + int ret; + mm_segment_t old_fs = get_fs(); + + if (set) { + if (copy_from_user (&s32, set, sizeof(sigset_t32))) + return -EFAULT; + switch (_NSIG_WORDS) { + case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); + case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); + case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); + case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); + } + } + set_fs (KERNEL_DS); + ret = sys_rt_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL, sigsetsize); + set_fs (old_fs); + if (ret) return ret; + if (oset) { + switch (_NSIG_WORDS) { + case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; + case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; + case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; + case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; + } + if (copy_to_user (oset, &s32, sizeof(sigset_t32))) + return -EFAULT; + } + return 0; +} + +extern asmlinkage int sys_sigpending(old_sigset_t *set); + +asmlinkage int sys32_sigpending(old_sigset_t32 *set) +{ + old_sigset_t s; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_sigpending(&s); + set_fs (old_fs); + if (put_user (s, set)) return -EFAULT; + return ret; +} + +extern asmlinkage int sys_rt_sigpending(sigset_t *set, size_t sigsetsize); + +asmlinkage int sys32_rt_sigpending(sigset_t32 *set, __kernel_size_t32 sigsetsize) +{ + sigset_t s; + sigset_t32 s32; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_rt_sigpending(&s, sigsetsize); + set_fs (old_fs); + if (!ret) { + switch (_NSIG_WORDS) { + case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; + case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; + case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; + case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; + } + if (copy_to_user (set, &s32, sizeof(sigset_t32))) + return -EFAULT; + } + return ret; +} + +extern int +copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from); + +asmlinkage int +sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, + struct timespec32 *uts, __kernel_size_t32 sigsetsize) +{ + int ret, sig; + sigset_t these; + sigset_t32 these32; + struct timespec ts; + siginfo_t info; + long timeout = 0; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user (&these32, uthese, sizeof(sigset_t32))) + return -EFAULT; + + switch (_NSIG_WORDS) { + case 4: these.sig[3] = these32.sig[6] | (((long)these32.sig[7]) << 32); + case 3: these.sig[2] = these32.sig[4] | (((long)these32.sig[5]) << 32); + case 2: these.sig[1] = these32.sig[2] | (((long)these32.sig[3]) << 32); + case 1: these.sig[0] = these32.sig[0] | (((long)these32.sig[1]) << 32); + } + + /* + * Invert the set of allowed signals to get those we + * want to block. 
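+ *
+ * (Concretely: to wait on {SIGUSR1} alone, the set is first purged
+ *  of SIGKILL/SIGSTOP and then complemented, so the sigandsets()
+ *  below unblocks exactly SIGUSR1 for the duration of the sleep.)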
+ */ + sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); + signotset(&these); + + if (uts) { + if (get_user (ts.tv_sec, &uts->tv_sec) || + get_user (ts.tv_nsec, &uts->tv_nsec)) + return -EINVAL; + if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 + || ts.tv_sec < 0) + return -EINVAL; + } + + spin_lock_irq(&current->sigmask_lock); + sig = dequeue_signal(&these, &info); + if (!sig) { + /* None ready -- temporarily unblock those we're interested + in so that we'll be awakened when they arrive. */ + sigset_t oldblocked = current->blocked; + sigandsets(&current->blocked, &current->blocked, &these); + recalc_sigpending(current); + spin_unlock_irq(&current->sigmask_lock); + + timeout = MAX_SCHEDULE_TIMEOUT; + if (uts) + timeout = (timespec_to_jiffies(&ts) + + (ts.tv_sec || ts.tv_nsec)); + + current->state = TASK_INTERRUPTIBLE; + timeout = schedule_timeout(timeout); + + spin_lock_irq(&current->sigmask_lock); + sig = dequeue_signal(&these, &info); + current->blocked = oldblocked; + recalc_sigpending(current); + } + spin_unlock_irq(&current->sigmask_lock); + + if (sig) { + ret = sig; + if (uinfo) { + if (copy_siginfo_to_user32(uinfo, &info)) + ret = -EFAULT; + } + } else { + ret = -EAGAIN; + if (timeout) + ret = -EINTR; + } + + return ret; +} + +extern asmlinkage int +sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo); + +asmlinkage int +sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo) +{ + siginfo_t info; + int ret; + mm_segment_t old_fs = get_fs(); + + if (copy_from_user (&info, uinfo, 3*sizeof(int)) || + copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE)) + return -EFAULT; + set_fs (KERNEL_DS); + ret = sys_rt_sigqueueinfo(pid, sig, &info); + set_fs (old_fs); + return ret; +} + +struct tms32 { + __kernel_clock_t32 tms_utime; + __kernel_clock_t32 tms_stime; + __kernel_clock_t32 tms_cutime; + __kernel_clock_t32 tms_cstime; +}; + +extern asmlinkage long sys_times(struct tms * tbuf); + +asmlinkage long sys32_times(struct tms32 *tbuf) +{ + struct tms t; + long ret; + mm_segment_t old_fs = get_fs (); + int err; + + set_fs (KERNEL_DS); + ret = sys_times(tbuf ? &t : NULL); + set_fs (old_fs); + if (tbuf) { + err = put_user (t.tms_utime, &tbuf->tms_utime); + err |= __put_user (t.tms_stime, &tbuf->tms_stime); + err |= __put_user (t.tms_cutime, &tbuf->tms_cutime); + err |= __put_user (t.tms_cstime, &tbuf->tms_cstime); + if (err) + ret = -EFAULT; + } + return ret; +} + +#define RLIM_INFINITY32 0x7fffffff +#define RESOURCE32(x) ((x > RLIM_INFINITY32) ?
RLIM_INFINITY32 : x) + +struct rlimit32 { + u32 rlim_cur; + u32 rlim_max; +}; + +extern asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit *rlim); + +asmlinkage int sys32_old_getrlimit(unsigned int resource, struct rlimit32 *rlim) +{ + struct rlimit r; + int ret; + mm_segment_t old_fs = get_fs (); + + set_fs (KERNEL_DS); + ret = sys_old_getrlimit(resource, &r); + set_fs (old_fs); + if (!ret) { + ret = put_user (RESOURCE32(r.rlim_cur), &rlim->rlim_cur); + ret |= __put_user (RESOURCE32(r.rlim_max), &rlim->rlim_max); + } + return ret; +} + +extern asmlinkage int sys_setrlimit(unsigned int resource, struct rlimit *rlim); + +asmlinkage int sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim) +{ + struct rlimit r; + int ret; + mm_segment_t old_fs = get_fs (); + + if (resource >= RLIM_NLIMITS) return -EINVAL; + if (get_user (r.rlim_cur, &rlim->rlim_cur) || + __get_user (r.rlim_max, &rlim->rlim_max)) + return -EFAULT; + if (r.rlim_cur == RLIM_INFINITY32) + r.rlim_cur = RLIM_INFINITY; + if (r.rlim_max == RLIM_INFINITY32) + r.rlim_max = RLIM_INFINITY; + set_fs (KERNEL_DS); + ret = sys_setrlimit(resource, &r); + set_fs (old_fs); + return ret; +} + +extern asmlinkage int sys_getrusage(int who, struct rusage *ru); + +asmlinkage int sys32_getrusage(int who, struct rusage32 *ru) +{ + struct rusage r; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_getrusage(who, &r); + set_fs (old_fs); + if (put_rusage (ru, &r)) return -EFAULT; + return ret; +} + +/* XXX This really belongs in some header file... -DaveM */ +#define MAX_SOCK_ADDR 128 /* 108 for Unix domain - + 16 for IP, 16 for IPX, + 24 for IPv6, + about 80 for AX.25 */ + +extern struct socket *sockfd_lookup(int fd, int *err); + +/* XXX This as well... */ +extern __inline__ void sockfd_put(struct socket *sock) +{ + fput(sock->file); +} + +struct msghdr32 { + u32 msg_name; + int msg_namelen; + u32 msg_iov; + __kernel_size_t32 msg_iovlen; + u32 msg_control; + __kernel_size_t32 msg_controllen; + unsigned msg_flags; +}; + +struct cmsghdr32 { + __kernel_size_t32 cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +/* Bleech... */ +#define __CMSG32_NXTHDR(ctl, len, cmsg, cmsglen) __cmsg32_nxthdr((ctl),(len),(cmsg),(cmsglen)) +#define CMSG32_NXTHDR(mhdr, cmsg, cmsglen) cmsg32_nxthdr((mhdr), (cmsg), (cmsglen)) + +#define CMSG32_ALIGN(len) ( ((len)+sizeof(int)-1) & ~(sizeof(int)-1) ) + +#define CMSG32_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG32_ALIGN(sizeof(struct cmsghdr32)))) +#define CMSG32_SPACE(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + CMSG32_ALIGN(len)) +#define CMSG32_LEN(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + (len)) + +#define __CMSG32_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr32) ? 
\ + (struct cmsghdr32 *)(ctl) : \ + (struct cmsghdr32 *)NULL) +#define CMSG32_FIRSTHDR(msg) __CMSG32_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen) + +__inline__ struct cmsghdr32 *__cmsg32_nxthdr(void *__ctl, __kernel_size_t __size, + struct cmsghdr32 *__cmsg, int __cmsg_len) +{ + struct cmsghdr32 * __ptr; + + __ptr = (struct cmsghdr32 *)(((unsigned char *) __cmsg) + + CMSG32_ALIGN(__cmsg_len)); + if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size) + return NULL; + + return __ptr; +} + +__inline__ struct cmsghdr32 *cmsg32_nxthdr (struct msghdr *__msg, + struct cmsghdr32 *__cmsg, + int __cmsg_len) +{ + return __cmsg32_nxthdr(__msg->msg_control, __msg->msg_controllen, + __cmsg, __cmsg_len); +} + +static inline int iov_from_user32_to_kern(struct iovec *kiov, + struct iovec32 *uiov32, + int niov) +{ + int tot_len = 0; + + while(niov > 0) { + u32 len, buf; + + if(get_user(len, &uiov32->iov_len) || + get_user(buf, &uiov32->iov_base)) { + tot_len = -EFAULT; + break; + } + tot_len += len; + kiov->iov_base = (void *)A(buf); + kiov->iov_len = (__kernel_size_t) len; + uiov32++; + kiov++; + niov--; + } + return tot_len; +} + +static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg, + struct msghdr32 *umsg) +{ + u32 tmp1, tmp2, tmp3; + int err; + + err = get_user(tmp1, &umsg->msg_name); + err |= __get_user(tmp2, &umsg->msg_iov); + err |= __get_user(tmp3, &umsg->msg_control); + if (err) + return -EFAULT; + + kmsg->msg_name = (void *)A(tmp1); + kmsg->msg_iov = (struct iovec *)A(tmp2); + kmsg->msg_control = (void *)A(tmp3); + + err = get_user(kmsg->msg_namelen, &umsg->msg_namelen); + err |= get_user(kmsg->msg_iovlen, &umsg->msg_iovlen); + err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen); + err |= get_user(kmsg->msg_flags, &umsg->msg_flags); + + return err; +} + +/* I've named the args so it is easy to tell whose space the pointers are in. */ +static int verify_iovec32(struct msghdr *kern_msg, struct iovec *kern_iov, + char *kern_address, int mode) +{ + int tot_len; + + if(kern_msg->msg_namelen) { + if(mode==VERIFY_READ) { + int err = move_addr_to_kernel(kern_msg->msg_name, + kern_msg->msg_namelen, + kern_address); + if(err < 0) + return err; + } + kern_msg->msg_name = kern_address; + } else + kern_msg->msg_name = NULL; + + if(kern_msg->msg_iovlen > UIO_FASTIOV) { + kern_iov = kmalloc(kern_msg->msg_iovlen * sizeof(struct iovec), + GFP_KERNEL); + if(!kern_iov) + return -ENOMEM; + } + + tot_len = iov_from_user32_to_kern(kern_iov, + (struct iovec32 *)kern_msg->msg_iov, + kern_msg->msg_iovlen); + if(tot_len >= 0) + kern_msg->msg_iov = kern_iov; + else if(kern_msg->msg_iovlen > UIO_FASTIOV) + kfree(kern_iov); + + return tot_len; +} + +/* There is a lot of hair here because the alignment rules (and + * thus placement) of cmsg headers and length are different for + * 32-bit apps. -DaveM + */ +static int cmsghdr_from_user32_to_kern(struct msghdr *kmsg, + unsigned char *stackbuf, int stackbuf_size) +{ + struct cmsghdr32 *ucmsg; + struct cmsghdr *kcmsg, *kcmsg_base; + __kernel_size_t32 ucmlen; + __kernel_size_t kcmlen, tmp; + + kcmlen = 0; + kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf; + ucmsg = CMSG32_FIRSTHDR(kmsg); + while(ucmsg != NULL) { + if(get_user(ucmlen, &ucmsg->cmsg_len)) + return -EFAULT; + + /* Catch bogons. 
*/ + if(CMSG32_ALIGN(ucmlen) < + CMSG32_ALIGN(sizeof(struct cmsghdr32))) + return -EINVAL; + if((unsigned long)(((char *)ucmsg - (char *)kmsg->msg_control) + + ucmlen) > kmsg->msg_controllen) + return -EINVAL; + + tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) + + CMSG_ALIGN(sizeof(struct cmsghdr))); + kcmlen += tmp; + ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen); + } + if(kcmlen == 0) + return -EINVAL; + + /* The kcmlen holds the 64-bit version of the control length. + * It may not be modified as we do not stick it into the kmsg + * until we have successfully copied over all of the data + * from the user. + */ + if(kcmlen > stackbuf_size) + kcmsg_base = kcmsg = kmalloc(kcmlen, GFP_KERNEL); + if(kcmsg == NULL) + return -ENOBUFS; + + /* Now copy them over neatly. */ + memset(kcmsg, 0, kcmlen); + ucmsg = CMSG32_FIRSTHDR(kmsg); + while(ucmsg != NULL) { + __get_user(ucmlen, &ucmsg->cmsg_len); + tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) + + CMSG_ALIGN(sizeof(struct cmsghdr))); + kcmsg->cmsg_len = tmp; + __get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level); + __get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type); + + /* Copy over the data. */ + if(copy_from_user(CMSG_DATA(kcmsg), + CMSG32_DATA(ucmsg), + (ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))))) + goto out_free_efault; + + /* Advance. */ + kcmsg = (struct cmsghdr *)((char *)kcmsg + CMSG_ALIGN(tmp)); + ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen); + } + + /* Ok, looks like we made it. Hook it up and return success. */ + kmsg->msg_control = kcmsg_base; + kmsg->msg_controllen = kcmlen; + return 0; + +out_free_efault: + if(kcmsg_base != (struct cmsghdr *)stackbuf) + kfree(kcmsg_base); + return -EFAULT; +} + +static void put_cmsg32(struct msghdr *kmsg, int level, int type, + int len, void *data) +{ + struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control; + struct cmsghdr32 cmhdr; + int cmlen = CMSG32_LEN(len); + + if(cm == NULL || kmsg->msg_controllen < sizeof(*cm)) { + kmsg->msg_flags |= MSG_CTRUNC; + return; + } + + if(kmsg->msg_controllen < cmlen) { + kmsg->msg_flags |= MSG_CTRUNC; + cmlen = kmsg->msg_controllen; + } + cmhdr.cmsg_level = level; + cmhdr.cmsg_type = type; + cmhdr.cmsg_len = cmlen; + + if(copy_to_user(cm, &cmhdr, sizeof cmhdr)) + return; + if(copy_to_user(CMSG32_DATA(cm), data, cmlen - sizeof(struct cmsghdr32))) + return; + cmlen = CMSG32_SPACE(len); + kmsg->msg_control += cmlen; + kmsg->msg_controllen -= cmlen; +} + +static void scm_detach_fds32(struct msghdr *kmsg, struct scm_cookie *scm) +{ + struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control; + int fdmax = (kmsg->msg_controllen - sizeof(struct cmsghdr32)) / sizeof(int); + int fdnum = scm->fp->count; + struct file **fp = scm->fp->fp; + int *cmfptr; + int err = 0, i; + + if (fdnum < fdmax) + fdmax = fdnum; + + for (i = 0, cmfptr = (int *) CMSG32_DATA(cm); i < fdmax; i++, cmfptr++) { + int new_fd; + err = get_unused_fd(); + if (err < 0) + break; + new_fd = err; + err = put_user(new_fd, cmfptr); + if (err) { + put_unused_fd(new_fd); + break; + } + /* Bump the usage count and install the file. 
*/ + get_file(fp[i]); + fd_install(new_fd, fp[i]); + } + + if (i > 0) { + int cmlen = CMSG32_LEN(i * sizeof(int)); + if (!err) + err = put_user(SOL_SOCKET, &cm->cmsg_level); + if (!err) + err = put_user(SCM_RIGHTS, &cm->cmsg_type); + if (!err) + err = put_user(cmlen, &cm->cmsg_len); + if (!err) { + cmlen = CMSG32_SPACE(i * sizeof(int)); + kmsg->msg_control += cmlen; + kmsg->msg_controllen -= cmlen; + } + } + if (i < fdnum) + kmsg->msg_flags |= MSG_CTRUNC; + + /* + * All of the files that fit in the message have had their + * usage counts incremented, so we just free the list. + */ + __scm_destroy(scm); +} + +/* In these cases we (currently) can just copy to data over verbatim + * because all CMSGs created by the kernel have well defined types which + * have the same layout in both the 32-bit and 64-bit API. One must add + * some special cased conversions here if we start sending control messages + * with incompatible types. + * + * SCM_RIGHTS and SCM_CREDENTIALS are done by hand in recvmsg32 right after + * we do our work. The remaining cases are: + * + * SOL_IP IP_PKTINFO struct in_pktinfo 32-bit clean + * IP_TTL int 32-bit clean + * IP_TOS __u8 32-bit clean + * IP_RECVOPTS variable length 32-bit clean + * IP_RETOPTS variable length 32-bit clean + * (these last two are clean because the types are defined + * by the IPv4 protocol) + * IP_RECVERR struct sock_extended_err + + * struct sockaddr_in 32-bit clean + * SOL_IPV6 IPV6_RECVERR struct sock_extended_err + + * struct sockaddr_in6 32-bit clean + * IPV6_PKTINFO struct in6_pktinfo 32-bit clean + * IPV6_HOPLIMIT int 32-bit clean + * IPV6_FLOWINFO u32 32-bit clean + * IPV6_HOPOPTS ipv6 hop exthdr 32-bit clean + * IPV6_DSTOPTS ipv6 dst exthdr(s) 32-bit clean + * IPV6_RTHDR ipv6 routing exthdr 32-bit clean + * IPV6_AUTHHDR ipv6 auth exthdr 32-bit clean + */ +static void cmsg32_recvmsg_fixup(struct msghdr *kmsg, unsigned long orig_cmsg_uptr) +{ + unsigned char *workbuf, *wp; + unsigned long bufsz, space_avail; + struct cmsghdr *ucmsg; + + bufsz = ((unsigned long)kmsg->msg_control) - orig_cmsg_uptr; + space_avail = kmsg->msg_controllen + bufsz; + wp = workbuf = kmalloc(bufsz, GFP_KERNEL); + if(workbuf == NULL) + goto fail; + + /* To make this more sane we assume the kernel sends back properly + * formatted control messages. Because of how the kernel will truncate + * the cmsg_len for MSG_TRUNC cases, we need not check that case either. + */ + ucmsg = (struct cmsghdr *) orig_cmsg_uptr; + while(((unsigned long)ucmsg) <= + (((unsigned long)kmsg->msg_control) - sizeof(struct cmsghdr))) { + struct cmsghdr32 *kcmsg32 = (struct cmsghdr32 *) wp; + int clen64, clen32; + + /* UCMSG is the 64-bit format CMSG entry in user-space. + * KCMSG32 is within the kernel space temporary buffer + * we use to convert into a 32-bit style CMSG. + */ + __get_user(kcmsg32->cmsg_len, &ucmsg->cmsg_len); + __get_user(kcmsg32->cmsg_level, &ucmsg->cmsg_level); + __get_user(kcmsg32->cmsg_type, &ucmsg->cmsg_type); + + clen64 = kcmsg32->cmsg_len; + copy_from_user(CMSG32_DATA(kcmsg32), CMSG_DATA(ucmsg), + clen64 - CMSG_ALIGN(sizeof(*ucmsg))); + clen32 = ((clen64 - CMSG_ALIGN(sizeof(*ucmsg))) + + CMSG32_ALIGN(sizeof(struct cmsghdr32))); + kcmsg32->cmsg_len = clen32; + + ucmsg = (struct cmsghdr *) (((char *)ucmsg) + CMSG_ALIGN(clen64)); + wp = (((char *)kcmsg32) + CMSG32_ALIGN(clen32)); + } + + /* Copy back fixed up data, and adjust pointers. 
*/ + bufsz = (wp - workbuf); + copy_to_user((void *)orig_cmsg_uptr, workbuf, bufsz); + + kmsg->msg_control = (struct cmsghdr *) + (((char *)orig_cmsg_uptr) + bufsz); + kmsg->msg_controllen = space_avail - bufsz; + + kfree(workbuf); + return; + +fail: + /* If we leave the 64-bit format CMSG chunks in there, + * the application could get confused and crash. So to + * ensure greater recovery, we report no CMSGs. + */ + kmsg->msg_controllen += bufsz; + kmsg->msg_control = (void *) orig_cmsg_uptr; +} + +asmlinkage int sys32_sendmsg(int fd, struct msghdr32 *user_msg, unsigned user_flags) +{ + struct socket *sock; + char address[MAX_SOCK_ADDR]; + struct iovec iov[UIO_FASTIOV]; + unsigned char ctl[sizeof(struct cmsghdr) + 20]; + unsigned char *ctl_buf = ctl; + struct msghdr kern_msg; + int err, total_len; + + if(msghdr_from_user32_to_kern(&kern_msg, user_msg)) + return -EFAULT; + if(kern_msg.msg_iovlen > UIO_MAXIOV) + return -EINVAL; + err = verify_iovec32(&kern_msg, iov, address, VERIFY_READ); + if (err < 0) + goto out; + total_len = err; + + if(kern_msg.msg_controllen) { + err = cmsghdr_from_user32_to_kern(&kern_msg, ctl, sizeof(ctl)); + if(err) + goto out_freeiov; + ctl_buf = kern_msg.msg_control; + } + kern_msg.msg_flags = user_flags; + + sock = sockfd_lookup(fd, &err); + if (sock != NULL) { + if (sock->file->f_flags & O_NONBLOCK) + kern_msg.msg_flags |= MSG_DONTWAIT; + err = sock_sendmsg(sock, &kern_msg, total_len); + sockfd_put(sock); + } + + /* N.B. Use kfree here, as kern_msg.msg_controllen might change? */ + if(ctl_buf != ctl) + kfree(ctl_buf); +out_freeiov: + if(kern_msg.msg_iov != iov) + kfree(kern_msg.msg_iov); +out: + return err; +} + +asmlinkage int sys32_recvmsg(int fd, struct msghdr32 *user_msg, unsigned int user_flags) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct msghdr kern_msg; + char addr[MAX_SOCK_ADDR]; + struct socket *sock; + struct iovec *iov = iovstack; + struct sockaddr *uaddr; + int *uaddr_len; + unsigned long cmsg_ptr; + int err, total_len, len = 0; + + if(msghdr_from_user32_to_kern(&kern_msg, user_msg)) + return -EFAULT; + if(kern_msg.msg_iovlen > UIO_MAXIOV) + return -EINVAL; + + uaddr = kern_msg.msg_name; + uaddr_len = &user_msg->msg_namelen; + err = verify_iovec32(&kern_msg, iov, addr, VERIFY_WRITE); + if (err < 0) + goto out; + total_len = err; + + cmsg_ptr = (unsigned long) kern_msg.msg_control; + kern_msg.msg_flags = 0; + + sock = sockfd_lookup(fd, &err); + if (sock != NULL) { + struct scm_cookie scm; + + if (sock->file->f_flags & O_NONBLOCK) + user_flags |= MSG_DONTWAIT; + memset(&scm, 0, sizeof(scm)); + err = sock->ops->recvmsg(sock, &kern_msg, total_len, + user_flags, &scm); + if(err >= 0) { + len = err; + if(!kern_msg.msg_control) { + if(sock->passcred || scm.fp) + kern_msg.msg_flags |= MSG_CTRUNC; + if(scm.fp) + __scm_destroy(&scm); + } else { + /* If recvmsg processing itself placed some + * control messages into user space, it's is + * using 64-bit CMSG processing, so we need + * to fix it up before we tack on more stuff. + */ + if((unsigned long) kern_msg.msg_control != cmsg_ptr) + cmsg32_recvmsg_fixup(&kern_msg, cmsg_ptr); + + /* Wheee... 
*/ + if(sock->passcred) + put_cmsg32(&kern_msg, + SOL_SOCKET, SCM_CREDENTIALS, + sizeof(scm.creds), &scm.creds); + if(scm.fp != NULL) + scm_detach_fds32(&kern_msg, &scm); + } + } + sockfd_put(sock); + } + + if(uaddr != NULL && err >= 0) + err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len); + if(cmsg_ptr != 0 && err >= 0) { + unsigned long ucmsg_ptr = ((unsigned long)kern_msg.msg_control); + __kernel_size_t32 uclen = (__kernel_size_t32) (ucmsg_ptr - cmsg_ptr); + err |= __put_user(uclen, &user_msg->msg_controllen); + } + if(err >= 0) + err = __put_user(kern_msg.msg_flags, &user_msg->msg_flags); + if(kern_msg.msg_iov != iov) + kfree(kern_msg.msg_iov); +out: + if(err < 0) + return err; + return len; +} + +extern asmlinkage int sys_setsockopt(int fd, int level, int optname, + char *optval, int optlen); + +static int do_set_attach_filter(int fd, int level, int optname, + char *optval, int optlen) +{ + struct sock_fprog32 { + __u16 len; + __u32 filter; + } *fprog32 = (struct sock_fprog32 *)optval; + struct sock_fprog kfprog; + struct sock_filter *kfilter; + unsigned int fsize; + mm_segment_t old_fs; + __u32 uptr; + int ret; + + if (get_user(kfprog.len, &fprog32->len) || + __get_user(uptr, &fprog32->filter)) + return -EFAULT; + + kfprog.filter = (struct sock_filter *)A(uptr); + fsize = kfprog.len * sizeof(struct sock_filter); + + kfilter = (struct sock_filter *)kmalloc(fsize, GFP_KERNEL); + if (kfilter == NULL) + return -ENOMEM; + + if (copy_from_user(kfilter, kfprog.filter, fsize)) { + kfree(kfilter); + return -EFAULT; + } + + kfprog.filter = kfilter; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_setsockopt(fd, level, optname, + (char *)&kfprog, sizeof(kfprog)); + set_fs(old_fs); + + kfree(kfilter); + + return ret; +} + +static int do_set_icmpv6_filter(int fd, int level, int optname, + char *optval, int optlen) +{ + struct icmp6_filter kfilter; + mm_segment_t old_fs; + int ret, i; + + if (copy_from_user(&kfilter, optval, sizeof(kfilter))) + return -EFAULT; + + + for (i = 0; i < 8; i += 2) { + u32 tmp = kfilter.data[i]; + + kfilter.data[i] = kfilter.data[i + 1]; + kfilter.data[i + 1] = tmp; + } + + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_setsockopt(fd, level, optname, + (char *) &kfilter, sizeof(kfilter)); + set_fs(old_fs); + + return ret; +} + +asmlinkage int sys32_setsockopt(int fd, int level, int optname, + char *optval, int optlen) +{ + if (optname == SO_ATTACH_FILTER) + return do_set_attach_filter(fd, level, optname, + optval, optlen); + if (level == SOL_ICMPV6 && optname == ICMPV6_FILTER) + return do_set_icmpv6_filter(fd, level, optname, + optval, optlen); + + return sys_setsockopt(fd, level, optname, optval, optlen); +} + +extern void check_pending(int signum); + +/* + * count32() counts the number of arguments/envelopes + */ +static int count32(u32 * argv) +{ + int i = 0; + + if (argv != NULL) { + for (;;) { + u32 p; int error; + + error = get_user(p,argv); + if (error) return error; + if (!p) break; + argv++; i++; + } + } + return i; +} + +/* + * 'copy_string32()' copies argument/envelope strings from user + * memory to free pages in kernel mem. These are in a format ready + * to be put directly into the top of new user memory. 
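+ */

[Editor's illustration, not part of the patch: a standalone sketch of the count32() idea used below -- a 31-bit execve() passes argv and envp as arrays of u32 "pointers", so the kernel walks 4-byte slots rather than native char * slots until it hits the NULL terminator; A() then widens each slot into a usable 64-bit address. The in-patch count32() also propagates get_user() faults, which this sketch omits; the array contents here are fake.]

#include <stdio.h>

typedef unsigned int u32;

static int count32(const u32 *argv)
{
	int i = 0;

	while (argv[i] != 0)	/* a zero slot terminates the vector */
		i++;
	return i;
}

int main(void)
{
	u32 argv[] = { 0x1000, 0x2000, 0x3000, 0 };	/* fake 31-bit pointers */

	printf("argc = %d\n", count32(argv));		/* prints 3 */
	return 0;
}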
+static int copy_strings32(int argc, u32 * argv, struct linux_binprm *bprm)
+{
+	while (argc-- > 0) {
+		u32 str;
+		int len;
+		unsigned long pos;
+
+		if (get_user(str, argv + argc) ||
+		    !str ||
+		    !(len = strnlen_user((char *)A(str), bprm->p)))
+			return -EFAULT;
+
+		if (bprm->p < len)
+			return -E2BIG;
+
+		bprm->p -= len;
+
+		pos = bprm->p;
+		while (len) {
+			char *kaddr;
+			struct page *page;
+			int offset, bytes_to_copy, new, err;
+
+			offset = pos % PAGE_SIZE;
+			page = bprm->page[pos / PAGE_SIZE];
+			new = 0;
+			if (!page) {
+				page = alloc_page(GFP_USER);
+				bprm->page[pos / PAGE_SIZE] = page;
+				if (!page)
+					return -ENOMEM;
+				new = 1;
+			}
+			kaddr = (char *)kmap(page);
+
+			if (new && offset)
+				memset(kaddr, 0, offset);
+			bytes_to_copy = PAGE_SIZE - offset;
+			if (bytes_to_copy > len) {
+				bytes_to_copy = len;
+				if (new)
+					memset(kaddr+offset+len, 0,
+					       PAGE_SIZE-offset-len);
+			}
+
+			err = copy_from_user(kaddr + offset, (char *)A(str),
+					     bytes_to_copy);
+			flush_page_to_ram(page);
+			kunmap((unsigned long)kaddr);
+
+			if (err)
+				return -EFAULT;
+
+			pos += bytes_to_copy;
+			str += bytes_to_copy;
+			len -= bytes_to_copy;
+		}
+	}
+	return 0;
+}
+
+/*
+ * sys32_execve() executes a new program.
+ */
+static inline int
+do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
+{
+	struct linux_binprm bprm;
+	struct file * file;
+	int retval;
+	int i;
+
+	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+	memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
+
+	file = open_exec(filename);
+
+	retval = PTR_ERR(file);
+	if (IS_ERR(file))
+		return retval;
+
+	bprm.file = file;
+	bprm.filename = filename;
+	bprm.sh_bang = 0;
+	bprm.loader = 0;
+	bprm.exec = 0;
+	if ((bprm.argc = count32(argv)) < 0) {
+		allow_write_access(file);
+		fput(file);
+		return bprm.argc;
+	}
+	if ((bprm.envc = count32(envp)) < 0) {
+		allow_write_access(file);
+		fput(file);
+		return bprm.envc;
+	}
+
+	retval = prepare_binprm(&bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+	if (retval < 0)
+		goto out;
+
+	bprm.exec = bprm.p;
+	retval = copy_strings32(bprm.envc, envp, &bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings32(bprm.argc, argv, &bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = search_binary_handler(&bprm, regs);
+	if (retval >= 0)
+		/* execve success */
+		return retval;
+
+out:
+	/* Something went wrong, return the inode and free the argument pages */
+	allow_write_access(bprm.file);
+	if (bprm.file)
+		fput(bprm.file);
+
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page * page = bprm.page[i];
+		if (page)
+			__free_page(page);
+	}
+
+	return retval;
+}
+
+asmlinkage int sys32_execve(struct pt_regs regs)
+{
+	int error;
+	char * filename;
+
+	filename = getname((char *)A(regs.orig_gpr2));
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve32(filename, (u32 *)A(regs.gprs[3]),
+			    (u32 *)A(regs.gprs[4]), &regs);
+	if (error == 0)
+	{
+		current->ptrace &= ~PT_DTRACE;
+		current->thread.fp_regs.fpc=0;
+		__asm__ __volatile__
+			("sr 0,0\n\t"
+			 "sfpc 0,0\n\t"
+			 : : :"0");
+	}
+	putname(filename);
+out:
+	return error;
+}
+
+
+#ifdef CONFIG_MODULES
+
+extern asmlinkage unsigned long sys_create_module(const char *name_user, size_t size);
+
+asmlinkage unsigned long sys32_create_module(const char *name_user, __kernel_size_t32 size)
+{
+	return sys_create_module(name_user, (size_t)size);
+}
+
+extern asmlinkage int sys_init_module(const char *name_user, struct module *mod_user);
+
+/* Hey, when you're trying to init module, take time and prepare us a nice 64bit
+ * module structure, even if from 32bit modutils... Why to pollute kernel...
:)) + */ +asmlinkage int sys32_init_module(const char *name_user, struct module *mod_user) +{ + return sys_init_module(name_user, mod_user); +} + +extern asmlinkage int sys_delete_module(const char *name_user); + +asmlinkage int sys32_delete_module(const char *name_user) +{ + return sys_delete_module(name_user); +} + +struct module_info32 { + u32 addr; + u32 size; + u32 flags; + s32 usecount; +}; + +/* Query various bits about modules. */ + +static inline long +get_mod_name(const char *user_name, char **buf) +{ + unsigned long page; + long retval; + + if ((unsigned long)user_name >= TASK_SIZE + && !segment_eq(get_fs (), KERNEL_DS)) + return -EFAULT; + + page = __get_free_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + retval = strncpy_from_user((char *)page, user_name, PAGE_SIZE); + if (retval > 0) { + if (retval < PAGE_SIZE) { + *buf = (char *)page; + return retval; + } + retval = -ENAMETOOLONG; + } else if (!retval) + retval = -EINVAL; + + free_page(page); + return retval; +} + +static inline void +put_mod_name(char *buf) +{ + free_page((unsigned long)buf); +} + +static __inline__ struct module *find_module(const char *name) +{ + struct module *mod; + + for (mod = module_list; mod ; mod = mod->next) { + if (mod->flags & MOD_DELETED) + continue; + if (!strcmp(mod->name, name)) + break; + } + + return mod; +} + +static int +qm_modules(char *buf, size_t bufsize, __kernel_size_t32 *ret) +{ + struct module *mod; + size_t nmod, space, len; + + nmod = space = 0; + + for (mod = module_list; mod->next != NULL; mod = mod->next, ++nmod) { + len = strlen(mod->name)+1; + if (len > bufsize) + goto calc_space_needed; + if (copy_to_user(buf, mod->name, len)) + return -EFAULT; + buf += len; + bufsize -= len; + space += len; + } + + if (put_user(nmod, ret)) + return -EFAULT; + else + return 0; + +calc_space_needed: + space += len; + while ((mod = mod->next)->next != NULL) + space += strlen(mod->name)+1; + + if (put_user(space, ret)) + return -EFAULT; + else + return -ENOSPC; +} + +static int +qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) +{ + size_t i, space, len; + + if (mod->next == NULL) + return -EINVAL; + if (!MOD_CAN_QUERY(mod)) + return put_user(0, ret); + + space = 0; + for (i = 0; i < mod->ndeps; ++i) { + const char *dep_name = mod->deps[i].dep->name; + + len = strlen(dep_name)+1; + if (len > bufsize) + goto calc_space_needed; + if (copy_to_user(buf, dep_name, len)) + return -EFAULT; + buf += len; + bufsize -= len; + space += len; + } + + return put_user(i, ret); + +calc_space_needed: + space += len; + while (++i < mod->ndeps) + space += strlen(mod->deps[i].dep->name)+1; + + if (put_user(space, ret)) + return -EFAULT; + else + return -ENOSPC; +} + +static int +qm_refs(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) +{ + size_t nrefs, space, len; + struct module_ref *ref; + + if (mod->next == NULL) + return -EINVAL; + if (!MOD_CAN_QUERY(mod)) + if (put_user(0, ret)) + return -EFAULT; + else + return 0; + + space = 0; + for (nrefs = 0, ref = mod->refs; ref ; ++nrefs, ref = ref->next_ref) { + const char *ref_name = ref->ref->name; + + len = strlen(ref_name)+1; + if (len > bufsize) + goto calc_space_needed; + if (copy_to_user(buf, ref_name, len)) + return -EFAULT; + buf += len; + bufsize -= len; + space += len; + } + + if (put_user(nrefs, ret)) + return -EFAULT; + else + return 0; + +calc_space_needed: + space += len; + while ((ref = ref->next_ref) != NULL) + space += strlen(ref->ref->name)+1; + + if (put_user(space, ret)) + return -EFAULT; 
+ else + return -ENOSPC; +} + +static inline int +qm_symbols(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) +{ + size_t i, space, len; + struct module_symbol *s; + char *strings; + unsigned *vals; + + if (!MOD_CAN_QUERY(mod)) + if (put_user(0, ret)) + return -EFAULT; + else + return 0; + + space = mod->nsyms * 2*sizeof(u32); + + i = len = 0; + s = mod->syms; + + if (space > bufsize) + goto calc_space_needed; + + if (!access_ok(VERIFY_WRITE, buf, space)) + return -EFAULT; + + bufsize -= space; + vals = (unsigned *)buf; + strings = buf+space; + + for (; i < mod->nsyms ; ++i, ++s, vals += 2) { + len = strlen(s->name)+1; + if (len > bufsize) + goto calc_space_needed; + + if (copy_to_user(strings, s->name, len) + || __put_user(s->value, vals+0) + || __put_user(space, vals+1)) + return -EFAULT; + + strings += len; + bufsize -= len; + space += len; + } + + if (put_user(i, ret)) + return -EFAULT; + else + return 0; + +calc_space_needed: + for (; i < mod->nsyms; ++i, ++s) + space += strlen(s->name)+1; + + if (put_user(space, ret)) + return -EFAULT; + else + return -ENOSPC; +} + +static inline int +qm_info(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret) +{ + int error = 0; + + if (mod->next == NULL) + return -EINVAL; + + if (sizeof(struct module_info32) <= bufsize) { + struct module_info32 info; + info.addr = (unsigned long)mod; + info.size = mod->size; + info.flags = mod->flags; + info.usecount = + ((mod_member_present(mod, can_unload) + && mod->can_unload) + ? -1 : atomic_read(&mod->uc.usecount)); + + if (copy_to_user(buf, &info, sizeof(struct module_info32))) + return -EFAULT; + } else + error = -ENOSPC; + + if (put_user(sizeof(struct module_info32), ret)) + return -EFAULT; + + return error; +} + +asmlinkage int sys32_query_module(char *name_user, int which, char *buf, __kernel_size_t32 bufsize, u32 ret) +{ + struct module *mod; + int err; + + lock_kernel(); + if (name_user == 0) { + /* This finds "kernel_module" which is not exported. */ + for(mod = module_list; mod->next != NULL; mod = mod->next) + ; + } else { + long namelen; + char *name; + + if ((namelen = get_mod_name(name_user, &name)) < 0) { + err = namelen; + goto out; + } + err = -ENOENT; + if (namelen == 0) { + /* This finds "kernel_module" which is not exported. 
*/ + for(mod = module_list; mod->next != NULL; mod = mod->next) + ; + } else if ((mod = find_module(name)) == NULL) { + put_mod_name(name); + goto out; + } + put_mod_name(name); + } + + switch (which) + { + case 0: + err = 0; + break; + case QM_MODULES: + err = qm_modules(buf, bufsize, (__kernel_size_t32 *)AA(ret)); + break; + case QM_DEPS: + err = qm_deps(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret)); + break; + case QM_REFS: + err = qm_refs(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret)); + break; + case QM_SYMBOLS: + err = qm_symbols(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret)); + break; + case QM_INFO: + err = qm_info(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret)); + break; + default: + err = -EINVAL; + break; + } +out: + unlock_kernel(); + return err; +} + +struct kernel_sym32 { + u32 value; + char name[60]; +}; + +extern asmlinkage int sys_get_kernel_syms(struct kernel_sym *table); + +asmlinkage int sys32_get_kernel_syms(struct kernel_sym32 *table) +{ + int len, i; + struct kernel_sym *tbl; + mm_segment_t old_fs; + + len = sys_get_kernel_syms(NULL); + if (!table) return len; + tbl = kmalloc (len * sizeof (struct kernel_sym), GFP_KERNEL); + if (!tbl) return -ENOMEM; + old_fs = get_fs(); + set_fs (KERNEL_DS); + sys_get_kernel_syms(tbl); + set_fs (old_fs); + for (i = 0; i < len; i++, table += sizeof (struct kernel_sym32)) { + if (put_user (tbl[i].value, &table->value) || + copy_to_user (table->name, tbl[i].name, 60)) + break; + } + kfree (tbl); + return i; +} + +#else /* CONFIG_MODULES */ + +asmlinkage unsigned long +sys32_create_module(const char *name_user, size_t size) +{ + return -ENOSYS; +} + +asmlinkage int +sys32_init_module(const char *name_user, struct module *mod_user) +{ + return -ENOSYS; +} + +asmlinkage int +sys32_delete_module(const char *name_user) +{ + return -ENOSYS; +} + +asmlinkage int +sys32_query_module(const char *name_user, int which, char *buf, size_t bufsize, + size_t *ret) +{ + /* Let the program know about the new interface. Not that + it'll do them much good. */ + if (which == 0) + return 0; + + return -ENOSYS; +} + +asmlinkage int +sys32_get_kernel_syms(struct kernel_sym *table) +{ + return -ENOSYS; +} + +#endif /* CONFIG_MODULES */ + +/* Stuff for NFS server syscalls... 
*/ +struct nfsctl_svc32 { + u16 svc32_port; + s32 svc32_nthreads; +}; + +struct nfsctl_client32 { + s8 cl32_ident[NFSCLNT_IDMAX+1]; + s32 cl32_naddr; + struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX]; + s32 cl32_fhkeytype; + s32 cl32_fhkeylen; + u8 cl32_fhkey[NFSCLNT_KEYMAX]; +}; + +struct nfsctl_export32 { + s8 ex32_client[NFSCLNT_IDMAX+1]; + s8 ex32_path[NFS_MAXPATHLEN+1]; + __kernel_dev_t32 ex32_dev; + __kernel_ino_t32 ex32_ino; + s32 ex32_flags; + __kernel_uid_t32 ex32_anon_uid; + __kernel_gid_t32 ex32_anon_gid; +}; + +struct nfsctl_uidmap32 { + u32 ug32_ident; /* char * */ + __kernel_uid_t32 ug32_uidbase; + s32 ug32_uidlen; + u32 ug32_udimap; /* uid_t * */ + __kernel_uid_t32 ug32_gidbase; + s32 ug32_gidlen; + u32 ug32_gdimap; /* gid_t * */ +}; + +struct nfsctl_fhparm32 { + struct sockaddr gf32_addr; + __kernel_dev_t32 gf32_dev; + __kernel_ino_t32 gf32_ino; + s32 gf32_version; +}; + +struct nfsctl_fdparm32 { + struct sockaddr gd32_addr; + s8 gd32_path[NFS_MAXPATHLEN+1]; + s32 gd32_version; +}; + +struct nfsctl_fsparm32 { + struct sockaddr gd32_addr; + s8 gd32_path[NFS_MAXPATHLEN+1]; + s32 gd32_maxlen; +}; + +struct nfsctl_arg32 { + s32 ca32_version; /* safeguard */ + union { + struct nfsctl_svc32 u32_svc; + struct nfsctl_client32 u32_client; + struct nfsctl_export32 u32_export; + struct nfsctl_uidmap32 u32_umap; + struct nfsctl_fhparm32 u32_getfh; + struct nfsctl_fdparm32 u32_getfd; + struct nfsctl_fsparm32 u32_getfs; + } u; +#define ca32_svc u.u32_svc +#define ca32_client u.u32_client +#define ca32_export u.u32_export +#define ca32_umap u.u32_umap +#define ca32_getfh u.u32_getfh +#define ca32_getfd u.u32_getfd +#define ca32_getfs u.u32_getfs +#define ca32_authd u.u32_authd +}; + +union nfsctl_res32 { + __u8 cr32_getfh[NFS_FHSIZE]; + struct knfsd_fh cr32_getfs; +}; + +static int nfs_svc32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= __get_user(karg->ca_svc.svc_port, &arg32->ca32_svc.svc32_port); + err |= __get_user(karg->ca_svc.svc_nthreads, &arg32->ca32_svc.svc32_nthreads); + return err; +} + +static int nfs_clnt32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= copy_from_user(&karg->ca_client.cl_ident[0], + &arg32->ca32_client.cl32_ident[0], + NFSCLNT_IDMAX); + err |= __get_user(karg->ca_client.cl_naddr, &arg32->ca32_client.cl32_naddr); + err |= copy_from_user(&karg->ca_client.cl_addrlist[0], + &arg32->ca32_client.cl32_addrlist[0], + (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)); + err |= __get_user(karg->ca_client.cl_fhkeytype, + &arg32->ca32_client.cl32_fhkeytype); + err |= __get_user(karg->ca_client.cl_fhkeylen, + &arg32->ca32_client.cl32_fhkeylen); + err |= copy_from_user(&karg->ca_client.cl_fhkey[0], + &arg32->ca32_client.cl32_fhkey[0], + NFSCLNT_KEYMAX); + return err; +} + +static int nfs_exp32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= copy_from_user(&karg->ca_export.ex_client[0], + &arg32->ca32_export.ex32_client[0], + NFSCLNT_IDMAX); + err |= copy_from_user(&karg->ca_export.ex_path[0], + &arg32->ca32_export.ex32_path[0], + NFS_MAXPATHLEN); + err |= __get_user(karg->ca_export.ex_dev, + &arg32->ca32_export.ex32_dev); + err |= __get_user(karg->ca_export.ex_ino, + &arg32->ca32_export.ex32_ino); + err |= __get_user(karg->ca_export.ex_flags, + &arg32->ca32_export.ex32_flags); + err |= 
__get_user(karg->ca_export.ex_anon_uid, + &arg32->ca32_export.ex32_anon_uid); + err |= __get_user(karg->ca_export.ex_anon_gid, + &arg32->ca32_export.ex32_anon_gid); + karg->ca_export.ex_anon_uid = high2lowuid(karg->ca_export.ex_anon_uid); + karg->ca_export.ex_anon_gid = high2lowgid(karg->ca_export.ex_anon_gid); + return err; +} + +static int nfs_uud32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + u32 uaddr; + int i; + int err; + + memset(karg, 0, sizeof(*karg)); + if(__get_user(karg->ca_version, &arg32->ca32_version)) + return -EFAULT; + karg->ca_umap.ug_ident = (char *)get_free_page(GFP_USER); + if(!karg->ca_umap.ug_ident) + return -ENOMEM; + err = __get_user(uaddr, &arg32->ca32_umap.ug32_ident); + if(strncpy_from_user(karg->ca_umap.ug_ident, + (char *)A(uaddr), PAGE_SIZE) <= 0) + return -EFAULT; + err |= __get_user(karg->ca_umap.ug_uidbase, + &arg32->ca32_umap.ug32_uidbase); + err |= __get_user(karg->ca_umap.ug_uidlen, + &arg32->ca32_umap.ug32_uidlen); + err |= __get_user(uaddr, &arg32->ca32_umap.ug32_udimap); + if (err) + return -EFAULT; + karg->ca_umap.ug_udimap = kmalloc((sizeof(uid_t) * karg->ca_umap.ug_uidlen), + GFP_USER); + if(!karg->ca_umap.ug_udimap) + return -ENOMEM; + for(i = 0; i < karg->ca_umap.ug_uidlen; i++) + err |= __get_user(karg->ca_umap.ug_udimap[i], + &(((__kernel_uid_t32 *)A(uaddr))[i])); + err |= __get_user(karg->ca_umap.ug_gidbase, + &arg32->ca32_umap.ug32_gidbase); + err |= __get_user(karg->ca_umap.ug_uidlen, + &arg32->ca32_umap.ug32_gidlen); + err |= __get_user(uaddr, &arg32->ca32_umap.ug32_gdimap); + if (err) + return -EFAULT; + karg->ca_umap.ug_gdimap = kmalloc((sizeof(gid_t) * karg->ca_umap.ug_uidlen), + GFP_USER); + if(!karg->ca_umap.ug_gdimap) + return -ENOMEM; + for(i = 0; i < karg->ca_umap.ug_gidlen; i++) + err |= __get_user(karg->ca_umap.ug_gdimap[i], + &(((__kernel_gid_t32 *)A(uaddr))[i])); + + return err; +} + +static int nfs_getfh32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= copy_from_user(&karg->ca_getfh.gf_addr, + &arg32->ca32_getfh.gf32_addr, + (sizeof(struct sockaddr))); + err |= __get_user(karg->ca_getfh.gf_dev, + &arg32->ca32_getfh.gf32_dev); + err |= __get_user(karg->ca_getfh.gf_ino, + &arg32->ca32_getfh.gf32_ino); + err |= __get_user(karg->ca_getfh.gf_version, + &arg32->ca32_getfh.gf32_version); + return err; +} + +static int nfs_getfd32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= copy_from_user(&karg->ca_getfd.gd_addr, + &arg32->ca32_getfd.gd32_addr, + (sizeof(struct sockaddr))); + err |= copy_from_user(&karg->ca_getfd.gd_path, + &arg32->ca32_getfd.gd32_path, + (NFS_MAXPATHLEN+1)); + err |= __get_user(karg->ca_getfd.gd_version, + &arg32->ca32_getfd.gd32_version); + return err; +} + +static int nfs_getfs32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) +{ + int err; + + err = __get_user(karg->ca_version, &arg32->ca32_version); + err |= copy_from_user(&karg->ca_getfs.gd_addr, + &arg32->ca32_getfs.gd32_addr, + (sizeof(struct sockaddr))); + err |= copy_from_user(&karg->ca_getfs.gd_path, + &arg32->ca32_getfs.gd32_path, + (NFS_MAXPATHLEN+1)); + err |= __get_user(karg->ca_getfs.gd_maxlen, + &arg32->ca32_getfs.gd32_maxlen); + return err; +} + +/* This really doesn't need translations, we are only passing + * back a union which contains opaque nfs file handle data. 
+ */ +static int nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32) +{ + return copy_to_user(res32, kres, sizeof(*res32)); +} + +/* +asmlinkage long sys_ni_syscall(void); +*/ + +int asmlinkage sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32) +{ + struct nfsctl_arg *karg = NULL; + union nfsctl_res *kres = NULL; + mm_segment_t oldfs; + int err; + + karg = kmalloc(sizeof(*karg), GFP_USER); + if(!karg) + return -ENOMEM; + if(res32) { + kres = kmalloc(sizeof(*kres), GFP_USER); + if(!kres) { + kfree(karg); + return -ENOMEM; + } + } + switch(cmd) { + case NFSCTL_SVC: + err = nfs_svc32_trans(karg, arg32); + break; + case NFSCTL_ADDCLIENT: + err = nfs_clnt32_trans(karg, arg32); + break; + case NFSCTL_DELCLIENT: + err = nfs_clnt32_trans(karg, arg32); + break; + case NFSCTL_EXPORT: + case NFSCTL_UNEXPORT: + err = nfs_exp32_trans(karg, arg32); + break; + /* This one is unimplemented, be we're ready for it. */ + case NFSCTL_UGIDUPDATE: + err = nfs_uud32_trans(karg, arg32); + break; + case NFSCTL_GETFH: + err = nfs_getfh32_trans(karg, arg32); + break; + case NFSCTL_GETFD: + err = nfs_getfd32_trans(karg, arg32); + break; + case NFSCTL_GETFS: + err = nfs_getfs32_trans(karg, arg32); + break; + default: + err = -EINVAL; + break; + } + if(err) + goto done; + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_nfsservctl(cmd, karg, kres); + set_fs(oldfs); + + if (err) + goto done; + + if((cmd == NFSCTL_GETFH) || + (cmd == NFSCTL_GETFD) || + (cmd == NFSCTL_GETFS)) + err = nfs_getfh32_res_trans(kres, res32); + +done: + if(karg) { + if(cmd == NFSCTL_UGIDUPDATE) { + if(karg->ca_umap.ug_ident) + kfree(karg->ca_umap.ug_ident); + if(karg->ca_umap.ug_udimap) + kfree(karg->ca_umap.ug_udimap); + if(karg->ca_umap.ug_gdimap) + kfree(karg->ca_umap.ug_gdimap); + } + kfree(karg); + } + if(kres) + kfree(kres); + return err; +} + +/* Translations due to time_t size differences. Which affects all + sorts of things, like timeval and itimerval. */ + +extern struct timezone sys_tz; +extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz); + +asmlinkage int sys32_gettimeofday(struct timeval32 *tv, struct timezone *tz) +{ + if (tv) { + struct timeval ktv; + do_gettimeofday(&ktv); + if (put_tv32(tv, &ktv)) + return -EFAULT; + } + if (tz) { + if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) + return -EFAULT; + } + return 0; +} + +asmlinkage int sys32_settimeofday(struct timeval32 *tv, struct timezone *tz) +{ + struct timeval ktv; + struct timezone ktz; + + if (tv) { + if (get_tv32(&ktv, tv)) + return -EFAULT; + } + if (tz) { + if (copy_from_user(&ktz, tz, sizeof(ktz))) + return -EFAULT; + } + + return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL); +} + +extern int do_getitimer(int which, struct itimerval *value); + +asmlinkage int sys32_getitimer(int which, struct itimerval32 *it) +{ + struct itimerval kit; + int error; + + error = do_getitimer(which, &kit); + if (!error && put_it32(it, &kit)) + error = -EFAULT; + + return error; +} + +extern int do_setitimer(int which, struct itimerval *, struct itimerval *); + +asmlinkage int sys32_setitimer(int which, struct itimerval32 *in, struct itimerval32 *out) +{ + struct itimerval kin, kout; + int error; + + if (in) { + if (get_it32(&kin, in)) + return -EFAULT; + } else + memset(&kin, 0, sizeof(kin)); + + error = do_setitimer(which, &kin, out ? 
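&kout : NULL);
+	if (error || !out)
+		return error;
+	if (put_it32(out, &kout))
+		return -EFAULT;
+
+	return 0;
+}

[Editor's illustration, not part of the patch: the get_tv32()/put_tv32()/get_it32()/put_it32() helpers used above convert between the 31-bit timeval/itimerval layouts (32-bit fields) and the kernel's native ones (long fields), one member at a time. A standalone sketch of that conversion; the struct and function names here are ours.]

#include <stdio.h>

struct timeval32 { int tv_sec; int tv_usec; };		/* 31-bit ABI layout */
struct ktimeval  { long tv_sec; long tv_usec; };	/* native 64-bit layout */

static void tv_from_32(struct ktimeval *k, const struct timeval32 *u)
{
	k->tv_sec = u->tv_sec;		/* each field widens individually; */
	k->tv_usec = u->tv_usec;	/* a raw memcpy would misplace every byte */
}

int main(void)
{
	struct timeval32 u = { 1000000000, 500000 };
	struct ktimeval k;

	tv_from_32(&k, &u);
	printf("%ld.%06ld\n", k.tv_sec, k.tv_usec);
	return 0;
}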
+
+asmlinkage int sys_utimes(char *, struct timeval *);
+
+asmlinkage int sys32_utimes(char *filename, struct timeval32 *tvs)
+{
+	char *kfilename;
+	struct timeval ktvs[2];
+	mm_segment_t old_fs;
+	int ret;
+
+	kfilename = getname(filename);
+	ret = PTR_ERR(kfilename);
+	if (!IS_ERR(kfilename)) {
+		if (tvs) {
+			if (get_tv32(&ktvs[0], tvs) ||
+			    get_tv32(&ktvs[1], 1+tvs))
+				return -EFAULT;
+		}
+
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		ret = sys_utimes(kfilename, &ktvs[0]);
+		set_fs(old_fs);
+
+		putname(kfilename);
+	}
+	return ret;
+}
+
+/* This is here just in case some old 31-bit binary calls it. */
+asmlinkage int sys32_pause(void)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
+}
+
+extern asmlinkage int sys_prctl(int option, unsigned long arg2, unsigned long arg3,
+				unsigned long arg4, unsigned long arg5);
+
+asmlinkage int sys32_prctl(int option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+{
+	return sys_prctl(option,
+			 (unsigned long) arg2,
+			 (unsigned long) arg3,
+			 (unsigned long) arg4,
+			 (unsigned long) arg5);
+}
+
+
+extern asmlinkage ssize_t sys_pread(unsigned int fd, char * buf,
+				    size_t count, loff_t pos);
+
+extern asmlinkage ssize_t sys_pwrite(unsigned int fd, const char * buf,
+				     size_t count, loff_t pos);
+
+typedef __kernel_ssize_t32 ssize_t32;
+
+asmlinkage ssize_t32 sys32_pread(unsigned int fd, char *ubuf,
+				 __kernel_size_t32 count, u32 poshi, u32 poslo)
+{
+	return sys_pread(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+}
+
+asmlinkage ssize_t32 sys32_pwrite(unsigned int fd, char *ubuf,
+				  __kernel_size_t32 count, u32 poshi, u32 poslo)
+{
+	return sys_pwrite(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+}
+
+
+extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count);
+
+asmlinkage int sys32_sendfile(int out_fd, int in_fd, __kernel_off_t32 *offset, s32 count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	off_t of;
+
+	if (offset && get_user(of, offset))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
+	set_fs(old_fs);
+
+	if (!ret && offset && put_user(of, offset))
+		return -EFAULT;
+
+	return ret;
+}
+
+/* Handle adjtimex compatibility.
*/ + +struct timex32 { + u32 modes; + s32 offset, freq, maxerror, esterror; + s32 status, constant, precision, tolerance; + struct timeval32 time; + s32 tick; + s32 ppsfreq, jitter, shift, stabil; + s32 jitcnt, calcnt, errcnt, stbcnt; + s32 :32; s32 :32; s32 :32; s32 :32; + s32 :32; s32 :32; s32 :32; s32 :32; + s32 :32; s32 :32; s32 :32; s32 :32; +}; + +extern int do_adjtimex(struct timex *); + +asmlinkage int sys32_adjtimex(struct timex32 *utp) +{ + struct timex txc; + int ret; + + memset(&txc, 0, sizeof(struct timex)); + + if(get_user(txc.modes, &utp->modes) || + __get_user(txc.offset, &utp->offset) || + __get_user(txc.freq, &utp->freq) || + __get_user(txc.maxerror, &utp->maxerror) || + __get_user(txc.esterror, &utp->esterror) || + __get_user(txc.status, &utp->status) || + __get_user(txc.constant, &utp->constant) || + __get_user(txc.precision, &utp->precision) || + __get_user(txc.tolerance, &utp->tolerance) || + __get_user(txc.time.tv_sec, &utp->time.tv_sec) || + __get_user(txc.time.tv_usec, &utp->time.tv_usec) || + __get_user(txc.tick, &utp->tick) || + __get_user(txc.ppsfreq, &utp->ppsfreq) || + __get_user(txc.jitter, &utp->jitter) || + __get_user(txc.shift, &utp->shift) || + __get_user(txc.stabil, &utp->stabil) || + __get_user(txc.jitcnt, &utp->jitcnt) || + __get_user(txc.calcnt, &utp->calcnt) || + __get_user(txc.errcnt, &utp->errcnt) || + __get_user(txc.stbcnt, &utp->stbcnt)) + return -EFAULT; + + ret = do_adjtimex(&txc); + + if(put_user(txc.modes, &utp->modes) || + __put_user(txc.offset, &utp->offset) || + __put_user(txc.freq, &utp->freq) || + __put_user(txc.maxerror, &utp->maxerror) || + __put_user(txc.esterror, &utp->esterror) || + __put_user(txc.status, &utp->status) || + __put_user(txc.constant, &utp->constant) || + __put_user(txc.precision, &utp->precision) || + __put_user(txc.tolerance, &utp->tolerance) || + __put_user(txc.time.tv_sec, &utp->time.tv_sec) || + __put_user(txc.time.tv_usec, &utp->time.tv_usec) || + __put_user(txc.tick, &utp->tick) || + __put_user(txc.ppsfreq, &utp->ppsfreq) || + __put_user(txc.jitter, &utp->jitter) || + __put_user(txc.shift, &utp->shift) || + __put_user(txc.stabil, &utp->stabil) || + __put_user(txc.jitcnt, &utp->jitcnt) || + __put_user(txc.calcnt, &utp->calcnt) || + __put_user(txc.errcnt, &utp->errcnt) || + __put_user(txc.stbcnt, &utp->stbcnt)) + ret = -EFAULT; + + return ret; +} + +extern asmlinkage long sys_setpriority(int which, int who, int niceval); + +asmlinkage int sys_setpriority32(u32 which, u32 who, u32 niceval) +{ + return sys_setpriority((int) which, + (int) who, + (int) niceval); +} + +struct __sysctl_args32 { + u32 name; + int nlen; + u32 oldval; + u32 oldlenp; + u32 newval; + u32 newlen; + u32 __unused[4]; +}; + +extern asmlinkage long sys32_sysctl(struct __sysctl_args32 *args) +{ + struct __sysctl_args32 tmp; + int error; + size_t oldlen, *oldlenp = NULL; + unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7; + + if (copy_from_user(&tmp, args, sizeof(tmp))) + return -EFAULT; + + if (tmp.oldval && tmp.oldlenp) { + /* Duh, this is ugly and might not work if sysctl_args + is in read-only memory, but do_sysctl does indirectly + a lot of uaccess in both directions and we'd have to + basically copy the whole sysctl.c here, and + glibc's __sysctl uses rw memory for the structure + anyway. 
+	 */
+		if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
+		    put_user(oldlen, (size_t *)addr))
+			return -EFAULT;
+		oldlenp = (size_t *)addr;
+	}
+
+	lock_kernel();
+	error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
+			  oldlenp, (void *)A(tmp.newval), tmp.newlen);
+	unlock_kernel();
+	if (oldlenp) {
+		if (!error) {
+			if (get_user(oldlen, (size_t *)addr) ||
+			    put_user(oldlen, (u32 *)A(tmp.oldlenp)))
+				error = -EFAULT;
+		}
+		copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+	}
+	return error;
+}
+
+struct stat64_emu31 {
+	unsigned short	st_dev;
+	unsigned char	__pad0[6];
+
+	long long	st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	__u32		st_uid;
+	__u32		st_gid;
+
+	unsigned short	st_rdev;
+	unsigned char	__pad3[10];
+
+	long long	st_size;
+	__u32		st_blksize;
+
+	__u32		st_blocks;	/* Number of 512-byte blocks allocated. */
+	__u32		__pad4;		/* future possible st_blocks high bits */
+
+	__u32		st_atime;
+	__u32		__pad5;
+
+	__u32		st_mtime;
+	__u32		__pad6;
+
+	__u32		st_ctime;
+	__u32		__pad7;		/* will be high 32 bits of ctime someday */
+
+	__u32		__unused1;
+	__u32		__unused2;
+};
+
+static inline int
+putstat64 (struct stat64_emu31 *ubuf, struct stat *kbuf)
+{
+	int err;
+
+	err = put_user (kbuf->st_dev, &ubuf->st_dev);
+	err |= __put_user (kbuf->st_ino, &ubuf->st_ino);
+	err |= __put_user (kbuf->st_mode, &ubuf->st_mode);
+	err |= __put_user (kbuf->st_nlink, &ubuf->st_nlink);
+	err |= __put_user (kbuf->st_uid, &ubuf->st_uid);
+	err |= __put_user (kbuf->st_gid, &ubuf->st_gid);
+	err |= __put_user (kbuf->st_rdev, &ubuf->st_rdev);
+	err |= __put_user (kbuf->st_size, &ubuf->st_size);
+	err |= __put_user (kbuf->st_blksize, &ubuf->st_blksize);
+	err |= __put_user (kbuf->st_blocks, &ubuf->st_blocks);
+	err |= __put_user (kbuf->st_atime, &ubuf->st_atime);
+	err |= __put_user (kbuf->st_mtime, &ubuf->st_mtime);
+	err |= __put_user (kbuf->st_ctime, &ubuf->st_ctime);
+	return err;
+}
+
+extern asmlinkage long sys_newstat(char * filename, struct stat * statbuf);
+
+asmlinkage long sys32_stat64(char * filename, struct stat64_emu31 * statbuf, long flags)
+{
+	int ret;
+	struct stat s;
+	char * tmp;
+	int err;
+	mm_segment_t old_fs = get_fs();
+
+	tmp = getname(filename);
+	err = PTR_ERR(tmp);
+	if (IS_ERR(tmp))
+		return err;
+
+	set_fs (KERNEL_DS);
+	ret = sys_newstat(tmp, &s);
+	set_fs (old_fs);
+	putname(tmp);
+	if (putstat64 (statbuf, &s))
+		return -EFAULT;
+	return ret;
+}
+
+extern asmlinkage long sys_newlstat(char * filename, struct stat * statbuf);
+
+asmlinkage long sys32_lstat64(char * filename, struct stat64_emu31 * statbuf, long flags)
+{
+	int ret;
+	struct stat s;
+	char * tmp;
+	int err;
+	mm_segment_t old_fs = get_fs();
+
+	tmp = getname(filename);
+	err = PTR_ERR(tmp);
+	if (IS_ERR(tmp))
+		return err;
+
+	set_fs (KERNEL_DS);
+	ret = sys_newlstat(tmp, &s);	/* lstat must not follow symlinks */
+	set_fs (old_fs);
+	putname(tmp);
+	if (putstat64 (statbuf, &s))
+		return -EFAULT;
+	return ret;
+}
+
+extern asmlinkage long sys_newfstat(unsigned int fd, struct stat * statbuf);
+
+asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 * statbuf, long flags)
+{
+	int ret;
+	struct stat s;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_newfstat(fd, &s);
+	set_fs (old_fs);
+	if (putstat64 (statbuf, &s))
+		return -EFAULT;
+	return ret;
+}
+
+/*
+ * Linux/i386 didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing.
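+ */

[Editor's illustration, not part of the patch: the memory-block convention the comment above describes, seen from the caller's side -- all six mmap parameters travel in one struct and the system call receives just a pointer to it, which old32_mmap() below copies in with copy_from_user(). The constants and the sketched syscall invocation are illustrative only.]

#include <stdio.h>
#include <string.h>

/* mirrors mmap_arg_struct_emu31 below */
struct mmap_args32 {
	unsigned int addr, len, prot, flags, fd, offset;
};

int main(void)
{
	struct mmap_args32 a;

	memset(&a, 0, sizeof(a));
	a.len   = 8192;
	a.prot  = 0x3;			/* PROT_READ | PROT_WRITE */
	a.flags = 0x22;			/* MAP_PRIVATE | MAP_ANONYMOUS */
	a.fd    = (unsigned int)-1;
	/* a 31-bit libc would now do the equivalent of
	 *	syscall(__NR_mmap, &a);
	 * one register, one pointer, six parameters */
	printf("argument block: %zu bytes\n", sizeof(a));
	return 0;
}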
+
+struct mmap_arg_struct_emu31 {
+	u32	addr;
+	u32	len;
+	u32	prot;
+	u32	flags;
+	u32	fd;
+	u32	offset;
+};
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	int error = -EBADF;
+	struct file * file = NULL;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+
+	down(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:
+	return error;
+}
+
+
+asmlinkage unsigned long
+old32_mmap(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+
+	error = -EINVAL;
+	if (a.offset & ~PAGE_MASK)
+		goto out;
+
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+	return error;
+}
+
+asmlinkage long
+sys32_mmap2(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+out:
+	return error;
+}
+
+extern asmlinkage long sys_socket(int family, int type, int protocol);
+extern asmlinkage long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
+extern asmlinkage long sys_connect(int fd, struct sockaddr *uservaddr, int addrlen);
+extern asmlinkage long sys_listen(int fd, int backlog);
+extern asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_addrlen);
+extern asmlinkage long sys_getsockname(int fd, struct sockaddr *usockaddr, int *usockaddr_len);
+extern asmlinkage long sys_getpeername(int fd, struct sockaddr *usockaddr, int *usockaddr_len);
+extern asmlinkage long sys_socketpair(int family, int type, int protocol, int usockvec[2]);
+extern asmlinkage long sys_send(int fd, void * buff, size_t len, unsigned flags);
+extern asmlinkage long sys_sendto(int fd, void * buff, size_t len, unsigned flags,
+				  struct sockaddr *addr, int addr_len);
+extern asmlinkage long sys_recv(int fd, void * ubuf, size_t size, unsigned flags);
+extern asmlinkage long sys_recvfrom(int fd, void * ubuf, size_t size, unsigned flags,
+				    struct sockaddr *addr, int *addr_len);
+extern asmlinkage long sys_shutdown(int fd, int how);
+extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char *optval, int * optlen);
+
+/* Argument list sizes for sys_socketcall */
+#define AL(x) ((x) * sizeof(u32))
+static unsigned char nas[18] = {AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
+				AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
+				AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
+#undef AL
+
+asmlinkage long sys32_socketcall(int call, u32 *args)
+{
+	int ret;
+	u32 a[6];
+
+	if (call < SYS_SOCKET || call > SYS_RECVMSG)
+		return -EINVAL;
+	if (copy_from_user(a, args, nas[call]))
+		return -EFAULT;
+	switch(call) {
+	case SYS_SOCKET:
+		ret = sys_socket(a[0], a[1], a[2]);
+		break;
+	case SYS_BIND:
+		ret = sys_bind(a[0], (struct sockaddr *) A(a[1]), a[2]);
+		break;
+	case SYS_CONNECT:
+		ret = sys_connect(a[0], (struct sockaddr *) A(a[1]), a[2]);
+		break;
+	case SYS_LISTEN:
+		ret = sys_listen(a[0], a[1]);
+		break;
+	case SYS_ACCEPT:
+		ret = sys_accept(a[0], (struct sockaddr *) A(a[1]),
+				 (int *) A(a[2]));
+		break;
+	case SYS_GETSOCKNAME:
+		ret = sys_getsockname(a[0], (struct sockaddr *) A(a[1]),
+				      (int *) A(a[2]));
+		break;
+	case
SYS_GETPEERNAME: + ret = sys_getpeername(a[0], (struct sockaddr *) A(a[1]), + (int *) A(a[2])); + break; + case SYS_SOCKETPAIR: + ret = sys_socketpair(a[0], a[1], a[2], (int *) A(a[3])); + break; + case SYS_SEND: + ret = sys_send(a[0], (void *) A(a[1]), a[2], a[3]); + break; + case SYS_SENDTO: + ret = sys_sendto(a[0], (void*) A(a[1]), a[2], a[3], (struct sockaddr *) A(a[4]), a[5]); + break; + case SYS_RECV: + ret = sys_recv(a[0], (void *) A(a[1]), a[2], a[3]); + break; + case SYS_RECVFROM: + ret = sys_recvfrom(a[0], (void *) A(a[1]), a[2], a[3], (struct sockaddr *) A(a[4]), (int *) A(a[5]) ); + break; + case SYS_SHUTDOWN: + ret = sys_shutdown(a[0], a[1]); + break; + case SYS_SETSOCKOPT: + ret = sys32_setsockopt(a[0], a[1], a[2], (char *) A(a[3]), + a[4]); + break; + case SYS_GETSOCKOPT: + ret = sys_getsockopt(a[0], a[1], a[2], (char *) A(a[3]), (int *) A(a[4]) ); + break; + case SYS_SENDMSG: + ret = sys32_sendmsg(a[0], (struct msghdr32 *) A(a[1]), + a[2]); + break; + case SYS_RECVMSG: + ret = sys32_recvmsg(a[0], (struct msghdr32 *) A(a[1]), + a[2]); + break; + default: + ret = EINVAL; + break; + } + return ret; +} + diff --git a/arch/s390x/kernel/linux32.h b/arch/s390x/kernel/linux32.h new file mode 100644 index 000000000..c3641b0a0 --- /dev/null +++ b/arch/s390x/kernel/linux32.h @@ -0,0 +1,246 @@ +#ifndef _ASM_S390X_S390_H +#define _ASM_S390X_S390_H + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_S390_SUPPORT + +/* Macro that masks the high order bit of an 32 bit pointer and converts it*/ +/* to a 64 bit pointer */ +#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) +#define AA(__x) \ + ((unsigned long)(__x)) + +/* Now 32bit compatibility types */ +typedef unsigned int __kernel_size_t32; +typedef int __kernel_ssize_t32; +typedef int __kernel_ptrdiff_t32; +typedef int __kernel_time_t32; +typedef int __kernel_clock_t32; +typedef int __kernel_pid_t32; +typedef unsigned short __kernel_ipc_pid_t32; +typedef unsigned short __kernel_uid_t32; +typedef unsigned short __kernel_gid_t32; +typedef unsigned short __kernel_dev_t32; +typedef unsigned int __kernel_ino_t32; +typedef unsigned short __kernel_mode_t32; +typedef unsigned short __kernel_umode_t32; +typedef short __kernel_nlink_t32; +typedef int __kernel_daddr_t32; +typedef int __kernel_off_t32; +typedef unsigned int __kernel_caddr_t32; +typedef long __kernel_loff_t32; +typedef __kernel_fsid_t __kernel_fsid_t32; + +struct ipc_kludge_32 { + __u32 msgp; /* pointer */ + __s32 msgtyp; +}; + +#define F_GETLK64 12 +#define F_SETLK64 13 +#define F_SETLKW64 14 + +struct flock32 { + short l_type; + short l_whence; + __kernel_off_t32 l_start; + __kernel_off_t32 l_len; + __kernel_pid_t32 l_pid; + short __unused; +}; + +struct stat32 { + unsigned short st_dev; + unsigned short __pad1; + __u32 st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + __u32 st_size; + __u32 st_blksize; + __u32 st_blocks; + __u32 st_atime; + __u32 __unused1; + __u32 st_mtime; + __u32 __unused2; + __u32 st_ctime; + __u32 __unused3; + __u32 __unused4; + __u32 __unused5; +}; + +struct statfs32 { + __s32 f_type; + __s32 f_bsize; + __s32 f_blocks; + __s32 f_bfree; + __s32 f_bavail; + __s32 f_files; + __s32 f_ffree; + __kernel_fsid_t f_fsid; + __s32 f_namelen; + __s32 f_spare[6]; +}; + +typedef __u32 old_sigset_t32; /* at least 32 bits */ + +struct old_sigaction32 { + __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */ + 
old_sigset_t32 sa_mask; /* A 32 bit mask */ + __u32 sa_flags; + __u32 sa_restorer; /* Another 32 bit pointer */ +}; + +#define _SIGCONTEXT_NSIG_WORDS32 2 +typedef struct { + __u32 sig[_SIGCONTEXT_NSIG_WORDS32]; +} sigset_t32; + +typedef union sigval32 { + int sival_int; + __u32 sival_ptr; +} sigval_t32; + +typedef struct siginfo32 { + int si_signo; + int si_errno; + int si_code; + + union { + int _pad[((128/sizeof(int)) - 3)]; + + /* kill() */ + struct { + pid_t _pid; /* sender's pid */ + uid_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + unsigned int _timer1; + unsigned int _timer2; + + } _timer; + + /* POSIX.1b signals */ + struct { + pid_t _pid; /* sender's pid */ + uid_t _uid; /* sender's uid */ + sigval_t32 _sigval; + } _rt; + + /* SIGCHLD */ + struct { + pid_t _pid; /* which child */ + uid_t _uid; /* sender's uid */ + int _status;/* exit code */ + __kernel_clock_t32 _utime; + __kernel_clock_t32 _stime; + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + __u32 _addr; /* faulting insn/memory ref. - pointer */ + } _sigfault; + + /* SIGPOLL */ + struct { + int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + } _sifields; +} siginfo_t32; + +/* + * How these fields are to be accessed. + */ +#define si_pid _sifields._kill._pid +#define si_uid _sifields._kill._uid +#define si_status _sifields._sigchld._status +#define si_utime _sifields._sigchld._utime +#define si_stime _sifields._sigchld._stime +#define si_value _sifields._rt._sigval +#define si_int _sifields._rt._sigval.sival_int +#define si_ptr _sifields._rt._sigval.sival_ptr +#define si_addr _sifields._sigfault._addr +#define si_band _sifields._sigpoll._band +#define si_fd _sifields._sigpoll._fd + +/* asm/sigcontext.h */ +typedef union +{ + __u64 d; + __u32 f; +} freg_t32; + +typedef struct +{ + unsigned int fpc; + freg_t32 fprs[__NUM_FPRS]; +} _s390_fp_regs32; + +typedef struct +{ + __u32 mask; + __u32 addr; +} _psw_t32 __attribute__ ((aligned(8))); + +typedef struct +{ + _psw_t32 psw; + __u32 gprs[__NUM_GPRS]; + __u32 acrs[__NUM_ACRS]; +} _s390_regs_common32; + +typedef struct +{ + _s390_regs_common32 regs; + _s390_fp_regs32 fpregs; +} _sigregs32; + +#define _SIGCONTEXT_NSIG32 64 +#define _SIGCONTEXT_NSIG_BPW32 32 +#define __SIGNAL_FRAMESIZE32 96 +#define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2) + +struct sigcontext32 +{ + __u32 oldmask[_SIGCONTEXT_NSIG_WORDS32]; + __u32 sregs; /* pointer */ +}; + +/* asm/signal.h */ +struct sigaction32 { + __u32 sa_handler; /* pointer */ + __u32 sa_flags; + __u32 sa_restorer; /* pointer */ + sigset_t32 sa_mask; /* mask last for extensibility */ +}; + +typedef struct { + __u32 ss_sp; /* pointer */ + int ss_flags; + __kernel_size_t32 ss_size; +} stack_t32; + +/* asm/ucontext.h */ +struct ucontext32 { + __u32 uc_flags; + __u32 uc_link; /* pointer */ + stack_t32 uc_stack; + sigset_t32 uc_sigmask; /* mask last for extensibility */ + __u32 sc; /* pointer */ +}; + +#endif /* !CONFIG_S390_SUPPORT */ + +#endif /* _ASM_S390X_S390_H */ diff --git a/arch/s390x/kernel/lowcore.S b/arch/s390x/kernel/lowcore.S new file mode 100644 index 000000000..4cf3bf8f2 --- /dev/null +++ b/arch/s390x/kernel/lowcore.S @@ -0,0 +1,28 @@ +/* + * arch/s390/kernel/lowcore.S + * S390 lowcore definition. 
+ * + * S390 64 bit Version + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hpenner@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com), + */ +#include + + .align 8192 + .globl init_S390_lowcore +init_S390_lowcore: + .fill 0x1a0-0x000,1,0 + .quad _RESTART_PSW_MASK + .quad restart_int_handler + .quad _EXT_PSW_MASK + .quad ext_int_handler + .quad _SVC_PSW_MASK + .quad system_call + .quad _PGM_PSW_MASK + .quad pgm_check_handler + .quad _MCCK_PSW_MASK + .quad mcck_int_handler +EXT_PSW: .quad _IO_PSW_MASK + .quad io_int_handler + .fill 0x2000-0x200,1,0 diff --git a/arch/s390x/kernel/mathemu.c b/arch/s390x/kernel/mathemu.c new file mode 100644 index 000000000..db6dc9431 --- /dev/null +++ b/arch/s390x/kernel/mathemu.c @@ -0,0 +1,920 @@ +/* + * arch/s390/kernel/mathemu.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * + * 'mathemu.c' handles IEEE instructions on a S390 processor + * that does not have the IEEE fpu + */ + +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SYSCTL +int sysctl_ieee_emulation_warnings=1; +#endif + +static void display_emulation_not_implemented(char *instr) +{ + struct pt_regs *regs; + __u16 *location; + +#if CONFIG_SYSCTL + if(sysctl_ieee_emulation_warnings) +#endif + { + regs=current->thread.regs; + location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); + printk("%s ieee fpu instruction not emulated process name: %s pid: %d \n", + instr, + current->comm, current->pid); + printk("%s's PSW: %08lx %08lx\n",instr, + (unsigned long) regs->psw.mask, + (unsigned long) location); + } +} + + +static void set_CC_df(__u64 val1,__u64 val2) { + int rc; + rc = __cmpdf2(val1,val2); + current->thread.regs->psw.mask &= 0xFFFFCFFFFFFFFFFFL; + switch (rc) { + case -1: + current->thread.regs->psw.mask |= 0x0000100000000000L; + break; + case 1: + current->thread.regs->psw.mask |= 0x0000200000000000L; + break; + } +} + +static void set_CC_sf(__u32 val1,__u32 val2) { + int rc; + rc = __cmpsf2(val1,val2); + current->thread.regs->psw.mask &= 0xFFFFCFFFFFFFFFFF; + switch (rc) { + case -1: + current->thread.regs->psw.mask |= 0x0000100000000000L; + break; + case 1: + current->thread.regs->psw.mask |= 0x0000200000000000L; + break; + } +} + + +static void emu_adb (int rx, __u64 val) { + current->thread.fp_regs.fprs[rx].d = __adddf3(current->thread.fp_regs.fprs[rx].d,val); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_adbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = __adddf3(current->thread.fp_regs.fprs[rx].d, + current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_aeb (int rx, __u32 val) { + current->thread.fp_regs.fprs[rx].f = __addsf3(current->thread.fp_regs.fprs[rx].f,val); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_aebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __addsf3(current->thread.fp_regs.fprs[rx].f, + current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_axbr (int rx, int ry) { + display_emulation_not_implemented("axbr"); +} + +static void emu_cdb (int rx, __u64 val) { + set_CC_df(current->thread.fp_regs.fprs[rx].d,val); +} + +static void emu_cdbr (int rx, int ry) { + set_CC_df(current->thread.fp_regs.fprs[rx].d,current->thread.fp_regs.fprs[ry].d); +} + +static void emu_cdfbr (int rx, 
int ry) { + current->thread.fp_regs.fprs[rx].d = + __floatsidf(current->thread.regs->gprs[ry]); +} + +static void emu_ceb (int rx, __u32 val) { + set_CC_sf(current->thread.fp_regs.fprs[rx].f,val); +} + +static void emu_cebr (int rx, int ry) { + set_CC_sf(current->thread.fp_regs.fprs[rx].f,current->thread.fp_regs.fprs[ry].f); +} + +static void emu_cefbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = + __floatsisf(current->thread.regs->gprs[ry]); +} + +static void emu_cfdbr (int rx, int ry, int mask) { + current->thread.regs->gprs[rx] = + __fixdfsi(current->thread.fp_regs.fprs[ry].d); +} + +static void emu_cfebr (int rx, int ry, int mask) { + current->thread.regs->gprs[rx] = + __fixsfsi(current->thread.fp_regs.fprs[ry].f); +} + +static void emu_cfxbr (int rx, int ry, int mask) { + display_emulation_not_implemented("cfxbr"); +} + +static void emu_cxbr (int rx, int ry) { + display_emulation_not_implemented("cxbr"); +} + +static void emu_cxfbr (int rx, int ry) { + display_emulation_not_implemented("cxfbr"); +} + +static void emu_ddb (int rx, __u64 val) { + current->thread.fp_regs.fprs[rx].d = __divdf3(current->thread.fp_regs.fprs[rx].d,val); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_ddbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = __divdf3(current->thread.fp_regs.fprs[rx].d, + current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_deb (int rx, __u32 val) { + current->thread.fp_regs.fprs[rx].f = __divsf3(current->thread.fp_regs.fprs[rx].f,val); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_debr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __divsf3(current->thread.fp_regs.fprs[rx].f, + current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_didbr (int rx, int ry, int mask) { + display_emulation_not_implemented("didbr"); +} + +static void emu_diebr (int rx, int ry, int mask) { + display_emulation_not_implemented("diebr"); +} + +static void emu_dxbr (int rx, int ry) { + display_emulation_not_implemented("dxbr"); +} + +static void emu_efpc (int rx, int ry) { + display_emulation_not_implemented("efpc"); +} + +static void emu_fidbr (int rx, int ry, int mask) { + display_emulation_not_implemented("fidbr"); +} + +static void emu_fiebr (int rx, int ry, int mask) { + display_emulation_not_implemented("fiebr"); +} + +static void emu_fixbr (int rx, int ry, int mask) { + display_emulation_not_implemented("fixbr"); +} + +static void emu_kdb (int rx, __u64 val) { + display_emulation_not_implemented("kdb"); +} + +static void emu_kdbr (int rx, int ry) { + display_emulation_not_implemented("kdbr"); +} + +static void emu_keb (int rx, __u32 val) { + display_emulation_not_implemented("keb"); +} + +static void emu_kebr (int rx, int ry) { + display_emulation_not_implemented("kebr"); +} + +static void emu_kxbr (int rx, int ry) { + display_emulation_not_implemented("kxbr"); +} + +static void emu_lcdbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = + __negdf2(current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_lcebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = + __negsf2(current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_lcxbr (int rx, int ry) { + display_emulation_not_implemented("lcxbr"); +} + +static void emu_ldeb (int rx, __u32 val) { + 
current->thread.fp_regs.fprs[rx].d = __extendsfdf2(val); +} + +static void emu_ldebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = + __extendsfdf2(current->thread.fp_regs.fprs[ry].f); +} + +static void emu_ldxbr (int rx, int ry) { + display_emulation_not_implemented("ldxbr"); +} + +static void emu_ledbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __truncdfsf2(current->thread.fp_regs.fprs[ry].d); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_lexbr (int rx, int ry) { + display_emulation_not_implemented("lexbr"); +} + +static void emu_lndbr (int rx, int ry) { + display_emulation_not_implemented("lndbr"); +} + +static void emu_lnebr (int rx, int ry) { + display_emulation_not_implemented("lnebr"); +} + +static void emu_lnxbr (int rx, int ry) { + display_emulation_not_implemented("lnxbr"); +} + +static void emu_lpdbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = __absdf2(current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0); +} + +static void emu_lpebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __abssf2(current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_lpxbr (int rx, int ry) { + display_emulation_not_implemented("lpxbr"); +} + +static void emu_ltdbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = current->thread.fp_regs.fprs[ry].d; + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_ltebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = current->thread.fp_regs.fprs[ry].f; + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_ltxbr (int rx, int ry) { + display_emulation_not_implemented("ltxbr"); +} + +static void emu_lxdb (int rx, __u64 val) { + display_emulation_not_implemented("lxdb"); +} + +static void emu_lxdbr (int rx, int ry) { + display_emulation_not_implemented("lxdbr"); +} + +static void emu_lxeb (int rx, __u32 val) { + display_emulation_not_implemented("lxeb"); +} + +static void emu_lxebr (int rx, int ry) { + display_emulation_not_implemented("lxebr"); +} + +static void emu_madb (int rx, __u64 val, int mask) { + display_emulation_not_implemented("madb"); +} + +static void emu_madbr (int rx, int ry, int mask) { + display_emulation_not_implemented("madbr"); +} + +static void emu_maeb (int rx, __u32 val, int mask) { + display_emulation_not_implemented("maeb"); +} + +static void emu_maebr (int rx, int ry, int mask) { + display_emulation_not_implemented("maebr"); +} + +static void emu_mdb (int rx, __u64 val) { + current->thread.fp_regs.fprs[rx].d = __muldf3(current->thread.fp_regs.fprs[rx].d,val); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_mdbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = __muldf3(current->thread.fp_regs.fprs[rx].d, + current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_mdeb (int rx, __u32 val) { + display_emulation_not_implemented("mdeb"); +} + +static void emu_mdebr (int rx, int ry) { + display_emulation_not_implemented("mdebr"); +} + +static void emu_meeb (int rx, __u32 val) { + current->thread.fp_regs.fprs[rx].f = __mulsf3(current->thread.fp_regs.fprs[rx].f, + val); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_meebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __mulsf3(current->thread.fp_regs.fprs[rx].f, + current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} 
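Every implemented helper above follows the same two-step pattern: do the arithmetic with a libgcc soft-float routine (__adddf3, __muldf3, ...), then derive the PSW condition code by comparing the result with zero, which is what set_CC_df()/set_CC_sf() encode into bits 44-45 of the PSW mask. A runnable model of that pattern, with native doubles standing in for the soft-float calls and a plain int standing in for the CC field; none of these names are kernel symbols:

	#include <stdio.h>

	static double fprs[16];	/* stands in for current->thread.fp_regs */
	static int psw_cc;	/* stands in for bits 44-45 of the PSW mask */

	/* CC convention used by set_CC_df() above: 0 = zero, 1 = < 0, 2 = > 0. */
	static void set_cc_model(double result)
	{
		psw_cc = (result < 0.0) ? 1 : (result > 0.0) ? 2 : 0;
	}

	/* Model of emu_adbr(): rx += ry, then CC from the result. */
	static void emu_adbr_model(int rx, int ry)
	{
		fprs[rx] += fprs[ry];	/* __adddf3() in the real emulator */
		set_cc_model(fprs[rx]);	/* set_CC_df(fprs[rx], 0ULL) */
	}

	int main(void)
	{
		fprs[1] = 1.5;
		fprs[2] = -2.25;
		emu_adbr_model(1, 2);
		printf("fpr1 = %g, cc = %d\n", fprs[1], psw_cc);	/* fpr1 = -0.75, cc = 1 */
		return 0;
	}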
+ +static void emu_msdb (int rx, __u64 val, int mask) { + display_emulation_not_implemented("msdb"); +} + +static void emu_msdbr (int rx, int ry, int mask) { + display_emulation_not_implemented("msdbr"); +} + +static void emu_mseb (int rx, __u32 val, int mask) { + display_emulation_not_implemented("mseb"); +} + +static void emu_msebr (int rx, int ry, int mask) { + display_emulation_not_implemented("msebr"); +} + +static void emu_mxbr (int rx, int ry) { + display_emulation_not_implemented("mxbr"); +} + +static void emu_mxdb (int rx, __u64 val) { + display_emulation_not_implemented("mxdb"); +} + +static void emu_mxdbr (int rx, int ry) { + display_emulation_not_implemented("mxdbr"); +} + +static void emu_sdb (int rx, __u64 val) { + current->thread.fp_regs.fprs[rx].d = __subdf3(current->thread.fp_regs.fprs[rx].d, + val); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_sdbr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].d = __subdf3(current->thread.fp_regs.fprs[rx].d, + current->thread.fp_regs.fprs[ry].d); + set_CC_df(current->thread.fp_regs.fprs[rx].d,0ULL); +} + +static void emu_seb (int rx, __u32 val) { + current->thread.fp_regs.fprs[rx].f = __subsf3(current->thread.fp_regs.fprs[rx].f, + val); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_sebr (int rx, int ry) { + current->thread.fp_regs.fprs[rx].f = __subsf3(current->thread.fp_regs.fprs[rx].f, + current->thread.fp_regs.fprs[ry].f); + set_CC_sf(current->thread.fp_regs.fprs[rx].f,0); +} + +static void emu_sfpc (int rx, int ry) { + display_emulation_not_implemented("sfpc"); +} + +static void emu_sqdb (int rx, __u64 val) { + display_emulation_not_implemented("sqdb"); +} + +static void emu_sqdbr (int rx, int ry) { + display_emulation_not_implemented("sqdbr"); +} + +static void emu_sqeb (int rx, __u32 val) { + display_emulation_not_implemented("sqeb"); +} + +static void emu_sqebr (int rx, int ry) { + display_emulation_not_implemented("sqebr"); +} + +static void emu_sqxbr (int rx, int ry) { + display_emulation_not_implemented("sqxbr"); +} + +static void emu_sxbr (int rx, int ry) { + display_emulation_not_implemented("sxbr"); +} + +static void emu_tcdb (int rx, __u64 val) { + display_emulation_not_implemented("tcdb"); +} + +static void emu_tceb (int rx, __u32 val) { + display_emulation_not_implemented("tceb"); +} + +static void emu_tcxb (int rx, __u64 val) { + display_emulation_not_implemented("tcxb"); +} + + +static inline void emu_load_regd(int reg) { + if ((reg&9) == 0) { /* test if reg in {0,2,4,6} */ + __asm__ __volatile ( /* load reg from fp_regs.fprs[reg] */ + " bras 1,0f\n" + " ld 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) + : "1" ); + } +} + +static inline void emu_load_rege(int reg) { + if ((reg&9) == 0) { /* test if reg in {0,2,4,6} */ + __asm__ __volatile ( /* load reg from fp_regs.fprs[reg] */ + " bras 1,0f\n" + " le 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) + : "1" ); + } +} + +static inline void emu_store_regd(int reg) { + if ((reg&9) == 0) { /* test if reg in {0,2,4,6} */ + __asm__ __volatile ( /* store reg to fp_regs.fprs[reg] */ + " bras 1,0f\n" + " std 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) + : "1" ); + } +} + + +static inline void emu_store_rege(int reg) { + if ((reg&9) == 0) { /* test if reg in {0,2,4,6} */ + __asm__ __volatile ( /* store reg to fp_regs.fprs[reg] */ + " bras 1,0f\n"
+ " ste 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) + : "1" ); + } +} + +int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { + static const __u8 format_table[] = { + 2, 2, 2, 2, 9, 1, 2, 1, 2, 2, 2, 2, 9, 2, 4, 4, + 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 3, 3, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1,10, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, + 0, 0, 0, 0, 5, 6, 6, 0, 7, 8, 8, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + static const void *jump_table[]= { + emu_lpebr, emu_lnebr, emu_ltebr, emu_lcebr, + emu_ldebr, emu_lxdbr, emu_lxebr, emu_mxdbr, + emu_kebr, emu_cebr, emu_aebr, emu_sebr, + emu_mdebr, emu_debr, emu_maebr, emu_msebr, + emu_lpdbr, emu_lndbr, emu_ltdbr, emu_lcdbr, + emu_sqebr, emu_sqdbr, emu_sqxbr, emu_meebr, + emu_kdbr, emu_cdbr, emu_adbr, emu_sdbr, + emu_mdbr, emu_ddbr, emu_madbr, emu_msdbr, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + emu_lpxbr, emu_lnxbr, emu_ltxbr, emu_lcxbr, + emu_ledbr, emu_ldxbr, emu_lexbr, emu_fixbr, + emu_kxbr, emu_cxbr, emu_axbr, emu_sxbr, + emu_mxbr, emu_dxbr, NULL, NULL, + NULL, NULL, NULL, emu_diebr, + NULL, NULL, NULL, emu_fiebr, + NULL, NULL, NULL, emu_didbr, + NULL, NULL, NULL, emu_fidbr, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + emu_sfpc, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + emu_efpc, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + emu_cefbr, emu_cdfbr, emu_cxfbr, NULL, + emu_cfebr, emu_cfdbr, emu_cfxbr + }; + + switch (format_table[opcode[1]]) { + case 1: /* RRE format, double operation */ + emu_store_regd((opcode[3]>>4)&15); + emu_store_regd(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_regd((opcode[3]>>4)&15); + emu_load_regd(opcode[3]&15); + return 0; + case 2: /* RRE format, float operation */ + emu_store_rege((opcode[3]>>4)&15); + emu_store_rege(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_rege((opcode[3]>>4)&15); + emu_load_rege(opcode[3]&15); + return 0; + case 3: /* RRF format, double operation */ + emu_store_regd((opcode[3]>>4)&15); + emu_store_regd(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15,opcode[2]>>4); + emu_load_regd((opcode[3]>>4)&15); + emu_load_regd(opcode[3]&15); + return 0; + case 4: /* RRF format, float operation */ + emu_store_rege((opcode[3]>>4)&15); + emu_store_rege(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int, int))jump_table[opcode[1]]) +
(opcode[3]>>4,opcode[3]&15,opcode[2]>>4); + emu_load_rege((opcode[3]>>4)&15); + emu_load_rege(opcode[3]&15); + return 0; + case 5: /* RRE format, cefbr instruction */ + emu_store_rege((opcode[3]>>4)&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_rege((opcode[3]>>4)&15); + return 0; + case 6: /* RRE format, cdfbr & cxfbr instruction */ + emu_store_regd((opcode[3]>>4)&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_regd((opcode[3]>>4)&15); + return 0; + /* FIXME !! */ + return 0; + case 7: /* RRF format, cfebr instruction */ + emu_store_rege(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15,opcode[2]>>4); + return 0; + case 8: /* RRF format, cfdbr & cfxbr instruction */ + emu_store_regd(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15,opcode[2]>>4); + return 0; + case 9: /* RRE format, ldebr & mdebr instruction */ + /* float store but double load */ + emu_store_rege((opcode[3]>>4)&15); + emu_store_rege(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_regd((opcode[3]>>4)&15); + return 0; + case 10: /* RRE format, ledbr instruction */ + /* double store but float load */ + emu_store_regd((opcode[3]>>4)&15); + emu_store_regd(opcode[3]&15); + /* call the emulation function */ + ((void (*)(int, int))jump_table[opcode[1]]) + (opcode[3]>>4,opcode[3]&15); + emu_load_rege((opcode[3]>>4)&15); + return 0; + default: + return 1; + } +} + +static void* calc_addr(struct pt_regs *regs,int rx,int rb,int disp) +{ + rx &= 0xf; + rb &= 0xf; + disp &= 0xfff; + return (void*) ((rx != 0 ? regs->gprs[rx] : 0) + /* index */ + (rb != 0 ? regs->gprs[rb] : 0) + /* base */ + disp); +} + +int math_emu_ed(__u8 *opcode, struct pt_regs * regs) { + static const __u8 format_table[] = { + 0, 0, 0, 0, 5, 1, 2, 1, 2, 2, 2, 2, 5, 2, 4, 4, + 2, 1, 1, 0, 2, 1, 0, 2, 1, 1, 1, 1, 1, 1, 3, 3, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + static const void *jump_table[]= { + NULL, NULL, NULL, NULL, + emu_ldeb, emu_lxdb, emu_lxeb, emu_mxdb, + emu_keb, emu_ceb, emu_aeb, emu_seb, + emu_mdeb, emu_deb, emu_maeb, emu_mseb, + emu_tceb, emu_tcdb, emu_tcxb, NULL, + emu_sqeb, emu_sqdb, NULL, emu_meeb, + emu_kdb, emu_cdb, emu_adb, emu_sdb, + emu_mdb, emu_ddb, emu_madb, emu_msdb + }; + + switch (format_table[opcode[5]]) { + case 1: /* RXE format, __u64 constant */ { + __u64 *dxb, temp; + __u32 opc; + + emu_store_regd((opcode[1]>>4)&15); + opc = *((__u32 *) opcode); + dxb = (__u64 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if copy_from_user fails ? 
*/ + copy_from_user(&temp, dxb, 8); + /* call the emulation function */ + ((void (*)(int, __u64))jump_table[opcode[5]]) + (opcode[1]>>4,temp); + emu_load_regd((opcode[1]>>4)&15); + return 0; + } + case 2: /* RXE format, __u32 constant */ { + __u32 *dxb, temp; + __u32 opc; + + emu_store_rege((opcode[1]>>4)&15); + opc = *((__u32 *) opcode); + dxb = (__u32 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if get_user fails ? */ + get_user(temp, dxb); + /* call the emulation function */ + ((void (*)(int, __u32))jump_table[opcode[5]]) + (opcode[1]>>4,temp); + emu_load_rege((opcode[1]>>4)&15); + return 0; + } + case 3: /* RXF format, __u64 constant */ { + __u64 *dxb, temp; + __u32 opc; + + emu_store_regd((opcode[1]>>4)&15); + opc = *((__u32 *) opcode); + dxb = (__u64 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if copy_from_user fails ? */ + copy_from_user(&temp, dxb, 8); + /* call the emulation function */ + ((void (*)(int, __u64, int))jump_table[opcode[5]]) + (opcode[1]>>4,temp,opcode[4]>>4); + emu_load_regd((opcode[1]>>4)&15); + return 0; + } + case 4: /* RXF format, __u32 constant */ { + __u32 *dxb, temp; + __u32 opc; + + emu_store_rege((opcode[1]>>4)&15); + opc = *((__u32 *) opcode); + dxb = (__u32 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if get_user fails ? */ + get_user(temp, dxb); + /* call the emulation function */ + ((void (*)(int, __u32, int))jump_table[opcode[5]]) + (opcode[1]>>4,temp,opcode[4]>>4); + emu_load_rege((opcode[1]>>4)&15); + return 0; + } + case 5: /* RXE format, __u32 constant */ + /* store_rege and load_regd */ + { + __u32 *dxb, temp; + __u32 opc; + emu_store_rege((opcode[1]>>4)&15); + opc = *((__u32 *) opcode); + dxb = (__u32 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if get_user fails ?
*/ + get_user(temp, dxb); + /* call the emulation function */ + ((void (*)(int, __u32))jump_table[opcode[5]]) + (opcode[1]>>4,temp); + emu_load_regd((opcode[1]>>4)&15); + return 0; + } + default: + return 1; + } +} + +/* + * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6} + */ +void math_emu_ldr(__u8 *opcode) { + __u16 opc = *((__u16 *) opcode); + + if ((opc & 0x0090) == 0) { /* test if rx in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ + __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ + " bras 1,0f\n" + " ld 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (opc&0x00f0), + "a" (&current->thread.fp_regs.fprs[opc&0x000f].d) + : "1" ); + } else if ((opc & 0x0009) == 0) { /* test if ry in {0,2,4,6} */ + __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ + " bras 1,0f\n" + " std 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" ((opc&0x000f)<<4), + "a" (&current->thread.fp_regs.fprs[(opc&0x00f0)>>4].d) + : "1" ); + } else { /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ + current->thread.fp_regs.fprs[(opc&0x00f0)>>4] = + current->thread.fp_regs.fprs[opc&0x000f]; + } +} + +/* + * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6} + */ +void math_emu_ler(__u8 *opcode) { + __u16 opc = *((__u16 *) opcode); + + if ((opc & 0x0090) == 0) { /* test if rx in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ + __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ + " bras 1,0f\n" + " le 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" (opc&0x00f0), + "a" (&current->thread.fp_regs.fprs[opc&0x000f].f) + : "1" ); + } else if ((opc & 0x0009) == 0) { /* test if ry in {0,2,4,6} */ + __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ + " bras 1,0f\n" + " ste 0,0(%1)\n" + "0: ex %0,0(1)" + : /* no output */ + : "a" ((opc&0x000f)<<4), + "a" (&current->thread.fp_regs.fprs[(opc&0x00f0)>>4].f) + : "1" ); + } else { /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ + current->thread.fp_regs.fprs[(opc&0x00f0)>>4] = + current->thread.fp_regs.fprs[opc&0x000f]; + } +} + +/* + * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6} + */ +void math_emu_ld(__u8 *opcode, struct pt_regs * regs) { + __u32 opc = *((__u32 *) opcode); + __u64 *dxb; + + dxb = (__u64 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if copy_from_user fails ? */ + copy_from_user(&current->thread.fp_regs.fprs[(opc>>20)&15].d, dxb, 8); +} + +/* + * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6} + */ +void math_emu_le(__u8 *opcode, struct pt_regs * regs) { + __u32 opc = *((__u32 *) opcode); + __u32 *mem, *dxb; + + dxb = (__u32 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if get_user fails ? */ + mem = (__u32 *) (&current->thread.fp_regs.fprs[(opc>>20)&15].f); + get_user(mem[0], dxb); +} + +/* + * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6} + */ +void math_emu_std(__u8 *opcode, struct pt_regs * regs) { + __u32 opc = *((__u32 *) opcode); + __u64 *dxb; + dxb = (__u64 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if copy_to_user fails ? */ + copy_to_user(dxb, &current->thread.fp_regs.fprs[(opc>>20)&15].d, 8); +} + +/* + * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6} + */ +void math_emu_ste(__u8 *opcode, struct pt_regs * regs) { + __u32 opc = *((__u32 *) opcode); + __u32 *mem, *dxb; + dxb = (__u32 *) calc_addr(regs,opc>>16,opc>>12,opc); + /* FIXME: how to react if put_user fails ?
*/ + mem = (__u32 *) (&current->thread.fp_regs.fprs[(opc>>20)&15].f); + put_user(mem[0], dxb); +} + +/* + * Emulate LFPC D(B) + */ +int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) { + /* FIXME: how to do that ?!? */ + return 0; +} + +/* + * Emulate STFPC D(B) + */ +int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) { + /* FIXME: how to do that ?!? */ + return 0; +} + +/* + * Emulate SRNM D(B) + */ +int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) { + /* FIXME: how to do that ?!? */ + return 0; +} + + + + + + + + + + + + + + + + diff --git a/arch/s390x/kernel/process.c b/arch/s390x/kernel/process.c new file mode 100644 index 000000000..4533ad528 --- /dev/null +++ b/arch/s390x/kernel/process.c @@ -0,0 +1,516 @@ +/* + * arch/s390/kernel/process.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Hartmut Penner (hp@de.ibm.com), + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * Derived from "arch/i386/kernel/process.c" + * Copyright (C) 1995, Linus Torvalds + */ + +/* + * This file handles the architecture-dependent parts of process handling.. + */ + +#define __KERNEL_SYSCALLS__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED; + +asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + +/* + * The idle loop on a S390... + */ + +static psw_t wait_psw; + +int cpu_idle(void *unused) +{ + /* endless idle loop with no priority at all */ + init_idle(); + current->nice = 20; + current->counter = -100; + wait_psw.mask = _WAIT_PSW_MASK; + wait_psw.addr = (unsigned long) &&idle_wakeup; + while(1) { + if (softirq_active(smp_processor_id()) & + softirq_mask(smp_processor_id())) { + do_softirq(); + __sti(); + if (!current->need_resched) + continue; + } + if (current->need_resched) { + schedule(); + check_pgt_cache(); + continue; + } + + /* load wait psw */ + asm volatile ( + "lpswe %0" + : : "m" (wait_psw) ); +idle_wakeup: + } +} + +/* + As all the registers will only be made displayable to the root + user ( via printk ) or checking if the uid of the user is 0 from + the /proc filesystem please god this will be secure enough DJB. + The lines are given one at a time so as not to chew stack space in + printk on a crash & also for the proc filesystem when you get + 0 returned you know you've got all the lines + */ + +static int sprintf_regs(int line, char *buff, struct task_struct *task, struct pt_regs *regs) +{ + int linelen=0; + int regno,chaincnt; + u64 backchain,prev_backchain,endchain; + u64 ksp = 0; + char *mode = "???"; + + enum + { + sp_linefeed, + sp_psw, + sp_ksp, + sp_gprs, + sp_gprs1, + sp_gprs2, + sp_gprs3, + sp_gprs4, + sp_gprs5, + sp_gprs6, + sp_gprs7, + sp_gprs8, + sp_acrs, + sp_acrs1, + sp_acrs2, + sp_acrs3, + sp_acrs4, + sp_kern_backchain, + sp_kern_backchain1 + }; + + if (task) + ksp = task->thread.ksp; + if (regs && !(regs->psw.mask & PSW_PROBLEM_STATE)) + ksp = regs->gprs[15]; + + if (regs) + mode = (regs->psw.mask & PSW_PROBLEM_STATE)?
+ "User" : "Kernel"; + + switch(line) + { + case sp_linefeed: + linelen=sprintf(buff,"\n"); + break; + case sp_psw: + if(regs) + linelen=sprintf(buff, "%s PSW: %016lx %016lx\n", mode, + (unsigned long) regs->psw.mask, + (unsigned long) regs->psw.addr); + else + linelen=sprintf(buff,"pt_regs=NULL some info unavailable\n"); + break; + case sp_ksp: + linelen=sprintf(&buff[linelen], + "task: %016lx ksp: %016lx pt_regs: %016lx\n", + (addr_t)task, (addr_t)ksp, (addr_t)regs); + break; + case sp_gprs: + if(regs) + linelen=sprintf(buff, "%s GPRS:\n", mode); + break; + case sp_gprs1 ... sp_gprs8: + if(regs) + { + regno=(line-sp_gprs1)*2; + linelen = sprintf(buff,"%016lx %016lx\n", + regs->gprs[regno], + regs->gprs[regno+1]); + } + break; + case sp_acrs: + if(regs) + linelen=sprintf(buff, "%s ACRS:\n", mode); + break; + case sp_acrs1 ... sp_acrs4: + if(regs) + { + regno=(line-sp_acrs1)*4; + linelen=sprintf(buff,"%08x %08x %08x %08x\n", + regs->acrs[regno], + regs->acrs[regno+1], + regs->acrs[regno+2], + regs->acrs[regno+3]); + } + break; + case sp_kern_backchain: + if (regs && (regs->psw.mask & PSW_PROBLEM_STATE)) + break; + if (ksp) + linelen=sprintf(buff, "Kernel BackChain CallChain\n"); + break; + default: + if (ksp) + { + + backchain=ksp&PSW_ADDR_MASK; + endchain=((backchain&(-THREAD_SIZE))+THREAD_SIZE); + prev_backchain=backchain-1; + line-=sp_kern_backchain1; + for(chaincnt=0;;chaincnt++) + { + if((backchain==0)||(backchain>=endchain) + ||(chaincnt>=8)||(prev_backchain>=backchain)) + break; + if(chaincnt==line) + { + linelen+=sprintf(&buff[linelen]," %016lx [<%016lx>]\n", + backchain, + *(u64 *)(backchain+112)&PSW_ADDR_MASK); + break; + } + prev_backchain=backchain; + backchain=(*((u64 *)backchain))&PSW_ADDR_MASK; + } + } + } + return(linelen); +} + +void show_regs(struct pt_regs *regs) +{ + char buff[80]; + int line; + + printk("CPU: %d\n",smp_processor_id()); + printk("Process %s (pid: %d, stackpage=%016lX)\n", + current->comm, current->pid, 4096+(addr_t)current); + + for (line = 0; sprintf_regs(line, buff, current, regs); line++) + printk(buff); +} + +char *task_show_regs(struct task_struct *task, char *buffer) +{ + int line, len; + + for (line = 0; ; line++) + { + len = sprintf_regs(line, buffer, task, NULL); + if (!len) break; + buffer += len; + } + return buffer; +} + +int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) +{ + int clone_arg = flags | CLONE_VM; + int retval; + + __asm__ __volatile__( + " slgr 2,2\n" + " lgr 3,%1\n" + " lg 4,%6\n" /* load kernel stack ptr of parent */ + " svc %b2\n" /* Linux system call*/ + " clg 4,%6\n" /* compare ksp's: child or parent ? */ + " je 0f\n" /* parent - jump*/ + " lg 15,%6\n" /* fix kernel stack pointer*/ + " aghi 15,%7\n" + " xc 0(160,15),0(15)\n" /* clear save area */ + " lgr 2,%4\n" /* load argument*/ + " basr 14,%5\n" /* call fn*/ + " svc %b3\n" /* Linux system call*/ + "0: lgr %0,2" + : "=a" (retval) + : "d" (clone_arg), "i" (__NR_clone), "i" (__NR_exit), + "d" (arg), "a" (fn), "i" (__LC_KERNEL_STACK) , + "i" (-STACK_FRAME_OVERHEAD) + : "2", "3", "4" ); + return retval; +} + +/* + * Free current thread data structures etc.. 
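kernel_thread() above has to be written in assembly because parent and child both return from the same clone svc; the child is recognized by its different kernel stack pointer, given a cleared save area, and branched into fn. A sketch of a typical 2.4-era caller (my_worker and start_worker are made-up names, not kernel symbols):

	static int my_worker(void *data)
	{
		/* ... daemon-style work loop ... */
		return 0;
	}

	static void start_worker(void)
	{
		/* CLONE_VM is ORed in by kernel_thread() itself. */
		int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);

		if (pid < 0)
			printk("start_worker: kernel_thread failed (%d)\n", pid);
	}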
+ */ +void exit_thread(void) +{ +} + +void flush_thread(void) +{ + + current->used_math = 0; + current->flags &= ~PF_USEDFPU; +} + +void release_thread(struct task_struct *dead_task) +{ +} + +int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, + unsigned long unused, + struct task_struct * p, struct pt_regs * regs) +{ + struct stack_frame + { + unsigned long back_chain; + unsigned long eos; + unsigned long glue1; + unsigned long glue2; + unsigned long scratch[2]; + unsigned long gprs[10]; /* gprs 6 -15 */ + unsigned long fprs[2]; /* fpr 4 and 6 */ + unsigned long empty[2]; +#if CONFIG_REMOTE_DEBUG + gdb_pt_regs childregs; +#else + pt_regs childregs; +#endif + __u32 pgm_old_ilc; /* single step magic from entry.S */ + __u32 pgm_svc_step; + } *frame; + + frame = (struct stack_frame *) (4*PAGE_SIZE + (unsigned long) p) -1; + frame = (struct stack_frame *) (((unsigned long) frame)&-8L); + p->thread.regs = &frame->childregs; + p->thread.ksp = (unsigned long) frame; + frame->childregs = *regs; + frame->childregs.gprs[15] = new_stackp; + frame->eos = 0; + + /* new return point is ret_from_sys_call */ + frame->gprs[8] = (unsigned long) &ret_from_fork; + + /* fake return stack for resume(), don't go back to schedule */ + frame->gprs[9] = (unsigned long) frame; + frame->pgm_svc_step = 0; /* Nope we aren't single stepping an svc */ + /* save fprs, if used in last task */ + save_fp_regs(&p->thread.fp_regs); + p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE; + /* Don't copy debug registers */ + memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); + return 0; +} + +asmlinkage int sys_fork(struct pt_regs regs) +{ + int ret; + + lock_kernel(); + ret = do_fork(SIGCHLD, regs.gprs[15], ®s, 0); + unlock_kernel(); + return ret; +} + +asmlinkage int sys_clone(struct pt_regs regs) +{ + unsigned long clone_flags; + unsigned long newsp; + int ret; + + lock_kernel(); + clone_flags = regs.gprs[3]; + newsp = regs.orig_gpr2; + if (!newsp) + newsp = regs.gprs[15]; + ret = do_fork(clone_flags, newsp, ®s, 0); + unlock_kernel(); + return ret; +} + +/* + * This is trivial, and on the face of it looks like it + * could equally well be done in user mode. + * + * Not so, for quite unobvious reasons - register pressure. + * In user mode vfork() cannot have a stack frame, and if + * done by calling the "clone()" system call directly, you + * do not have enough call-clobbered registers to hold all + * the information you need. + */ +asmlinkage int sys_vfork(struct pt_regs regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, + regs.gprs[15], ®s, 0); +} + +/* + * sys_execve() executes a new program. + */ +asmlinkage int sys_execve(struct pt_regs regs) +{ + int error; + char * filename; + + filename = getname((char *) regs.orig_gpr2); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, (char **) regs.gprs[3], (char **) regs.gprs[4], ®s); + if (error == 0) + { + current->ptrace &= ~PT_DTRACE; + current->thread.fp_regs.fpc=0; + __asm__ __volatile__ + ("sr 0,0\n\t" + "sfpc 0,0\n\t" + : : :"0"); + } + putname(filename); +out: + return error; +} + + +/* + * fill in the FPU structure for a core dump. + */ +int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) +{ + save_fp_regs(fpregs); + return 1; +} + +/* + * fill in the user structure for a core dump.. + */ +void dump_thread(struct pt_regs * regs, struct user * dump) +{ + +/* changed the size calculations - should hopefully work better. 
lbt */ + dump->magic = CMAGIC; + dump->start_code = 0; + dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1); + dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; + dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; + dump->u_dsize -= dump->u_tsize; + dump->u_ssize = 0; + if (dump->start_stack < TASK_SIZE) + dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; + memcpy(&dump->regs.gprs[0],regs,sizeof(s390_regs)); + dump_fpu (regs, &dump->regs.fp_regs); + memcpy(&dump->regs.per_info,¤t->thread.per_info,sizeof(per_struct)); +} + +/* + * These bracket the sleeping functions.. + */ +extern void scheduling_functions_start_here(void); +extern void scheduling_functions_end_here(void); +#define first_sched ((unsigned long) scheduling_functions_start_here) +#define last_sched ((unsigned long) scheduling_functions_end_here) + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long r14, r15, bc; + unsigned long stack_page; + int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + stack_page = (unsigned long) p; + r15 = p->thread.ksp; + if (!stack_page || r15 < stack_page || r15 >= 16380+stack_page) + return 0; + bc = *(unsigned long *) r15; + do { + if (bc < stack_page || bc >= 16380+stack_page) + return 0; + r14 = *(unsigned long *) (bc+112); + if (r14 < first_sched || r14 >= last_sched) + return r14; + bc = *(unsigned long *) bc; + } while (count++ < 16); + return 0; +} +#undef last_sched +#undef first_sched + +/* + * This should be safe even if called from tq_scheduler + * A typical mask would be sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM) or 0. + * + */ +void s390_daemonize(char *name,unsigned long mask,int use_init_fs) +{ + struct fs_struct *fs; + extern struct task_struct *child_reaper; + struct task_struct *this_process=current; + + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them + * they would be locked into memory. 
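get_wchan() above relies on the s390 stack back chain: the first doubleword of every frame points to the caller's frame, and the saved return address (r14) sits 112 bytes into the frame, the same offset sprintf_regs() reads for its call-chain dump. A compact sketch of that layout and a single step of the walk; the struct and names are illustrative, not kernel definitions:

	/* One s390x (2.4 ABI) stack frame, as the back-chain walkers see it. */
	struct s390x_frame {
		unsigned long back_chain;	/* 0(%r15): caller's frame, 0 ends the chain */
		unsigned long pad[13];
		unsigned long save_r14;		/* 112(%r15): saved return address */
	};

	/* Return the caller's return address, or 0 at the end of the chain. */
	static unsigned long caller_of(unsigned long sp)
	{
		struct s390x_frame *f = (struct s390x_frame *) sp;

		return f->back_chain ?
			((struct s390x_frame *) f->back_chain)->save_r14 : 0;
	}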
+ */ + exit_mm(current); + + this_process->session = 1; + this_process->pgrp = 1; + if(name) + { + strncpy(current->comm,name,15); + current->comm[15]=0; + } + else + current->comm[0]=0; + /* set signal mask to what we want to respond */ + siginitsetinv(¤t->blocked,mask); + /* exit_signal isn't set up */ + /* if we inherit from cpu idle */ + this_process->exit_signal=SIGCHLD; + /* if priority=0 schedule can go into a tight loop */ + this_process->policy= SCHED_OTHER; + /* nice goes priority=20-nice; */ + this_process->nice=10; + if(use_init_fs) + { + exit_fs(this_process); /* current->fs->count--; */ + fs = init_task.fs; + current->fs = fs; + atomic_inc(&fs->count); + exit_files(current); + } + write_lock_irq(&tasklist_lock); + /* We want init as our parent */ + REMOVE_LINKS(this_process); + this_process->p_opptr=this_process->p_pptr=child_reaper; + SET_LINKS(this_process); + write_unlock_irq(&tasklist_lock); +} diff --git a/arch/s390x/kernel/ptrace.c b/arch/s390x/kernel/ptrace.c new file mode 100644 index 000000000..d70b76d2b --- /dev/null +++ b/arch/s390x/kernel/ptrace.c @@ -0,0 +1,613 @@ +/* + * arch/s390/kernel/ptrace.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * Based on PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Derived from "arch/m68k/kernel/ptrace.c" + * Copyright (C) 1994 by Hamish Macdonald + * Taken from linux/kernel/ptrace.c and modified for M680x0. + * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds + * + * Modified by Cort Dougan (cort@cs.nmt.edu) + * + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file README.legal in the main directory of + * this archive for more details. 
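s390_daemonize() just above turns a thread spawned from module-load context into a kernel daemon: it drops the user mm, reparents the task to init, and blocks every signal not named in mask. A hypothetical caller, using the "typical mask" suggested in the function's own comment:

	static int my_daemon(void *unused)
	{
		/* Stay responsive to SIGKILL/SIGINT/SIGTERM only; adopt init's fs. */
		s390_daemonize("mydaemond",
			       sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM),
			       1);
		for (;;) {
			/* ... wait for and service requests ... */
		}
		return 0;
	}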
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +void FixPerRegisters(struct task_struct *task) +{ + struct pt_regs *regs = task->thread.regs; + per_struct *per_info= + (per_struct *)&task->thread.per_info; + + per_info->control_regs.bits.em_instruction_fetch = + per_info->single_step | per_info->instruction_fetch; + + if (per_info->single_step) { + per_info->control_regs.bits.starting_addr=0; +#ifdef CONFIG_S390_SUPPORT + if (current->thread.flags & S390_FLAG_31BIT) { + per_info->control_regs.bits.ending_addr=0x7fffffffUL; + } + else +#endif + { + per_info->control_regs.bits.ending_addr=-1L; + } + } else { + per_info->control_regs.bits.starting_addr= + per_info->starting_addr; + per_info->control_regs.bits.ending_addr= + per_info->ending_addr; + } + /* if any of the control reg tracing bits are on + we switch on per in the psw */ + if (per_info->control_regs.words.cr[0] & PER_EM_MASK) + regs->psw.mask |= PSW_PER_MASK; + else + regs->psw.mask &= ~PSW_PER_MASK; + if (per_info->control_regs.bits.storage_alt_space_ctl) + task->thread.user_seg |= USER_STD_MASK; + else + task->thread.user_seg &= ~USER_STD_MASK; +} + +void set_single_step(struct task_struct *task) +{ + per_struct *per_info= (per_struct *) &task->thread.per_info; + + per_info->single_step = 1; /* Single step */ + FixPerRegisters (task); +} + +void clear_single_step(struct task_struct *task) +{ + per_struct *per_info= (per_struct *) &task->thread.per_info; + + per_info->single_step = 0; + FixPerRegisters (task); +} + +int ptrace_usercopy(addr_t realuseraddr, addr_t copyaddr, int len, + int tofromuser, int writeuser, unsigned long mask) +{ + unsigned long *realuserptr, *copyptr; + unsigned long tempuser; + int retval; + + retval = 0; + realuserptr = (unsigned long *) realuseraddr; + copyptr = (unsigned long *) copyaddr; + + if (writeuser && realuserptr == NULL) + return 0; + + if (mask != -1L) { + tempuser = *realuserptr; + if (!writeuser) { + tempuser &= mask; + realuserptr = &tempuser; + } + } + if (tofromuser) { + if (writeuser) { + retval = copy_from_user(realuserptr, copyptr, len); + } else { + if (realuserptr == NULL) + retval = clear_user(copyptr, len); + else + retval = copy_to_user(copyptr,realuserptr,len); + retval = (retval == -EFAULT) ? 
-EIO : 0; + } + } else { + if (writeuser) + memcpy(realuserptr, copyptr, len); + else + memcpy(copyptr, realuserptr, len); + } + if (mask != -1L && writeuser) + *realuserptr = (*realuserptr & mask) | (tempuser & ~mask); + return retval; +} + +int copy_user(struct task_struct *task,saddr_t useraddr, addr_t copyaddr, + int len, int tofromuser, int writingtouser) +{ + int copylen=0,copymax; + addr_t realuseraddr; + saddr_t enduseraddr; + + unsigned long mask; + +#ifdef CONFIG_S390_SUPPORT + if (current->thread.flags & S390_FLAG_31BIT) { + /* adjust user offsets to 64 bit structure */ + if (useraddr < PT_PSWADDR / 2) + useraddr = 2 * useraddr; + else if(useraddr < PT_ACR0 / 2) + useraddr = 2 * useraddr + sizeof(addr_t) / 2; + else if(useraddr < PT_ACR0 / 2 + (PT_ORIGGPR2 - PT_ACR0)) + useraddr = useraddr + PT_ACR0 / 2; + else if(useraddr < PT_ACR0 / 2 + (sizeof(user_regs_struct) - sizeof(addr_t) / 2 - PT_ACR0)) + useraddr = useraddr + PT_ACR0 / 2 + sizeof(addr_t) / 2; + } +#endif + + enduseraddr=useraddr+len; + + if (useraddr < 0 || enduseraddr > sizeof(struct user)|| + (useraddr < PT_ENDREGS && (useraddr&3))|| + (enduseraddr < PT_ENDREGS && (enduseraddr&3))) + return (-EIO); + while(len>0) + { + mask=PSW_ADDR_MASK; + if(useraddrthread.regs)[useraddr]); + if(useraddrthread.fp_regs)[useraddr-PT_FPC]); + } + else if(useraddrthread.per_info)[useraddr-PT_CR_9]); + } + else + { + copymax=sizeof(struct user); + realuseraddr=(addr_t)NULL; + } + copylen=copymax-useraddr; + copylen=(copylen>len ? len:copylen); + if(ptrace_usercopy(realuseraddr,copyaddr,copylen,tofromuser,writingtouser,mask)) + return (-EIO); + copyaddr+=copylen; + len-=copylen; + useraddr+=copylen; + } + FixPerRegisters(task); + return(0); +} + +asmlinkage int sys_ptrace(long request, long pid, long addr, long data) +{ + struct task_struct *child; + int ret = -EPERM; + unsigned long flags; + unsigned long tmp; + int copied; + ptrace_area parea; + + lock_kernel(); + if (request == PTRACE_TRACEME) + { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + goto out; + /* set the ptrace bit in the process flags. 
*/ + current->ptrace |= PT_PTRACED; + ret = 0; + goto out; + } + ret = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + read_unlock(&tasklist_lock); + if (!child) + goto out; + ret = -EPERM; + if (pid == 1) /* you may not mess with init */ + goto out; + if (request == PTRACE_ATTACH) + { + if (child == current) + goto out; + if ((!child->dumpable || + (current->uid != child->euid) || + (current->uid != child->suid) || + (current->uid != child->uid) || + (current->gid != child->egid) || + (current->gid != child->sgid)) && !capable(CAP_SYS_PTRACE)) + goto out; + /* the same process cannot be attached many times */ + if (child->ptrace & PT_PTRACED) + goto out; + child->ptrace |= PT_PTRACED; + + write_lock_irqsave(&tasklist_lock, flags); + if (child->p_pptr != current) + { + REMOVE_LINKS(child); + child->p_pptr = current; + SET_LINKS(child); + } + write_unlock_irqrestore(&tasklist_lock, flags); + + send_sig(SIGSTOP, child, 1); + ret = 0; + goto out; + } + ret = -ESRCH; + // printk("child=%lX child->flags=%lX",child,child->flags); + /* I added child!=current line so we can get the */ + /* ieee_instruction_pointer from the user structure DJB */ + if(child!=current) + { + if (!(child->ptrace & PT_PTRACED)) + goto out; + if (child->state != TASK_STOPPED) + { + if (request != PTRACE_KILL) + goto out; + } + if (child->p_pptr != current) + goto out; + } + switch (request) + { + /* If I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + goto out; + ret = put_user(tmp,(unsigned long *) data); + goto out; + + /* read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: + ret=copy_user(child,addr,data,sizeof(unsigned long),1,0); + break; + + /* If I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) + goto out; + ret = -EIO; + goto out; + break; + + case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ + ret=copy_user(child,addr,(addr_t)&data,sizeof(unsigned long),0,1); + break; + + case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: /* restart after signal. */ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + if (request == PTRACE_SYSCALL) + child->ptrace |= PT_TRACESYS; + else + child->ptrace &= ~PT_TRACESYS; + child->exit_code = data; + /* make sure the single step bit is not set. */ + clear_single_step(child); + wake_up_process(child); + ret = 0; + break; + +/* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + ret = 0; + if (child->state == TASK_ZOMBIE) /* already dead */ + break; + child->exit_code = SIGKILL; + clear_single_step(child); + wake_up_process(child); + /* make sure the single step bit is not set. */ + break; + + case PTRACE_SINGLESTEP: /* set the trap flag. */ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + child->ptrace &= ~PT_TRACESYS; + child->exit_code = data; + set_single_step(child); + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + + case PTRACE_DETACH: /* detach a process that was attached. 
*/ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + child->ptrace &= ~(PT_PTRACED|PT_TRACESYS); + child->exit_code = data; + write_lock_irqsave(&tasklist_lock, flags); + REMOVE_LINKS(child); + child->p_pptr = child->p_opptr; + SET_LINKS(child); + write_unlock_irqrestore(&tasklist_lock, flags); + /* make sure the single step bit is not set. */ + clear_single_step(child); + wake_up_process(child); + ret = 0; + break; + case PTRACE_PEEKUSR_AREA: + case PTRACE_POKEUSR_AREA: + if((ret=copy_from_user(&parea,(void *)addr,sizeof(parea)))==0) + ret=copy_user(child,parea.kernel_addr,parea.process_addr, + parea.len,1,(request==PTRACE_POKEUSR_AREA)); + break; + default: + ret = -EIO; + break; + } + out: + unlock_kernel(); + return ret; +} + +typedef struct +{ +__u32 len; +__u32 kernel_addr; +__u32 process_addr; +} ptrace_area_emu31; + +asmlinkage int sys32_ptrace(long request, long pid, long addr, s32 data) +{ + struct task_struct *child; + int ret = -EPERM; + unsigned long flags; + u32 tmp; + int copied; + ptrace_area parea; + + lock_kernel(); + if (request == PTRACE_TRACEME) + { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + goto out; + /* set the ptrace bit in the process flags. */ + current->ptrace |= PT_PTRACED; + ret = 0; + goto out; + } + ret = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + read_unlock(&tasklist_lock); + if (!child) + goto out; + ret = -EPERM; + if (pid == 1) /* you may not mess with init */ + goto out; + if (request == PTRACE_ATTACH) + { + if (child == current) + goto out; + if ((!child->dumpable || + (current->uid != child->euid) || + (current->uid != child->suid) || + (current->uid != child->uid) || + (current->gid != child->egid) || + (current->gid != child->sgid)) && !capable(CAP_SYS_PTRACE)) + goto out; + /* the same process cannot be attached many times */ + if (child->ptrace & PT_PTRACED) + goto out; + child->ptrace |= PT_PTRACED; + + write_lock_irqsave(&tasklist_lock, flags); + if (child->p_pptr != current) + { + REMOVE_LINKS(child); + child->p_pptr = current; + SET_LINKS(child); + } + write_unlock_irqrestore(&tasklist_lock, flags); + + send_sig(SIGSTOP, child, 1); + ret = 0; + goto out; + } + ret = -ESRCH; + // printk("child=%lX child->flags=%lX",child,child->flags); + /* I added child!=current line so we can get the */ + /* ieee_instruction_pointer from the user structure DJB */ + if(child!=current) + { + if (!(child->ptrace & PT_PTRACED)) + goto out; + if (child->state != TASK_STOPPED) + { + if (request != PTRACE_KILL) + goto out; + } + if (child->p_pptr != current) + goto out; + } + switch (request) + { + /* If I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + goto out; + ret = put_user(tmp,(u32 *)(unsigned long)data); + goto out; + + /* read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: + ret=copy_user(child,addr,data,sizeof(u32),1,0); + break; + + /* If I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. 
*/ + case PTRACE_POKEDATA: + ret = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) + goto out; + ret = -EIO; + goto out; + break; + + case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ + ret=copy_user(child,addr,(addr_t)&data,sizeof(u32),0,1); + break; + + case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: /* restart after signal. */ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + if (request == PTRACE_SYSCALL) + child->ptrace |= PT_TRACESYS; + else + child->ptrace &= ~PT_TRACESYS; + child->exit_code = data; + /* make sure the single step bit is not set. */ + clear_single_step(child); + wake_up_process(child); + ret = 0; + break; + +/* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + ret = 0; + if (child->state == TASK_ZOMBIE) /* already dead */ + break; + child->exit_code = SIGKILL; + clear_single_step(child); + wake_up_process(child); + /* make sure the single step bit is not set. */ + break; + + case PTRACE_SINGLESTEP: /* set the trap flag. */ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + child->ptrace &= ~PT_TRACESYS; + child->exit_code = data; + set_single_step(child); + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + + case PTRACE_DETACH: /* detach a process that was attached. */ + ret = -EIO; + if ((unsigned long) data >= _NSIG) + break; + child->ptrace &= ~(PT_PTRACED|PT_TRACESYS); + child->exit_code = data; + write_lock_irqsave(&tasklist_lock, flags); + REMOVE_LINKS(child); + child->p_pptr = child->p_opptr; + SET_LINKS(child); + write_unlock_irqrestore(&tasklist_lock, flags); + /* make sure the single step bit is not set. */ + clear_single_step(child); + wake_up_process(child); + ret = 0; + break; + case PTRACE_PEEKUSR_AREA: + case PTRACE_POKEUSR_AREA: + { + ptrace_area_emu31 * parea31 = (void *)addr; + if (!access_ok(VERIFY_READ, parea31, sizeof(*parea31))) + return(-EFAULT); + ret = __get_user(parea.len, &parea31->len); + ret |= __get_user(parea.kernel_addr, &parea31->kernel_addr); + ret |= __get_user(parea.process_addr, &parea31->process_addr); + if(ret==0) + ret=copy_user(child,parea.kernel_addr,parea.process_addr, + parea.len,1,(request==PTRACE_POKEUSR_AREA)); + break; + } + default: + ret = -EIO; + break; + } + out: + unlock_kernel(); + return ret; +} + +asmlinkage void syscall_trace(void) +{ + lock_kernel(); + if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) + != (PT_PTRACED|PT_TRACESYS)) + goto out; + current->exit_code = SIGTRAP; + set_current_state(TASK_STOPPED); + notify_parent(current, SIGCHLD); + schedule(); + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
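PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA above are an s390 extension that moves a whole region of the traced child's struct user in one call, described by a (len, kernel_addr, process_addr) triple; sys32_ptrace() additionally converts the 31-bit ptrace_area_emu31 form. A user-space sketch of a tracer using it; the struct layout and the 0x5000 request value are assumptions normally supplied by asm/ptrace.h:

	#include <sys/ptrace.h>

	#ifndef PTRACE_PEEKUSR_AREA
	#define PTRACE_PEEKUSR_AREA 0x5000	/* assumed value; see <asm/ptrace.h> */
	#endif

	/* Mirror of the kernel's 64-bit ptrace_area shown above. */
	struct my_ptrace_area {
		unsigned long len;		/* bytes to copy */
		unsigned long kernel_addr;	/* offset into the child's struct user */
		unsigned long process_addr;	/* tracer buffer address */
	};

	static long peek_user_area(int pid, unsigned long off, void *buf, unsigned long len)
	{
		struct my_ptrace_area pa = { len, off, (unsigned long) buf };

		return ptrace(PTRACE_PEEKUSR_AREA, pid, &pa, 0);
	}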
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + out: + unlock_kernel(); +} diff --git a/arch/s390x/kernel/reipl.S b/arch/s390x/kernel/reipl.S new file mode 100644 index 000000000..d8af95ce1 --- /dev/null +++ b/arch/s390x/kernel/reipl.S @@ -0,0 +1,94 @@ +/* + * arch/s390/kernel/reipl.S + * + * S390 version + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) + Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + */ + +#include + .globl do_reipl +do_reipl: basr %r13,0 +.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) +.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) + stctg %c0,%c0,.Lctlsave-.Lpg0(%r13) + ni .Lctlsave+4-.Lpg0(%r13),0xef + lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13) + lgr %r1,%r2 + mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) + stsch .Lschib-.Lpg0(%r13) + oi .Lschib+5-.Lpg0(%r13),0x84 +.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 + msch .Lschib-.Lpg0(%r13) + ssch .Liplorb-.Lpg0(%r13) + jz .L001 + bas %r14,.Ldisab-.Lpg0(%r13) +.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) +.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) +.Lcont: c %r1,__LC_SUBCHANNEL_ID + jnz .Ltpi + clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) + jnz .Ltpi + tsch .Liplirb-.Lpg0(%r13) + tm .Liplirb+9-.Lpg0(%r13),0xbf + jz .L002 + bas %r14,.Ldisab-.Lpg0(%r13) +.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 + jz .L003 + bas %r14,.Ldisab-.Lpg0(%r13) +.L003: spx .Lnull-.Lpg0(%r13) + st %r1,__LC_SUBCHANNEL_ID + lhi %r1,0 # mode 0 = esa + slr %r0,%r0 # set cpuid to zero + sigp %r1,%r0,0x12 # switch to esa mode + lpsw 0 +.Ldisab: sll %r14,1 + srl %r14,1 # need to kill hi bit to avoid specification exceptions. + st %r14,.Ldispsw+12-.Lpg0(%r13) + lpswe .Ldispsw-.Lpg0(%r13) + .align 8 +.Lall: .quad 0x00000000ff000000 +.Lctlsave: .quad 0x0000000000000000 +.Lnull: .long 0x0000000000000000 + .align 16 +/* + * These addresses have to be 31 bit otherwise + * the sigp will throw a specification exception + * when switching to ESA mode as bit 31 would be set + * in the ESA psw. + * Bit 31 of the addresses has to be 0 for the + * 31bit lpswe instruction, a fact they appear to have + * omitted from the pop.
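The sll/srl pair in .Ldisab above implements exactly the rule this comment describes; in C the same address fix-up is a single mask (sketch only):

	/* Clear the leftmost bit of a 32-bit address so it is a valid 31-bit
	 * PSW address; equivalent to the shift-left/shift-right pair above. */
	static unsigned int to_31bit_addr(unsigned int addr)
	{
		return addr & 0x7fffffffu;
	}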
+ */ +.Lnewpsw: .quad 0x0000000080000000 + .quad .Lpg1 +.Lpcnew: .quad 0x0000000080000000 + .quad .Lecs +.Lionew: .quad 0x0000000080000000 + .quad .Lcont +.Lwaitpsw: .quad 0x0202000080000000 + .quad .Ltpi +.Ldispsw: .quad 0x0002000080000000 + .quad 0x0000000000000000 +.Liplccws: .long 0x02000000,0x60000018 + .long 0x08000008,0x20000001 +.Liplorb: .long 0x0049504c,0x0000ff80 + .long 0x00000000+.Liplccws +.Lschib: .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 +.Liplirb: .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + .long 0x00000000,0x00000000 + + + diff --git a/arch/s390x/kernel/s390_ext.c b/arch/s390x/kernel/s390_ext.c new file mode 100644 index 000000000..6a7be9496 --- /dev/null +++ b/arch/s390x/kernel/s390_ext.c @@ -0,0 +1,77 @@ +/* + * arch/s390/kernel/s390_ext.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include +#include +#include +#include + +/* + * Simple hash strategy: index = code & 0xff; + * ext_int_hash[index] is the start of the list for all external interrupts + * that hash to this index. With the current set of external interrupts + * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console and 0x4000 + * iucv) this is always the first element. + */ +ext_int_info_t *ext_int_hash[256] = { 0, }; +ext_int_info_t ext_int_info_timer; +ext_int_info_t ext_int_info_hwc; + +int register_external_interrupt(__u16 code, ext_int_handler_t handler) { + ext_int_info_t *p; + int index; + + index = code & 0xff; + p = ext_int_hash[index]; + while (p != NULL) { + if (p->code == code) + return -EBUSY; + p = p->next; + } + if (code == 0x1004) /* time_init is done before kmalloc works :-/ */ + p = &ext_int_info_timer; + else if (code == 0x2401) /* hwc_init is done too early too */ + p = &ext_int_info_hwc; + else + p = (ext_int_info_t *) + kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC); + if (p == NULL) + return -ENOMEM; + p->code = code; + p->handler = handler; + p->next = ext_int_hash[index]; + ext_int_hash[index] = p; + return 0; +} + +int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) { + ext_int_info_t *p, *q; + int index; + + index = code & 0xff; + q = NULL; + p = ext_int_hash[index]; + while (p != NULL) { + if (p->code == code && p->handler == handler) + break; + q = p; + p = p->next; + } + if (p == NULL) + return -ENOENT; + if (q != NULL) + q->next = p->next; + else + ext_int_hash[index] = p->next; + if (code != 0x1004 && code != 0x2401) + kfree(p); + return 0; +} + + diff --git a/arch/s390x/kernel/s390_ksyms.c b/arch/s390x/kernel/s390_ksyms.c new file mode 100644 index 000000000..5ab122488 --- /dev/null +++ b/arch/s390x/kernel/s390_ksyms.c @@ -0,0 +1,157 @@ +/* + * arch/s390/kernel/s390_ksyms.c + * + * S390 version + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if CONFIG_CHANDEV +#include +#endif +#if CONFIG_IP_MULTICAST +#include +#endif + +/* + * I/O subsystem + */ +EXPORT_SYMBOL(halt_IO); +EXPORT_SYMBOL(clear_IO); +EXPORT_SYMBOL(do_IO); +EXPORT_SYMBOL(resume_IO); +EXPORT_SYMBOL(ioinfo); 
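register_external_interrupt() in s390_ext.c above hashes the interrupt code by its low byte and prepends a handler entry to that hash chain. A hypothetical driver registering for the 0x1202 external-call interrupt, assuming the 2.4 handler signature of (struct pt_regs *, __u16); the names here are made up:

	static void extcall_handler(struct pt_regs *regs, __u16 code)
	{
		/* ... acknowledge and process the 0x1202 external call ... */
	}

	static int __init extcall_init(void)
	{
		int rc = register_external_interrupt(0x1202, extcall_handler);

		if (rc)	/* -EBUSY if code 0x1202 is already taken, -ENOMEM otherwise */
			printk("extcall: registration failed (%d)\n", rc);
		return rc;
	}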
+EXPORT_SYMBOL(get_dev_info_by_irq); +EXPORT_SYMBOL(get_dev_info_by_devno); +EXPORT_SYMBOL(get_irq_by_devno); +EXPORT_SYMBOL(get_devno_by_irq); +EXPORT_SYMBOL(get_irq_first); +EXPORT_SYMBOL(get_irq_next); +EXPORT_SYMBOL(read_conf_data); +EXPORT_SYMBOL(read_dev_chars); +EXPORT_SYMBOL(s390_request_irq_special); +EXPORT_SYMBOL(s390_device_register); +EXPORT_SYMBOL(s390_device_unregister); + +EXPORT_SYMBOL(ccw_alloc_request); +EXPORT_SYMBOL(ccw_free_request); + +EXPORT_SYMBOL(register_external_interrupt); +EXPORT_SYMBOL(unregister_external_interrupt); + +/* + * debug feature + */ +EXPORT_SYMBOL(debug_register); +EXPORT_SYMBOL(debug_unregister); +EXPORT_SYMBOL(debug_set_level); +EXPORT_SYMBOL(debug_register_view); +EXPORT_SYMBOL(debug_unregister_view); +EXPORT_SYMBOL(debug_event); +EXPORT_SYMBOL(debug_int_event); +EXPORT_SYMBOL(debug_text_event); +EXPORT_SYMBOL(debug_exception); +EXPORT_SYMBOL(debug_int_exception); +EXPORT_SYMBOL(debug_text_exception); +EXPORT_SYMBOL(debug_hex_ascii_view); +EXPORT_SYMBOL(debug_raw_view); +EXPORT_SYMBOL(debug_dflt_header_fn); + +/* + * memory management + */ +EXPORT_SYMBOL(_oi_bitmap); +EXPORT_SYMBOL(_ni_bitmap); +EXPORT_SYMBOL(_zb_findmap); +EXPORT_SYMBOL(__copy_from_user_fixup); +EXPORT_SYMBOL(__copy_to_user_fixup); + +/* + * semaphore ops + */ +EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__down_trylock); + +/* + * string functions + */ +EXPORT_SYMBOL_NOVERS(memcmp); +EXPORT_SYMBOL_NOVERS(memset); +EXPORT_SYMBOL_NOVERS(memmove); +EXPORT_SYMBOL_NOVERS(strlen); +EXPORT_SYMBOL_NOVERS(strchr); +EXPORT_SYMBOL_NOVERS(strcmp); +EXPORT_SYMBOL_NOVERS(strncat); +EXPORT_SYMBOL_NOVERS(strncmp); +EXPORT_SYMBOL_NOVERS(strncpy); +EXPORT_SYMBOL_NOVERS(strnlen); +EXPORT_SYMBOL_NOVERS(strrchr); +EXPORT_SYMBOL_NOVERS(strtok); +EXPORT_SYMBOL_NOVERS(strpbrk); + +EXPORT_SYMBOL_NOVERS(_ascebc_500); +EXPORT_SYMBOL_NOVERS(_ebcasc_500); +EXPORT_SYMBOL_NOVERS(_ascebc); +EXPORT_SYMBOL_NOVERS(_ebcasc); +EXPORT_SYMBOL_NOVERS(_ebc_tolower); +EXPORT_SYMBOL_NOVERS(_ebc_toupper); + +/* + * binfmt_elf loader + */ +EXPORT_SYMBOL(get_pte_slow); +EXPORT_SYMBOL(get_pmd_slow); +extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); +EXPORT_SYMBOL(dump_fpu); +#ifdef CONFIG_S390_SUPPORT +extern int setup_arg_pages32(struct linux_binprm *bprm); +EXPORT_SYMBOL(setup_arg_pages32); +#endif +EXPORT_SYMBOL(overflowuid); +EXPORT_SYMBOL(overflowgid); + +/* + * misc. 
+ */ +EXPORT_SYMBOL(module_list); +EXPORT_SYMBOL(__udelay); +#ifdef CONFIG_SMP +#include +EXPORT_SYMBOL(__global_cli); +EXPORT_SYMBOL(__global_sti); +EXPORT_SYMBOL(__global_save_flags); +EXPORT_SYMBOL(__global_restore_flags); +EXPORT_SYMBOL(lowcore_ptr); +EXPORT_SYMBOL(global_bh_lock); +EXPORT_SYMBOL(kernel_flag); +EXPORT_SYMBOL(smp_ctl_set_bit); +EXPORT_SYMBOL(smp_ctl_clear_bit); +#endif +EXPORT_SYMBOL(kernel_thread); +#if CONFIG_CHANDEV +EXPORT_SYMBOL(chandev_register_and_probe); +EXPORT_SYMBOL(chandev_request_irq); +EXPORT_SYMBOL(chandev_unregister); +EXPORT_SYMBOL(chandev_initdevice); +EXPORT_SYMBOL(chandev_initnetdevice); +#endif +#if CONFIG_IP_MULTICAST +/* Required for lcs gigibit ethernet multicast support */ +EXPORT_SYMBOL(arp_mc_map); +#endif +EXPORT_SYMBOL(s390_daemonize); +EXPORT_SYMBOL (set_normalized_cda); + diff --git a/arch/s390x/kernel/s390fpu.c b/arch/s390x/kernel/s390fpu.c new file mode 100644 index 000000000..7dfea3fba --- /dev/null +++ b/arch/s390x/kernel/s390fpu.c @@ -0,0 +1,87 @@ +/* + * arch/s390/kernel/s390fpu.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * + * s390fpu.h functions for saving & restoring the fpu state. + * + * I couldn't inline these as linux/sched.h included half the world + * & was required to at the task structure. + * & the functions were too complex to make macros from. + * ( & as usual I didn't feel like debugging inline code ). + */ + +#include + +void save_fp_regs(s390_fp_regs *fpregs) +{ +/* + * I don't think we can use STE here as this would load + * fp registers 0 & 2 into memory locations 0 & 1 etc. + */ + asm volatile ("STFPC 0(%0)\n\t" + "STD 0,8(%0)\n\t" + "STD 1,16(%0)\n\t" + "STD 2,24(%0)\n\t" + "STD 3,32(%0)\n\t" + "STD 4,40(%0)\n\t" + "STD 5,48(%0)\n\t" + "STD 6,56(%0)\n\t" + "STD 7,64(%0)\n\t" + "STD 8,72(%0)\n\t" + "STD 9,80(%0)\n\t" + "STD 10,88(%0)\n\t" + "STD 11,96(%0)\n\t" + "STD 12,104(%0)\n\t" + "STD 13,112(%0)\n\t" + "STD 14,120(%0)\n\t" + "STD 15,128(%0)\n\t" + : + : "a" (fpregs) + : "memory" + ); +} + +void restore_fp_regs(s390_fp_regs *fpregs) +{ + /* If we don't mask with the FPC_VALID_MASK here + * we've got a very quick shutdown -h now command + * via a kernel specification exception. + */ + fpregs->fpc&=FPC_VALID_MASK; + asm volatile ("LFPC 0(%0)\n\t" + "LD 0,8(%0)\n\t" + "LD 1,16(%0)\n\t" + "LD 2,24(%0)\n\t" + "LD 3,32(%0)\n\t" + "LD 4,40(%0)\n\t" + "LD 5,48(%0)\n\t" + "LD 6,56(%0)\n\t" + "LD 7,64(%0)\n\t" + "LD 8,72(%0)\n\t" + "LD 9,80(%0)\n\t" + "LD 10,88(%0)\n\t" + "LD 11,96(%0)\n\t" + "LD 12,104(%0)\n\t" + "LD 13,112(%0)\n\t" + "LD 14,120(%0)\n\t" + "LD 15,128(%0)\n\t" + : + : "a" (fpregs) + : "memory" + ); +} + + + + + + + + + + + + diff --git a/arch/s390x/kernel/semaphore.c b/arch/s390x/kernel/semaphore.c new file mode 100644 index 000000000..8af6d8277 --- /dev/null +++ b/arch/s390x/kernel/semaphore.c @@ -0,0 +1,302 @@ +/* + * linux/arch/S390/kernel/semaphore.c + * + * S390 version + * Copyright (C) 1998-2000 IBM Corporation + * Author(s): Martin Schwidefsky + * + * Derived from "linux/arch/i386/kernel/semaphore.c + * Copyright (C) 1999, Linus Torvalds + * + */ +#include + +#include + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to acquire the semaphore, while the "sleeping" + * variable is a count of such acquires. 
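On the s390fpu.c routines above: save_fp_regs() issues STFPC at offset 0 and then STD for f0..f15 at offsets 8, 16, ..., 128, which implies a save-area layout along these lines (a sketch only; the real s390_fp_regs definition lives in the asm headers, and FPC_VALID_MASK's value is not shown in this patch):

    #include <stdint.h>

    typedef struct {
            uint32_t fpc;       /* floating point control reg, STFPC target */
            uint32_t pad;       /* STD/LD traffic starts at offset 8        */
            uint64_t fprs[16];  /* f0..f15, one per 8-byte slot             */
    } fp_regs_sketch;

restore_fp_regs() masks the saved FPC with FPC_VALID_MASK first, since reserved FPC bits scribbled into a signal frame by user space would otherwise raise a specification exception inside the kernel, the "very quick shutdown" the comment warns about.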
+ * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * "sleeping" and the contention routine ordering is + * protected by the semaphore spinlock. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ + +/* + * Logic: + * - only on a boundary condition do we need to care. When we go + * from a negative count to a non-negative, we wake people up. + * - when we go from a non-negative count to a negative do we + * (a) synchronize with the "sleeper" count and (b) make sure + * that we're on the wakeup list before we synchronize so that + * we cannot lose wakeup events. + */ + +void __up(struct semaphore *sem) +{ + wake_up(&sem->wait); +} + +static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED; + +void __down(struct semaphore * sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_UNINTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_UNINTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + wake_up(&sem->wait); +} + +int __down_interruptible(struct semaphore * sem) +{ + int retval = 0; + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_INTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers ++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * With signals pending, this turns into + * the trylock failure case - we won't be + * sleeping, and we* can't get the lock as + * it has contention. Just correct the count + * and exit. + */ + if (signal_pending(current)) { + retval = -EINTR; + sem->sleepers = 0; + atomic_add(sleepers, &sem->count); + break; + } + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. The + * "-1" is because we're still hoping to get + * the lock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_INTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + tsk->state = TASK_RUNNING; + remove_wait_queue(&sem->wait, &wait); + wake_up(&sem->wait); + return retval; +} + +/* + * Trylock failed - make sure we correct for + * having decremented the count. + */ +int __down_trylock(struct semaphore * sem) +{ + unsigned long flags; + int sleepers; + + spin_lock_irqsave(&semaphore_lock, flags); + sleepers = sem->sleepers + 1; + sem->sleepers = 0; + + /* + * Add "everybody else" and us into it. They aren't + * playing, because we own the spinlock. 
+ */ + if (!atomic_add_negative(sleepers, &sem->count)) + wake_up(&sem->wait); + + spin_unlock_irqrestore(&semaphore_lock, flags); + return 1; +} + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +void down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. 
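The sleeper accounting in __down(), __down_interruptible() and __down_trylock() above all pivots on atomic_add_negative(): add a delta to the count and atomically test the sign of the result. A portable C11 rendering of that primitive:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* add i to *v and report whether the new value is negative */
    static bool atomic_add_negative(int i, atomic_int *v)
    {
            /* atomic_fetch_add returns the old value */
            return atomic_fetch_add(v, i) + i < 0;
    }

In __down(), the delta sleepers - 1 folds every other sleeper's earlier decrement back into count while still accounting for the caller; a non-negative result means the semaphore was actually free, so the caller takes it and zeroes sleepers.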
+ */ +void rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); +} + +void rwsem_wake_writers(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); +} + +void __down_read_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count == -1) { + down_read_failed_biased(sem); + break; + } + down_read_failed(sem); + count = atomic_dec_return(&sem->count); + } while (count != 0); +} + +void __down_write_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count < 0 && count > -RW_LOCK_BIAS) { + down_write_failed_biased(sem); + break; + } + down_write_failed(sem); + count = atomic_add_return(-RW_LOCK_BIAS, &sem->count); + } while (count != 0); +} + +void __rwsem_wake(int count, struct rw_semaphore *sem) +{ + if (count == 0) + rwsem_wake_readers(sem); + else + rwsem_wake_writers(sem); +} + diff --git a/arch/s390x/kernel/setup.c b/arch/s390x/kernel/setup.c new file mode 100644 index 000000000..528793cfc --- /dev/null +++ b/arch/s390x/kernel/setup.c @@ -0,0 +1,380 @@ +/* + * arch/s390/kernel/setup.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * Derived from "arch/i386/kernel/setup.c" + * Copyright (C) 1995, Linus Torvalds + */ + +/* + * This file handles the architecture-dependent parts of initialization + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_BLK_DEV_RAM +#include +#endif +#include +#include +#include +#include +#include +#include + +/* + * Machine setup.. + */ +__u16 boot_cpu_addr; +int cpus_initialized = 0; +unsigned long cpu_initialized = 0; +volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ + +/* + * Setup options + */ + +#ifdef CONFIG_BLK_DEV_RAM +extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ +extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt*/ +extern int rd_image_start; /* starting block # of image */ +#endif + +extern int root_mountflags; +extern int _text,_etext, _edata, _end; + + +/* + * This is set up by the setup-routine at boot-time + * for S390 need to find out, what we have to setup + * using address 0x10400 ... + */ + +#include + +static char command_line[COMMAND_LINE_SIZE] = { 0, }; + char saved_command_line[COMMAND_LINE_SIZE]; + +static struct resource code_resource = { "Kernel code", 0x100000, 0 }; +static struct resource data_resource = { "Kernel data", 0, 0 }; + +/* + * cpu_init() initializes state that is per-CPU. + */ +void __init cpu_init (void) +{ + int nr = smp_processor_id(); + int addr = hard_smp_processor_id(); + + if (test_and_set_bit(nr,&cpu_initialized)) { + printk("CPU#%d ALREADY INITIALIZED!!!!!!!!!\n", nr); + for (;;) __sti(); + } + cpus_initialized++; + + /* + * Store processor id in lowcore (used e.g. 
in timer_interrupt) + */ + asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); + S390_lowcore.cpu_data.cpu_addr = addr; + S390_lowcore.cpu_data.cpu_nr = nr; + + /* + * Force FPU initialization: + */ + current->flags &= ~PF_USEDFPU; + current->used_math = 0; + + /* Setup active_mm for idle_task */ + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + if (current->mm) + BUG(); + enter_lazy_tlb(&init_mm, current, nr); +} + +/* + * VM halt and poweroff setup routines + */ +char vmhalt_cmd[128] = ""; +char vmpoff_cmd[128] = ""; + +static inline void strncpy_skip_quote(char *dst, char *src, int n) +{ + int sx, dx; + + dx = 0; + for (sx = 0; src[sx] != 0; sx++) { + if (src[sx] == '"') continue; + dst[dx++] = src[sx]; + if (dx >= n) break; + } +} + +static int __init vmhalt_setup(char *str) +{ + strncpy_skip_quote(vmhalt_cmd, str, 127); + vmhalt_cmd[127] = 0; + return 1; +} + +__setup("vmhalt=", vmhalt_setup); + +static int __init vmpoff_setup(char *str) +{ + strncpy_skip_quote(vmpoff_cmd, str, 127); + vmpoff_cmd[127] = 0; + return 1; +} + +__setup("vmpoff=", vmpoff_setup); + +/* + * Reboot, halt and power_off routines for non SMP. + */ +#ifndef CONFIG_SMP +void machine_restart(char * __unused) +{ + reipl(S390_lowcore.ipl_device); +} + +void machine_halt(void) +{ + if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) + cpcmd(vmhalt_cmd, NULL, 0); + signal_processor(smp_processor_id(), sigp_stop_and_store_status); +} + +void machine_power_off(void) +{ + if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) + cpcmd(vmpoff_cmd, NULL, 0); + signal_processor(smp_processor_id(), sigp_stop_and_store_status); +} +#endif + +/* + * Setup function called from init/main.c just after the banner + * was printed. + */ +void __init setup_arch(char **cmdline_p) +{ + unsigned long bootmap_size; + unsigned long memory_start, memory_end; + char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; + struct resource *res; + unsigned long start_pfn, end_pfn; + static unsigned int smptrap=0; + unsigned long delay = 0; + int len = 0; + + if (smptrap) + return; + smptrap=1; + + printk("Command line is: %s\n", COMMAND_LINE); + + /* + * Setup lowcore information for boot cpu + */ + cpu_init(); + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + __cpu_logical_map[0] = boot_cpu_addr; + + /* + * print what head.S has found out about the machine + */ + printk((MACHINE_IS_VM) ? + "We are running under VM\n" : + "We are running native\n"); + + ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV); +#ifdef CONFIG_BLK_DEV_RAM + rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; + rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); + rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); +#endif + /* nasty stuff with PARMAREAs. 
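strncpy_skip_quote() above copies at most n bytes while silently dropping double quotes, so vmhalt="LOGOFF" and vmhalt=LOGOFF on the command line end up identical. A standalone version with a usage example; note the kernel variant writes no terminator itself, its callers store the trailing NUL:

    #include <stdio.h>

    static void strncpy_skip_quote(char *dst, const char *src, int n)
    {
            int sx, dx = 0;

            for (sx = 0; src[sx] != 0; sx++) {
                    if (src[sx] == '"')
                            continue;       /* drop quote characters */
                    dst[dx++] = src[sx];
                    if (dx >= n)
                            break;
            }
            dst[dx] = '\0'; /* added here; the kernel callers terminate */
    }

    int main(void)
    {
            char cmd[128];

            strncpy_skip_quote(cmd, "\"LOGOFF\"", 127);
            printf("%s\n", cmd);    /* prints: LOGOFF */
            return 0;
    }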
we use head.S or parameterline
+	if (!MOUNT_ROOT_RDONLY)
+		root_mountflags &= ~MS_RDONLY;
+	*/
+	memory_start = (unsigned long) &_end;	/* fixit if use $CODELO etc*/
+	memory_end = MEMORY_SIZE;		/* detected in head.s */
+	init_mm.start_code = PAGE_OFFSET;
+	init_mm.end_code = (unsigned long) &_etext;
+	init_mm.end_data = (unsigned long) &_edata;
+	init_mm.brk = (unsigned long) &_end;
+
+	code_resource.start = (unsigned long) &_text;
+	code_resource.end = (unsigned long) &_etext - 1;
+	data_resource.start = (unsigned long) &_etext;
+	data_resource.end = (unsigned long) &_edata - 1;
+
+	/* Save unparsed command line copy for /proc/cmdline */
+	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+	for (;;) {
+		/*
+		 * "mem=XXX[kKmM]" sets memsize
+		 */
+		if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
+			if (to != command_line) to--;
+			memory_end = simple_strtoul(from+4, &from, 0);
+			if ( *from == 'K' || *from == 'k' ) {
+				memory_end = memory_end << 10;
+				from++;
+			} else if ( *from == 'M' || *from == 'm' ) {
+				memory_end = memory_end << 20;
+				from++;
+			}
+		}
+		/*
+		 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
+		 */
+		if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
+			if (to != command_line) to--;
+			delay = simple_strtoul(from+9, &from, 0);
+			if (*from == 's' || *from == 'S') {
+				delay = delay*1000000;
+				from++;
+			} else if (*from == 'm' || *from == 'M') {
+				delay = delay*60*1000000;
+				from++;
+			}
+			/* now wait for the requested amount of time */
+			udelay(delay);
+		}
+		cn = *(from++);
+		if (!cn)
+			break;
+		if (cn == '\n')
+			cn = ' ';  /* replace newlines with space */
+		if (cn == ' ' && c == ' ')
+			continue;  /* remove additional spaces */
+		c = cn;
+		if (COMMAND_LINE_SIZE <= ++len)
+			break;
+		*(to++) = c;
+	}
+	if (c == ' ' && to > command_line) to--;
+	*to = '\0';
+	*cmdline_p = command_line;
+
+	/*
+	 * partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	end_pfn = memory_end >> PAGE_SHIFT;
+
+	/*
+	 * Initialize the boot-time allocator
+	 */
+	bootmap_size = init_bootmem(start_pfn, end_pfn);
+
+	/*
+	 * Register RAM pages with the bootmem allocator.
+	 */
+	free_bootmem(start_pfn << PAGE_SHIFT,
+	             (end_pfn - start_pfn) << PAGE_SHIFT);
+
+	/*
+	 * Reserve the bootmem bitmap itself as well. We do this in two
+	 * steps (first step was init_bootmem()) because this catches
+	 * the (very unlikely) case of us accidentally initializing the
+	 * bootmem allocator with an invalid RAM area.
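The "mem=" suffix handling just above, extracted into a standalone helper for clarity (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long parse_mem(const char *arg)
    {
            char *end;
            unsigned long size = strtoul(arg, &end, 0);

            if (*end == 'k' || *end == 'K')
                    size <<= 10;    /* kilobytes */
            else if (*end == 'm' || *end == 'M')
                    size <<= 20;    /* megabytes */
            return size;
    }

    int main(void)
    {
            printf("%lu\n", parse_mem("64M"));      /* 67108864 */
            return 0;
    }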
+ */ + reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); + + + + +#ifdef CONFIG_BLK_DEV_INITRD + if (INITRD_START) { + if (INITRD_START + INITRD_SIZE <= memory_end) { + reserve_bootmem(INITRD_START, INITRD_SIZE); + initrd_start = INITRD_START; + initrd_end = initrd_start + INITRD_SIZE; + } else { + printk("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + initrd_start + INITRD_SIZE, memory_end); + initrd_start = initrd_end = 0; + } + } +#endif + + paging_init(); + + res = alloc_bootmem_low(sizeof(struct resource)); + res->start = 0; + res->end = memory_end; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + request_resource(&iomem_resource, res); + request_resource(res, &code_resource); + request_resource(res, &data_resource); +} + +void print_cpu_info(struct cpuinfo_S390 *cpuinfo) +{ + printk("cpu %d " +#ifdef CONFIG_SMP + "phys_idx=%d " +#endif + "vers=%02X ident=%06X machine=%04X unused=%04X\n", + cpuinfo->cpu_nr, +#ifdef CONFIG_SMP + cpuinfo->cpu_addr, +#endif + cpuinfo->cpu_id.version, + cpuinfo->cpu_id.ident, + cpuinfo->cpu_id.machine, + cpuinfo->cpu_id.unused); +} + +/* + * Get CPU information for use by the procfs. + */ + +int get_cpuinfo(char * buffer) +{ + struct cpuinfo_S390 *cpuinfo; + char *p = buffer; + int i; + + p += sprintf(p,"vendor_id : IBM/S390\n" + "# processors : %i\n" + "bogomips per cpu: %lu.%02lu\n", + smp_num_cpus, loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ))%100); + for (i = 0; i < smp_num_cpus; i++) { + cpuinfo = &safe_get_cpu_lowcore(i).cpu_data; + p += sprintf(p,"processor %i: " + "version = %02X, " + "identification = %06X, " + "machine = %04X\n", + i, cpuinfo->cpu_id.version, + cpuinfo->cpu_id.ident, + cpuinfo->cpu_id.machine); + } + return p - buffer; +} + diff --git a/arch/s390x/kernel/signal.c b/arch/s390x/kernel/signal.c new file mode 100644 index 000000000..4728336c4 --- /dev/null +++ b/arch/s390x/kernel/signal.c @@ -0,0 +1,595 @@ +/* + * arch/s390/kernel/signal.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * + * Based on Intel version + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +/* pretcode & sig are used to store the return addr on Intel + & the signal no as the first parameter we do this differently + using gpr14 & gpr2. */ + +#define SIGFRAME_COMMON \ +__u8 callee_used_stack[__SIGNAL_FRAMESIZE]; \ +struct sigcontext sc; \ +_sigregs sregs; \ +__u8 retcode[S390_SYSCALL_SIZE]; + +typedef struct +{ + SIGFRAME_COMMON +} sigframe; + +typedef struct +{ + SIGFRAME_COMMON + struct siginfo info; + struct ucontext uc; +} rt_sigframe; + +asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); + +int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from) +{ + if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) + return -EFAULT; + if (from->si_code < 0) + return __copy_to_user(to, from, sizeof(siginfo_t)); + else { + int err; + + /* If you change siginfo_t structure, please be sure + this code is fixed accordingly. 
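The bootmem calls in setup_arch() above follow a strict order: init_bootmem() implicitly marks every page reserved, free_bootmem() then opens up the usable RAM, and reserve_bootmem() re-reserves the allocator's own bitmap (and, below, the initrd). A toy one-bit-per-page model of that ordering; this is just the idea, not the kernel's bootmem API:

    #include <limits.h>
    #include <string.h>

    #define MAX_PFN 4096
    static unsigned char page_bitmap[MAX_PFN / CHAR_BIT]; /* 1 = reserved */

    static void mark_range(unsigned long pfn, unsigned long pages, int reserved)
    {
            unsigned long p;

            for (p = pfn; p < pfn + pages; p++) {
                    if (reserved)
                            page_bitmap[p / CHAR_BIT] |= 1u << (p % CHAR_BIT);
                    else
                            page_bitmap[p / CHAR_BIT] &= ~(1u << (p % CHAR_BIT));
            }
    }

    static void bootmem_bringup(unsigned long start_pfn, unsigned long end_pfn)
    {
            memset(page_bitmap, 0xff, sizeof(page_bitmap)); /* init: all reserved */
            mark_range(start_pfn, end_pfn - start_pfn, 0);  /* free usable RAM    */
            mark_range(start_pfn, 1, 1);    /* re-reserve the bitmap's own page   */
    }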
+ It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. */ + err = __put_user(from->si_signo, &to->si_signo); + err |= __put_user(from->si_errno, &to->si_errno); + err |= __put_user((short)from->si_code, &to->si_code); + /* First 32bits of unions are always present. */ + err |= __put_user(from->si_pid, &to->si_pid); + switch (from->si_code >> 16) { + case __SI_FAULT >> 16: + break; + case __SI_CHLD >> 16: + err |= __put_user(from->si_utime, &to->si_utime); + err |= __put_user(from->si_stime, &to->si_stime); + err |= __put_user(from->si_status, &to->si_status); + default: + err |= __put_user(from->si_uid, &to->si_uid); + break; + /* case __SI_RT: This is not generated by the kernel as of now. */ + } + return err; + } +} + +/* + * Atomically swap in the new signal mask, and wait for a signal. + */ +asmlinkage int +sys_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask) +{ + sigset_t saveset; + + mask &= _BLOCKABLE; + spin_lock_irq(¤t->sigmask_lock); + saveset = current->blocked; + siginitset(¤t->blocked, mask); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + regs->gprs[2] = -EINTR; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (do_signal(regs, &saveset)) + return -EINTR; + } +} + +asmlinkage int +sys_rt_sigsuspend(struct pt_regs * regs,sigset_t *unewset, size_t sigsetsize) +{ + sigset_t saveset, newset; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&newset, unewset, sizeof(newset))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + spin_lock_irq(¤t->sigmask_lock); + saveset = current->blocked; + current->blocked = newset; + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + regs->gprs[2] = -EINTR; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (do_signal(regs, &saveset)) + return -EINTR; + } +} + +asmlinkage int +sys_sigaction(int sig, const struct old_sigaction *act, + struct old_sigaction *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t mask; + if (verify_area(VERIFY_READ, act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); + + if (!ret && oact) { + if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} + +asmlinkage int +sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs) +{ + return do_sigaltstack(uss, uoss, regs->gprs[15]); +} + + + + +static int save_sigregs(struct pt_regs *regs,_sigregs *sregs) +{ + int err; + s390_fp_regs fpregs; + + err = __copy_to_user(&sregs->regs,regs,sizeof(s390_regs_common)); + if(!err) + { + save_fp_regs(&fpregs); + err=__copy_to_user(&sregs->fpregs,&fpregs,sizeof(fpregs)); + } + return(err); + +} + +static int restore_sigregs(struct pt_regs *regs,_sigregs *sregs) +{ + int err; + s390_fp_regs fpregs; + psw_t saved_psw=regs->psw; + err=__copy_from_user(regs,&sregs->regs,sizeof(s390_regs_common)); + if(!err) + { + regs->orig_gpr2 = -1; /* disable syscall checks */ + regs->psw.mask=(saved_psw.mask&~PSW_MASK_DEBUGCHANGE)| + (regs->psw.mask&PSW_MASK_DEBUGCHANGE); + regs->psw.addr=(saved_psw.addr&~PSW_ADDR_DEBUGCHANGE)| + (regs->psw.addr&PSW_ADDR_DEBUGCHANGE); + err=__copy_from_user(&fpregs,&sregs->fpregs,sizeof(fpregs)); + if(!err) + restore_fp_regs(&fpregs); + } + return(err); +} + +static int +restore_sigcontext(struct sigcontext *sc, pt_regs *regs, + _sigregs *sregs,sigset_t *set) +{ + unsigned int err; + + err=restore_sigregs(regs,sregs); + if(!err) + err=__copy_from_user(&set->sig,&sc->oldmask,_SIGMASK_COPY_SIZE); + return(err); +} + +int sigreturn_common(struct pt_regs *regs,int framesize) +{ + sigframe *frame = (sigframe *)regs->gprs[15]; + sigset_t set; + + if (verify_area(VERIFY_READ, frame, sizeof(*frame))) + return -1; + if (restore_sigcontext(&frame->sc,regs,&frame->sregs,&set)) + return -1; + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sigmask_lock); + current->blocked = set; + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + return 0; +} + +asmlinkage long sys_sigreturn(struct pt_regs *regs) +{ + + if (sigreturn_common(regs,sizeof(sigframe))) + goto badframe; + return regs->gprs[2]; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +{ + rt_sigframe *frame = (rt_sigframe *)regs->gprs[15]; + + if (sigreturn_common(regs,sizeof(rt_sigframe))) + goto badframe; + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]); + return regs->gprs[2]; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +/* + * Set up a signal frame. + */ + + +/* + * Determine which stack to use.. + */ +static inline void * +get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = regs->gprs[15]; + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (! on_sig_stack(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + } + + /* This is the legacy signal stack switching. 
*/ + else if (!user_mode(regs) && + !(ka->sa.sa_flags & SA_RESTORER) && + ka->sa.sa_restorer) { + sp = (unsigned long) ka->sa.sa_restorer; + } + + return (void *)((sp - frame_size) & -8ul); +} + +static void *setup_frame_common(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs * regs, + int frame_size,u16 retcode) +{ + sigframe *frame; + int err; + + frame = get_sigframe(ka, regs,frame_size); + if (!access_ok(VERIFY_WRITE, frame,frame_size)) + return 0; + err = save_sigregs(regs,&frame->sregs); + if(!err) + err=__put_user(&frame->sregs,&frame->sc.sregs); + if(!err) + + err=__copy_to_user(&frame->sc.oldmask,&set->sig,_SIGMASK_COPY_SIZE); + if(!err) + { + regs->gprs[2]=(current->exec_domain + && current->exec_domain->signal_invmap + && sig < 32 + ? current->exec_domain->signal_invmap[sig] + : sig); + /* Set up registers for signal handler */ + regs->gprs[15] = (addr_t)frame; + regs->psw.addr = FIX_PSW(ka->sa.sa_handler); + } + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa.sa_flags & SA_RESTORER) { + regs->gprs[14] = FIX_PSW(ka->sa.sa_restorer); + } else { + regs->gprs[14] = FIX_PSW(frame->retcode); + err |= __put_user(retcode, (u16 *)(frame->retcode)); + } + return(err ? 0:frame); +} + +static void setup_frame(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs * regs) +{ + sigframe *frame; + + if((frame=setup_frame_common(sig,ka,set,regs,sizeof(sigframe), + (S390_SYSCALL_OPCODE|__NR_sigreturn)))==0) + goto give_sigsegv; +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->eip, frame->pretcode); +#endif + /* Martin wants this for pthreads */ + regs->gprs[3] = (addr_t)&frame->sc; + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs * regs) +{ + rt_sigframe *frame; + addr_t orig_sp=regs->gprs[15]; + int err; + + if((frame=setup_frame_common(sig,ka,set,regs,sizeof(rt_sigframe), + (S390_SYSCALL_OPCODE|__NR_rt_sigreturn)))==0) + goto give_sigsegv; + + err = copy_siginfo_to_user(&frame->info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(orig_sp), + &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __put_user(&frame->sc,&frame->uc.sc); + regs->gprs[3] = (addr_t)&frame->info; + regs->gprs[4] = (addr_t)&frame->uc; + + if (err) + goto give_sigsegv; + +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->eip, frame->pretcode); +#endif + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +/* + * OK, we're invoking a handler + */ + +static void +handle_signal(unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +{ + /* Are we from a system call? */ + if (regs->orig_gpr2 >= 0) { + /* If so, check system call restarting.. 
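setup_frame_common() above plants a two-byte return stub in frame->retcode and points gpr14 at it, so that returning from the signal handler re-enters the kernel through sigreturn. Assuming the usual svc encoding, opcode byte 0x0a followed by the 8-bit call number, which is what S390_SYSCALL_OPCODE|__NR_sigreturn suggests, the stub amounts to:

    #include <stdint.h>

    /* encode "svc n": 0x0a in the high byte, syscall number in the low byte */
    static uint16_t svc_stub(uint8_t syscall_nr)
    {
            return (uint16_t)(0x0a00 | syscall_nr);
    }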
*/ + switch (regs->gprs[2]) { + case -ERESTARTNOHAND: + regs->gprs[2] = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->gprs[2] = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + regs->gprs[2] = regs->orig_gpr2; + regs->psw.addr -= 2; + } + } + + /* Set up the stack frame */ + if (ka->sa.sa_flags & SA_SIGINFO) + setup_rt_frame(sig, ka, info, oldset, regs); + else + setup_frame(sig, ka, oldset, regs); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(¤t->sigmask_lock); + sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigaddset(¤t->blocked,sig); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + } +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +int do_signal(struct pt_regs *regs, sigset_t *oldset) +{ + siginfo_t info; + struct k_sigaction *ka; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return 1; + + if (!oldset) + oldset = ¤t->blocked; +#ifdef CONFIG_S390_SUPPORT + if (current->thread.flags & S390_FLAG_31BIT) { + extern asmlinkage int do_signal32(struct pt_regs *regs, sigset_t *oldset); + return do_signal32(regs, oldset); + } +#endif + + for (;;) { + unsigned long signr; + + spin_lock_irq(¤t->sigmask_lock); + signr = dequeue_signal(¤t->blocked, &info); + spin_unlock_irq(¤t->sigmask_lock); + + if (!signr) + break; + + if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { + /* Let the debugger run. */ + current->exit_code = signr; + set_current_state(TASK_STOPPED); + notify_parent(current, SIGCHLD); + schedule(); + + /* We're back. Did the debugger cancel the sig? */ + if (!(signr = current->exit_code)) + continue; + current->exit_code = 0; + + /* The debugger continued. Ignore SIGSTOP. */ + if (signr == SIGSTOP) + continue; + + /* Update the siginfo structure. Is this good? */ + if (signr != info.si_signo) { + info.si_signo = signr; + info.si_errno = 0; + info.si_code = SI_USER; + info.si_pid = current->p_pptr->pid; + info.si_uid = current->p_pptr->uid; + } + + /* If the (new) signal is now blocked, requeue it. */ + if (sigismember(¤t->blocked, signr)) { + send_sig_info(signr, &info, current); + continue; + } + } + + ka = ¤t->sig->action[signr-1]; + if (ka->sa.sa_handler == SIG_IGN) { + if (signr != SIGCHLD) + continue; + /* Check for SIGCHLD: it's special. */ + while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) + /* nothing */; + continue; + } + + if (ka->sa.sa_handler == SIG_DFL) { + int exit_code = signr; + + /* Init gets no signals it doesn't want. 
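The switch above implements the syscall-restart convention: an interrupted svc left one of the kernel-internal ERESTART* codes in gpr2, and handle_signal() decides between failing with -EINTR and backing the PSW up two bytes so the svc re-executes. Sketched as a plain decision function, with the kernel-internal values written out (512 through 514, as in the Linux headers of this era):

    #include <stdbool.h>

    #define ERESTARTSYS    512
    #define ERESTARTNOINTR 513
    #define ERESTARTNOHAND 514

    enum restart_action { FAIL_EINTR, REDO_SYSCALL, DELIVER_AS_IS };

    static enum restart_action restart_decision(long retval, bool sa_restart)
    {
            switch (-retval) {
            case ERESTARTNOHAND:
                    return FAIL_EINTR;      /* never restarted across a handler */
            case ERESTARTSYS:
                    if (!sa_restart)
                            return FAIL_EINTR; /* restarted only with SA_RESTART */
                    /* fallthrough */
            case ERESTARTNOINTR:
                    return REDO_SYSCALL;    /* psw.addr -= 2: re-execute the svc */
            default:
                    return DELIVER_AS_IS;
            }
    }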
*/ + if (current->pid == 1) + continue; + + switch (signr) { + case SIGCONT: case SIGCHLD: case SIGWINCH: + continue; + + case SIGTSTP: case SIGTTIN: case SIGTTOU: + if (is_orphaned_pgrp(current->pgrp)) + continue; + /* FALLTHRU */ + + case SIGSTOP: + set_current_state(TASK_STOPPED); + current->exit_code = signr; + if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) + notify_parent(current, SIGCHLD); + schedule(); + continue; + + case SIGQUIT: case SIGILL: case SIGTRAP: + case SIGABRT: case SIGFPE: case SIGSEGV: + if (do_coredump(signr, regs)) + exit_code |= 0x80; + /* FALLTHRU */ + + default: + lock_kernel(); + sigaddset(¤t->pending.signal, signr); + recalc_sigpending(current); + current->flags |= PF_SIGNALED; + do_exit(exit_code); + /* NOTREACHED */ + } + } + + /* Whee! Actually deliver the signal. */ + handle_signal(signr, ka, &info, oldset, regs); + return 1; + } + + /* Did we come from a system call? */ + if ( regs->trap == __LC_SVC_OLD_PSW /* System Call! */ ) { + /* Restart the system call - no handlers present */ + if (regs->gprs[2] == -ERESTARTNOHAND || + regs->gprs[2] == -ERESTARTSYS || + regs->gprs[2] == -ERESTARTNOINTR) { + regs->gprs[2] = regs->orig_gpr2; + regs->psw.addr -= 2; + } + } + return 0; +} diff --git a/arch/s390x/kernel/signal32.c b/arch/s390x/kernel/signal32.c new file mode 100644 index 000000000..81e5417ba --- /dev/null +++ b/arch/s390x/kernel/signal32.c @@ -0,0 +1,725 @@ +/* + * arch/s390/kernel/signal32.c + * + * S390 version + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Gerhard Tonn (ton@de.ibm.com) + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux32.h" + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +/* pretcode & sig are used to store the return addr on Intel + & the signal no as the first parameter we do this differently + using gpr14 & gpr2. */ + +#define SIGFRAME_COMMON32 \ +__u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; \ +struct sigcontext32 sc; \ +_sigregs32 sregs; \ +__u8 retcode[S390_SYSCALL_SIZE]; + +typedef struct +{ + SIGFRAME_COMMON32 +} sigframe32; + +typedef struct +{ + SIGFRAME_COMMON32 + struct siginfo32 info; + struct ucontext32 uc; +} rt_sigframe32; + +asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); + +int do_signal32(struct pt_regs *regs, sigset_t *oldset); + +int copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from) +{ + int err; + + if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t32))) + return -EFAULT; + + /* If you change siginfo_t structure, please be sure + this code is fixed accordingly. + It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. + This routine must convert siginfo from 64bit to 32bit as well + at the same time. 
*/ + err = __put_user(from->si_signo, &to->si_signo); + err |= __put_user(from->si_errno, &to->si_errno); + err |= __put_user((short)from->si_code, &to->si_code); + if (from->si_code < 0) + err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); + else { + switch (from->si_code >> 16) { + case __SI_KILL >> 16: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + break; + case __SI_CHLD >> 16: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_utime, &to->si_utime); + err |= __put_user(from->si_stime, &to->si_stime); + err |= __put_user(from->si_status, &to->si_status); + break; + case __SI_FAULT >> 16: + err |= __put_user(from->si_addr, &to->si_addr); + break; + case __SI_POLL >> 16: + case __SI_TIMER >> 16: + err |= __put_user(from->si_band, &to->si_band); + err |= __put_user(from->si_fd, &to->si_fd); + break; + default: + break; + /* case __SI_RT: This is not generated by the kernel as of now. */ + } + } + return err; +} + +/* + * Atomically swap in the new signal mask, and wait for a signal. + */ +asmlinkage int +sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask) +{ + sigset_t saveset; + + mask &= _BLOCKABLE; + spin_lock_irq(¤t->sigmask_lock); + saveset = current->blocked; + siginitset(¤t->blocked, mask); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + regs->gprs[2] = -EINTR; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (do_signal32(regs, &saveset)) + return -EINTR; + } +} + +asmlinkage int +sys32_rt_sigsuspend(struct pt_regs * regs,sigset_t32 *unewset, size_t sigsetsize) +{ + sigset_t saveset, newset; + sigset_t32 set32; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&set32, unewset, sizeof(set32))) + return -EFAULT; + switch (_NSIG_WORDS) { + case 4: newset.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32); + case 3: newset.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32); + case 2: newset.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32); + case 1: newset.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32); + } + sigdelsetmask(&newset, ~_BLOCKABLE); + + spin_lock_irq(¤t->sigmask_lock); + saveset = current->blocked; + current->blocked = newset; + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + regs->gprs[2] = -EINTR; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (do_signal32(regs, &saveset)) + return -EINTR; + } +} + +asmlinkage int +sys32_sigaction(int sig, const struct old_sigaction32 *act, + struct old_sigaction32 *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t32 mask; + if (verify_area(VERIFY_READ, act, sizeof(*act)) || + __get_user((unsigned long)new_ka.sa.sa_handler, &act->sa_handler) || + __get_user((unsigned long)new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); + + if (!ret && oact) { + if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user((unsigned long)old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} + +int +do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact); + +asmlinkage long +sys32_rt_sigaction(int sig, const struct sigaction32 *act, + struct sigaction32 *oact, size_t sigsetsize) +{ + struct k_sigaction new_ka, old_ka; + int ret; + sigset_t32 set32; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t32)) + return -EINVAL; + + if (act) { + ret = get_user((unsigned long)new_ka.sa.sa_handler, &act->sa_handler); + ret |= __copy_from_user(&set32, &act->sa_mask, + sizeof(sigset_t32)); + switch (_NSIG_WORDS) { + case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] + | (((long)set32.sig[7]) << 32); + case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] + | (((long)set32.sig[5]) << 32); + case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] + | (((long)set32.sig[3]) << 32); + case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] + | (((long)set32.sig[1]) << 32); + } + ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); + + if (ret) + return -EFAULT; + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + switch (_NSIG_WORDS) { + case 4: + set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); + set32.sig[6] = old_ka.sa.sa_mask.sig[3]; + case 3: + set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); + set32.sig[4] = old_ka.sa.sa_mask.sig[2]; + case 2: + set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); + set32.sig[2] = old_ka.sa.sa_mask.sig[1]; + case 1: + set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); + set32.sig[0] = old_ka.sa.sa_mask.sig[0]; + } + ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); + ret |= __copy_to_user(&oact->sa_mask, &set32, + sizeof(sigset_t32)); + ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + } + + return ret; +} + +asmlinkage int +sys32_sigaltstack(const stack_t32 *uss, stack_t32 *uoss, struct pt_regs *regs) +{ + stack_t kss, koss; + int ret, err = 0; + mm_segment_t old_fs = get_fs(); + + if (uss) { + if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) + return -EFAULT; + err |= __get_user(kss.ss_sp, &uss->ss_sp); + err |= __get_user(kss.ss_size, &uss->ss_size); + err |= __get_user(kss.ss_flags, &uss->ss_flags); + if (err) + return -EFAULT; + } + + set_fs (KERNEL_DS); + ret = do_sigaltstack(uss ? &kss : NULL , uoss ? 
&koss : NULL, regs->gprs[15]);
+	set_fs (old_fs);
+
+	if (!ret && uoss) {
+		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
+			return -EFAULT;
+		err |= __put_user(koss.ss_sp, &uoss->ss_sp);
+		err |= __put_user(koss.ss_size, &uoss->ss_size);
+		err |= __put_user(koss.ss_flags, &uoss->ss_flags);
+		if (err)
+			return -EFAULT;
+	}
+	return ret;
+}
+
+static int save_sigregs32(struct pt_regs *regs,_sigregs32 *sregs)
+{
+	int err = 0;
+	s390_fp_regs fpregs;
+	int i;
+
+	for(i=0; i<NUM_GPRS; i++)
+		err |= __put_user(regs->gprs[i], &sregs->regs.gprs[i]);
+	for(i=0; i<NUM_ACRS; i++)
+		err |= __put_user(regs->acrs[i], &sregs->regs.acrs[i]);
+	err |= __copy_to_user(&sregs->regs.psw.mask, &regs->psw.mask, 4);
+	err |= __copy_to_user(&sregs->regs.psw.addr, ((char*)&regs->psw.addr)+4, 4);
+	if(!err)
+	{
+		save_fp_regs(&fpregs);
+		__put_user(fpregs.fpc, &sregs->fpregs.fpc);
+		for(i=0; i<NUM_FPRS; i++)
+			err |= __put_user(fpregs.fprs[i].d, &sregs->fpregs.fprs[i].d);
+	}
+	return(err);
+
+}
+
+static int restore_sigregs32(struct pt_regs *regs,_sigregs32 *sregs)
+{
+	int err = 0;
+	s390_fp_regs fpregs;
+	psw_t saved_psw=regs->psw;
+	int i;
+
+	for(i=0; i<NUM_GPRS; i++)
+		err |= __get_user(regs->gprs[i], &sregs->regs.gprs[i]);
+	for(i=0; i<NUM_ACRS; i++)
+		err |= __get_user(regs->acrs[i], &sregs->regs.acrs[i]);
+	err |= __copy_from_user(&regs->psw.mask, &sregs->regs.psw.mask, 4);
+	err |= __copy_from_user(((char*)&regs->psw.addr)+4, &sregs->regs.psw.addr, 4);
+
+	if(!err)
+	{
+		regs->orig_gpr2 = -1;		/* disable syscall checks */
+		regs->psw.mask=(saved_psw.mask&~PSW_MASK_DEBUGCHANGE)|
+			(regs->psw.mask&PSW_MASK_DEBUGCHANGE);
+		regs->psw.addr=(saved_psw.addr&~PSW_ADDR_DEBUGCHANGE)|
+			(regs->psw.addr&PSW_ADDR_DEBUGCHANGE);
+		__get_user(fpregs.fpc, &sregs->fpregs.fpc);
+		for(i=0; i<NUM_FPRS; i++)
+			err |= __get_user(fpregs.fprs[i].d, &sregs->fpregs.fprs[i].d);
+		if(!err)
+			restore_fp_regs(&fpregs);
+	}
+	return(err);
+}
+
+static int
+restore_sigcontext32(struct sigcontext32 *sc, pt_regs *regs,
+		     _sigregs32 *sregs,sigset_t *set)
+{
+	unsigned int err;
+
+	err=restore_sigregs32(regs,sregs);
+	if(!err)
+		err=__copy_from_user(&set->sig,&sc->oldmask,_SIGMASK_COPY_SIZE32);
+	return(err);
+}
+
+int sigreturn_common32(struct pt_regs *regs)
+{
+	sigframe32 *frame = (sigframe32 *)regs->gprs[15];
+	sigset_t set;
+
+	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+		return -1;
+	if (restore_sigcontext32(&frame->sc,regs,&frame->sregs,&set))
+		return -1;
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sigmask_lock);
+	current->blocked = set;
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+	return 0;
+}
+
+asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+{
+
+	if (sigreturn_common32(regs))
+		goto badframe;
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+{
+	rt_sigframe32 *frame = (rt_sigframe32 *)regs->gprs[15];
+	stack_t st;
+	int err;
+	mm_segment_t old_fs = get_fs();
+
+	if (sigreturn_common32(regs))
+		goto badframe;
+
+	err = __get_user(st.ss_sp, &frame->uc.uc_stack.ss_sp);
+	st.ss_sp = (void *) A((unsigned long)st.ss_sp);
+	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
+	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
+	if (err)
+		goto badframe;
+	set_fs (KERNEL_DS);
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	do_sigaltstack(&st, NULL, regs->gprs[15]);
+	set_fs (old_fs);
+
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+
+/*
+ * Determine which stack to use..
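save_sigregs32()/restore_sigregs32() above copy only four of the PSW address's eight bytes, namely bytes 4..7, which on big-endian s390 hold the low word; that is all a 31-bit frame can describe. The equivalent arithmetic, spelled out:

    #include <stdint.h>

    /* low word of a 64-bit PSW address: bytes 4..7 in big-endian storage */
    static uint32_t psw_addr_low(uint64_t addr)
    {
            return (uint32_t)addr;
    }

    /* merge a 31-bit frame's address back into the full 64-bit field */
    static uint64_t psw_addr_merge(uint64_t old, uint32_t low)
    {
            return (old & ~(uint64_t)0xffffffff) | low;
    }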
+ */ +static inline void * +get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = (unsigned long) A(regs->gprs[15]); + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (! on_sig_stack(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + } + + /* This is the legacy signal stack switching. */ + else if (!user_mode(regs) && + !(ka->sa.sa_flags & SA_RESTORER) && + ka->sa.sa_restorer) { + sp = (unsigned long) ka->sa.sa_restorer; + } + + return (void *)((sp - frame_size) & -8ul); +} + +static void *setup_frame_common32(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs * regs, + int frame_size,u16 retcode) +{ + sigframe32 *frame; + int err; + + frame = get_sigframe(ka, regs,frame_size); + if (!access_ok(VERIFY_WRITE, frame,frame_size)) + return 0; + err = save_sigregs32(regs,&frame->sregs); + if(!err) + err=__put_user(&frame->sregs,&frame->sc.sregs); + if(!err) + + err=__copy_to_user(&frame->sc.oldmask,&set->sig,_SIGMASK_COPY_SIZE32); + if(!err) + { + regs->gprs[2]=(current->exec_domain + && current->exec_domain->signal_invmap + && sig < 32 + ? current->exec_domain->signal_invmap[sig] + : sig); + /* Set up registers for signal handler */ + regs->gprs[15] = (addr_t)frame; + regs->psw.addr = FIX_PSW(ka->sa.sa_handler); + } + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa.sa_flags & SA_RESTORER) { + regs->gprs[14] = FIX_PSW(ka->sa.sa_restorer); + } else { + regs->gprs[14] = FIX_PSW(frame->retcode); + err |= __put_user(retcode, (u16 *)(frame->retcode)); + } + return(err ? 0:frame); +} + +static void setup_frame32(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs * regs) +{ + sigframe32 *frame; + + if((frame=setup_frame_common32(sig,ka,set,regs,sizeof(sigframe32), + (S390_SYSCALL_OPCODE|__NR_sigreturn)))==0) + goto give_sigsegv; +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->eip, frame->pretcode); +#endif + /* Martin wants this for pthreads */ + regs->gprs[3] = (addr_t)&frame->sc; + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +static void setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs * regs) +{ + rt_sigframe32 *frame; + addr_t orig_sp=regs->gprs[15]; + int err; + + if((frame=setup_frame_common32(sig,ka,set,regs,sizeof(rt_sigframe32), + (S390_SYSCALL_OPCODE|__NR_rt_sigreturn)))==0) + goto give_sigsegv; + + err = copy_siginfo_to_user32(&frame->info, info); + + /* Create the ucontext. 
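get_sigframe() above picks the stack (the alternate stack under SA_ONSTACK, otherwise the interrupted user stack) and then rounds down to an 8-byte boundary with & -8ul; subtracting frame_size before aligning keeps the whole frame below the original stack pointer. The same computation in isolation:

    #include <stddef.h>
    #include <stdint.h>

    static void *frame_address(uintptr_t sp, size_t frame_size)
    {
            /* ~(uintptr_t)7 is the same mask as -8ul */
            return (void *)((sp - frame_size) & ~(uintptr_t)7);
    }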
*/ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(orig_sp), + &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + regs->gprs[3] = (addr_t)&frame->info; + regs->gprs[4] = (addr_t)&frame->uc; + + if (err) + goto give_sigsegv; + +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->eip, frame->pretcode); +#endif + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +/* + * OK, we're invoking a handler + */ + +static void +handle_signal32(unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +{ + /* Are we from a system call? */ + if (regs->orig_gpr2 >= 0) { + /* If so, check system call restarting.. */ + switch (regs->gprs[2]) { + case -ERESTARTNOHAND: + regs->gprs[2] = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->gprs[2] = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + regs->gprs[2] = regs->orig_gpr2; + regs->psw.addr -= 2; + } + } + + /* Set up the stack frame */ + if (ka->sa.sa_flags & SA_SIGINFO) + setup_rt_frame32(sig, ka, info, oldset, regs); + else + setup_frame32(sig, ka, oldset, regs); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(¤t->sigmask_lock); + sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigaddset(¤t->blocked,sig); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + } +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +int do_signal32(struct pt_regs *regs, sigset_t *oldset) +{ + siginfo_t info; + struct k_sigaction *ka; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return 1; + + if (!oldset) + oldset = ¤t->blocked; + + for (;;) { + unsigned long signr; + + spin_lock_irq(¤t->sigmask_lock); + signr = dequeue_signal(¤t->blocked, &info); + spin_unlock_irq(¤t->sigmask_lock); + + if (!signr) + break; + + if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { + /* Let the debugger run. */ + current->exit_code = signr; + set_current_state(TASK_STOPPED); + notify_parent(current, SIGCHLD); + schedule(); + + /* We're back. Did the debugger cancel the sig? */ + if (!(signr = current->exit_code)) + continue; + current->exit_code = 0; + + /* The debugger continued. Ignore SIGSTOP. */ + if (signr == SIGSTOP) + continue; + + /* Update the siginfo structure. Is this good? */ + if (signr != info.si_signo) { + info.si_signo = signr; + info.si_errno = 0; + info.si_code = SI_USER; + info.si_pid = current->p_pptr->pid; + info.si_uid = current->p_pptr->uid; + } + + /* If the (new) signal is now blocked, requeue it. 
*/ + if (sigismember(¤t->blocked, signr)) { + send_sig_info(signr, &info, current); + continue; + } + } + + ka = ¤t->sig->action[signr-1]; + if (ka->sa.sa_handler == SIG_IGN) { + if (signr != SIGCHLD) + continue; + /* Check for SIGCHLD: it's special. */ + while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) + /* nothing */; + continue; + } + + if (ka->sa.sa_handler == SIG_DFL) { + int exit_code = signr; + + /* Init gets no signals it doesn't want. */ + if (current->pid == 1) + continue; + + switch (signr) { + case SIGCONT: case SIGCHLD: case SIGWINCH: + continue; + + case SIGTSTP: case SIGTTIN: case SIGTTOU: + if (is_orphaned_pgrp(current->pgrp)) + continue; + /* FALLTHRU */ + + case SIGSTOP: + set_current_state(TASK_STOPPED); + current->exit_code = signr; + if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) + notify_parent(current, SIGCHLD); + schedule(); + continue; + + case SIGQUIT: case SIGILL: case SIGTRAP: + case SIGABRT: case SIGFPE: case SIGSEGV: + if (do_coredump(signr, regs)) + exit_code |= 0x80; + /* FALLTHRU */ + + default: + lock_kernel(); + sigaddset(¤t->pending.signal, signr); + recalc_sigpending(current); + current->flags |= PF_SIGNALED; + do_exit(exit_code); + /* NOTREACHED */ + } + } + + /* Whee! Actually deliver the signal. */ + handle_signal32(signr, ka, &info, oldset, regs); + return 1; + } + + /* Did we come from a system call? */ + if ( regs->trap == __LC_SVC_OLD_PSW /* System Call! */ ) { + /* Restart the system call - no handlers present */ + if (regs->gprs[2] == -ERESTARTNOHAND || + regs->gprs[2] == -ERESTARTSYS || + regs->gprs[2] == -ERESTARTNOINTR) { + regs->gprs[2] = regs->orig_gpr2; + regs->psw.addr -= 2; + } + } + return 0; +} diff --git a/arch/s390x/kernel/smp.c b/arch/s390x/kernel/smp.c new file mode 100644 index 000000000..19d8e3dad --- /dev/null +++ b/arch/s390x/kernel/smp.c @@ -0,0 +1,760 @@ +/* + * arch/s390/kernel/smp.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * based on other smp stuff by + * (c) 1995 Alan Cox, CymruNET Ltd + * (c) 1998 Ingo Molnar + * + * We work with logical cpu numbering everywhere we can. The only + * functions using the real cpu address (got from STAP) are the sigp + * functions. For all other functions we use the identity mapping. + * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is + * used e.g. to find the idle task belonging to a logical cpu. Every array + * in the kernel is sorted by the logical cpu number and not by the physical + * one which is causing all the confusion with __cpu_logical_map and + * cpu_number_map in other architectures. + */ + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "cpcmd.h" + +/* prototypes */ +extern int cpu_idle(void * unused); + +extern __u16 boot_cpu_addr; +extern volatile int __cpu_logical_map[]; + +/* + * An array with a pointer the lowcore of every CPU. + */ +static int max_cpus = NR_CPUS; /* Setup configured maximum number of CPUs to activate */ +int smp_num_cpus; +struct _lowcore *lowcore_ptr[NR_CPUS]; +unsigned int prof_multiplier[NR_CPUS]; +unsigned int prof_old_multiplier[NR_CPUS]; +unsigned int prof_counter[NR_CPUS]; +cycles_t cacheflush_time=0; +int smp_threads_ready=0; /* Set when the idlers are all forked. 
*/ +static atomic_t smp_commenced = ATOMIC_INIT(0); + +spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED; + +/* + * Setup routine for controlling SMP activation + * + * Command-line option of "nosmp" or "maxcpus=0" will disable SMP + * activation entirely (the MPS table probe still happens, though). + * + * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer + * greater than 0, limits the maximum number of CPUs activated in + * SMP mode to <NUM>. + */ + +static int __init nosmp(char *str) +{ + max_cpus = 0; + return 1; +} + +__setup("nosmp", nosmp); + +static int __init maxcpus(char *str) +{ + get_option(&str, &max_cpus); + return 1; +} + +__setup("maxcpus=", maxcpus); + +/* + * Reboot, halt and power_off routines for SMP. + */ +extern char vmhalt_cmd[]; +extern char vmpoff_cmd[]; + +extern void reipl(unsigned long devno); + +void do_machine_restart(void) +{ + smp_send_stop(); + reipl(S390_lowcore.ipl_device); +} + +void machine_restart(char * __unused) +{ + if (smp_processor_id() != 0) { + smp_ext_bitcall(0, ec_restart); + for (;;); + } else + do_machine_restart(); +} + +void do_machine_halt(void) +{ + smp_send_stop(); + if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) + cpcmd(vmhalt_cmd, NULL, 0); + signal_processor(smp_processor_id(), sigp_stop_and_store_status); +} + +void machine_halt(void) +{ + if (smp_processor_id() != 0) { + smp_ext_bitcall(0, ec_halt); + for (;;); + } else + do_machine_halt(); +} + +void do_machine_power_off(void) +{ + smp_send_stop(); + if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) + cpcmd(vmpoff_cmd, NULL, 0); + signal_processor(smp_processor_id(), sigp_stop_and_store_status); +} + +void machine_power_off(void) +{ + if (smp_processor_id() != 0) { + smp_ext_bitcall(0, ec_power_off); + for (;;); + } else + do_machine_power_off(); +} + +/* + * This is the main routine where commands issued by other + * cpus are handled. + */ + +void do_ext_call_interrupt(struct pt_regs *regs, __u16 code) +{ + ec_ext_call *ec, *next; + unsigned long bits; + + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. + * For the ec_restart, ec_halt and ec_power_off we call the + * appropriate routine.
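The ext_call_fast word referred to here acts as a one-word mailbox: senders set bits and kick the target CPU, and the receiver snapshots-and-clears the word atomically before servicing each bit. The same pattern in portable C11 atomics, for comparison (a sketch; the names are illustrative, not the kernel's):

#include <stdatomic.h>

enum { EC_SCHEDULE, EC_RESTART, EC_HALT, EC_POWER_OFF };

static atomic_ulong mailbox;

void sender(int sig)			/* cf. smp_ext_bitcall() */
{
	atomic_fetch_or(&mailbox, 1UL << sig);
	/* ...then kick the target cpu (sigp external call) */
}

void receiver(void)			/* cf. do_ext_call_interrupt() */
{
	unsigned long bits = atomic_exchange(&mailbox, 0);

	if (bits & (1UL << EC_RESTART)) {
		/* do_machine_restart(); */
	}
	if (bits & (1UL << EC_HALT)) {
		/* do_machine_halt(); */
	}
}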
+ */ + bits = xchg(&S390_lowcore.ext_call_fast, 0); + + if (test_bit(ec_restart, &bits)) + do_machine_restart(); + if (test_bit(ec_halt, &bits)) + do_machine_halt(); + if (test_bit(ec_power_off, &bits)) + do_machine_power_off(); + + /* + * Handle external call commands with a parameter area + */ + ec = (ec_ext_call *) xchg(&S390_lowcore.ext_call_queue, 0); + if (ec == NULL) + return; /* no command signals */ + + /* Make a fifo out of the lifo */ + next = ec->next; + ec->next = NULL; + while (next != NULL) { + ec_ext_call *tmp = next->next; + next->next = ec; + ec = next; + next = tmp; + } + + /* Execute every sigp command on the queue */ + while (ec != NULL) { + switch (ec->cmd) { + case ec_callback_async: { + void (*func)(void *info); + void *info; + + func = ec->func; + info = ec->info; + atomic_set(&ec->status,ec_executing); + (func)(info); + return; + } + case ec_callback_sync: + atomic_set(&ec->status,ec_executing); + (ec->func)(ec->info); + atomic_set(&ec->status,ec_done); + return; + default: + } + ec = ec->next; + } +} + +/* + * Swap in a new request to external call queue + */ +static inline void smp_add_ext_call(ec_ext_call *ec, struct _lowcore *lowcore) +{ + int success; + + while (1) { + ec->next = (ec_ext_call*) lowcore->ext_call_queue; + __asm__ __volatile__ ( + " lgr 0,%2\n" + " csg 0,%3,%1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (success), "+m" (lowcore->ext_call_queue) + : "d" (ec->next), "d" (ec) + : "cc", "0" ); + if (success == 0) break; + } +} + +/* + * Send an external call sigp to another cpu and wait for its completion. + */ +sigp_ccode +smp_ext_call(int cpu, void (*func)(void *info), void *info, int wait) +{ + sigp_ccode ccode; + ec_ext_call ec; + + ec.cmd = wait ? ec_callback_sync:ec_callback_async; + atomic_set(&ec.status, ec_pending); + ec.func = func; + ec.info = info; + /* swap in new request to external call queue */ + smp_add_ext_call(&ec, &get_cpu_lowcore(cpu)); + /* + * We try once to deliver the signal. There are four possible + * return codes: + * 0) Order code accepted - can't show up on an external call + * 1) Status stored - fine, wait for completion. + * 2) Busy - there is another signal pending. Thats fine too, because + * do_ext_call from the pending signal will execute all signals on + * the queue. We wait for completion. + * 3) Not operational - something very bad has happened to the cpu. + * do not wait for completion. + */ + ccode = signal_processor(cpu, sigp_external_call); + + if (ccode != sigp_not_operational) + /* wait for completion, FIXME: possible seed of a deadlock */ + while (atomic_read(&ec.status) != (wait?ec_done:ec_executing)); + + return ccode; +} + +/* + * Send a callback sigp to every other cpu in the system. + */ +void smp_ext_call_others(void (*func)(void *info), void *info, int wait) +{ + ec_ext_call ec[NR_CPUS]; + sigp_ccode ccode; + int i; + + for (i = 0; i < smp_num_cpus; i++) { + if (smp_processor_id() == i) + continue; + ec[i].cmd = wait ? ec_callback_sync : ec_callback_async; + atomic_set(&ec[i].status, ec_pending); + ec[i].func = func; + ec[i].info = info; + smp_add_ext_call(ec+i, &get_cpu_lowcore(i)); + ccode = signal_processor(i, sigp_external_call); + } + + /* wait for completion, FIXME: possible seed of a deadlock */ + for (i = 0; i < smp_num_cpus; i++) { + if (smp_processor_id() == i) + continue; + while (atomic_read(&ec[i].status) != + (wait ? ec_done:ec_executing)); + } +} + +/* + * Send an external call sigp to another cpu and return without waiting + * for its completion. 
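A note on the queue handling in do_ext_call_interrupt() above: requests are pushed LIFO at the head with compare-and-swap, so the handler first re-threads the list into FIFO order. That loop is the classic in-place reversal of a singly linked list, shown generically here:

struct node { struct node *next; /* payload omitted */ };

/* Reverse a singly linked list in place and return the new head;
 * this mirrors the ec/next/tmp re-threading loop above. */
static struct node *reverse(struct node *head)
{
	struct node *prev = NULL;

	while (head != NULL) {
		struct node *tmp = head->next;
		head->next = prev;
		prev = head;
		head = tmp;
	}
	return prev;
}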
+ */ +sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig) +{ + sigp_ccode ccode; + + /* + * Set signaling bit in lowcore of target cpu and kick it + */ + set_bit(sig, &(get_cpu_lowcore(cpu).ext_call_fast)); + ccode = signal_processor(cpu, sigp_external_call); + return ccode; +} + +/* + * Send an external call sigp to every other cpu in the system and + * return without waiting for its completion. + */ +void smp_ext_bitcall_others(ec_bit_sig sig) +{ + sigp_ccode ccode; + int i; + + for (i = 0; i < smp_num_cpus; i++) { + if (smp_processor_id() == i) + continue; + /* + * Set signaling bit in lowcore of target cpu and kick it + */ + set_bit(sig, &(get_cpu_lowcore(i).ext_call_fast)); + ccode = signal_processor(i, sigp_external_call); + } +} + +/* + * cycles through all the cpus, + * returns early if info is not NULL & the processor has something + * of interest to report in the info structure. + * it returns the next cpu to check if it returns early. + * i.e. it should be used as follows if you wish to receive info. + * next_cpu=0; + * do + * { + * info->cpu=next_cpu; + * next_cpu=smp_signal_others(order_code,parameter,1,info); + * ... check info here + * } while(next_cpu < smp_num_cpus) + * + * if you are lazy just use it like + * smp_signal_others(order_code,parameter,1,NULL); + */ +int smp_signal_others(sigp_order_code order_code, u32 parameter, + int spin, sigp_info *info) +{ + sigp_ccode ccode; + u32 dummy; + u16 i; + + if (info) + info->intresting = 0; + for (i = (info ? info->cpu : 0); i < smp_num_cpus; i++) { + if (smp_processor_id() != i) { + do { + ccode = signal_processor_ps( + (info ? &info->status : &dummy), + parameter, i, order_code); + } while(spin && ccode == sigp_busy); + if (info && ccode != sigp_order_code_accepted) { + info->intresting = 1; + info->cpu = i; + info->ccode = ccode; + i++; + break; + } + } + } + return i; +} + +/* + * this function sends a 'stop' sigp to all other CPUs in the system. + * it goes straight through. + */ + +void smp_send_stop(void) +{ + int i; + u32 dummy; + unsigned long low_core_addr; + + /* write magic number to zero page (absolute 0) */ + + get_cpu_lowcore(smp_processor_id()).panic_magic = __PANIC_MAGIC; + + /* stop all processors */ + + smp_signal_others(sigp_stop, 0, TRUE, NULL); + + /* store status of all processors in their lowcores (real 0) */ + + for (i = 0; i < smp_num_cpus; i++) { + if (smp_processor_id() != i) { + int ccode; + low_core_addr = (unsigned long)&get_cpu_lowcore(i); + do { + ccode = signal_processor_ps( + &dummy, + low_core_addr, + i, + sigp_store_status_at_address); + } while(ccode == sigp_busy); + } + } +} + +/* + * this function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ...
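Spelled out as compilable C, the polling idiom from the smp_signal_others() comment above looks like this (a fragment only; handle_status() is a hypothetical consumer, and the field names follow the code as declared, including intresting):

sigp_info info;
int next_cpu = 0;

do {
	info.cpu = next_cpu;
	next_cpu = smp_signal_others(order_code, parameter, 1, &info);
	if (info.intresting)	/* sic: field name as declared */
		handle_status(info.cpu, info.status, info.ccode);
} while (next_cpu < smp_num_cpus);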
+ */ + +void smp_send_reschedule(int cpu) +{ + smp_ext_bitcall(cpu, ec_schedule); +} + +/* + * parameter area for the set/clear control bit callbacks + */ +typedef struct +{ + __u16 start_ctl; + __u16 end_ctl; + __u64 orvals[16]; + __u64 andvals[16]; +} ec_creg_mask_parms; + +/* + * callback for setting/clearing control bits + */ +void smp_ctl_bit_callback(void *info) { + ec_creg_mask_parms *pp; + u64 cregs[16]; + int i; + + pp = (ec_creg_mask_parms *) info; + asm volatile (" bras 1,0f\n" + " stctg 0,0,0(%0)\n" + "0: ex %1,0(1)\n" + : : "a" (cregs+pp->start_ctl), + "a" ((pp->start_ctl<<4) + pp->end_ctl) + : "memory", "1" ); + for (i = pp->start_ctl; i <= pp->end_ctl; i++) + cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; + asm volatile (" bras 1,0f\n" + " lctlg 0,0,0(%0)\n" + "0: ex %1,0(1)\n" + : : "a" (cregs+pp->start_ctl), + "a" ((pp->start_ctl<<4) + pp->end_ctl) + : "memory", "1" ); +} + +/* + * Set a bit in a control register of all cpus + */ +void smp_ctl_set_bit(int cr, int bit) { + ec_creg_mask_parms parms; + + if (atomic_read(&smp_commenced) != 0) { + parms.start_ctl = cr; + parms.end_ctl = cr; + parms.orvals[cr] = 1 << bit; + parms.andvals[cr] = -1L; + smp_ext_call_others(smp_ctl_bit_callback, &parms, 1); + } + __ctl_set_bit(cr, bit); +} + +/* + * Clear a bit in a control register of all cpus + */ +void smp_ctl_clear_bit(int cr, int bit) { + ec_creg_mask_parms parms; + + if (atomic_read(&smp_commenced) != 0) { + parms.start_ctl = cr; + parms.end_ctl = cr; + parms.orvals[cr] = 0; + parms.andvals[cr] = ~(1L << bit); + smp_ext_call_others(smp_ctl_bit_callback, &parms, 1); + } + __ctl_clear_bit(cr, bit); +} + +/* + * Call a function on all other processors + */ + +int +smp_call_function(void (*func)(void *info), void *info, int retry, int wait) +/* + * [SUMMARY] Run a function on all other CPUs. + * <func> The function to run. This must be fast and non-blocking. + * <info> An arbitrary pointer to pass to the function. + * <retry> currently unused. + * <wait> If true, wait (atomically) until function has completed on other CPUs. + * [RETURNS] 0 on success, else a negative status code. Does not return until + * remote CPUs are nearly ready to execute <<func>> or are or have executed. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler, you may call it from a bottom half handler. + */ +{ + if (atomic_read(&smp_commenced) != 0) + smp_ext_call_others(func, info, 1); + (func)(info); + return 0; +} + +/* + * Let's check how many CPUs we have. + */ + +void smp_count_cpus(void) +{ + int curr_cpu; + + current->processor = 0; + smp_num_cpus = 1; + for (curr_cpu = 0; + curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) { + if ((__u16) curr_cpu == boot_cpu_addr) + continue; + __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu; + if (signal_processor(smp_num_cpus, sigp_sense) == + sigp_not_operational) + continue; + smp_num_cpus++; + } + printk("Detected %d CPU's\n",(int) smp_num_cpus); + printk("Boot cpu address %2X\n", boot_cpu_addr); +} + + +/* + * Activate a secondary processor.
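The or/and mask pair above composes both operations into one callback: each CPU rewrites its control registers as new = (old & andvals[cr]) | orvals[cr]. Worked through for the two helpers that fill in the masks (values exactly as set up in the code above):

/* smp_ctl_set_bit(cr, bit):
 *   orvals[cr] = 1 << bit,  andvals[cr] = -1L (all ones)
 *   new = (old & ~0UL) | (1UL << bit)  ==  old | (1UL << bit)
 *
 * smp_ctl_clear_bit(cr, bit):
 *   orvals[cr] = 0,         andvals[cr] = ~(1L << bit)
 *   new = (old & ~(1UL << bit)) | 0    ==  old & ~(1UL << bit)
 */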
+ */ +extern void init_100hz_timer(void); + +int __init start_secondary(void *cpuvoid) +{ + /* Setup the cpu */ + cpu_init(); + /* Print info about this processor */ + print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data); + /* Wait for completion of smp startup */ + while (!atomic_read(&smp_commenced)) + /* nothing */ ; + /* init per CPU 100 hz timer */ + init_100hz_timer(); + /* cpu_idle will call schedule for us */ + return cpu_idle(NULL); +} + +/* + * The restart interrupt handler jumps to start_secondary directly + * without the detour over initialize_secondary. We defined it here + * so that the linker doesn't complain. + */ +void __init initialize_secondary(void) +{ +} + +static int __init fork_by_hand(void) +{ + struct pt_regs regs; + /* don't care about the psw and regs settings since we'll never + reschedule the forked task. */ + memset(®s,0,sizeof(pt_regs)); + return do_fork(CLONE_VM|CLONE_PID, 0, ®s, 0); +} + +static void __init do_boot_cpu(int cpu) +{ + struct task_struct *idle; + struct _lowcore *cpu_lowcore; + + /* We can't use kernel_thread since we must _avoid_ to reschedule + the child. */ + if (fork_by_hand() < 0) + panic("failed fork for CPU %d", cpu); + + /* + * We remove it from the pidhash and the runqueue + * once we got the process: + */ + idle = init_task.prev_task; + if (!idle) + panic("No idle process for CPU %d",cpu); + idle->processor = cpu; + idle->has_cpu = 1; /* we schedule the first task manually */ + + del_from_runqueue(idle); + unhash_process(idle); + init_tasks[cpu] = idle; + + cpu_lowcore=&get_cpu_lowcore(cpu); + cpu_lowcore->kernel_stack=idle->thread.ksp; + __asm__ __volatile__("stctg 0,15,%0\n\t" + "stam 0,15,%1" + : "=m" (cpu_lowcore->cregs_save_area[0]), + "=m" (cpu_lowcore->access_regs_save_area[0]) + : : "memory"); + + eieio(); + signal_processor(cpu,sigp_restart); +} + +/* + * Architecture specific routine called by the kernel just before init is + * fired off. This allows the BP to have everything in order [we hope]. + * At the end of this all the APs will hit the system scheduling and off + * we go. Each AP will load the system gdt's and jump through the kernel + * init into idle(). At this point the scheduler will one day take over + * and give them jobs to do. smp_callin is a standard routine + * we use to track CPUs as they power up. + */ + +void __init smp_commence(void) +{ + /* + * Lets the callins below out of their loop. + */ + atomic_set(&smp_commenced,1); +} + +/* + * Cycle through the processors sending restart sigps to boot each. 
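The smp_commenced flag used above is a one-way release barrier: each secondary spins in start_secondary() until the boot CPU sets the flag in smp_commence(). The same handshake in portable C11 atomics, for comparison (a sketch, not kernel code):

#include <stdatomic.h>

static atomic_int commenced;

void boot_cpu_release(void)		/* cf. smp_commence() */
{
	atomic_store(&commenced, 1);
}

void secondary_wait(void)		/* cf. the loop in start_secondary() */
{
	while (!atomic_load(&commenced))
		;			/* spin until released */
}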
+ */ + +void __init smp_boot_cpus(void) +{ + struct _lowcore *curr_lowcore; + sigp_ccode ccode; + int i; + + /* request the 0x1202 external interrupt */ + if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1202"); + smp_count_cpus(); + memset(lowcore_ptr,0,sizeof(lowcore_ptr)); + + /* + * Initialize the logical to physical CPU number mapping + * and the per-CPU profiling counter/multiplier + */ + + for (i = 0; i < NR_CPUS; i++) { + prof_counter[i] = 1; + prof_old_multiplier[i] = 1; + prof_multiplier[i] = 1; + } + + print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data); + + for(i = 0; i < smp_num_cpus; i++) + { + curr_lowcore = (struct _lowcore *) + __get_free_pages(GFP_KERNEL|GFP_DMA, 1); + if (curr_lowcore == NULL) { + printk("smp_boot_cpus failed to allocate prefix memory\n"); + break; + } + lowcore_ptr[i] = curr_lowcore; + memcpy(curr_lowcore, &S390_lowcore, sizeof(struct _lowcore)); + /* + * Most of the parameters are set up when the cpu is + * started up. + */ + if (smp_processor_id() == i) + set_prefix((u32)(u64)curr_lowcore); + else { + ccode = signal_processor_p((u64)(curr_lowcore), + i, sigp_set_prefix); + if(ccode) { + /* if this gets troublesome I'll have to do + * something about it. */ + printk("ccode %d for cpu %d returned when " + "setting prefix in smp_boot_cpus not good.\n", + (int) ccode, (int) i); + } + else + do_boot_cpu(i); + } + } +} + +/* + * the frequency of the profiling timer can be changed + * by writing a multiplier value into /proc/profile. + * + * usually you want to run this on all CPUs ;) + */ +int setup_profiling_timer(unsigned int multiplier) +{ + return 0; +} + +/* + * Local timer interrupt handler. It does both profiling and + * process statistics/rescheduling. + * + * We do profiling in every local tick, statistics/rescheduling + * happen only every 'profiling multiplier' ticks. The default + * multiplier is 1 and it can be changed by writing the new multiplier + * value into /proc/profile. + */ + +void smp_local_timer_interrupt(struct pt_regs * regs) +{ + int user = (user_mode(regs) != 0); + int cpu = smp_processor_id(); + + /* + * The profiling function is SMP safe. (nothing can mess + * around with "current", and the profiling counters are + * updated with atomic operations). This is especially + * useful with a profiling multiplier != 1 + */ + if (!user_mode(regs)) + s390_do_profile(regs->psw.addr); + + if (!--prof_counter[cpu]) { + int system = 1-user; + struct task_struct * p = current; + + /* + * The multiplier may have changed since the last time we got + * to this point as a result of the user writing to + * /proc/profile. In this case we need to adjust the APIC + * timer accordingly. + * + * Interrupts are already masked off at this point. + */ + prof_counter[cpu] = prof_multiplier[cpu]; + if (prof_counter[cpu] != prof_old_multiplier[cpu]) { + prof_old_multiplier[cpu] = prof_counter[cpu]; + } + + /* + * After doing the above, we need to make like + * a normal interrupt - otherwise timer interrupts + * ignore the global interrupt lock, which is the + * WrongThing (tm) to do. 
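Earlier in smp_boot_cpus() above, each CPU receives a private copy of the lowcore whose address is installed as that CPU's prefix, so absolute addresses 0..8191 on that CPU resolve into its own copy. The allocation step reduced to its essentials (a fragment in the 2.4 style used above; alloc_cpu_lowcore is a hypothetical helper name):

static struct _lowcore *alloc_cpu_lowcore(int cpu)
{
	/* order-1 (8K); GFP_DMA keeps it low enough for the 31-bit
	 * prefix value the code above passes to sigp_set_prefix */
	struct _lowcore *lc = (struct _lowcore *)
		__get_free_pages(GFP_KERNEL | GFP_DMA, 1);

	if (lc != NULL) {
		memcpy(lc, &S390_lowcore, sizeof(struct _lowcore));
		lowcore_ptr[cpu] = lc;
	}
	return lc;		/* NULL means the CPU stays unbooted */
}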
+ */ + + irq_enter(cpu, 0); + update_process_times(user); + irq_exit(cpu, 0); + } +} + diff --git a/arch/s390x/kernel/sys_s390.c b/arch/s390x/kernel/sys_s390.c new file mode 100644 index 000000000..4c06bd326 --- /dev/null +++ b/arch/s390x/kernel/sys_s390.c @@ -0,0 +1,205 @@ +/* + * arch/s390/kernel/sys_s390.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * + * Derived from "arch/i386/kernel/sys_i386.c" + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/s390 + * platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way Unix traditionally does this, though. + */ +asmlinkage long sys_pipe(unsigned long * fildes) +{ + int fd[2]; + int error; + + error = do_pipe(fd); + if (!error) { + if (copy_to_user(fildes, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + +/* common code for old and new mmaps */ +static inline long do_mmap2( + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + long error = -EBADF; + struct file * file = NULL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + down(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up(¤t->mm->mmap_sem); + + if (file) + fput(file); +out: + return error; +} + +/* + * Perform the select(nd, in, out, ex, tv) and mmap() system + * calls. Linux/i386 didn't use to be able to handle more than + * 4 system call parameters, so these system calls used a memory + * block for parameter passing.. + */ + +struct mmap_arg_struct { + unsigned long addr; + unsigned long len; + unsigned long prot; + unsigned long flags; + unsigned long fd; + unsigned long offset; +}; + +asmlinkage long sys_mmap2(struct mmap_arg_struct *arg) +{ + struct mmap_arg_struct a; + int error = -EFAULT; + + if (copy_from_user(&a, arg, sizeof(a))) + goto out; + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); +out: + return error; +} + +asmlinkage long old_mmap(struct mmap_arg_struct *arg) +{ + struct mmap_arg_struct a; + long error = -EFAULT; + + if (copy_from_user(&a, arg, sizeof(a))) + goto out; + + error = -EINVAL; + if (a.offset & ~PAGE_MASK) + goto out; + + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); +out: + return error; +} + +extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); + +/* + * sys_ipc() is the de-multiplexer for the SysV IPC calls.. + * + * This is really horribly ugly. 
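Both mmap entry points above take a single pointer to a six-word argument block, so a (historical) 31-bit libc would marshal mmap() roughly as follows. A user-space sketch assuming the struct layout above; my_old_mmap and the use of syscall(2) with SYS_mmap are illustrative, not the actual libc code:

#include <sys/syscall.h>
#include <unistd.h>

struct mmap_arg_struct {
	unsigned long addr, len, prot, flags, fd, offset;
};

static long my_old_mmap(unsigned long addr, unsigned long len,
			unsigned long prot, unsigned long flags,
			unsigned long fd, unsigned long offset)
{
	/* pack all six arguments, pass only the block's address */
	struct mmap_arg_struct a = { addr, len, prot, flags, fd, offset };

	return syscall(SYS_mmap, &a);
}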
+ */ +asmlinkage int sys_ipc (uint call, int first, int second, + unsigned long third, void *ptr) +{ + struct ipc_kludge tmp; + int ret; + + switch (call) { + case SEMOP: + return sys_semop (first, (struct sembuf *)ptr, second); + case SEMGET: + return sys_semget (first, second, third); + case SEMCTL: { + union semun fourth; + if (!ptr) + return -EINVAL; + if (get_user(fourth.__pad, (void **) ptr)) + return -EFAULT; + return sys_semctl (first, second, third, fourth); + } + case MSGSND: + return sys_msgsnd (first, (struct msgbuf *) ptr, + second, third); + break; + case MSGRCV: + if (!ptr) + return -EINVAL; + if (copy_from_user (&tmp, (struct ipc_kludge *) ptr, + sizeof (struct ipc_kludge))) + return -EFAULT; + return sys_msgrcv (first, tmp.msgp, + second, tmp.msgtyp, third); + case MSGGET: + return sys_msgget ((key_t) first, second); + case MSGCTL: + return sys_msgctl (first, second, (struct msqid_ds *) ptr); + + case SHMAT: { + ulong raddr; + ret = sys_shmat (first, (char *) ptr, second, &raddr); + if (ret) + return ret; + return put_user (raddr, (ulong *) third); + break; + } + case SHMDT: + return sys_shmdt ((char *)ptr); + case SHMGET: + return sys_shmget (first, second, third); + case SHMCTL: + return sys_shmctl (first, second, + (struct shmid_ds *) ptr); + default: + return -EINVAL; + + } + + return -EINVAL; +} + +/* + * Old cruft + */ +asmlinkage int sys_uname(struct old_utsname * name) +{ + int err; + if (!name) + return -EFAULT; + down_read(&uts_sem); + err=copy_to_user(name, &system_utsname, sizeof (*name)); + up_read(&uts_sem); + return err?-EFAULT:0; +} + +asmlinkage int sys_pause(void) +{ + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + return -ERESTARTNOHAND; +} + diff --git a/arch/s390x/kernel/time.c b/arch/s390x/kernel/time.c new file mode 100644 index 000000000..d4e91842e --- /dev/null +++ b/arch/s390x/kernel/time.c @@ -0,0 +1,258 @@ +/* + * arch/s390/kernel/time.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com), + * Martin Schwidefsky (schwidefsky@de.ibm.com), + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * + * Derived from "arch/i386/kernel/time.c" + * Copyright (C) 1991, 1992, 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + + +/* change this if you have some constant time drift */ +#define USECS_PER_JIFFY ((signed long)1000000/HZ) +#define CLK_TICKS_PER_JIFFY ((signed long)USECS_PER_JIFFY<<12) + +#define TICK_SIZE tick + +static uint64_t init_timer_cc, last_timer_cc; + +extern rwlock_t xtime_lock; +extern unsigned long wall_jiffies; + +void tod_to_timeval(uint64_t todval, struct timeval *xtime) +{ +#if 0 + const int high_bit = 0x80000000L; + const int c_f4240 = 0xf4240L; + const int c_7a120 = 0x7a120; + /* We have to divide the 64 bit value todval by 4096 + * (because the 2^12 bit is the one that changes every + * microsecond) and then split it into seconds and + * microseconds. A value of max (2^52-1) divided by + * the value 0xF4240 can yield a max result of approx + * (2^32.068). Thats to big to fit into a signed int + * ... hacking time! 
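Returning to sys_ipc() above for a moment: user space reaches every SysV IPC primitive through this one entry point, with the libc wrapper packing the sub-call number into the first argument. A sketch of how semop() maps onto it (SEMOP is 1 in the kernel's ipc headers; my_semop is illustrative):

#include <sys/sem.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MY_SEMOP 1	/* sub-call number, from linux/ipc.h */

static int my_semop(int semid, struct sembuf *sops, unsigned nsops)
{
	/* call = SEMOP, first = semid, second = nsops, ptr = sops */
	return syscall(SYS_ipc, MY_SEMOP, semid, (int) nsops, 0, sops);
}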
+ */ + asm volatile ("L 2,%1\n\t" + "LR 3,2\n\t" + "SRL 2,12\n\t" + "SLL 3,20\n\t" + "L 4,%O1+4(%R1)\n\t" + "SRL 4,12\n\t" + "OR 3,4\n\t" /* now R2/R3 contain (todval >> 12) */ + "SR 4,4\n\t" + "CL 2,%2\n\t" + "JL .+12\n\t" + "S 2,%2\n\t" + "L 4,%3\n\t" + "D 2,%4\n\t" + "OR 3,4\n\t" + "ST 2,%O0+4(%R0)\n\t" + "ST 3,%0" + : "=m" (*xtime) : "m" (todval), + "m" (c_7a120), "m" (high_bit), "m" (c_f4240) + : "cc", "memory", "2", "3", "4" ); +#else + todval >>= 12; + xtime->tv_sec = todval / 1000000; + xtime->tv_usec = todval % 1000000; +#endif +} + +unsigned long do_gettimeoffset(void) +{ + __u64 timer_cc; + + asm volatile ("STCK %0" : "=m" (timer_cc)); + /* We require the offset from the previous interrupt */ + return ((unsigned long)((timer_cc - last_timer_cc)>>12)); +} + +/* + * This version of gettimeofday has microsecond resolution. + */ +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long usec, sec; + unsigned long lost_ticks = jiffies - wall_jiffies; + + read_lock_irqsave(&xtime_lock, flags); + usec = do_gettimeoffset(); + if (lost_ticks) + usec +=(USECS_PER_JIFFY*lost_ticks); + sec = xtime.tv_sec; + usec += xtime.tv_usec; + read_unlock_irqrestore(&xtime_lock, flags); + + while (usec >= 1000000) { + usec -= 1000000; + sec++; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} + +void do_settimeofday(struct timeval *tv) +{ + + write_lock_irq(&xtime_lock); + /* This is revolting. We need to set the xtime.tv_usec + * correctly. However, the value in this location is + * is value at the last tick. + * Discover what correction gettimeofday + * would have done, and then undo it! + */ + tv->tv_usec -= do_gettimeoffset(); + + while (tv->tv_usec < 0) { + tv->tv_usec += 1000000; + tv->tv_sec--; + } + + xtime = *tv; + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + write_unlock_irq(&xtime_lock); +} + +/* + * timer_interrupt() needs to keep up the real-time clock, + * as well as call the "do_timer()" routine every clocktick + */ + +#ifdef CONFIG_SMP +extern __u16 boot_cpu_addr; +#endif + +void do_timer_interrupt(struct pt_regs *regs,int error_code) +{ + unsigned long flags; + + /* + * reset timer to 10ms minus time already elapsed + * since timer-interrupt pending + */ + + save_flags(flags); + cli(); +#ifdef CONFIG_SMP + if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr) { + write_lock(&xtime_lock); + last_timer_cc = S390_lowcore.jiffy_timer_cc; + } +#else + last_timer_cc = S390_lowcore.jiffy_timer_cc; +#endif + /* set clock comparator */ + S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY; + asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc)); + +/* + * In the SMP case we use the local timer interrupt to do the + * profiling, except when we simulate SMP mode on a uniprocessor + * system, in that case we have to call the local interrupt handler. 
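The #else branch of tod_to_timeval() above shows the arithmetic plainly: TOD bit 51 ticks once per microsecond, so shifting right by 12 yields microseconds, and dividing by 1,000,000 splits seconds from the remainder. A self-contained check of that conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* fabricate a TOD value for 5 s + 250000 us past the epoch */
	uint64_t todval = (5ULL * 1000000 + 250000) << 12;

	todval >>= 12;	/* TOD units -> microseconds */
	printf("sec=%llu usec=%llu\n",
	       (unsigned long long) (todval / 1000000),
	       (unsigned long long) (todval % 1000000));	/* 5, 250000 */
	return 0;
}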
+ */ +#ifdef CONFIG_SMP + /* when SMP, do smp_local_timer_interrupt for *all* CPUs, + but only do the rest for the boot CPU */ + smp_local_timer_interrupt(regs); +#else + if (!user_mode(regs)) + s390_do_profile(regs->psw.addr); +#endif + +#ifdef CONFIG_SMP + if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr) +#endif + { + do_timer(regs); +#ifdef CONFIG_SMP + write_unlock(&xtime_lock); +#endif + } + restore_flags(flags); + +} + +/* + * Start the clock comparator on the current CPU + */ +static unsigned long cr0 __attribute__ ((aligned (8))); + +void init_100hz_timer(void) +{ + /* allow clock comparator timer interrupt */ + asm volatile ("STCTG 0,0,%0" : "=m" (cr0) : : "memory"); + cr0 |= 0x800; + asm volatile ("LCTLG 0,0,%0" : : "m" (cr0) : "memory"); + /* set clock comparator */ + /* read the TOD clock */ + asm volatile ("STCK %0" : "=m" (S390_lowcore.jiffy_timer_cc)); + S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY; + asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc)); +} + +/* + * Initialize the TOD clock and the CPU timer of + * the boot cpu. + */ +void __init time_init(void) +{ + int cc; + + /* kick the TOD clock */ + asm volatile ("STCK %1\n\t" + "IPM %0\n\t" + "SRL %0,28" : "=r" (cc), "=m" (init_timer_cc)); + switch (cc) { + case 0: /* clock in set state: all is fine */ + break; + case 1: /* clock in non-set state: FIXME */ + printk("time_init: TOD clock in non-set state\n"); + break; + case 2: /* clock in error state: FIXME */ + printk("time_init: TOD clock in error state\n"); + break; + case 3: /* clock in stopped or not-operational state: FIXME */ + printk("time_init: TOD clock stopped/non-operational\n"); + break; + } + /* request the 0x1004 external interrupt */ + if (register_external_interrupt(0x1004, do_timer_interrupt) != 0) + panic("Couldn't request external interrupts 0x1004"); + init_100hz_timer(); + init_timer_cc = S390_lowcore.jiffy_timer_cc; + init_timer_cc -= 0x8126d60e46000000LL - + (0x3c26700LL*1000000*4096); + tod_to_timeval(init_timer_cc, &xtime); +} diff --git a/arch/s390x/kernel/traps.c b/arch/s390x/kernel/traps.c new file mode 100644 index 000000000..3301ddc18 --- /dev/null +++ b/arch/s390x/kernel/traps.c @@ -0,0 +1,248 @@ +/* + * arch/s390/kernel/traps.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * Derived from "arch/i386/kernel/traps.c" + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* + * 'Traps.c' handles hardware traps and faults after we have saved some + * state in 'asm.s'. 
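One step back, the constants at the end of time_init() above deserve a note: 0x3c26700 is 63,072,000 seconds, exactly two 365-day years, and it is scaled by 1000000*4096 into TOD units (microseconds times 2^12). That suggests the large constant encodes a TOD value two years past the Unix epoch, with the subtraction re-basing the clock onto 1970-01-01; this is a reading of the arithmetic, not something stated in the source. The scaling itself is easy to check:

#include <stdio.h>

int main(void)
{
	long long secs = 0x3c26700LL;		/* 63072000 */

	printf("%lld days\n", secs / 86400);	/* 730 = 2 * 365 */
	printf("%#llx TOD units\n", secs * 1000000 * 4096);
	return 0;
}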
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if CONFIG_REMOTE_DEBUG +#include +#endif + +/* Called from entry.S only */ +extern void handle_per_exception(struct pt_regs *regs); + +typedef void pgm_check_handler_t(struct pt_regs *, long); +pgm_check_handler_t *pgm_check_table[128]; + +#ifdef CONFIG_SYSCTL +#ifdef CONFIG_PROCESS_DEBUG +int sysctl_userprocess_debug = 1; +#else +int sysctl_userprocess_debug = 0; +#endif +#endif + +extern pgm_check_handler_t do_page_fault; + +spinlock_t die_lock; + +void die(const char * str, struct pt_regs * regs, long err) +{ + console_verbose(); + spin_lock_irq(&die_lock); + printk("%s: %04lx\n", str, err & 0xffff); + show_regs(regs); + spin_unlock_irq(&die_lock); + do_exit(SIGSEGV); +} + +#define DO_ERROR(signr, str, name) \ +asmlinkage void name(struct pt_regs * regs, long interruption_code) \ +{ \ + do_trap(interruption_code, signr, str, regs, NULL); \ +} + +#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ +asmlinkage void name(struct pt_regs * regs, long interruption_code) \ +{ \ + siginfo_t info; \ + info.si_signo = signr; \ + info.si_errno = 0; \ + info.si_code = sicode; \ + info.si_addr = (void *)siaddr; \ + do_trap(interruption_code, signr, str, regs, &info); \ +} + +static void inline do_trap(long interruption_code, int signr, char *str, + struct pt_regs *regs, siginfo_t *info) +{ + if (regs->psw.mask & PSW_PROBLEM_STATE) { + struct task_struct *tsk = current; + tsk->thread.trap_no = interruption_code; + if (info) + force_sig_info(signr, info, tsk); + else + force_sig(signr, tsk); +#ifndef CONFIG_SYSCTL +#ifdef CONFIG_PROCESS_DEBUG + printk("User process fault: interruption code 0x%lX\n", + interruption_code); + show_regs(regs); +#endif +#else + if (sysctl_userprocess_debug) { + printk("User process fault: interruption code 0x%lX\n", + interruption_code); + show_regs(regs); + } +#endif + } else { + unsigned long fixup = search_exception_table(regs->psw.addr); + if (fixup) + regs->psw.addr = fixup; + else + die(str, regs, interruption_code); + } +} + +int do_debugger_trap(struct pt_regs *regs,int signal) +{ + if(regs->psw.mask&PSW_PROBLEM_STATE) + { + if(current->ptrace & PT_PTRACED) + force_sig(signal,current); + else + return 1; + } + else + { +#if CONFIG_REMOTE_DEBUG + if(gdb_stub_initialised) + { + gdb_stub_handle_exception((gdb_pt_regs *)regs,signal); + return 0; + } +#endif + return 1; + } + return 0; +} + +DO_ERROR(SIGSEGV, "Unknown program exception", default_trap_handler) +DO_ERROR(SIGILL, "privileged operation", privileged_op) +DO_ERROR(SIGILL, "execute exception", execute_exception) +DO_ERROR(SIGSEGV, "addressing exception", addressing_exception) +DO_ERROR(SIGFPE, "fixpoint divide exception", divide_exception) +DO_ERROR(SIGILL, "translation exception", translation_exception) +DO_ERROR(SIGILL, "special operand exception", special_op_exception) +DO_ERROR(SIGILL, "operand exception", operand_exception) +DO_ERROR(SIGILL, "specification exception", specification_exception); + +asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) +{ + __u8 opcode[6]; + __u16 *location; + int do_sig = 0; + + lock_kernel(); + location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); + /* WARNING don't change this check back to */ + /* int problem_state=(regs->psw.mask & PSW_PROBLEM_STATE); */ + /* & then doing if(problem_state) an int is too small for this */ + /* check on 64 bit. 
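For reference, each DO_ERROR() line above stamps out a complete first-level handler; expanded by hand, the privileged-operation entry is equivalent to:

/* DO_ERROR(SIGILL, "privileged operation", privileged_op) expands to: */
asmlinkage void privileged_op(struct pt_regs *regs, long interruption_code)
{
	do_trap(interruption_code, SIGILL, "privileged operation",
		regs, NULL);
}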
*/ + if(regs->psw.mask & PSW_PROBLEM_STATE) + get_user(*((__u16 *) opcode), location); + else + *((__u16 *)opcode)=*((__u16 *)location); + if(*((__u16 *)opcode)==S390_BREAKPOINT_U16) + { + if(do_debugger_trap(regs,SIGTRAP)) + do_sig=1; + } + else + do_sig = 1; + if (do_sig) + do_trap(interruption_code, SIGILL, "illegal operation", regs, NULL); + unlock_kernel(); +} + +asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) +{ + __u16 *location; + int do_sig = 0; + + lock_kernel(); + location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); + __asm__ volatile ("stfpc %0\n\t" + : "=m" (current->thread.fp_regs.fpc)); + /* Same code should work when we implement fpu emulation */ + /* provided we call data exception from the fpu emulator */ + if(current->thread.fp_regs.fpc&FPC_DXC_MASK) + { + current->thread.ieee_instruction_pointer=(addr_t)location; + force_sig(SIGFPE, current); + } + else + do_sig = 1; + if (do_sig) + do_trap(interruption_code, SIGILL, "data exception", regs, NULL); + unlock_kernel(); +} + + + +/* init is done in lowcore.S and head.S */ + +void __init trap_init(void) +{ + int i; + + for (i = 0; i < 128; i++) + pgm_check_table[i] = &default_trap_handler; + pgm_check_table[1] = &illegal_op; + pgm_check_table[2] = &privileged_op; + pgm_check_table[3] = &execute_exception; + pgm_check_table[5] = &addressing_exception; + pgm_check_table[6] = &specification_exception; + pgm_check_table[7] = &data_exception; + pgm_check_table[9] = ÷_exception; + pgm_check_table[0x12] = &translation_exception; + pgm_check_table[0x13] = &special_op_exception; + pgm_check_table[0x15] = &operand_exception; + pgm_check_table[4] = &do_page_fault; + pgm_check_table[0x10] = &do_page_fault; + pgm_check_table[0x11] = &do_page_fault; + pgm_check_table[0x1C] = &privileged_op; + pgm_check_table[0x38] = &addressing_exception; + pgm_check_table[0x3B] = &do_page_fault; +} + + +void handle_per_exception(struct pt_regs *regs) +{ + if(regs->psw.mask&PSW_PROBLEM_STATE) + { + per_struct *per_info=¤t->thread.per_info; + per_info->lowcore.words.perc_atmid=S390_lowcore.per_perc_atmid; + per_info->lowcore.words.address=S390_lowcore.per_address; + per_info->lowcore.words.access_id=S390_lowcore.per_access_id; + } + if(do_debugger_trap(regs,SIGTRAP)) + { + /* I've seen this possibly a task structure being reused ? */ + printk("Spurious per exception detected\n"); + printk("switching off per tracing for this task.\n"); + show_regs(regs); + /* Hopefully switching off per tracing will help us survive */ + regs->psw.mask &= ~PSW_PER_MASK; + } +} + diff --git a/arch/s390x/kernel/wrapper32.S b/arch/s390x/kernel/wrapper32.S new file mode 100644 index 000000000..2cc5cb6cf --- /dev/null +++ b/arch/s390x/kernel/wrapper32.S @@ -0,0 +1,1071 @@ +/* +* arch/s390/kernel/sys_wrapper31.S +* wrapper for 31 bit compatible system calls. 
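The wrappers that follow rely on three register-extension idioms, one per parameter kind: lgfr sign-extends a 32-bit int to 64 bits, llgfr zero-extends an unsigned value, and llgtr additionally clears the top (31-bit address-mode) bit so a user pointer becomes a clean 64-bit address. In C terms, for a 64-bit register value r whose low 32 bits came from the 31-bit caller (a sketch of the semantics, not of the instructions themselves; helper names are illustrative):

#include <stdint.h>

static uint64_t lgfr_sem(uint64_t r)	/* sign-extend int */
{
	return (uint64_t) (int64_t) (int32_t) r;
}

static uint64_t llgfr_sem(uint64_t r)	/* zero-extend unsigned */
{
	return (uint32_t) r;
}

static uint64_t llgtr_sem(uint64_t r)	/* 31-bit user pointer */
{
	return r & 0x7fffffffu;	/* also drops the AMODE(31) tag bit */
}

One wrapper below is worth flagging while reading: sys32_umask_wrapper extends %r3, although every comparable single-int wrapper (sys32_exit_wrapper, sys32_nice_wrapper) extends the first argument register %r2; this looks like a slip rather than a convention.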
+* +* S390 version +* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation +* Author(s): Gerhard Tonn (ton@de.ibm.com), +*/ + + .globl sys32_exit_wrapper +sys32_exit_wrapper: + lgfr %r2,%r2 # int + jg sys_exit # branch to sys_exit + + .globl sys32_read_wrapper +sys32_read_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # char * + llgfr %r4,%r4 # size_t + jg sys_read # branch to sys_read + + .globl sys32_write_wrapper +sys32_write_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # const char * + llgfr %r4,%r4 # size_t + jg sys_write # branch to system call + + .globl sys32_open_wrapper +sys32_open_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + lgfr %r4,%r4 # int + jg sys_open # branch to system call + + .globl sys32_close_wrapper +sys32_close_wrapper: + llgfr %r2,%r2 # unsigned int + jg sys_close # branch to system call + + .globl sys32_creat_wrapper +sys32_creat_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + jg sys_creat # branch to system call + + .globl sys32_link_wrapper +sys32_link_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # const char * + jg sys_link # branch to system call + + .globl sys32_unlink_wrapper +sys32_unlink_wrapper: + llgtr %r2,%r2 # const char * + jg sys_unlink # branch to system call + + .globl sys32_chdir_wrapper +sys32_chdir_wrapper: + llgtr %r2,%r2 # const char * + jg sys_chdir # branch to system call + + .globl sys32_time_wrapper +sys32_time_wrapper: + llgtr %r2,%r2 # int * + jg sys_time # branch to system call + + .globl sys32_mknod_wrapper +sys32_mknod_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + llgfr %r4,%r4 # dev + jg sys_mknod # branch to system call + + .globl sys32_chmod_wrapper +sys32_chmod_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # mode_t + jg sys_chmod # branch to system call + + .globl sys32_lchown16_wrapper +sys32_lchown16_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # __kernel_old_uid_emu31_t + llgfr %r4,%r4 # __kernel_old_uid_emu31_t + jg sys32_lchown16 # branch to system call + + .globl sys32_lseek_wrapper +sys32_lseek_wrapper: + llgfr %r2,%r2 # unsigned int + lgfr %r3,%r3 # off_t + llgfr %r4,%r4 # unsigned int + jg sys_lseek # branch to system call + +#sys32_getpid_wrapper # void + + .globl sys32_mount_wrapper +sys32_mount_wrapper: + llgtr %r2,%r2 # char * + llgtr %r3,%r3 # char * + llgtr %r4,%r4 # char * + llgfr %r5,%r5 # unsigned long + llgtr %r6,%r6 # void * + jg sys32_mount # branch to system call + + .globl sys32_oldumount_wrapper +sys32_oldumount_wrapper: + llgtr %r2,%r2 # char * + jg sys_oldumount # branch to system call + + .globl sys32_setuid16_wrapper +sys32_setuid16_wrapper: + llgfr %r2,%r2 # __kernel_old_uid_emu31_t + jg sys32_setuid16 # branch to system call + +#sys32_getuid16_wrapper # void + + .globl sys32_ptrace_wrapper +sys32_ptrace_wrapper: + lgfr %r2,%r2 # long + lgfr %r3,%r3 # long + llgtr %r4,%r4 # long + lgfr %r5,%r5 # long + jg sys32_ptrace # branch to system call + + .globl sys32_alarm_wrapper +sys32_alarm_wrapper: + llgtr %r2,%r2 # unsigned int + jg sys_alarm # branch to system call + +#sys32_pause_wrapper # void + + .globl sys32_utime_wrapper +sys32_utime_wrapper: + llgtr %r2,%r2 # char * + llgtr %r3,%r3 # struct utimbuf_emu31 * + jg sys32_utime # branch to system call + + .globl sys32_access_wrapper +sys32_access_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + jg sys_access # branch to system call + + .globl sys32_nice_wrapper +sys32_nice_wrapper: + lgfr %r2,%r2 # int + jg sys_nice # branch 
to system call + +#sys32_sync_wrapper # void + + .globl sys32_kill_wrapper +sys32_kill_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + jg sys_kill # branch to system call + + .globl sys32_rename_wrapper +sys32_rename_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # const char * + jg sys_rename # branch to system call + + .globl sys32_mkdir_wrapper +sys32_mkdir_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + jg sys_mkdir # branch to system call + + .globl sys32_rmdir_wrapper +sys32_rmdir_wrapper: + llgtr %r2,%r2 # const char * + jg sys_rmdir # branch to system call + + .globl sys32_dup_wrapper +sys32_dup_wrapper: + llgfr %r2,%r2 # unsigned int + jg sys_dup # branch to system call + + .globl sys32_pipe_wrapper +sys32_pipe_wrapper: + llgtr %r2,%r2 # u32 * + jg sys_pipe # branch to system call + + .globl sys32_times_wrapper +sys32_times_wrapper: + llgtr %r2,%r2 # struct tms_emu31 * + jg sys32_times # branch to system call + + .globl sys32_brk_wrapper +sys32_brk_wrapper: + llgtr %r2,%r2 # unsigned long + jg sys_brk # branch to system call + + .globl sys32_setgid16_wrapper +sys32_setgid16_wrapper: + llgfr %r2,%r2 # __kernel_old_gid_emu31_t + jg sys32_setgid16 # branch to system call + +#sys32_getgid16_wrapper # void + + .globl sys32_signal_wrapper +sys32_signal_wrapper: + lgfr %r2,%r2 # int + llgfr %r3,%r3 # __sighandler_t + jg sys_signal + +#sys32_geteuid16_wrapper # void + +#sys32_getegid16_wrapper # void + + .globl sys32_acct_wrapper +sys32_acct_wrapper: + llgtr %r2,%r2 # char * + jg sys_acct # branch to system call + + .globl sys32_umount_wrapper +sys32_umount_wrapper: + llgtr %r2,%r2 # char * + lgfr %r3,%r3 # int + jg sys_umount # branch to system call + + .globl sys32_ioctl_wrapper +sys32_ioctl_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # unsigned int + llgfr %r4,%r4 # unsigned int + jg sys32_ioctl # branch to system call + + .globl sys32_fcntl_wrapper +sys32_fcntl_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # unsigned int + llgfr %r4,%r4 # unsigned long + jg sys32_fcntl # branch to system call + + .globl sys32_setpgid_wrapper +sys32_setpgid_wrapper: + lgfr %r2,%r2 # pid_t + lgfr %r3,%r3 # pid_t + jg sys_setpgid # branch to system call + + .globl sys32_umask_wrapper +sys32_umask_wrapper: + lgfr %r3,%r3 # int + jg sys_umask # branch to system call + + .globl sys32_chroot_wrapper +sys32_chroot_wrapper: + llgtr %r2,%r2 # char * + jg sys_chroot # branch to system call + + .globl sys32_ustat_wrapper +sys32_ustat_wrapper: + llgfr %r2,%r2 # dev_t + llgtr %r3,%r3 # struct ustat * + jg sys_ustat + + .globl sys32_dup2_wrapper +sys32_dup2_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # unsigned int + jg sys_dup2 # branch to system call + +#sys32_getppid_wrapper # void + +#sys32_getpgrp_wrapper # void + +#sys32_setsid_wrapper # void + + .globl sys32_sigaction_wrapper +sys32_sigaction_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # const struct old_sigaction * + jg sys32_sigaction # branch to system call + + .globl sys32_setreuid16_wrapper +sys32_setreuid16_wrapper: + llgfr %r2,%r2 # __kernel_old_uid_emu31_t + llgfr %r3,%r3 # __kernel_old_uid_emu31_t + jg sys32_setreuid16 # branch to system call + + .globl sys32_setregid16_wrapper +sys32_setregid16_wrapper: + llgfr %r2,%r2 # __kernel_old_gid_emu31_t + llgfr %r3,%r3 # __kernel_old_gid_emu31_t + jg sys32_setregid16 # branch to system call + +#sys32_sigsuspend_wrapper # done in sigsuspend_glue + + .globl sys32_sigpending_wrapper +sys32_sigpending_wrapper: + llgtr %r2,%r2 # old_sigset_emu31_t 
* + jg sys32_sigpending # branch to system call + + .globl sys32_sethostname_wrapper +sys32_sethostname_wrapper: + llgtr %r2,%r2 # char * + lgfr %r3,%r3 # int + jg sys_sethostname # branch to system call + + .globl sys32_setrlimit_wrapper +sys32_setrlimit_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # struct rlimit_emu31 * + jg sys32_setrlimit # branch to system call + + .globl sys32_old_getrlimit_wrapper +sys32_old_getrlimit_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # struct rlimit_emu31 * + jg sys32_old_getrlimit # branch to system call + + .globl sys32_mmap2_wrapper +sys32_mmap2_wrapper: + llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * + jg sys32_mmap2 # branch to system call + + .globl sys32_getrusage_wrapper +sys32_getrusage_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # struct rusage_emu31 * + jg sys32_getrusage # branch to system call + + .globl sys32_gettimeofday_wrapper +sys32_gettimeofday_wrapper: + llgtr %r2,%r2 # struct timeval_emu31 * + llgtr %r3,%r3 # struct timezone * + jg sys32_gettimeofday # branch to system call + + .globl sys32_settimeofday_wrapper +sys32_settimeofday_wrapper: + llgtr %r2,%r2 # struct timeval_emu31 * + llgtr %r3,%r3 # struct timezone * + jg sys32_settimeofday # branch to system call + + .globl sys32_getgroups16_wrapper +sys32_getgroups16_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # __kernel_old_gid_emu31_t * + jg sys32_getgroups16 # branch to system call + + .globl sys32_setgroups16_wrapper +sys32_setgroups16_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # __kernel_old_gid_emu31_t * + jg sys32_setgroups16 # branch to system call + + .globl sys32_symlink_wrapper +sys32_symlink_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # const char * + jg sys_symlink # branch to system call + + .globl sys32_readlink_wrapper +sys32_readlink_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # char * + lgfr %r4,%r4 # int + jg sys_readlink # branch to system call + + .globl sys32_uselib_wrapper +sys32_uselib_wrapper: + llgtr %r2,%r2 # const char * + jg sys_uselib # branch to system call + + .globl sys32_swapon_wrapper +sys32_swapon_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + jg sys_swapon # branch to system call + + .globl sys32_reboot_wrapper +sys32_reboot_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + llgfr %r4,%r4 # unsigned int + llgtr %r5,%r5 # void * + jg sys_reboot # branch to system call + + .globl old32_readdir_wrapper +old32_readdir_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # void * + llgfr %r4,%r4 # unsigned int + jg old32_readdir # branch to system call + + .globl old32_mmap_wrapper +old32_mmap_wrapper: + llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * + jg old32_mmap # branch to system call + + .globl sys32_munmap_wrapper +sys32_munmap_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + jg sys_munmap # branch to system call + + .globl sys32_truncate_wrapper +sys32_truncate_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # unsigned long + jg sys_truncate # branch to system call + + .globl sys32_ftruncate_wrapper +sys32_ftruncate_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # unsigned long + jg sys_ftruncate # branch to system call + + .globl sys32_fchmod_wrapper +sys32_fchmod_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # mode_t + jg sys_fchmod # branch to system call + + .globl sys32_fchown16_wrapper +sys32_fchown16_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # __kernel_old_uid_emu31_t * + llgtr %r4,%r4 # 
__kernel_old_gid_emu31_t * + jg sys32_fchown16 # branch to system call + + .globl sys32_getpriority_wrapper +sys32_getpriority_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + jg sys_getpriority # branch to system call + + .globl sys32_setpriority_wrapper +sys32_setpriority_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + lgfr %r4,%r4 # int + jg sys_setpriority # branch to system call + + .globl sys32_statfs_wrapper +sys32_statfs_wrapper: + llgtr %r2,%r2 # char * + llgtr %r3,%r3 # struct statfs_emu31 * + jg sys32_statfs # branch to system call + + .globl sys32_fstatfs_wrapper +sys32_fstatfs_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # struct statfs_emu31 * + jg sys32_fstatfs # branch to system call + + .globl sys32_socketcall_wrapper +sys32_socketcall_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # u32 * + jg sys32_socketcall # branch to system call + + .globl sys32_syslog_wrapper +sys32_syslog_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # char * + lgfr %r4,%r4 # int + jg sys_syslog # branch to system call + + .globl sys32_setitimer_wrapper +sys32_setitimer_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # struct itimerval_emu31 * + llgtr %r4,%r4 # struct itimerval_emu31 * + jg sys32_setitimer # branch to system call + + .globl sys32_getitimer_wrapper +sys32_getitimer_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # struct itimerval_emu31 * + jg sys32_getitimer # branch to system call + + .globl sys32_newstat_wrapper +sys32_newstat_wrapper: + llgtr %r2,%r2 # char * + llgtr %r3,%r3 # struct stat_emu31 * + jg sys32_newstat # branch to system call + + .globl sys32_newlstat_wrapper +sys32_newlstat_wrapper: + llgtr %r2,%r2 # char * + llgtr %r3,%r3 # struct stat_emu31 * + jg sys32_newlstat # branch to system call + + .globl sys32_newfstat_wrapper +sys32_newfstat_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # struct stat_emu31 * + jg sys32_newfstat # branch to system call + +#sys32_vhangup_wrapper # void + + .globl sys32_wait4_wrapper +sys32_wait4_wrapper: + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # unsigned int * + lgfr %r4,%r4 # int + llgtr %r5,%r5 # struct rusage * + jg sys32_wait4 # branch to system call + + .globl sys32_swapoff_wrapper +sys32_swapoff_wrapper: + llgtr %r2,%r2 # const char * + jg sys_swapoff # branch to system call + + .globl sys32_sysinfo_wrapper +sys32_sysinfo_wrapper: + llgtr %r2,%r2 # struct sysinfo_emu31 * + jg sys32_sysinfo # branch to system call + + .globl sys32_ipc_wrapper +sys32_ipc_wrapper: + llgfr %r2,%r2 # uint + lgfr %r3,%r3 # int + lgfr %r4,%r4 # int + lgfr %r5,%r5 # int + llgtr %r6,%r6 # void * + jg sys32_ipc # branch to system call + + .globl sys32_fsync_wrapper +sys32_fsync_wrapper: + llgfr %r2,%r2 # unsigned int + jg sys_fsync # branch to system call + +#sys32_sigreturn_wrapper # done in sigreturn_glue + +#sys32_clone_wrapper # done in clone_glue + + .globl sys32_setdomainname_wrapper +sys32_setdomainname_wrapper: + llgtr %r2,%r2 # char * + lgfr %r3,%r3 # int + jg sys_setdomainname # branch to system call + + .globl sys32_newuname_wrapper +sys32_newuname_wrapper: + llgtr %r2,%r2 # struct new_utsname * + jg sys_newuname # branch to system call + + .globl sys32_adjtimex_wrapper +sys32_adjtimex_wrapper: + llgtr %r2,%r2 # struct timex_emu31 * + jg sys32_adjtimex # branch to system call + + .globl sys32_mprotect_wrapper +sys32_mprotect_wrapper: + llgtr %r2,%r2 # unsigned long (actually pointer + llgfr %r3,%r3 # size_t + llgfr %r4,%r4 # unsigned long + jg sys_mprotect # branch to system call + + .globl sys32_sigprocmask_wrapper 
+sys32_sigprocmask_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # old_sigset_emu31 * + llgtr %r4,%r4 # old_sigset_emu31 * + jg sys32_sigprocmask # branch to system call + + .globl sys32_create_module_wrapper +sys32_create_module_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # size_t + jg sys32_create_module # branch to system call + + .globl sys32_init_module_wrapper +sys32_init_module_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # struct module * + jg sys32_init_module # branch to system call + + .globl sys32_delete_module_wrapper +sys32_delete_module_wrapper: + llgtr %r2,%r2 # const char * + jg sys32_delete_module # branch to system call + + .globl sys32_get_kernel_syms_wrapper +sys32_get_kernel_syms_wrapper: + llgtr %r2,%r2 # struct kernel_sym_emu31 * + jg sys32_get_kernel_syms # branch to system call + + .globl sys32_quotactl_wrapper +sys32_quotactl_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # const char * + lgfr %r4,%r4 # int + llgtr %r5,%r5 # caddr_t + jg sys32_quotactl # branch to system call + + .globl sys32_getpgid_wrapper +sys32_getpgid_wrapper: + lgfr %r2,%r2 # pid_t + jg sys_getpgid # branch to system call + + .globl sys32_fchdir_wrapper +sys32_fchdir_wrapper: + llgfr %r2,%r2 # unsigned int + jg sys_fchdir # branch to system call + + .globl sys32_bdflush_wrapper +sys32_bdflush_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # long + jg sys_bdflush # branch to system call + + .globl sys32_sysfs_wrapper +sys32_sysfs_wrapper: + lgfr %r2,%r2 # int + llgfr %r3,%r3 # unsigned long + llgfr %r4,%r4 # unsigned long + jg sys_sysfs # branch to system call + + .globl sys32_personality_wrapper +sys32_personality_wrapper: + llgfr %r2,%r2 # unsigned long + jg sys_personality # branch to system call + + .globl sys32_setfsuid16_wrapper +sys32_setfsuid16_wrapper: + llgfr %r2,%r2 # __kernel_old_uid_emu31_t + jg sys32_setfsuid16 # branch to system call + + .globl sys32_setfsgid16_wrapper +sys32_setfsgid16_wrapper: + llgfr %r2,%r2 # __kernel_old_gid_emu31_t + jg sys32_setfsgid16 # branch to system call + + .globl sys32_llseek_wrapper +sys32_llseek_wrapper: + llgfr %r2,%r2 # unsigned int + lgfr %r3,%r3 # off_t + llgfr %r4,%r4 # unsigned int + jg sys_llseek # branch to system call + + .globl sys32_getdents_wrapper +sys32_getdents_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # void * + llgfr %r4,%r4 # unsigned int + jg sys32_getdents # branch to system call + + .globl sys32_select_wrapper +sys32_select_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # fd_set * + llgtr %r4,%r4 # fd_set * + llgtr %r5,%r5 # fd_set * + llgtr %r6,%r6 # struct timeval_emu31 * + jg sys32_select # branch to system call + + .globl sys32_flock_wrapper +sys32_flock_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # unsigned int + jg sys_flock # branch to system call + + .globl sys32_msync_wrapper +sys32_msync_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + lgfr %r4,%r4 # int + jg sys_msync # branch to system call + + .globl sys32_readv_wrapper +sys32_readv_wrapper: + llgfr %r2,%r2 # unsigned long + llgtr %r3,%r3 # const struct iovec_emu31 * + llgfr %r4,%r4 # unsigned long + jg sys32_readv # branch to system call + + .globl sys32_writev_wrapper +sys32_writev_wrapper: + llgfr %r2,%r2 # unsigned long + llgtr %r3,%r3 # const struct iovec_emu31 * + llgfr %r4,%r4 # unsigned long + jg sys32_writev # branch to system call + + .globl sys32_getsid_wrapper +sys32_getsid_wrapper: + lgfr %r2,%r2 # pid_t + jg sys_getsid # branch to system call + + .globl sys32_fdatasync_wrapper 
+sys32_fdatasync_wrapper: + llgfr %r2,%r2 # unsigned int + jg sys_fdatasync # branch to system call + +#sys32_sysctl_wrapper # tbd + + .globl sys32_mlock_wrapper +sys32_mlock_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + jg sys_mlock # branch to system call + + .globl sys32_munlock_wrapper +sys32_munlock_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + jg sys_munlock # branch to system call + + .globl sys32_mlockall_wrapper +sys32_mlockall_wrapper: + lgfr %r2,%r2 # int + jg sys_mlockall # branch to system call + +#sys32_munlockall_wrapper # void + + .globl sys32_sched_setparam_wrapper +sys32_sched_setparam_wrapper: + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # struct sched_param * + jg sys_sched_setparam # branch to system call + + .globl sys32_sched_getparam_wrapper +sys32_sched_getparam_wrapper: + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # struct sched_param * + jg sys_sched_getparam # branch to system call + + .globl sys32_sched_setscheduler_wrapper +sys32_sched_setscheduler_wrapper: + lgfr %r2,%r2 # pid_t + lgfr %r3,%r3 # int + llgtr %r4,%r4 # struct sched_param * + jg sys_sched_setscheduler # branch to system call + + .globl sys32_sched_getscheduler_wrapper +sys32_sched_getscheduler_wrapper: + lgfr %r2,%r2 # pid_t + jg sys_sched_getscheduler # branch to system call + +#sys32_sched_yield_wrapper # void + + .globl sys32_sched_get_priority_max_wrapper +sys32_sched_get_priority_max_wrapper: + lgfr %r2,%r2 # int + jg sys_sched_get_priority_max # branch to system call + + .globl sys32_sched_get_priority_min_wrapper +sys32_sched_get_priority_min_wrapper: + lgfr %r2,%r2 # int + jg sys_sched_get_priority_min # branch to system call + + .globl sys32_sched_rr_get_interval_wrapper +sys32_sched_rr_get_interval_wrapper: + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # struct timespec_emu31 * + jg sys32_sched_rr_get_interval # branch to system call + + .globl sys32_nanosleep_wrapper +sys32_nanosleep_wrapper: + llgtr %r2,%r2 # struct timespec_emu31 * + llgtr %r3,%r3 # struct timespec_emu31 * + jg sys32_nanosleep # branch to system call + + .globl sys32_mremap_wrapper +sys32_mremap_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # unsigned long + llgfr %r4,%r4 # unsigned long + llgfr %r5,%r5 # unsigned long + llgfr %r6,%r6 # unsigned long + jg sys_mremap # branch to system call + + .globl sys32_setresuid16_wrapper +sys32_setresuid16_wrapper: + llgfr %r2,%r2 # __kernel_old_uid_emu31_t + llgfr %r3,%r3 # __kernel_old_uid_emu31_t + llgfr %r4,%r4 # __kernel_old_uid_emu31_t + jg sys32_setresuid16 # branch to system call + + .globl sys32_getresuid16_wrapper +sys32_getresuid16_wrapper: + llgtr %r2,%r2 # __kernel_old_uid_emu31_t * + llgtr %r3,%r3 # __kernel_old_uid_emu31_t * + llgtr %r4,%r4 # __kernel_old_uid_emu31_t * + jg sys32_getresuid16 # branch to system call + + .globl sys32_query_module_wrapper +sys32_query_module_wrapper: + llgtr %r2,%r2 # const char * + lgfr %r3,%r3 # int + llgtr %r4,%r4 # char * + llgfr %r5,%r5 # size_t + llgtr %r6,%r6 # size_t * + jg sys32_query_module # branch to system call + + .globl sys32_poll_wrapper +sys32_poll_wrapper: + llgtr %r2,%r2 # struct pollfd * + llgfr %r3,%r3 # unsigned int + lgfr %r4,%r4 # long + jg sys_poll # branch to system call + + .globl sys32_nfsservctl_wrapper +sys32_nfsservctl_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # struct nfsctl_arg_emu31 * + llgtr %r4,%r4 # union nfsctl_res_emu31 * + jg sys32_nfsservctl # branch to system call + + .globl sys32_setresgid16_wrapper +sys32_setresgid16_wrapper: + llgfr %r2,%r2 # 
__kernel_old_gid_emu31_t + llgfr %r3,%r3 # __kernel_old_gid_emu31_t + llgfr %r4,%r4 # __kernel_old_gid_emu31_t + jg sys32_setresgid16 # branch to system call + + .globl sys32_getresgid16_wrapper +sys32_getresgid16_wrapper: + llgtr %r2,%r2 # __kernel_old_gid_emu31_t * + llgtr %r3,%r3 # __kernel_old_gid_emu31_t * + llgtr %r4,%r4 # __kernel_old_gid_emu31_t * + jg sys32_getresgid16 # branch to system call + + .globl sys32_prctl_wrapper +sys32_prctl_wrapper: + lgfr %r2,%r2 # int + llgfr %r3,%r3 # unsigned long + llgfr %r4,%r4 # unsigned long + llgfr %r5,%r5 # unsigned long + llgfr %r6,%r6 # unsigned long + jg sys_prctl # branch to system call + +#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue + + .globl sys32_rt_sigaction_wrapper +sys32_rt_sigaction_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # const struct sigaction_emu31 * + llgtr %r4,%r4 # const struct sigaction_emu31 * + llgfr %r5,%r5 # size_t + jg sys32_rt_sigaction # branch to system call + + .globl sys32_rt_sigprocmask_wrapper +sys32_rt_sigprocmask_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # old_sigset_emu31 * + llgtr %r4,%r4 # old_sigset_emu31 * + jg sys32_rt_sigprocmask # branch to system call + + .globl sys32_rt_sigpending_wrapper +sys32_rt_sigpending_wrapper: + llgtr %r2,%r2 # sigset_emu31 * + llgfr %r3,%r3 # size_t + jg sys32_rt_sigpending # branch to system call + + .globl sys32_rt_sigtimedwait_wrapper +sys32_rt_sigtimedwait_wrapper: + llgtr %r2,%r2 # const sigset_emu31_t * + llgtr %r3,%r3 # siginfo_emu31_t * + llgtr %r4,%r4 # const struct timespec_emu31 * + llgfr %r5,%r5 # size_t + jg sys32_rt_sigtimedwait # branch to system call + + .globl sys32_rt_sigqueueinfo_wrapper +sys32_rt_sigqueueinfo_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + llgtr %r4,%r4 # siginfo_emu31_t * + jg sys32_rt_sigqueueinfo # branch to system call + +#sys32_rt_sigsuspend_wrapper # done in rt_sigsuspend_glue + + .globl sys32_pread_wrapper +sys32_pread_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # char * + llgfr %r4,%r4 # size_t + llgfr %r5,%r5 # u32 + llgfr %r6,%r6 # u32 + jg sys32_pread # branch to system call + + .globl sys32_pwrite_wrapper +sys32_pwrite_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # const char * + llgfr %r4,%r4 # size_t + llgfr %r5,%r5 # u32 + llgfr %r6,%r6 # u32 + jg sys32_pwrite # branch to system call + + .globl sys32_chown16_wrapper +sys32_chown16_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # __kernel_old_uid_emu31_t + llgfr %r4,%r4 # __kernel_old_gid_emu31_t + jg sys32_chown16 # branch to system call + + .globl sys32_getcwd_wrapper +sys32_getcwd_wrapper: + llgtr %r2,%r2 # char * + llgfr %r3,%r3 # unsigned long + jg sys_getcwd # branch to system call + + .globl sys32_capget_wrapper +sys32_capget_wrapper: + llgtr %r2,%r2 # cap_user_header_t + llgtr %r3,%r3 # cap_user_data_t + jg sys_capget # branch to system call + + .globl sys32_capset_wrapper +sys32_capset_wrapper: + llgtr %r2,%r2 # cap_user_header_t + llgtr %r3,%r3 # const cap_user_data_t + jg sys_capset # branch to system call + + .globl sys32_sigaltstack_wrapper +sys32_sigaltstack_wrapper: + llgtr %r2,%r2 # const stack_emu31_t * + llgtr %r3,%r3 # stack_emu31_t * + jg sys32_sigaltstack + + .globl sys32_sendfile_wrapper +sys32_sendfile_wrapper: + lgfr %r2,%r2 # int + lgfr %r3,%r3 # int + llgtr %r4,%r4 # __kernel_off_emu31_t * + llgfr %r5,%r5 # size_t + jg sys32_sendfile # branch to system call + +#sys32_vfork_wrapper # done in vfork_glue + + .globl sys32_truncate64_wrapper +sys32_truncate64_wrapper: + llgtr %r2,%r2 # const char 
* + lgfr %r3,%r3 # s32 + llgfr %r4,%r4 # u32 + jg sys32_truncate64 # branch to system call + + .globl sys32_ftruncate64_wrapper +sys32_ftruncate64_wrapper: + llgfr %r2,%r2 # unsigned int + lgfr %r3,%r3 # s32 + llgfr %r4,%r4 # u32 + jg sys32_ftruncate64 # branch to system call + + .globl sys32_lchown_wrapper +sys32_lchown_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # uid_t + llgfr %r4,%r4 # gid_t + jg sys_lchown # branch to system call + +#sys32_getuid_wrapper # void +#sys32_getgid_wrapper # void +#sys32_geteuid_wrapper # void +#sys32_getegid_wrapper # void + + .globl sys32_setreuid_wrapper +sys32_setreuid_wrapper: + llgfr %r2,%r2 # uid_t + llgfr %r3,%r3 # uid_t + jg sys_setreuid # branch to system call + + .globl sys32_setregid_wrapper +sys32_setregid_wrapper: + llgfr %r2,%r2 # gid_t + llgfr %r3,%r3 # gid_t + jg sys_setregid # branch to system call + + .globl sys32_getgroups_wrapper +sys32_getgroups_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # gid_t * + jg sys_getgroups # branch to system call + + .globl sys32_setgroups_wrapper +sys32_setgroups_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # gid_t * + jg sys_setgroups # branch to system call + + .globl sys32_fchown_wrapper +sys32_fchown_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr %r3,%r3 # uid_t + llgfr %r4,%r4 # gid_t + jg sys_fchown # branch to system call + + .globl sys32_setresuid_wrapper +sys32_setresuid_wrapper: + llgfr %r2,%r2 # uid_t + llgfr %r3,%r3 # uid_t + llgfr %r4,%r4 # uid_t + jg sys_setresuid # branch to system call + + .globl sys32_getresuid_wrapper +sys32_getresuid_wrapper: + llgtr %r2,%r2 # uid_t * + llgtr %r3,%r3 # uid_t * + llgtr %r4,%r4 # uid_t * + jg sys_getresuid # branch to system call + + .globl sys32_setresgid_wrapper +sys32_setresgid_wrapper: + llgfr %r2,%r2 # gid_t + llgfr %r3,%r3 # gid_t + llgfr %r4,%r4 # gid_t + jg sys_setresgid # branch to system call + + .globl sys32_getresgid_wrapper +sys32_getresgid_wrapper: + llgtr %r2,%r2 # gid_t * + llgtr %r3,%r3 # gid_t * + llgtr %r4,%r4 # gid_t * + jg sys_getresgid # branch to system call + + .globl sys32_chown_wrapper +sys32_chown_wrapper: + llgtr %r2,%r2 # const char * + llgfr %r3,%r3 # uid_t + llgfr %r4,%r4 # gid_t + jg sys_chown # branch to system call + + .globl sys32_setuid_wrapper +sys32_setuid_wrapper: + llgfr %r2,%r2 # uid_t + jg sys_setuid # branch to system call + + .globl sys32_setgid_wrapper +sys32_setgid_wrapper: + llgfr %r2,%r2 # gid_t + jg sys_setgid # branch to system call + + .globl sys32_setfsuid_wrapper +sys32_setfsuid_wrapper: + llgfr %r2,%r2 # uid_t + jg sys_setfsuid # branch to system call + + .globl sys32_setfsgid_wrapper +sys32_setfsgid_wrapper: + llgfr %r2,%r2 # gid_t + jg sys_setfsgid # branch to system call + + .globl sys32_pivot_root_wrapper +sys32_pivot_root_wrapper: + llgtr %r2,%r2 # const char * + llgtr %r3,%r3 # const char * + jg sys_pivot_root # branch to system call + + .globl sys32_mincore_wrapper +sys32_mincore_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + llgtr %r4,%r4 # unsigned char * + jg sys_mincore # branch to system call + + .globl sys32_madvise_wrapper +sys32_madvise_wrapper: + llgfr %r2,%r2 # unsigned long + llgfr %r3,%r3 # size_t + lgfr %r4,%r4 # int + jg sys_madvise # branch to system call + + .globl sys32_getdents64_wrapper +sys32_getdents64_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # void * + llgfr %r4,%r4 # unsigned int + jg sys_getdents64 # branch to system call + + .globl sys32_fcntl64_wrapper +sys32_fcntl64_wrapper: + llgfr %r2,%r2 # unsigned int + llgfr 
%r3,%r3 # unsigned int + llgfr %r4,%r4 # unsigned long + jg sys32_fcntl64 # branch to system call + diff --git a/arch/s390x/lib/Makefile b/arch/s390x/lib/Makefile new file mode 100644 index 000000000..30ee0f662 --- /dev/null +++ b/arch/s390x/lib/Makefile @@ -0,0 +1,18 @@ +# +# Makefile for s390-specific library files. +# + +ifdef SMP +.S.o: + $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o +else +.S.o: + $(CC) -D__ASSEMBLY__ -traditional -c $< -o $*.o +endif + +L_TARGET = lib.a + +obj-y = checksum.o delay.o memset.o strcmp.o strncpy.o uaccess.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/s390x/lib/checksum.c b/arch/s390x/lib/checksum.c new file mode 100644 index 000000000..489299b3a --- /dev/null +++ b/arch/s390x/lib/checksum.c @@ -0,0 +1,40 @@ +/* + * arch/s390/lib/checksum.c + * S390 fast network checksum routines + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Ulrich Hild (first version), + * Martin Schwidefsky (schwidefsky@de.ibm.com), + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), + * + * This file contains network checksum routines + */ + +#include +#include +#include +#include +#include + +/* + * computes a partial checksum, e.g. for TCP/UDP fragments + */ +unsigned int +csum_partial (const unsigned char *buff, int len, unsigned int sum) +{ + /* + * Experiments with ethernet and slip connections show that buff + * is aligned on either a 2-byte or 4-byte boundary. + */ + __asm__ __volatile__ ( + " lgr 2,%1\n" /* address in gpr 2 */ + " lgfr 3,%2\n" /* length in gpr 3 */ + "0: cksm %0,2\n" /* do checksum on longs */ + " jo 0b\n" + : "+&d" (sum) + : "d" (buff), "d" (len) + : "cc", "2", "3" ); + return sum; +} + diff --git a/arch/s390x/lib/delay.c b/arch/s390x/lib/delay.c new file mode 100644 index 000000000..012a95308 --- /dev/null +++ b/arch/s390x/lib/delay.c @@ -0,0 +1,50 @@ +/* + * arch/s390/kernel/delay.c + * Precise Delay Loops for S390 + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * + * Derived from "arch/i386/lib/delay.c" + * Copyright (C) 1993 Linus Torvalds + * Copyright (C) 1997 Martin Mares + */ + +#include +#include +#include + +#ifdef CONFIG_SMP +#include +#endif + +void __delay(unsigned long loops) +{ + /* + * To end the bloody stupid and useless discussion about the + * BogoMips number I took the liberty to define the __delay + * function in a way that the resulting BogoMips number will + * yield the megahertz number of the cpu. The important function + * is udelay and that is done using the tod clock. -- martin.
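+ * (In the TOD clock format bit 51 ticks once per microsecond, so an + * STCK difference divided by 4096, a shift by twelve bits, is the + * elapsed time in microseconds; that is the test __udelay uses below.)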
+ */ + __asm__ __volatile__( + "0: brct %0,0b" + : /* no outputs */ : "r" (loops/2) ); +} + +/* + * Waits for 'usecs' microseconds using the tod clock + */ +void __udelay(unsigned long usecs) +{ + uint64_t start_cc, end_cc; + + if (usecs == 0) + return; + asm volatile ("STCK %0" : "=m" (start_cc)); + do { + asm volatile ("STCK %0" : "=m" (end_cc)); + } while (((end_cc - start_cc)/4096) < usecs); +} + diff --git a/arch/s390x/lib/memset.S b/arch/s390x/lib/memset.S new file mode 100644 index 000000000..1e4b035d2 --- /dev/null +++ b/arch/s390x/lib/memset.S @@ -0,0 +1,30 @@ +/* + * arch/s390/lib/memset.S + * S390 fast memset routine + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +/* + * R2 = address to memory area + * R3 = byte to fill memory with + * R4 = number of bytes to fill + */ + .globl memset +memset: + LTGR 4,4 + JZ memset_end + LGR 0,2 # save pointer to memory area + LGR 1,3 # move pad byte to R1 + LGR 3,4 + SGR 4,4 # no source for MVCLE, only a pad byte + SGR 5,5 + MVCLE 2,4,0(1) # that's it, MVCLE is your friend + JO .-4 + LGR 2,0 # return pointer to mem. +memset_end: + BR 14 + + diff --git a/arch/s390x/lib/strcmp.S b/arch/s390x/lib/strcmp.S new file mode 100644 index 000000000..124f3df26 --- /dev/null +++ b/arch/s390x/lib/strcmp.S @@ -0,0 +1,27 @@ +/* + * arch/s390/lib/strcmp.S + * S390 strcmp routine + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +/* + * R2 = address of compare string + * R3 = address of test string + */ + .globl strcmp +strcmp: + SGR 0,0 + SGR 1,1 + CLST 2,3 + JO .-4 + JE strcmp_equal + IC 0,0(3) + IC 1,0(2) + SGR 1,0 +strcmp_equal: + LGR 2,1 + BR 14 + diff --git a/arch/s390x/lib/strncpy.S b/arch/s390x/lib/strncpy.S new file mode 100644 index 000000000..0360a38c3 --- /dev/null +++ b/arch/s390x/lib/strncpy.S @@ -0,0 +1,30 @@ +/* + * arch/s390/kernel/strncpy.S + * S390 strncpy routine + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +/* + * R2 = address of destination + * R3 = address of source string + * R4 = max number of bytes to copy + */ + .globl strncpy +strncpy: + LGR 1,2 # don't touch address in R2 + LTR 4,4 + JZ strncpy_exit # 0 bytes -> nothing to do + SGR 0,0 +strncpy_loop: + ICM 0,1,0(3) # ICM sets the cc, IC does not + LA 3,1(3) + STC 0,0(1) + LA 1,1(1) + JZ strncpy_exit # ICM inserted a 0x00 + BRCTG 4,strncpy_loop # R4 -= 1, jump to strncpy_loop if > 0 +strncpy_exit: + BR 14 + diff --git a/arch/s390x/lib/uaccess.S b/arch/s390x/lib/uaccess.S new file mode 100644 index 000000000..2a5356c19 --- /dev/null +++ b/arch/s390x/lib/uaccess.S @@ -0,0 +1,45 @@ +/* + * arch/s390x/lib/uaccess.S + * fixup routines for copy_{from|to}_user functions.
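+ * (Roughly: they are entered from the program check handler when an + * MVCLE user copy faults, clip the length at the start of the faulting + * page, copy the bytes that are still reachable and branch back to the + * caller's fixup point.)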
+ * + * S390 + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * These functions have a non-standard call interface + */ + +#include + + .text + .align 4 + .globl __copy_from_user_fixup +__copy_from_user_fixup: + lg 1,__LC_PGM_OLD_PSW+8 +0: lghi 3,-4096 + ng 3,__LC_TRANS_EXC_ADDR + sgr 3,4 + bm 4(1) +1: mvcle 2,4,0 + b 4(1) + .section __ex_table,"a" + .align 8 + .quad 1b,0b + .previous + + .align 4 + .text + .globl __copy_to_user_fixup +__copy_to_user_fixup: + lg 1,__LC_PGM_OLD_PSW+8 +0: lghi 5,-4096 + ng 5,__LC_TRANS_EXC_ADDR + sgr 5,4 + bm 4(1) +1: mvcle 4,2,0 + b 4(1) + .section __ex_table,"a" + .align 8 + .quad 1b,0b + .previous + diff --git a/arch/s390x/mm/Makefile b/arch/s390x/mm/Makefile new file mode 100644 index 000000000..73e25bd30 --- /dev/null +++ b/arch/s390x/mm/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the linux s390x-specific parts of the memory manager. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definition is now in the main makefile... + +O_TARGET := mm.o + +obj-y := init.o fault.o ioremap.o extable.o + +include $(TOPDIR)/Rules.make diff --git a/arch/s390x/mm/extable.c b/arch/s390x/mm/extable.c new file mode 100644 index 000000000..774e86854 --- /dev/null +++ b/arch/s390x/mm/extable.c @@ -0,0 +1,61 @@ +/* + * arch/s390/mm/extable.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com) + * + * Derived from "arch/i386/mm/extable.c" + */ + +#include +#include +#include + +extern const struct exception_table_entry __start___ex_table[]; +extern const struct exception_table_entry __stop___ex_table[]; + +static inline unsigned long +search_one_table(const struct exception_table_entry *first, + const struct exception_table_entry *last, + unsigned long value) +{ + while (first <= last) { + const struct exception_table_entry *mid; + long diff; + + mid = (last - first) / 2 + first; + diff = mid->insn - value; + if (diff == 0) + return mid->fixup; + else if (diff < 0) + first = mid+1; + else + last = mid-1; + } + return 0; +} + +unsigned long +search_exception_table(unsigned long addr) +{ + unsigned long ret; + +#ifndef CONFIG_MODULES + /* There is only the kernel to search. */ + ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr); + if (ret) return FIX_PSW(ret); +#else + /* The kernel is the last "module" -- no need to treat it special.
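Each module has its own sorted exception table, so the loop below simply repeats the binary search of search_one_table() for each of them.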
*/ + struct module *mp; + for (mp = module_list; mp != NULL; mp = mp->next) { + if (mp->ex_table_start == NULL) + continue; + ret = search_one_table(mp->ex_table_start, + mp->ex_table_end - 1, addr); + if (ret) return FIX_PSW(ret); + } +#endif + + return 0; +} diff --git a/arch/s390x/mm/fault.c b/arch/s390x/mm/fault.c new file mode 100644 index 000000000..4c324b690 --- /dev/null +++ b/arch/s390x/mm/fault.c @@ -0,0 +1,266 @@ +/* + * arch/s390/mm/fault.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com) + * + * Derived from "arch/i386/mm/fault.c" + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_SYSCTL +extern int sysctl_userprocess_debug; +#endif + +extern void die(const char *,struct pt_regs *,long); + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + * + * error_code: + * ****0004 Protection -> Write-Protection (suppression) + * ****0010 Segment translation -> Not present (nullification) + * ****0011 Page translation -> Not present (nullification) + * ****003B Region third exception -> Not present (nullification) + */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct * vma; + unsigned long address; + unsigned long fixup; + int write; + unsigned long psw_mask; + unsigned long psw_addr; + int si_code = SEGV_MAPERR; + int kernel_address = 0; + + /* + * get psw mask of Program old psw to find out + * if user or kernel mode + */ + + psw_mask = S390_lowcore.program_old_psw.mask; + psw_addr = S390_lowcore.program_old_psw.addr; + + /* + * get the failing address + * more specifically the segment and page table portion of + * the address + */ + + address = S390_lowcore.trans_exc_code&-4096L; + + tsk = current; + mm = tsk->mm; + + if (in_interrupt() || !mm) + goto no_context; + + /* + * Check which address space the address belongs to + */ + switch (S390_lowcore.trans_exc_code & 3) + { + case 0: /* Primary Segment Table Descriptor */ + kernel_address = 1; + goto no_context; + + case 1: /* STD determined via access register */ + if (S390_lowcore.exc_access_id == 0) + { + kernel_address = 1; + goto no_context; + } + if (regs && S390_lowcore.exc_access_id < NUM_ACRS) + { + if (regs->acrs[S390_lowcore.exc_access_id] == 0) + { + kernel_address = 1; + goto no_context; + } + if (regs->acrs[S390_lowcore.exc_access_id] == 1) + { + /* user space address */ + break; + } + } + die("page fault via unknown access register", regs, error_code); + break; + + case 2: /* Secondary Segment Table Descriptor */ + case 3: /* Home Segment Table Descriptor */ + /* user space address */ + break; + } + + + /* + * When we get here, the fault happened in the current + * task's user address space, so we search the VMAs + */ + + down(&mm->mmap_sem); + + vma = find_vma(mm, address); + if (!vma) { + printk("no vma for address %lX\n",address); + goto bad_area; + } + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; +/* + * Ok, we have a good vm_area for this memory access, so + * we can handle it..
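+ * The low byte of the interruption code below distinguishes a + * protection exception (0x04, a write to a protected page) from the + * not-present cases (0x10, 0x11 and 0x3B).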
+ */ +good_area: + write = 0; + si_code = SEGV_ACCERR; + + switch (error_code & 0xFF) { + case 0x04: /* write, present*/ + write = 1; + break; + case 0x10: /* not present*/ + case 0x11: /* not present*/ + case 0x3B: /* not present*/ + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) + goto bad_area; + break; + default: + printk("code should be 4, 10, 11 or 3B (%lX) \n",error_code&0xFF); + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + switch (handle_mm_fault(mm, vma, address, write)) { + case 1: + tsk->min_flt++; + break; + case 2: + tsk->maj_flt++; + break; + case 0: + goto do_sigbus; + default: + goto out_of_memory; + } + + up(&mm->mmap_sem); + return; + +/* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + up(&mm->mmap_sem); + + /* User mode accesses just cause a SIGSEGV */ + if (psw_mask & PSW_PROBLEM_STATE) { + struct siginfo si; + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; +#ifndef CONFIG_SYSCTL +#ifdef CONFIG_PROCESS_DEBUG + printk("User process fault: interruption code 0x%lX\n",error_code); + printk("failing address: %lX\n",address); + show_regs(regs); +#endif +#else + if (sysctl_userprocess_debug) { + printk("User process fault: interruption code 0x%lX\n", + error_code); + printk("failing address: %lX\n", address); + show_regs(regs); + } +#endif + si.si_signo = SIGSEGV; + si.si_code = si_code; + si.si_addr = (void*) address; + force_sig_info(SIGSEGV, &si, tsk); + return; + } + +no_context: + /* Are we prepared to handle this kernel fault? */ + if ((fixup = search_exception_table(regs->psw.addr)) != 0) { + regs->psw.addr = fixup; + return; + } + +/* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + if (kernel_address) + printk(KERN_ALERT "Unable to handle kernel pointer dereference" + " at virtual kernel address %016lx\n", address); + else + printk(KERN_ALERT "Unable to handle kernel paging request" + " at virtual user address %016lx\n", address); + +/* + * need to define which information is useful here + */ + + die("Oops", regs, error_code); + do_exit(SIGKILL); + + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. +*/ +out_of_memory: + up(&mm->mmap_sem); + printk("VM: killing process %s\n", tsk->comm); + if (psw_mask & PSW_PROBLEM_STATE) + do_exit(SIGKILL); + goto no_context; + +do_sigbus: + up(&mm->mmap_sem); + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; + force_sig(SIGBUS, tsk); + + /* Kernel mode?
Handle exceptions or die */ + if (!(psw_mask & PSW_PROBLEM_STATE)) + goto no_context; +} diff --git a/arch/s390x/mm/init.c b/arch/s390x/mm/init.c new file mode 100644 index 000000000..91d66ccfe --- /dev/null +++ b/arch/s390x/mm/init.c @@ -0,0 +1,405 @@ +/* + * arch/s390/mm/init.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hpenner@de.ibm.com) + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_BLK_DEV_INITRD +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static unsigned long totalram_pages; + +/* + * empty_bad_page is the page that is used for page faults when linux + * is out-of-memory. Older versions of linux just did a + * do_exit(), but using this instead means there is less risk + * for a process dying in kernel mode, possibly leaving an inode + * unused etc.. + * + * empty_bad_pte_table is the accompanying page-table: it is initialized + * to point to BAD_PAGE entries. + * + * empty_bad_pmd_table is the accompanying segment table: it is initialized + * to point to empty_bad_pte_table page tables. + * + * ZERO_PAGE is a special page that is used for zero-initialized + * data and COW. + */ + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); +char empty_bad_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +pmd_t empty_bad_pmd_table[PTRS_PER_PMD] __attribute__((__aligned__(PAGE_SIZE))); +pte_t empty_bad_pte_table[PTRS_PER_PTE] __attribute__((__aligned__(PAGE_SIZE))); + +static int test_access(unsigned long loc) +{ + static const int ssm_mask = 0x07000000L; + int rc, i; + + rc = 0; + for (i=0; i<2; i++) { + __asm__ __volatile__( + " slgr %0,%0\n" + " ssm %1\n" + " tprot 0(%2),0\n" + "0: jne 1f\n" + " lghi %0,1\n" + "1: ssm %3\n" + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous" + : "+&d" (rc) : "i" (0), "a" (loc), "m" (ssm_mask) + : "cc"); + if (rc == 0) + break; + loc += 0x100000; + } + return rc; +} + +static pmd_t *get_bad_pmd_table(void) +{ + pmd_t v; + int i; + + pmd_set(&v, empty_bad_pte_table); + + for (i = 0; i < PTRS_PER_PMD; i++) + empty_bad_pmd_table[i] = v; + + return empty_bad_pmd_table; +} + +static pte_t *get_bad_pte_table(void) +{ + pte_t v; + int i; + + v = pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED)); + + for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) + empty_bad_pte_table[i] = v; + + return empty_bad_pte_table; +} + +pmd_t * +get_pmd_slow(pgd_t *pgd, unsigned long offset) +{ + pmd_t *pmd; + int i; + + pmd = (pmd_t *) __get_free_pages(GFP_KERNEL,2); + if (pgd_none(*pgd)) { + if (pmd) { + for (i = 0; i < PTRS_PER_PMD; i++) + pmd_clear(pmd+i); + pgd_set(pgd, pmd); + return pmd + offset; + } + pmd = (pmd_t *) get_bad_pmd_table(); + pgd_set(pgd, pmd); + return NULL; + } + free_pages((unsigned long)pmd,2); + if (pgd_bad(*pgd)) + BUG(); + return (pmd_t *) pgd_page(*pgd) + offset; +} + +pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) +{ + pte_t *pte; + int i; + + pte = (pte_t*) __get_free_page(GFP_KERNEL); + if (pmd_none(*pmd)) { + if (pte) { + for (i=0;i<PTRS_PER_PTE;i++) + pte_clear(pte+i); + pmd_set(pmd,pte); + return pte + offset; + } + pte = (pte_t*) get_bad_pte_table(); + pmd_set(pmd,pte); + return NULL; + } + free_page((unsigned long)pte); + if (pmd_bad(*pmd)) + BUG(); + return (pte_t *) pmd_page(*pmd) + offset; +} + +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed += 4; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed += 4; + 
if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + +void show_mem(void) +{ + int i, total = 0,reserved = 0; + int shared = 0, cached = 0; + + printk("Mem-info:\n"); + show_free_areas(); + printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); + i = max_mapnr; + while (i-- > 0) { + total++; + if (PageReserved(mem_map+i)) + reserved++; + else if (PageSwapCache(mem_map+i)) + cached++; + else if (page_count(mem_map+i)) + shared += atomic_read(&mem_map[i].count) - 1; + } + printk("%d pages of RAM\n",total); + printk("%d reserved pages\n",reserved); + printk("%d pages shared\n",shared); + printk("%d pages swap cached\n",cached); + printk("%ld pages in page table cache\n",pgtable_cache_size); + show_buffers(); +} + +/* References to section boundaries */ + +extern unsigned long _text; +extern unsigned long _etext; +extern unsigned long _edata; +extern unsigned long __bss_start; +extern unsigned long _end; + +extern unsigned long __init_begin; +extern unsigned long __init_end; + +/* + * paging_init() sets up the page tables + */ + +unsigned long last_valid_pfn; + +void __init paging_init(void) +{ + pgd_t * pg_dir; + pmd_t * pm_dir; + pte_t * pt_dir; + pte_t pte; + int i,j,k; + unsigned long address=0; + unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | + _REGION_TABLE; + unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE); + static const int ssm_mask = 0x04000000L; + + unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; + unsigned long dma_pfn, high_pfn; + + dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; + high_pfn = max_low_pfn; + + if (dma_pfn > high_pfn) + zones_size[ZONE_DMA] = high_pfn; + else { + zones_size[ZONE_DMA] = dma_pfn; + zones_size[ZONE_NORMAL] = high_pfn - dma_pfn; + } + + /* Initialize mem_map[]. 
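Everything below MAX_DMA_ADDRESS lands in ZONE_DMA and the rest in ZONE_NORMAL; there is no ZONE_HIGHMEM on this architecture.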
*/ + free_area_init(zones_size); + + + /* + * map whole physical memory to virtual memory (identity mapping) + */ + + pg_dir = swapper_pg_dir; + + for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) { + + if (address >= end_mem) { + pgd_clear(pg_dir); + continue; + } + + pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4); + pgd_set(pg_dir,pm_dir); + + for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { + if (address >= end_mem) { + pmd_clear(pm_dir); + continue; + } + + pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + pmd_set(pm_dir,pt_dir); + + for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { + pte = mk_pte_phys(address, PAGE_KERNEL); + if (address >= end_mem) { + pte_clear(&pte); + continue; + } + set_pte(pt_dir, pte); + address += PAGE_SIZE; + } + } + } + + /* enable virtual mapping in kernel mode */ + __asm__ __volatile__("lctlg 1,1,%0\n\t" + "lctlg 7,7,%0\n\t" + "lctlg 13,13,%0\n\t" + "ssm %1" + : :"m" (pgdir_k), "m" (ssm_mask)); + + local_flush_tlb(); + + return; +} + +void __init mem_init(void) +{ + unsigned long codesize, reservedpages, datasize, initsize; + unsigned long tmp; + + max_mapnr = num_physpages = max_low_pfn; + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); + + /* clear the zero-page */ + memset(empty_zero_page, 0, PAGE_SIZE); + + /* this will put all low memory onto the freelists */ + totalram_pages += free_all_bootmem(); + + /* mark usable pages in the mem_map[] and count reserved pages */ + reservedpages = 0; + tmp = 0; + do { + if (tmp && (tmp & 0x1ff) == 0 && + test_access(tmp * PAGE_SIZE) == 0) { + printk("2M Segment 0x%016lX not available\n", + tmp * PAGE_SIZE); + do { + set_bit(PG_reserved, &mem_map[tmp].flags); + reservedpages++; + tmp++; + } while (tmp < max_low_pfn && (tmp & 0x1ff)); + } else { + if (PageReserved(mem_map+tmp)) + reservedpages++; + tmp++; + } + } while (tmp < max_low_pfn); + + codesize = (unsigned long) &_etext - (unsigned long) &_text; + datasize = (unsigned long) &_edata - (unsigned long) &_etext; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + max_mapnr << (PAGE_SHIFT-10), + codesize >> 10, + reservedpages << (PAGE_SHIFT-10), + datasize >>10, + initsize >> 10); +} + +void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long)(&__init_begin); + for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); + free_page(addr); + totalram_pages++; + } + printk ("Freeing unused kernel memory: %ldk freed\n", + (&__init_end - &__init_begin) >> 10); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (start < end) + printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); + free_page(start); + totalram_pages++; + } +} +#endif + +void si_meminfo(struct sysinfo *val) +{ + val->totalram = totalram_pages; + val->sharedram = 0; + val->freeram = nr_free_pages(); + val->bufferram = atomic_read(&buffermem_pages); + val->totalhigh = 0; + val->freehigh = 0; + val->mem_unit = PAGE_SIZE; +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. 
This must remain at the end + of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-indent-level: 4 + * c-brace-imaginary-offset: 0 + * c-brace-offset: -4 + * c-argdecl-indent: 4 + * c-label-offset: -4 + * c-continued-statement-offset: 4 + * c-continued-brace-offset: 0 + * indent-tabs-mode: nil + * tab-width: 8 + * End: + */ diff --git a/arch/s390x/mm/ioremap.c b/arch/s390x/mm/ioremap.c new file mode 100644 index 000000000..38acc4a22 --- /dev/null +++ b/arch/s390x/mm/ioremap.c @@ -0,0 +1,129 @@ +/* + * arch/s390/mm/ioremap.c + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Hartmut Penner (hp@de.ibm.com) + * + * Derived from "arch/i386/mm/ioremap.c" + * (C) Copyright 1995 1996 Linus Torvalds + * + * Re-map IO memory to kernel address space so that we can access it. + * This is needed for high PCI addresses that aren't mapped in the + * 640k-1MB IO memory area on PC's + */ + +#include +#include +#include + +static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + + address &= ~PMD_MASK; + end = address + size; + if (end > PMD_SIZE) + end = PMD_SIZE; + if (address >= end) + BUG(); + do { + if (!pte_none(*pte)) { + printk("remap_area_pte: page already exists\n"); + BUG(); + } + set_pte(pte, mk_pte_phys(phys_addr, + __pgprot(_PAGE_PRESENT | flags))); + address += PAGE_SIZE; + phys_addr += PAGE_SIZE; + pte++; + } while (address && (address < end)); +} + +static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + + address &= ~PGDIR_MASK; + end = address + size; + if (end > PGDIR_SIZE) + end = PGDIR_SIZE; + phys_addr -= address; + if (address >= end) + BUG(); + do { + pte_t * pte = pte_alloc_kernel(pmd, address); + if (!pte) + return -ENOMEM; + remap_area_pte(pte, address, end - address, address + phys_addr, flags); + address = (address + PMD_SIZE) & PMD_MASK; + pmd++; + } while (address && (address < end)); + return 0; +} + +static int remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + pgd_t * dir; + unsigned long end = address + size; + + phys_addr -= address; + dir = pgd_offset(&init_mm, address); + flush_cache_all(); + if (address >= end) + BUG(); + do { + pmd_t *pmd = pmd_alloc_kernel(dir, address); + if (!pmd) + return -ENOMEM; + if (remap_area_pmd(pmd, address, end - address, + phys_addr + address, flags)) + return -ENOMEM; + set_pgdir(address, *dir); + address = (address + PGDIR_SIZE) & PGDIR_MASK; + dir++; + } while (address && (address < end)); + flush_tlb_all(); + return 0; +} + +/* + * Generic mapping function (not visible outside): + */ + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly.
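+ * Physical addresses below high_memory are already covered by the + * identity mapping set up in paging_init(), so __ioremap simply + * returns the existing virtual alias for those and only builds new + * page tables for higher ranges.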
+ */ +void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +{ + void * addr; + struct vm_struct * area; + + if (phys_addr < virt_to_phys(high_memory)) + return phys_to_virt(phys_addr); + if (phys_addr & ~PAGE_MASK) + return NULL; + size = PAGE_ALIGN(size); + if (!size || size > phys_addr + size) + return NULL; + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + addr = area->addr; + if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) { + vfree(addr); + return NULL; + } + return addr; +} + +void iounmap(void *addr) +{ + if (addr > high_memory) + return vfree(addr); +} diff --git a/arch/s390x/tools/dasdfmt/Makefile b/arch/s390x/tools/dasdfmt/Makefile new file mode 100644 index 000000000..b60641bbc --- /dev/null +++ b/arch/s390x/tools/dasdfmt/Makefile @@ -0,0 +1,9 @@ +all: dasdfmt + +dasdfmt: dasdfmt.c + $(CC) -o $@ $^ + $(STRIP) $@ + +clean: + rm -f dasdfmt + diff --git a/arch/s390x/tools/dasdfmt/dasdfmt.8 b/arch/s390x/tools/dasdfmt/dasdfmt.8 new file mode 100644 index 000000000..9e6a4e89e --- /dev/null +++ b/arch/s390x/tools/dasdfmt/dasdfmt.8 @@ -0,0 +1,68 @@ +.TH DASDFMT 8 "Tue Jan 25 2000" +.UC 4 +.SH NAME +dasdfmt \- formatting of DASD (ECKD) disk drives. +.SH SYNOPSIS +\fBdasdfmt\fR [-tvyLV] [-b \fIblockSize\fR] [-l \fIdiskLabel\fR] \fIdiskSpec\fR +.SH DESCRIPTION +\fBdasdfmt\fR formats a DASD (ECKD) disk drive to prepare it +for usage with Linux for S/390. \fBWARNING\fR: Incautious usage of +\fBdasdfmt\fR can result in \fBLOSS OF DATA\fR. + +.SH OPTIONS +.TP +\fB-t\fR +Disables any modification of the disk drive. \fBdasdfmt\fR just prints +out what it \fBwould\fR do. + +.TP +\fB-v\fR +Increases verbosity. + +.TP +\fB-y\fR +Start formatting without further user confirmation. + +.TP +\fB-L\fR +Omit the writing of a disk label after formatting. + +.TP +\fB-V\fR +Print version number and exit. + +.TP +\fB-b\fR \fIblockSize\fR +Specify blocksize to be used. \fIblocksize\fR must be a positive integer +and a power of two. Due to some limitations in the driver, +it is \fBstrongly\fR recommended to use a \fIblockSize\fR of \fI4096\fR. + +.TP +\fB-l\fR \fIdiskLabel\fR +Specify the label to be written to disk after formatting. If no label is +specified, a sensible default is used. \fIdiskLabel\fR is interpreted as +an ASCII string and is automatically converted to EBCDIC. + +.TP +\fIdiskSpec\fR +This parameter specifies the device to be formatted. It can be +given in two variants: +.sp + \fB-f\fR \fB/dev/dasd\fR\fIX\fR +.br +or +.br + \fB-n\fR \fIdevnum\fR +.sp +The first form uses the commonly used +.SM UNIX +device notation where \fIX\fR is a single lowercase letter. +The second form simply uses the device number.
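+ +.SH EXAMPLE +An illustrative invocation (the device name and label are only examples): +format the volume attached as /dev/dasda with the recommended 4096-byte +blocks and write a label: +.sp + \fBdasdfmt -b 4096 -l linux1 -f /dev/dasda\fR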
+ +.SH BUGS +None so far ;-) + +.SH AUTHOR +.nf +This man-page was written by Fritz Elfert +.fi diff --git a/arch/s390x/tools/dasdfmt/dasdfmt.c b/arch/s390x/tools/dasdfmt/dasdfmt.c new file mode 100644 index 000000000..2820fc91d --- /dev/null +++ b/arch/s390x/tools/dasdfmt/dasdfmt.c @@ -0,0 +1,830 @@ +/* + * + * dasdfmt.c + * + * S390 version + * Copyright (C) 1999,2000 IBM Corporation + * Author(s): Utz Bacher, + * + * Device-in-use-checks by Fritz Elfert, + * + * Still to do: + * detect non-switch parameters ("dasdfmt -n 170 XY") and complain about them + */ + +/* #define _LINUX_BLKDEV_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define __KERNEL__ /* we want to use kdev_t and not have to define it */ +#include +#undef __KERNEL__ + +#include +#include +#include + +#define EXIT_MISUSE 1 +#define EXIT_BUSY 2 +#define TEMPFILENAME "/tmp/ddfXXXXXX" +#define TEMPFILENAMECHARS 8 /* 8 characters are fixed in all temp filenames */ +#define SLASHDEV "/dev/" +#define PROC_DASD_DEVICES "/proc/dasd/devices" +/* _PATH_MOUNTED is /etc/mtab - /proc/mounts does not show root-fs correctly */ +#define PROC_MOUNTS _PATH_MOUNTED +#define PROC_SWAPS "/proc/swaps" +#define DASD_DRIVER_NAME "dasd" +#define LABEL_LENGTH 10 +#define PROC_LINE_LENGTH 80 +#define ERR_LENGTH 80 + +#define MAX_FILELEN NAME_MAX+PATH_MAX + +#define GIVEN_DEVNO 1 +#define GIVEN_MAJOR 2 +#define GIVEN_MINOR 4 + +#define CHECK_START 1 +#define CHECK_END 2 +#define CHECK_BLKSIZE 4 +#define CHECK_ALL ~0 + +#define ERRMSG(x...) {fflush(stdout);fprintf(stderr,x);} +#define ERRMSG_EXIT(ec,x...) {fflush(stdout);fprintf(stderr,x);exit(ec);} + +#define CHECK_SPEC_MAX_ONCE(i,str) \ + {if (i>1) \ + ERRMSG_EXIT(EXIT_MISUSE,"%s: " str " " \ + "can only be specified once\n",prog_name);} + +#define PARSE_PARAM_INTO(x,param,base,str) \ + {x=(int)strtol(param,&endptr,base); \ + if (*endptr) \ + ERRMSG_EXIT(EXIT_MISUSE,"%s: " str " " \ + "is in invalid format\n",prog_name);} + +char *prog_name;/*="dasdfmt";*/ +char tempfilename[]=TEMPFILENAME; + +__u8 _ascebc[256] = +{ + /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ + 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, + /*08 BS HT LF VT FF CR SO SI */ + /* ->NL */ + 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ + 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, + /*18 CAN EM SUB ESC FS GS RS US */ + /* ->IGS ->IRS ->IUS */ + 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, + /*20 SP ! " # $ % & ' */ + 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, + /*28 ( ) * + , - . / */ + 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, + /*30 0 1 2 3 4 5 6 7 */ + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + /*38 8 9 : ; < = > ? 
*/ + 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, + /*40 @ A B C D E F G */ + 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + /*48 H I J K L M N O */ + 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + /*50 P Q R S T U V W */ + 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, + /*58 X Y Z [ \ ] ^ _ */ + 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, + /*60 ` a b c d e f g */ + 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + /*68 h i j k l m n o */ + 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + /*70 p q r s t u v w */ + 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, + /*78 x y z { | } ~ DL */ + 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, + /*80*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*88*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*90*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*98*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*A8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*B8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*C8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*D8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E0 sz */ + 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*E8*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F0*/ + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + /*F8*/ + 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF +}; + +void convert_label(char *str) +{ + int i; + for (i=0;i] [-b ] [] " \ + "\n\n",prog_name); +#else /* RANGE_FORMATTING */ + printf("Usage: %s [-htvyLV] [-l
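To make the wrapper pattern at the top of this section concrete: every sys32 wrapper performs one of three register conversions per argument before tail-branching (jg) to the 64-bit system call. lgfr sign-extends a signed 32-bit argument, llgfr zero-extends an unsigned one, and llgtr zero-extends while also clearing the topmost bit, turning a 31-bit user pointer into a clean 64-bit address. Below is a minimal user-space C sketch of the three conversions; the helper names only mimic the instruction mnemonics, and the program is illustrative rather than part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Models of the three conversions used by the 31-bit syscall wrappers. */
static int64_t  lgfr(int32_t r)   { return (int64_t)r; }      /* sign-extend, e.g. an int */
static uint64_t llgfr(uint32_t r) { return (uint64_t)r; }     /* zero-extend, e.g. a uid_t or size_t */
static uint64_t llgtr(uint32_t r) { return r & 0x7fffffffu; } /* 31-bit pointer: clear the high bit */

int main(void)
{
	uint32_t raw = 0x80000004u; /* a 31-bit address with the addressing-mode bit set */

	printf("lgfr : %016llx\n", (unsigned long long)(uint64_t)lgfr((int32_t)raw));
	printf("llgfr: %016llx\n", (unsigned long long)llgfr(raw));
	printf("llgtr: %016llx\n", (unsigned long long)llgtr(raw));
	return 0;
}

Only llgtr yields a usable address here (0x0000000000000004); the other two keep or propagate the high bit, which is why the wrappers route pointer arguments through llgtr and plain integers through lgfr or llgfr.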