summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2001-01-31 22:22:27 +0000
committerRalf Baechle <ralf@linux-mips.org>2001-01-31 22:22:27 +0000
commit825423e4c4f18289df2393951cfd2a7a31fc0464 (patch)
tree4ad80e981c3d9effa910d2247d118d254f9a5d09
parentc4693dc4856ab907a5c02187a8d398861bebfc7e (diff)
Merge with Linux 2.4.1.
-rw-r--r--CREDITS9
-rw-r--r--Documentation/Changes8
-rw-r--r--Documentation/Configure.help166
-rw-r--r--Documentation/cachetlb.txt28
-rw-r--r--Documentation/usb/hotplug.txt19
-rw-r--r--Documentation/usb/usb-serial.txt2
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile8
-rw-r--r--arch/alpha/kernel/Makefile8
-rw-r--r--arch/alpha/kernel/osf_sys.c1
-rw-r--r--arch/alpha/kernel/signal.c1
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c2
-rw-r--r--arch/arm/kernel/signal.c2
-rw-r--r--arch/i386/Makefile2
-rw-r--r--arch/i386/boot/bootsect.S13
-rw-r--r--arch/i386/config.in14
-rw-r--r--arch/i386/defconfig17
-rw-r--r--arch/i386/kernel/i387.c71
-rw-r--r--arch/i386/kernel/io_apic.c5
-rw-r--r--arch/i386/kernel/pci-irq.c113
-rw-r--r--arch/i386/kernel/setup.c24
-rw-r--r--arch/i386/kernel/signal.c2
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/lib/mmx.c34
-rw-r--r--arch/i386/mm/init.c12
-rw-r--r--arch/ia64/ia32/sys_ia32.c3
-rw-r--r--arch/m68k/kernel/signal.c2
-rw-r--r--arch/mips/defconfig3
-rw-r--r--arch/mips/defconfig-atlas3
-rw-r--r--arch/mips/defconfig-cp70003
-rw-r--r--arch/mips/defconfig-ddb54763
-rw-r--r--arch/mips/defconfig-decstation3
-rw-r--r--arch/mips/defconfig-ev641203
-rw-r--r--arch/mips/defconfig-ev961003
-rw-r--r--arch/mips/defconfig-ip223
-rw-r--r--arch/mips/defconfig-malta3
-rw-r--r--arch/mips/defconfig-rm2003
-rw-r--r--arch/mips/kernel/irixsig.c2
-rw-r--r--arch/mips/kernel/signal.c2
-rw-r--r--arch/mips64/defconfig3
-rw-r--r--arch/mips64/defconfig-ip223
-rw-r--r--arch/mips64/defconfig-ip273
-rw-r--r--arch/mips64/kernel/linux32.c3
-rw-r--r--arch/mips64/kernel/signal.c2
-rw-r--r--arch/mips64/kernel/signal32.c2
-rw-r--r--arch/ppc/8260_io/enet.c3
-rw-r--r--arch/ppc/8260_io/fcc_enet.c3
-rw-r--r--arch/ppc/8xx_io/enet.c4
-rw-r--r--arch/ppc/8xx_io/fec.c3
-rw-r--r--arch/ppc/Makefile3
-rw-r--r--arch/ppc/boot/Makefile28
-rw-r--r--arch/ppc/boot/vreset.c10
-rw-r--r--arch/ppc/chrpboot/Makefile10
-rw-r--r--arch/ppc/coffboot/Makefile7
-rw-r--r--arch/ppc/coffboot/coffmain.c62
-rw-r--r--arch/ppc/config.in108
-rw-r--r--arch/ppc/configs/apus_defconfig273
-rw-r--r--arch/ppc/configs/bseip_defconfig9
-rw-r--r--arch/ppc/configs/common_defconfig203
-rw-r--r--arch/ppc/configs/est8260_defconfig23
-rw-r--r--arch/ppc/configs/ibmchrp_defconfig636
-rw-r--r--arch/ppc/configs/mbx_defconfig9
-rw-r--r--arch/ppc/configs/oak_defconfig11
-rw-r--r--arch/ppc/configs/power3_defconfig662
-rw-r--r--arch/ppc/configs/rpxcllf_defconfig9
-rw-r--r--arch/ppc/configs/rpxlite_defconfig9
-rw-r--r--arch/ppc/configs/walnut_defconfig11
-rw-r--r--arch/ppc/defconfig203
-rw-r--r--arch/ppc/kernel/Makefile6
-rw-r--r--arch/ppc/kernel/apus_setup.c22
-rw-r--r--arch/ppc/kernel/chrp_pci.c630
-rw-r--r--arch/ppc/kernel/chrp_setup.c130
-rw-r--r--arch/ppc/kernel/entry.S17
-rw-r--r--arch/ppc/kernel/error_log.c183
-rw-r--r--arch/ppc/kernel/error_log.h95
-rw-r--r--arch/ppc/kernel/feature.c108
-rw-r--r--arch/ppc/kernel/galaxy_pci.c69
-rw-r--r--arch/ppc/kernel/hashtable.S53
-rw-r--r--arch/ppc/kernel/head.S97
-rw-r--r--arch/ppc/kernel/i8259.c33
-rw-r--r--arch/ppc/kernel/idle.c79
-rw-r--r--arch/ppc/kernel/indirect_pci.c148
-rw-r--r--arch/ppc/kernel/irq.c409
-rw-r--r--arch/ppc/kernel/local_irq.h3
-rw-r--r--arch/ppc/kernel/m8260_setup.c43
-rw-r--r--arch/ppc/kernel/m8xx_setup.c32
-rw-r--r--arch/ppc/kernel/misc.S220
-rw-r--r--arch/ppc/kernel/open_pic.c704
-rw-r--r--arch/ppc/kernel/open_pic.h42
-rw-r--r--arch/ppc/kernel/open_pic_defs.h328
-rw-r--r--arch/ppc/kernel/pci.c599
-rw-r--r--arch/ppc/kernel/pci.h46
-rw-r--r--arch/ppc/kernel/pmac_pci.c916
-rw-r--r--arch/ppc/kernel/pmac_pic.c148
-rw-r--r--arch/ppc/kernel/pmac_setup.c170
-rw-r--r--arch/ppc/kernel/pmac_time.c4
-rw-r--r--arch/ppc/kernel/ppc_asm.h6
-rw-r--r--arch/ppc/kernel/ppc_htab.c3
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c46
-rw-r--r--arch/ppc/kernel/prep_nvram.c34
-rw-r--r--arch/ppc/kernel/prep_pci.c336
-rw-r--r--arch/ppc/kernel/prep_setup.c186
-rw-r--r--arch/ppc/kernel/proc_rtas.c784
-rw-r--r--arch/ppc/kernel/process.c17
-rw-r--r--arch/ppc/kernel/prom.c206
-rw-r--r--arch/ppc/kernel/setup.c51
-rw-r--r--arch/ppc/kernel/signal.c2
-rw-r--r--arch/ppc/kernel/smp.c1043
-rw-r--r--arch/ppc/kernel/time.c44
-rw-r--r--arch/ppc/kernel/traps.c36
-rw-r--r--arch/ppc/lib/Makefile2
-rw-r--r--arch/ppc/lib/locks.c5
-rw-r--r--arch/ppc/mm/fault.c19
-rw-r--r--arch/ppc/mm/init.c415
-rw-r--r--arch/ppc/xmon/start.c64
-rw-r--r--arch/ppc/xmon/xmon.c3
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/sh/boot/compressed/head.S2
-rw-r--r--arch/sh/kernel/entry.S738
-rw-r--r--arch/sh/kernel/fpu.c212
-rw-r--r--arch/sh/kernel/head.S44
-rw-r--r--arch/sh/kernel/irq.c2
-rw-r--r--arch/sh/kernel/irq_imask.c8
-rw-r--r--arch/sh/kernel/process.c32
-rw-r--r--arch/sh/kernel/setup.c4
-rw-r--r--arch/sh/kernel/sh_bios.c18
-rw-r--r--arch/sh/kernel/sh_ksyms.c5
-rw-r--r--arch/sh/kernel/signal.c6
-rw-r--r--arch/sh/kernel/traps.c446
-rw-r--r--arch/sh/lib/checksum.S157
-rw-r--r--arch/sh/lib/delay.c6
-rw-r--r--arch/sh/mm/cache.c16
-rw-r--r--arch/sparc/config.in5
-rw-r--r--arch/sparc/defconfig29
-rw-r--r--arch/sparc/kernel/entry.S3
-rw-r--r--arch/sparc/kernel/pcic.c89
-rw-r--r--arch/sparc/kernel/semaphore.c2
-rw-r--r--arch/sparc/kernel/signal.c5
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c3
-rw-r--r--arch/sparc/kernel/sys_sunos.c3
-rw-r--r--arch/sparc/kernel/time.c29
-rw-r--r--arch/sparc64/config.in6
-rw-r--r--arch/sparc64/defconfig12
-rw-r--r--arch/sparc64/kernel/ioctl32.c13
-rw-r--r--arch/sparc64/kernel/pci.c9
-rw-r--r--arch/sparc64/kernel/pci_iommu.c20
-rw-r--r--arch/sparc64/kernel/pci_psycho.c54
-rw-r--r--arch/sparc64/kernel/pci_sabre.c60
-rw-r--r--arch/sparc64/kernel/pci_schizo.c169
-rw-r--r--arch/sparc64/kernel/signal.c5
-rw-r--r--arch/sparc64/kernel/signal32.c5
-rw-r--r--arch/sparc64/kernel/smp.c4
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c3
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c5
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c3
-rw-r--r--arch/sparc64/kernel/time.c39
-rw-r--r--drivers/acpi/Makefile14
-rw-r--r--drivers/acpi/acpi_ksyms.c83
-rw-r--r--drivers/acpi/cmbatt.c26
-rw-r--r--drivers/acpi/common/cmalloc.c8
-rw-r--r--drivers/acpi/common/cmclib.c9
-rw-r--r--drivers/acpi/common/cmcopy.c12
-rw-r--r--drivers/acpi/common/cmdebug.c6
-rw-r--r--drivers/acpi/common/cmdelete.c4
-rw-r--r--drivers/acpi/common/cmeval.c24
-rw-r--r--drivers/acpi/common/cmglobal.c13
-rw-r--r--drivers/acpi/common/cminit.c49
-rw-r--r--drivers/acpi/common/cmobject.c6
-rw-r--r--drivers/acpi/common/cmutils.c16
-rw-r--r--drivers/acpi/common/cmxface.c18
-rw-r--r--drivers/acpi/cpu.c51
-rw-r--r--drivers/acpi/dispatcher/dsfield.c4
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c4
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c47
-rw-r--r--drivers/acpi/dispatcher/dsobject.c20
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c12
-rw-r--r--drivers/acpi/dispatcher/dsutils.c6
-rw-r--r--drivers/acpi/dispatcher/dswexec.c16
-rw-r--r--drivers/acpi/dispatcher/dswload.c4
-rw-r--r--drivers/acpi/dispatcher/dswscope.c4
-rw-r--r--drivers/acpi/dispatcher/dswstate.c59
-rw-r--r--drivers/acpi/driver.c28
-rw-r--r--drivers/acpi/ec.c29
-rw-r--r--drivers/acpi/events/evevent.c4
-rw-r--r--drivers/acpi/events/evmisc.c4
-rw-r--r--drivers/acpi/events/evregion.c24
-rw-r--r--drivers/acpi/events/evrgnini.c4
-rw-r--r--drivers/acpi/events/evsci.c4
-rw-r--r--drivers/acpi/events/evxface.c244
-rw-r--r--drivers/acpi/events/evxfevnt.c4
-rw-r--r--drivers/acpi/events/evxfregn.c4
-rw-r--r--drivers/acpi/hardware/hwacpi.c52
-rw-r--r--drivers/acpi/hardware/hwgpe.c5
-rw-r--r--drivers/acpi/hardware/hwregs.c57
-rw-r--r--drivers/acpi/hardware/hwsleep.c189
-rw-r--r--drivers/acpi/hardware/hwtimer.c196
-rw-r--r--drivers/acpi/include/accommon.h15
-rw-r--r--drivers/acpi/include/acconfig.h10
-rw-r--r--drivers/acpi/include/acdebug.h4
-rw-r--r--drivers/acpi/include/acdispat.h4
-rw-r--r--drivers/acpi/include/acenv.h30
-rw-r--r--drivers/acpi/include/acevents.h4
-rw-r--r--drivers/acpi/include/acexcep.h10
-rw-r--r--drivers/acpi/include/acgcc.h4
-rw-r--r--drivers/acpi/include/acglobal.h8
-rw-r--r--drivers/acpi/include/achware.h98
-rw-r--r--drivers/acpi/include/acinterp.h93
-rw-r--r--drivers/acpi/include/aclinux.h4
-rw-r--r--drivers/acpi/include/aclocal.h130
-rw-r--r--drivers/acpi/include/acmacros.h29
-rw-r--r--drivers/acpi/include/acnamesp.h4
-rw-r--r--drivers/acpi/include/acobject.h14
-rw-r--r--drivers/acpi/include/acoutput.h5
-rw-r--r--drivers/acpi/include/acparser.h4
-rw-r--r--drivers/acpi/include/acpi.h4
-rw-r--r--drivers/acpi/include/acpiosxf.h6
-rw-r--r--drivers/acpi/include/acpixf.h6
-rw-r--r--drivers/acpi/include/acresrc.h4
-rw-r--r--drivers/acpi/include/actables.h4
-rw-r--r--drivers/acpi/include/actbl.h4
-rw-r--r--drivers/acpi/include/actbl1.h4
-rw-r--r--drivers/acpi/include/actbl2.h8
-rw-r--r--drivers/acpi/include/actbl71.h4
-rw-r--r--drivers/acpi/include/actypes.h44
-rw-r--r--drivers/acpi/include/amlcode.h49
-rw-r--r--drivers/acpi/interpreter/amconfig.c10
-rw-r--r--drivers/acpi/interpreter/amconvrt.c525
-rw-r--r--drivers/acpi/interpreter/amcreate.c6
-rw-r--r--drivers/acpi/interpreter/amdyadic.c369
-rw-r--r--drivers/acpi/interpreter/amfield.c9
-rw-r--r--drivers/acpi/interpreter/amfldio.c84
-rw-r--r--drivers/acpi/interpreter/ammisc.c70
-rw-r--r--drivers/acpi/interpreter/ammonad.c68
-rw-r--r--drivers/acpi/interpreter/amnames.c4
-rw-r--r--drivers/acpi/interpreter/amprep.c4
-rw-r--r--drivers/acpi/interpreter/amregion.c10
-rw-r--r--drivers/acpi/interpreter/amresnte.c26
-rw-r--r--drivers/acpi/interpreter/amresolv.c30
-rw-r--r--drivers/acpi/interpreter/amresop.c89
-rw-r--r--drivers/acpi/interpreter/amstore.c455
-rw-r--r--drivers/acpi/interpreter/amstoren.c490
-rw-r--r--drivers/acpi/interpreter/amstorob.c524
-rw-r--r--drivers/acpi/interpreter/amsystem.c8
-rw-r--r--drivers/acpi/interpreter/amutils.c21
-rw-r--r--drivers/acpi/interpreter/amxface.c4
-rw-r--r--drivers/acpi/namespace/nsaccess.c16
-rw-r--r--drivers/acpi/namespace/nsalloc.c4
-rw-r--r--drivers/acpi/namespace/nseval.c4
-rw-r--r--drivers/acpi/namespace/nsinit.c15
-rw-r--r--drivers/acpi/namespace/nsload.c4
-rw-r--r--drivers/acpi/namespace/nsnames.c4
-rw-r--r--drivers/acpi/namespace/nsobject.c6
-rw-r--r--drivers/acpi/namespace/nssearch.c6
-rw-r--r--drivers/acpi/namespace/nsutils.c4
-rw-r--r--drivers/acpi/namespace/nswalk.c4
-rw-r--r--drivers/acpi/namespace/nsxfname.c4
-rw-r--r--drivers/acpi/namespace/nsxfobj.c5
-rw-r--r--drivers/acpi/os.c2
-rw-r--r--drivers/acpi/parser/psargs.c4
-rw-r--r--drivers/acpi/parser/psopcode.c171
-rw-r--r--drivers/acpi/parser/psparse.c10
-rw-r--r--drivers/acpi/parser/psscope.c4
-rw-r--r--drivers/acpi/parser/pstree.c4
-rw-r--r--drivers/acpi/parser/psutils.c4
-rw-r--r--drivers/acpi/parser/pswalk.c4
-rw-r--r--drivers/acpi/parser/psxface.c10
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/resources/rsaddr.c4
-rw-r--r--drivers/acpi/resources/rscalc.c4
-rw-r--r--drivers/acpi/resources/rscreate.c18
-rw-r--r--drivers/acpi/resources/rsdump.c4
-rw-r--r--drivers/acpi/resources/rsio.c4
-rw-r--r--drivers/acpi/resources/rsirq.c4
-rw-r--r--drivers/acpi/resources/rslist.c4
-rw-r--r--drivers/acpi/resources/rsmemory.c4
-rw-r--r--drivers/acpi/resources/rsmisc.c4
-rw-r--r--drivers/acpi/resources/rsutils.c4
-rw-r--r--drivers/acpi/resources/rsxface.c4
-rw-r--r--drivers/acpi/sys.c70
-rw-r--r--drivers/acpi/table.c21
-rw-r--r--drivers/acpi/tables/tbconvrt.c21
-rw-r--r--drivers/acpi/tables/tbget.c16
-rw-r--r--drivers/acpi/tables/tbinstal.c4
-rw-r--r--drivers/acpi/tables/tbutils.c4
-rw-r--r--drivers/acpi/tables/tbxface.c4
-rw-r--r--drivers/acpi/tables/tbxfroot.c4
-rw-r--r--drivers/atm/Makefile2
-rw-r--r--drivers/block/DAC960.c8
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c158
-rw-r--r--drivers/block/elevator.c130
-rw-r--r--drivers/block/ll_rw_blk.c335
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/char/agp/agpgart_be.c11
-rw-r--r--drivers/char/drm/Config.in3
-rw-r--r--drivers/char/drm/Makefile17
-rw-r--r--drivers/char/drm/drm.h187
-rw-r--r--drivers/char/drm/radeon_bufs.c298
-rw-r--r--drivers/char/drm/radeon_context.c215
-rw-r--r--drivers/char/drm/radeon_cp.c1314
-rw-r--r--drivers/char/drm/radeon_drm.h325
-rw-r--r--drivers/char/drm/radeon_drv.c702
-rw-r--r--drivers/char/drm/radeon_drv.h709
-rw-r--r--drivers/char/drm/radeon_state.c1447
-rw-r--r--drivers/char/misc.c4
-rw-r--r--drivers/char/n_tty.c36
-rw-r--r--drivers/i2o/i2o_block.c7
-rw-r--r--drivers/ide/hpt366.c9
-rw-r--r--drivers/ide/ide-cd.c37
-rw-r--r--drivers/ide/ide-dma.c8
-rw-r--r--drivers/ide/ide-probe.c7
-rw-r--r--drivers/ide/via82cxxx.c2
-rw-r--r--drivers/isdn/hisax/Makefile4
-rw-r--r--drivers/isdn/hisax/config.c4
-rw-r--r--drivers/isdn/hisax/isdnl3.c2
-rw-r--r--drivers/isdn/hisax/md5sums.asc12
-rw-r--r--drivers/isdn/isdn_common.c5
-rw-r--r--drivers/isdn/isdn_net.c1
-rw-r--r--drivers/isdn/isdn_ppp.c14
-rw-r--r--drivers/isdn/isdn_v110.c4
-rw-r--r--drivers/md/Config.in5
-rw-r--r--drivers/md/lvm-snap.c32
-rw-r--r--drivers/md/lvm-snap.h47
-rw-r--r--drivers/md/lvm.c632
-rw-r--r--drivers/md/md.c112
-rw-r--r--drivers/md/raid5.c5
-rw-r--r--drivers/md/xor.c3
-rw-r--r--drivers/net/3c59x.c108
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/depca.c4
-rw-r--r--drivers/net/dmfe.c4
-rw-r--r--drivers/net/eepro100.c15
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/hamradio/scc.c4
-rw-r--r--drivers/net/myri_sbus.c10
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/ppp_async.c89
-rw-r--r--drivers/net/pppoe.c40
-rw-r--r--drivers/net/sunbmac.c61
-rw-r--r--drivers/net/tulip/ChangeLog36
-rw-r--r--drivers/net/tulip/eeprom.c12
-rw-r--r--drivers/net/tulip/media.c6
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/wan/lapbether.c172
-rw-r--r--drivers/net/wan/lmc/lmc.h3
-rw-r--r--drivers/net/wan/lmc/lmc_media.h3
-rw-r--r--drivers/net/wan/lmc/lmc_prot.h3
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h3
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/pnp/isapnp_proc.c17
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/sbus/audio/Config.in6
-rw-r--r--drivers/sbus/audio/amd7930.c6
-rw-r--r--drivers/sbus/audio/dbri.c6
-rw-r--r--drivers/sbus/char/Makefile1
-rw-r--r--drivers/sbus/char/bpp.c34
-rw-r--r--drivers/sbus/char/cpwatchdog.c838
-rw-r--r--drivers/sbus/char/flash.c17
-rw-r--r--drivers/sbus/char/jsflash.c2
-rw-r--r--drivers/sbus/char/pcikbd.c19
-rw-r--r--drivers/sbus/char/rtc.c50
-rw-r--r--drivers/sbus/char/sunkbd.c11
-rw-r--r--drivers/sbus/char/sunmouse.c11
-rw-r--r--drivers/sbus/char/vfc_dev.c28
-rw-r--r--drivers/sbus/sbus.c14
-rw-r--r--drivers/scsi/constants.c2
-rw-r--r--drivers/scsi/ibmmca.c9
-rw-r--r--drivers/scsi/megaraid.c1
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/scsi_lib.c191
-rw-r--r--drivers/scsi/scsi_merge.c12
-rw-r--r--drivers/scsi/sg.c1
-rw-r--r--drivers/scsi/sr.c10
-rw-r--r--drivers/sound/Config.in6
-rw-r--r--drivers/sound/Makefile4
-rw-r--r--drivers/sound/emu10k1/audio.c33
-rw-r--r--drivers/sound/trix.c1
-rw-r--r--drivers/sound/via82cxxx_audio.c707
-rw-r--r--drivers/sound/ymfpci.c998
-rw-r--r--drivers/sound/ymfpci.h116
-rw-r--r--drivers/usb/rio500.c3
-rw-r--r--drivers/usb/serial/Config.in1
-rw-r--r--drivers/usb/serial/usbserial.c23
-rw-r--r--drivers/usb/serial/visor.c7
-rw-r--r--drivers/usb/storage/debug.h2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h232
-rw-r--r--drivers/usb/storage/usb.c425
-rw-r--r--drivers/usb/storage/usb.h9
-rw-r--r--drivers/video/sbusfb.c6
-rw-r--r--drivers/video/vfb.c2
-rw-r--r--fs/Config.in2
-rw-r--r--fs/Makefile1
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/coda/cnode.c62
-rw-r--r--fs/exec.c9
-rw-r--r--fs/fat/cache.c29
-rw-r--r--fs/hpfs/inode.c1
-rw-r--r--fs/inode.c22
-rw-r--r--fs/ncpfs/dir.c6
-rw-r--r--fs/ncpfs/sock.c5
-rw-r--r--fs/nfs/flushd.c56
-rw-r--r--fs/partitions/msdos.c6
-rw-r--r--fs/proc/kcore.c1
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/reiserfs/.cvsignore2
-rw-r--r--fs/reiserfs/Makefile20
-rw-r--r--fs/reiserfs/README157
-rw-r--r--fs/reiserfs/bitmap.c679
-rw-r--r--fs/reiserfs/buffer2.c358
-rw-r--r--fs/reiserfs/dir.c249
-rw-r--r--fs/reiserfs/do_balan.c2043
-rw-r--r--fs/reiserfs/file.c124
-rw-r--r--fs/reiserfs/fix_node.c2908
-rw-r--r--fs/reiserfs/hashes.c226
-rw-r--r--fs/reiserfs/ibalance.c1140
-rw-r--r--fs/reiserfs/inode.c1881
-rw-r--r--fs/reiserfs/ioctl.c101
-rw-r--r--fs/reiserfs/item_ops.c718
-rw-r--r--fs/reiserfs/journal.c3215
-rw-r--r--fs/reiserfs/lbalance.c1326
-rw-r--r--fs/reiserfs/namei.c1221
-rw-r--r--fs/reiserfs/objectid.c211
-rw-r--r--fs/reiserfs/prints.c881
-rw-r--r--fs/reiserfs/resize.c168
-rw-r--r--fs/reiserfs/stree.c2078
-rw-r--r--fs/reiserfs/super.c879
-rw-r--r--fs/reiserfs/tail_conversion.c297
-rw-r--r--fs/reiserfs/version.c7
-rw-r--r--fs/super.c6
-rw-r--r--fs/udf/file.c1
-rw-r--r--fs/udf/inode.c1
-rw-r--r--include/asm-alpha/errno.h2
-rw-r--r--include/asm-alpha/unistd.h1
-rw-r--r--include/asm-i386/bugs.h19
-rw-r--r--include/asm-i386/errno.h1
-rw-r--r--include/asm-i386/i387.h10
-rw-r--r--include/asm-i386/pgtable.h6
-rw-r--r--include/asm-i386/system.h7
-rw-r--r--include/asm-mips/errno.h5
-rw-r--r--include/asm-mips64/errno.h7
-rw-r--r--include/asm-ppc/delay.h41
-rw-r--r--include/asm-ppc/dma.h38
-rw-r--r--include/asm-ppc/elf.h19
-rw-r--r--include/asm-ppc/feature.h2
-rw-r--r--include/asm-ppc/hardirq.h6
-rw-r--r--include/asm-ppc/hw_irq.h16
-rw-r--r--include/asm-ppc/ide.h25
-rw-r--r--include/asm-ppc/io.h21
-rw-r--r--include/asm-ppc/ioctls.h1
-rw-r--r--include/asm-ppc/irq.h9
-rw-r--r--include/asm-ppc/keylargo.h9
-rw-r--r--include/asm-ppc/linux_logo.h3
-rw-r--r--include/asm-ppc/machdep.h37
-rw-r--r--include/asm-ppc/mman.h1
-rw-r--r--include/asm-ppc/mmu.h7
-rw-r--r--include/asm-ppc/mmu_context.h7
-rw-r--r--include/asm-ppc/parport.h18
-rw-r--r--include/asm-ppc/pci-bridge.h79
-rw-r--r--include/asm-ppc/pci.h20
-rw-r--r--include/asm-ppc/pgtable.h127
-rw-r--r--include/asm-ppc/prep_nvram.h5
-rw-r--r--include/asm-ppc/processor.h58
-rw-r--r--include/asm-ppc/prom.h8
-rw-r--r--include/asm-ppc/raven.h2
-rw-r--r--include/asm-ppc/segment.h6
-rw-r--r--include/asm-ppc/serial.h5
-rw-r--r--include/asm-ppc/smp.h2
-rw-r--r--include/asm-ppc/smplock.h4
-rw-r--r--include/asm-ppc/termios.h1
-rw-r--r--include/asm-ppc/unistd.h4
-rw-r--r--include/asm-sh/current.h2
-rw-r--r--include/asm-sh/pgtable.h6
-rw-r--r--include/asm-sh/sh_bios.h2
-rw-r--r--include/asm-sh/system.h65
-rw-r--r--include/asm-sparc/mostek.h3
-rw-r--r--include/asm-sparc64/mostek.h3
-rw-r--r--include/asm-sparc64/pbm.h17
-rw-r--r--include/asm-sparc64/watchdog.h31
-rw-r--r--include/linux/acpi.h15
-rw-r--r--include/linux/blk.h6
-rw-r--r--include/linux/blkdev.h28
-rw-r--r--include/linux/dn.h7
-rw-r--r--include/linux/elevator.h78
-rw-r--r--include/linux/fs.h73
-rw-r--r--include/linux/lvm.h67
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/nfs_xdr.h4
-rw-r--r--include/linux/raid/md_u.h1
-rw-r--r--include/linux/reiserfs_fs.h2074
-rw-r--r--include/linux/reiserfs_fs_i.h63
-rw-r--r--include/linux/reiserfs_fs_sb.h398
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/sched.h24
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/net/dn.h26
-rw-r--r--include/net/dn_nsp.h11
-rw-r--r--include/net/ipx.h8
-rw-r--r--include/net/x25.h1
-rw-r--r--ipc/shm.c5
-rw-r--r--kernel/context.c2
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/ksyms.c2
-rw-r--r--kernel/sched.c48
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/memory.c14
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/page_alloc.c49
-rw-r--r--mm/shmem.c111
-rw-r--r--mm/slab.c2
-rw-r--r--mm/swap.c3
-rw-r--r--mm/vmalloc.c1
-rw-r--r--mm/vmscan.c383
-rw-r--r--net/appletalk/aarp.c599
-rw-r--r--net/appletalk/ddp.c923
-rw-r--r--net/atm/lec.c10
-rw-r--r--net/atm/lec.h4
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/core/netfilter.c5
-rw-r--r--net/decnet/Makefile2
-rw-r--r--net/decnet/TODO12
-rw-r--r--net/decnet/af_decnet.c567
-rw-r--r--net/decnet/dn_dev.c4
-rw-r--r--net/decnet/dn_fib.c6
-rw-r--r--net/decnet/dn_neigh.c168
-rw-r--r--net/decnet/dn_nsp_in.c174
-rw-r--r--net/decnet/dn_nsp_out.c304
-rw-r--r--net/decnet/dn_route.c190
-rw-r--r--net/decnet/dn_rules.c11
-rw-r--r--net/decnet/dn_table.c2
-rw-r--r--net/decnet/dn_timer.c8
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/netfilter/Config.in11
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c9
-rw-r--r--net/ipv4/netfilter/ip_nat_core.c21
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c14
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c24
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c6
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/ip6_tables.c22
-rw-r--r--net/ipv6/netfilter/ip6t_MARK.c21
-rw-r--r--net/ipv6/netfilter/ip6t_mark.c6
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c189
-rw-r--r--net/ipx/af_ipx.c1296
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/sched/cls_u32.c2
-rw-r--r--net/sched/sch_dsmark.c22
-rw-r--r--net/sched/sch_gred.c12
-rw-r--r--net/sunrpc/auth.c20
-rw-r--r--net/sunrpc/sunrpc_syms.c1
-rw-r--r--net/sysctl_net.c7
-rw-r--r--net/unix/Makefile2
-rw-r--r--net/unix/af_unix.c52
-rw-r--r--net/unix/sysctl_net_unix.c22
-rw-r--r--net/x25/Makefile2
-rw-r--r--net/x25/af_x25.c21
-rw-r--r--net/x25/x25_dev.c6
-rw-r--r--net/x25/x25_facilities.c6
-rw-r--r--net/x25/x25_in.c4
-rw-r--r--net/x25/x25_link.c12
-rw-r--r--net/x25/x25_out.c4
-rw-r--r--net/x25/x25_route.c12
-rw-r--r--net/x25/x25_subr.c18
-rw-r--r--net/x25/x25_timer.c42
-rw-r--r--scripts/checkconfig.pl5
571 files changed, 48261 insertions, 12465 deletions
diff --git a/CREDITS b/CREDITS
index b55a2ff16..eb5573d97 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2363,10 +2363,11 @@ E: gt8134b@prism.gatech.edu
D: Dosemu
N: Hannu Savolainen
-E: hannu@voxware.pp.fi
-D: Kernel sound drivers
-S: Hiekkalaiturintie 3 A 8
-S: 00980 Helsinki
+E: hannu@opensound.com
+D: Maintainer of the sound drivers until 2.1.x days.
+D: Original compressed boot image support.
+S: Valurink. 4A11
+S: 03600 Karkkila
S: Finland
N: Eric Schenk
diff --git a/Documentation/Changes b/Documentation/Changes
index 91c60eee1..0b2f66e6f 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -56,7 +56,7 @@ o modutils 2.4.0 # insmod -V
o e2fsprogs 1.19 # tune2fs --version
o pcmcia-cs 3.1.21 # cardmgr -V
o PPP 2.4.0 # pppd --version
-o isdn4k-utils 3.1beta7 # isdnctrl 2>&1|grep version
+o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version
Kernel compilation
==================
@@ -335,9 +335,9 @@ o <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/testing/isdn4k-utils.v3.1beta7.
Netfilter
---------
-o <http://netfilter.filewatcher.org/iptables-1.1.1.tar.bz2>
-o <http://www.samba.org/netfilter/iptables-1.1.1.tar.bz2>
-o <http://netfilter.kernelnotes.org/iptables-1.1.1.tar.bz2>
+o <http://netfilter.filewatcher.org/iptables-1.2.tar.bz2>
+o <http://netfilter.samba.org/iptables-1.2.tar.bz2>
+o <http://netfilter.kernelnotes.org/iptables-1.2.tar.bz2>
Ip-route2
---------
diff --git a/Documentation/Configure.help b/Documentation/Configure.help
index 05c0930ca..a6e9caa73 100644
--- a/Documentation/Configure.help
+++ b/Documentation/Configure.help
@@ -1450,15 +1450,6 @@ CONFIG_BLK_DEV_LVM
want), say M here and read Documentation/modules.txt. The module
will be called lvm-mod.o.
-Logical Volume Manager /proc file system information
-CONFIG_LVM_PROC_FS
- If you say Y here, you are able to access overall Logical Volume
- Manager, Volume Group, Logical and Physical Volume information in
- /proc/lvm.
-
- To use this option, you have to check, that the "/proc file system
- support" (CONFIG_PROC_FS) is enabled too.
-
Multiple devices driver support
CONFIG_BLK_DEV_MD
This driver lets you combine several hard disk partitions into one
@@ -1565,20 +1556,6 @@ CONFIG_MD_RAID5
If unsure, say Y.
-RAID Boot support
-CONFIG_MD_BOOT
- To boot with an initial raid volume (any type) you can select
- autodetect, or answer Y here and appropriate options to the kernel
- at boot time.
- For lilo and loadlin options see the file Documentation/md.txt.
-
-RAID AutoDetect support
-CONFIG_AUTODETECT_RAID
- An alternative to "Raid Boot support" is autodetect support.
- With this selected, any partitons of type 0xFD will be considered for
- inclusion in a RAID array. Information in the RAID-superblock on
- the partition will determine how it is included.
-
Support for Acer PICA 1 chipset
CONFIG_ACER_PICA_61
This is a machine with a R4400 133/150 MHz CPU. To compile a Linux
@@ -2086,6 +2063,72 @@ CONFIG_INET_ECN
If in doubt, say N.
+IP6 tables support (required for filtering/masq/NAT)
+CONFIG_IP6_NF_IPTABLES
+ ip6tables is a general, extensible packet identification framework.
+ Currently only the packet filtering and packet mangling subsystem
+ for IPv6 use this, but connection tracking is going to follow.
+ Say 'Y' or 'M' here if you want to use either of those.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+IPv6 limit match support
+CONFIG_IP6_NF_MATCH_LIMIT
+ limit matching allows you to control the rate at which a rule can be
+ matched: mainly useful in combination with the LOG target ("LOG
+ target support", below) and to avoid some Denial of Service attacks.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+MAC address match support
+CONFIG_IP6_NF_MATCH_MAC
+ mac matching allows you to match packets based on the source
+ ethernet address of the packet.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+netfilter mark match support
+CONFIG_IP6_NF_MATCH_MARK
+ Netfilter mark matching allows you to match packets based on the
+ `nfmark' value in the packet. This can be set by the MARK target
+ (see below).
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+Packet filtering
+CONFIG_IP6_NF_FILTER
+ Packet filtering defines a table `filter', which has a series of
+ rules for simple packet filtering at local input, forwarding and
+ local output. See the man page for iptables(8).
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+Packet mangling
+CONFIG_IP6_NF_MANGLE
+ This option adds a `mangle' table to iptables: see the man page for
+ iptables(8). This table is used for various packet alterations
+ which can effect how the packet is routed.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+MARK target support
+CONFIG_IP6_NF_TARGET_MARK
+ This option adds a `MARK' target, which allows you to create rules
+ in the `mangle' table which alter the netfilter mark (nfmark) field
+ associated with the packet packet prior to routing. This can change
+ the routing method (see `IP: use netfilter MARK value as routing
+ key') and can also be used by other subsystems to change their
+ behavior.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
SYN flood protection
CONFIG_SYN_COOKIES
Normal TCP/IP networking is open to an attack known as "SYN
@@ -2642,13 +2685,14 @@ CONFIG_HOTPLUG
One well known example of this is PCMCIA- or PC-cards, credit-card
size devices such as network cards, modems or hard drives which are
- plugged into slots found on all modern laptop computers.
+ plugged into slots found on all modern laptop computers. Another
+ example, used on modern desktops as well as laptops, is USB.
- Another example, used on modern desktops as well as laptops, is USB.
- Enable HOTPLUG with USB and KMOD, and your kernel will automatically
- call out to a user mode "policy agent" to load modules and set up
- software needed to use USB devices you plug in. Get agent software
- (at http://www.linux-usb.org/policy.html) and install it.
+ Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
+ software (at http://linux-hotplug.sourceforge.net) and install it.
+ Then your kernel will automatically call out to a user mode "policy
+ agent" (/sbin/hotplug) to load modules and set up software needed
+ to use devices as you hotplug them.
PCMCIA/Cardbus support
CONFIG_PCMCIA
@@ -10762,6 +10806,46 @@ CONFIG_MINIX_FS
called minix.o. Note that the file system of your root partition
(the one containing the directory /) cannot be compiled as a module.
+Reiserfs support
+CONFIG_REISERFS_FS
+
+ Stores not just filenames but the files themselves in a balanced
+ tree. Uses journaling.
+
+ Balanced trees are more efficient than traditional
+ filesystem architectural foundations.
+
+ You can use reiserfs in all cases where you use the ext2fs file
+ system, and you will gain in speed and disk space. It has fewer
+ worst case performance situations than other file systems
+ because balanced trees are hardier creatures than other algorithms
+ are (if that is not technical enough, read www.namesys.com....:-) )
+
+ It is more easily extended to have features currently found in
+ database and keyword search systems than block allocation based
+ filesystems are. The next version will be so extended, and will
+ support plugins consistent with our motto ``It takes more than a
+ license to make source code open.''
+
+ Read www.namesys.com to learn more about reiserfs.
+
+ Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
+
+ If you like it, you can pay us to add new features to it that you
+ need, buy a support contract, or pay us to port it to another OS.
+
+Enable Reiserfs consistency checks
+CONFIG_REISERFS_CHECK
+ If you set this to yes, then ReiserFS will perform every check it
+ can possibly imagine of its internal consistency throughout its
+ operation. It will also go substantially slower. More than once we
+ have forgotten that this was on, and then gone despondent over the
+ latest benchmarks.:-) Use of this option allows our team to go all
+ out in checking for consistency when debugging without fear of its
+ effect on end users. If you are on the verge of sending in a bug
+ report, say yes and you might get a useful error message. Almost
+ everyone should say no.
+
Second extended fs support
CONFIG_EXT2_FS
This is the de facto standard Linux file system (method to organize
@@ -13242,7 +13326,7 @@ CONFIG_ACPI
This support requires an ACPI compliant platform (hardware/firmware).
If both ACPI and Advanced Power Management (APM) support are
- configured, ACPI is used.
+ configured, whichever is loaded first shall be used.
This code DOES NOT currently provide a complete OSPM implementation --
it has not yet reached APM's level of functionality. When fully
@@ -14644,6 +14728,14 @@ CONFIG_ISDN_PPP_VJ
This enables Van Jacobson header compression for synchronous PPP.
Say Y if the other end of the connection supports it.
+CONFIG_ISDN_PPP_BSDCOMP
+ Support for the BSD-Compress compression method for PPP, which uses
+ the LZW compression method to compress each PPP packet before it is
+ sent over the wire. The machine at the other end of the PPP link
+ (usually your ISP) has to support the BSD-Compress compression
+ method as well for this to be useful. Even if they don't support it,
+ it is safe to say Y here.
+
Support audio via ISDN
CONFIG_ISDN_AUDIO
If you say Y here, the modem-emulator will support a subset of the
@@ -16980,6 +17072,20 @@ CONFIG_DISPLAY7SEG
another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
you should say N to this option.
+CP1XXX Hardware Watchdog support
+CONFIG_WATCHDOG_CP1XXX
+ This is the driver for the hardware watchdog timers present on
+ Sun Microsystems CompactPCI models CP1400 and CP1500.
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called cpwatchdog.o. If you want to compile it
+ as a module, say M here and read Documentation/modules.txt.
+
+ If you do not have a CompactPCI model CP1400 or CP1500, or
+ another UltraSPARC-IIi-cEngine boardset with hardware watchdog,
+ you should say N to this option.
+
IA-64 system type
CONFIG_IA64_GENERIC
This selects the system type of your hardware. A "generic" kernel
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index f3ae78497..c47cd632d 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -167,7 +167,7 @@ Here are the routines, one by one:
This interface flushes an entire user address space from
the caches. That is, after running, there will be no cache
- lines assosciated with 'mm'.
+ lines associated with 'mm'.
This interface is used to handle whole address space
page table operations such as what happens during
@@ -209,7 +209,7 @@ require a whole different set of interfaces to handle properly.
The biggest problem is that of virtual aliasing in the data cache
of a processor.
-Is your port subsceptible to virtual aliasing in it's D-cache?
+Is your port susceptible to virtual aliasing in its D-cache?
Well, if your D-cache is virtually indexed, is larger in size than
PAGE_SIZE, and does not prevent multiple cache lines for the same
physical address from existing at once, you have this problem.
@@ -221,6 +221,9 @@ size). This setting will force the SYSv IPC layer to only allow user
processes to mmap shared memory at address which are a multiple of
this value.
+NOTE: This does not fix shared mmaps, check out the sparc64 port for
+one way to solve this (in particular SPARC_FLAG_MMAPSHARED).
+
Next, you have two methods to solve the D-cache aliasing issue for all
other cases. Please keep in mind that fact that, for a given page
mapped into some user address space, there is always at least one more
@@ -240,7 +243,7 @@ existing ports should move over to the new mechanism as well.
The physical page 'page' is about to be place into the
user address space of a process. If it is possible for
stores done recently by the kernel into this physical
- page, to not be visible to an arbitray mapping in userspace,
+ page, to not be visible to an arbitrary mapping in userspace,
you must flush this page from the D-cache.
If the D-cache is writeback in nature, the dirty data (if
@@ -266,7 +269,7 @@ Here is the new interface:
For example, a port may temporarily map 'from' and 'to' to
kernel virtual addresses during the copy. The virtual address
- for these two pages is choosen in such a way that the kernel
+ for these two pages is chosen in such a way that the kernel
load/store instructions happen to virtual addresses which are
of the same "color" as the user mapping of the page. Sparc64
for example, uses this technique.
@@ -306,7 +309,7 @@ Here is the new interface:
simply be defined as a nop on that architecture.
There is a bit set aside in page->flags (PG_arch_1) as
- "architecture private". The kernel guarentees that,
+ "architecture private". The kernel guarantees that,
for pagecache pages, it will clear this bit when such
a page first enters the pagecache.
@@ -323,7 +326,14 @@ Here is the new interface:
update_mmu_cache(), a check is made of this flag bit, and if
set the flush is done and the flag bit is cleared.
-XXX Not documented: flush_icache_page(), need to talk to Paul
- Mackerras, David Mosberger-Tang, et al.
- to see what the expected semantics of this
- interface are. -DaveM
+ void flush_icache_range(unsigned long start, unsigned long end)
+ When the kernel stores into addresses that it will execute
+ out of (eg when loading modules), this function is called.
+
+ If the icache does not snoop stores then this routine will need
+ to flush it.
+
+ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+ All the functionality of flush_icache_page can be implemented in
+ flush_dcache_page and update_mmu_cache. In 2.5 the hope is to
+ remove this interface completely.
diff --git a/Documentation/usb/hotplug.txt b/Documentation/usb/hotplug.txt
index a526ffc67..0b95e275b 100644
--- a/Documentation/usb/hotplug.txt
+++ b/Documentation/usb/hotplug.txt
@@ -1,4 +1,4 @@
-USB HOTPLUGGING
+LINUX HOTPLUGGING
In hotpluggable busses like USB (and Cardbus PCI), end-users plug devices
into the bus with power on. In most cases, users expect the devices to become
@@ -8,7 +8,7 @@ immediately usable. That means the system must do many things, including:
loading a kernel module; newer drivers can use modutils to
publish their device (and class) support to user utilities.
- - Bind a driver to that device. That's done using the USB
+ - Bind a driver to that device. Bus frameworks do that using a
device driver's probe() routine.
- Tell other subsystems to configure the new device. Print
@@ -26,6 +26,10 @@ Those triggered actions must support a system's administrative policies;
such programs are called "policy agents" here. Typically they involve
shell scripts that dispatch to more familiar administration tools.
+Because some of those actions rely on information about drivers (metadata)
+that is currently available only when the drivers are dynamically linked,
+you get the best hotplugging when you configure a highly modular system.
+
KERNEL HOTPLUG HELPER (/sbin/hotplug)
@@ -40,9 +44,14 @@ some kernel event. That name is used as the first key for further event
dispatch; any other argument and environment parameters are specified by
the subsystem making that invocation.
-A reference implementation of a /sbin/hotplug script is available at the
-http://www.linux-usb.org website, which works USB for but also knows how to
-delegate to any /etc/hotplug/$TYPE.agent policy agent present.
+Hotplug software and other resources are available at:
+
+ http://linux-hotplug.sourceforge.net
+
+Mailing list information is also available at that site.
+
+
+--------------------------------------------------------------------------
USB POLICY AGENT
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index 2fc5ac8eb..e1371097b 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -223,7 +223,7 @@ Generic Serial driver
To enable the generic driver to recognize your device, build the driver
as a module and load it by the following invocation:
- insmod usb-serial vendor=0x#### product=0x####
+ insmod usbserial vendor=0x#### product=0x####
where the #### is replaced with the hex representation of your device's
vendor id and product id.
diff --git a/MAINTAINERS b/MAINTAINERS
index 492d15a40..e30666fb7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -196,6 +196,12 @@ L: linux-kernel@vger.kernel.org
W: http://www.ocston.org/~tigran/patches/bfs
S: Maintained
+BLOCK LAYER
+P: Jens Axboe
+M: axboe@suse.de
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
BTTV VIDEO4LINUX DRIVER
P: Gerd Knorr
M: kraxel@goldbach.in-berlin.de
diff --git a/Makefile b/Makefile
index b46ec806a..eb90cc623 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 2
PATCHLEVEL = 4
-SUBLEVEL = 0
+SUBLEVEL = 1
EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
@@ -110,8 +110,7 @@ export ROOT_DEV = CURRENT
export SVGA_MODE = -DSVGA_MODE=NORMAL_VGA
#
-# if you want the RAM disk device, define this to be the
-# size in blocks.
+# if you want the RAM disk device, define this to be the size in blocks.
# This is i386 specific.
#
@@ -458,9 +457,8 @@ export MODVERFILE
depend dep: dep-files
-# make checkconfig: Prune 'scripts' directory to avoid "false positives".
checkconfig:
- find * -name '*.[hcS]' -type f -print | grep -v scripts/ | sort | xargs $(PERL) -w scripts/checkconfig.pl
+ find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl
checkhelp:
find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 85cc7ed48..abc04cca2 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -23,7 +23,7 @@ obj-y := entry.o traps.o process.o osf_sys.o irq.o irq_alpha.o \
# FIXME!
# These should be made conditional on the stuff that needs them!
#
-obj-y += irq_i8259.o irq_srm.o irq_pyxis.o \
+obj-y += irq_i8259.o irq_srm.o \
es1888.o smc37c669.o smc37c93x.o ns87312.o
ifdef CONFIG_VGA_HOSE
@@ -43,7 +43,7 @@ obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o core_mcpcia.o \
sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o sys_titan.o \
sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
sys_sable.o sys_sio.o sys_sx164.o sys_takara.o sys_rx164.o \
- sys_wildfire.o core_wildfire.o
+ sys_wildfire.o core_wildfire.o irq_pyxis.o
else
@@ -94,6 +94,10 @@ obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o
obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o
obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o
+ifneq ($(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_RUFFIAN)$(CONFIG_ALPHA_SX164),)
+obj-y += irq_pyxis.o
+endif
+
endif # GENERIC
all: kernel.o head.o
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 4d574b824..cd28b07fc 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -904,7 +904,6 @@ extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
extern int do_getitimer(int which, struct itimerval *value);
extern int do_setitimer(int which, struct itimerval *, struct itimerval *);
asmlinkage int sys_utimes(char *, struct timeval *);
-extern int sys_wait4(pid_t, int *, int, struct rusage *);
extern int do_adjtimex(struct timex *);
struct timeval32
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 73df1e930..e3d75f7a1 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -30,7 +30,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(int, int *, int, struct rusage *);
asmlinkage void ret_from_sys_call(void);
asmlinkage int do_signal(sigset_t *, struct pt_regs *,
struct switch_stack *, unsigned long, unsigned long);
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 624c6732c..58a8f4658 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -42,7 +42,7 @@ static inline void
cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
{
int ofs = (irq - 16) / 8;
- outb(mask >> (16 + ofs*3), 0x804 + ofs);
+ outb(mask >> (16 + ofs * 8), 0x804 + ofs);
}
static inline void
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1af2abd84..1b8f88815 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -32,8 +32,6 @@
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
-asmlinkage int sys_wait4(pid_t pid, unsigned long * stat_addr,
- int options, unsigned long *ru);
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 25b523026..f7941bcbe 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -50,7 +50,7 @@ ifdef CONFIG_M686
CFLAGS += -march=i686
endif
-ifdef CONFIG_M686FXSR
+ifdef CONFIG_MPENTIUMIII
CFLAGS += -march=i686
endif
diff --git a/arch/i386/boot/bootsect.S b/arch/i386/boot/bootsect.S
index 26c67d5fd..5434a9df2 100644
--- a/arch/i386/boot/bootsect.S
+++ b/arch/i386/boot/bootsect.S
@@ -25,10 +25,9 @@
* loads pretty fast by getting whole tracks at a time whenever possible.
*/
-#include <linux/config.h> /* for CONFIG_ROOT_RDONLY */
#include <asm/boot.h>
-SETUPSECS = 4 /* default nr of setup-sectors */
+SETUPSECTS = 4 /* default nr of setup-sectors */
BOOTSEG = 0x07C0 /* original address of boot-sector */
INITSEG = DEF_INITSEG /* we move boot here - out of the way */
SETUPSEG = DEF_SETUPSEG /* setup starts here */
@@ -46,8 +45,8 @@ SWAP_DEV = 0 /* SWAP_DEV is now written by "build" */
#define RAMDISK 0
#endif
-#ifndef CONFIG_ROOT_RDONLY
-#define CONFIG_ROOT_RDONLY 1
+#ifndef ROOT_RDONLY
+#define ROOT_RDONLY 1
#endif
.code16
@@ -394,11 +393,11 @@ disksizes: .byte 36, 18, 15, 9
msg1: .byte 13, 10
.ascii "Loading"
-# XXX: This is a *very* snug fit.
+# XXX: This is a fairly snug fit.
.org 497
-setup_sects: .byte SETUPSECS
-root_flags: .word CONFIG_ROOT_RDONLY
+setup_sects: .byte SETUPSECTS
+root_flags: .word ROOT_RDONLY
syssize: .word SYSSIZE
swap_dev: .word SWAP_DEV
ram_size: .word RAMDISK
diff --git a/arch/i386/config.in b/arch/i386/config.in
index 4dc863995..2dec3d90b 100644
--- a/arch/i386/config.in
+++ b/arch/i386/config.in
@@ -33,7 +33,7 @@ choice 'Processor family' \
Pentium-Classic CONFIG_M586TSC \
Pentium-MMX CONFIG_M586MMX \
Pentium-Pro/Celeron/Pentium-II CONFIG_M686 \
- Pentium-III CONFIG_M686FXSR \
+ Pentium-III CONFIG_MPENTIUMIII \
Pentium-4 CONFIG_MPENTIUM4 \
K6/K6-II/K6-III CONFIG_MK6 \
Athlon/K7 CONFIG_MK7 \
@@ -45,8 +45,6 @@ choice 'Processor family' \
# Define implied options from the CPU selection here
#
-unset CONFIG_X86_FXSR
-
if [ "$CONFIG_M386" = "y" ]; then
define_bool CONFIG_X86_CMPXCHG n
define_int CONFIG_X86_L1_CACHE_SHIFT 4
@@ -87,14 +85,12 @@ if [ "$CONFIG_M686" = "y" ]; then
define_bool CONFIG_X86_PGE y
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
fi
-if [ "$CONFIG_M686FXSR" = "y" ]; then
+if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
define_int CONFIG_X86_L1_CACHE_SHIFT 5
define_bool CONFIG_X86_TSC y
define_bool CONFIG_X86_GOOD_APIC y
define_bool CONFIG_X86_PGE y
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
- define_bool CONFIG_X86_FXSR y
- define_bool CONFIG_X86_XMM y
fi
if [ "$CONFIG_MPENTIUM4" = "y" ]; then
define_int CONFIG_X86_L1_CACHE_SHIFT 7
@@ -102,8 +98,6 @@ if [ "$CONFIG_MPENTIUM4" = "y" ]; then
define_bool CONFIG_X86_GOOD_APIC y
define_bool CONFIG_X86_PGE y
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
- define_bool CONFIG_X86_FXSR y
- define_bool CONFIG_X86_XMM y
fi
if [ "$CONFIG_MK6" = "y" ]; then
define_int CONFIG_X86_L1_CACHE_SHIFT 5
@@ -158,9 +152,7 @@ if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
define_bool CONFIG_X86_PAE y
fi
-if [ "$CONFIG_X86_FXSR" != "y" ]; then
- bool 'Math emulation' CONFIG_MATH_EMULATION
-fi
+bool 'Math emulation' CONFIG_MATH_EMULATION
bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" != "y" ]; then
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index de38a46ef..05a2e3725 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -27,7 +27,7 @@ CONFIG_KMOD=y
# CONFIG_M586TSC is not set
# CONFIG_M586MMX is not set
# CONFIG_M686 is not set
-CONFIG_M686FXSR=y
+CONFIG_MPENTIUMIII=y
# CONFIG_MPENTIUM4 is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
@@ -45,8 +45,6 @@ CONFIG_X86_TSC=y
CONFIG_X86_GOOD_APIC=y
CONFIG_X86_PGE=y
CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_FXSR=y
-CONFIG_X86_XMM=y
# CONFIG_TOSHIBA is not set
# CONFIG_MICROCODE is not set
# CONFIG_X86_MSR is not set
@@ -54,6 +52,7 @@ CONFIG_X86_XMM=y
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
# CONFIG_HIGHMEM64G is not set
+# CONFIG_MATH_EMULATION is not set
# CONFIG_MTRR is not set
CONFIG_SMP=y
CONFIG_HAVE_DEC_LOCK=y
@@ -134,7 +133,6 @@ CONFIG_BLK_DEV_FD=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -512,11 +510,18 @@ CONFIG_PSMOUSE=y
# Ftape, the floppy tape device driver
#
# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
+CONFIG_AGP=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_I810=y
+CONFIG_AGP_VIA=y
+CONFIG_AGP_AMD=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_ALI=y
CONFIG_DRM=y
CONFIG_DRM_TDFX=y
# CONFIG_DRM_GAMMA is not set
# CONFIG_DRM_R128 is not set
+CONFIG_DRM_RADEON=y
# CONFIG_DRM_I810 is not set
# CONFIG_DRM_MGA is not set
CONFIG_PCMCIA_SERIAL=y
@@ -538,6 +543,8 @@ CONFIG_PCMCIA_SERIAL=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index 3031432d0..a3a526b4b 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -18,14 +18,6 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_X86_FXSR)
-#define HAVE_FXSR 1
-#elif defined(CONFIG_X86_RUNTIME_FXSR)
-#define HAVE_FXSR (cpu_has_fxsr)
-#else
-#define HAVE_FXSR 0
-#endif
-
#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
@@ -35,13 +27,13 @@
/*
* The _current_ task is using the FPU for the first time
* so initialize it and set the mxcsr to its default
- * value at reset if we support FXSR and then
+ * value at reset if we support XMM instructions and then
* remeber the current task has used the FPU.
*/
void init_fpu(void)
{
__asm__("fninit");
- if ( HAVE_FXSR )
+ if ( cpu_has_xmm )
load_mxcsr(0x1f80);
current->used_math = 1;
@@ -51,9 +43,9 @@ void init_fpu(void)
* FPU lazy state save handling.
*/
-void save_init_fpu( struct task_struct *tsk )
+static inline void __save_init_fpu( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
asm volatile( "fxsave %0 ; fnclex"
: "=m" (tsk->thread.i387.fxsave) );
} else {
@@ -61,12 +53,28 @@ void save_init_fpu( struct task_struct *tsk )
: "=m" (tsk->thread.i387.fsave) );
}
tsk->flags &= ~PF_USEDFPU;
+}
+
+void save_init_fpu( struct task_struct *tsk )
+{
+ __save_init_fpu(tsk);
stts();
}
+void kernel_fpu_begin(void)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->flags & PF_USEDFPU) {
+ __save_init_fpu(tsk);
+ return;
+ }
+ clts();
+}
+
void restore_fpu( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
asm volatile( "fxrstor %0"
: : "m" (tsk->thread.i387.fxsave) );
} else {
@@ -144,7 +152,7 @@ static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave
unsigned short get_fpu_cwd( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return tsk->thread.i387.fxsave.cwd;
} else {
return (unsigned short)tsk->thread.i387.fsave.cwd;
@@ -153,7 +161,7 @@ unsigned short get_fpu_cwd( struct task_struct *tsk )
unsigned short get_fpu_swd( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return tsk->thread.i387.fxsave.swd;
} else {
return (unsigned short)tsk->thread.i387.fsave.swd;
@@ -162,7 +170,7 @@ unsigned short get_fpu_swd( struct task_struct *tsk )
unsigned short get_fpu_twd( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return tsk->thread.i387.fxsave.twd;
} else {
return (unsigned short)tsk->thread.i387.fsave.twd;
@@ -171,7 +179,7 @@ unsigned short get_fpu_twd( struct task_struct *tsk )
unsigned short get_fpu_mxcsr( struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return tsk->thread.i387.fxsave.mxcsr;
} else {
return 0x1f80;
@@ -180,7 +188,7 @@ unsigned short get_fpu_mxcsr( struct task_struct *tsk )
void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
tsk->thread.i387.fxsave.cwd = cwd;
} else {
tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000);
@@ -189,7 +197,7 @@ void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
tsk->thread.i387.fxsave.swd = swd;
} else {
tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000);
@@ -198,7 +206,7 @@ void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
} else {
tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000);
@@ -207,7 +215,7 @@ void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_xmm ) {
tsk->thread.i387.fxsave.mxcsr = mxcsr;
}
}
@@ -321,7 +329,7 @@ int save_i387( struct _fpstate *buf )
current->used_math = 0;
if ( HAVE_HWFP ) {
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return save_i387_fxsave( buf );
} else {
return save_i387_fsave( buf );
@@ -354,7 +362,7 @@ int restore_i387( struct _fpstate *buf )
int err;
if ( HAVE_HWFP ) {
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
err = restore_i387_fxsave( buf );
} else {
err = restore_i387_fsave( buf );
@@ -387,7 +395,7 @@ static inline int get_fpregs_fxsave( struct user_i387_struct *buf,
int get_fpregs( struct user_i387_struct *buf, struct task_struct *tsk )
{
if ( HAVE_HWFP ) {
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return get_fpregs_fxsave( buf, tsk );
} else {
return get_fpregs_fsave( buf, tsk );
@@ -415,7 +423,7 @@ static inline int set_fpregs_fxsave( struct task_struct *tsk,
int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
{
if ( HAVE_HWFP ) {
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
return set_fpregs_fxsave( tsk, buf );
} else {
return set_fpregs_fsave( tsk, buf );
@@ -428,9 +436,10 @@ int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
{
- if ( HAVE_FXSR ) {
- __copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
- sizeof(struct user_fxsr_struct) );
+ if ( cpu_has_fxsr ) {
+ if (__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
+ sizeof(struct user_fxsr_struct) ))
+ return -EFAULT;
return 0;
} else {
return -EIO;
@@ -439,7 +448,7 @@ int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct *buf )
{
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
__copy_from_user( &tsk->thread.i387.fxsave, (void *)buf,
sizeof(struct user_fxsr_struct) );
/* mxcsr bit 6 and 31-16 must be zero for security reasons */
@@ -485,7 +494,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
fpvalid = tsk->used_math;
if ( fpvalid ) {
unlazy_fpu( tsk );
- if ( HAVE_FXSR ) {
+ if ( cpu_has_fxsr ) {
copy_fpu_fxsave( tsk, fpu );
} else {
copy_fpu_fsave( tsk, fpu );
@@ -500,7 +509,7 @@ int dump_extended_fpu( struct pt_regs *regs, struct user_fxsr_struct *fpu )
int fpvalid;
struct task_struct *tsk = current;
- fpvalid = tsk->used_math && HAVE_FXSR;
+ fpvalid = tsk->used_math && cpu_has_fxsr;
if ( fpvalid ) {
unlazy_fpu( tsk );
memcpy( fpu, &tsk->thread.i387.fxsave,
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index c5aece040..b17f499b4 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -253,7 +253,8 @@ static int __init find_isa_irq_pin(int irq, int type)
* Find a specific PCI IRQ entry.
* Not an __init, possibly needed by modules
*/
-static int __init pin_2_irq(int idx, int apic, int pin);
+static int pin_2_irq(int idx, int apic, int pin);
+
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
{
int apic, i, best_guess = -1;
@@ -475,7 +476,7 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
-static int __init pin_2_irq(int idx, int apic, int pin)
+static int pin_2_irq(int idx, int apic, int pin)
{
int irq, i;
int bus = mp_irqs[idx].mpc_srcbus;
diff --git a/arch/i386/kernel/pci-irq.c b/arch/i386/kernel/pci-irq.c
index bd2bf22d3..8281e3f82 100644
--- a/arch/i386/kernel/pci-irq.c
+++ b/arch/i386/kernel/pci-irq.c
@@ -234,22 +234,107 @@ static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
return 1;
}
+/*
+ * PIRQ routing for SiS 85C503 router used in several SiS chipsets
+ * According to the SiS 5595 datasheet (preliminary V1.0, 12/24/1997)
+ * the related registers work as follows:
+ *
+ * general: one byte per re-routable IRQ,
+ * bit 7 IRQ mapping enabled (0) or disabled (1)
+ * bits [6:4] reserved
+ * bits [3:0] IRQ to map to
+ * allowed: 3-7, 9-12, 14-15
+ * reserved: 0, 1, 2, 8, 13
+ *
+ * individual registers in device config space:
+ *
+ * 0x41/0x42/0x43/0x44: PCI INT A/B/C/D - bits as in general case
+ *
+ * 0x61: IDEIRQ: bits as in general case - but:
+ * bits [6:5] must be written 01
+ * bit 4 channel-select primary (0), secondary (1)
+ *
+ * 0x62: USBIRQ: bits as in general case - but:
+ * bit 4 OHCI function disabled (0), enabled (1)
+ *
+ * 0x6a: ACPI/SCI IRQ - bits as in general case
+ *
+ * 0x7e: Data Acq. Module IRQ - bits as in general case
+ *
+ * Apparently there are systems implementing PCI routing table using both
+ * link values 0x01-0x04 and 0x41-0x44 for PCI INTA..D, but register offsets
+ * like 0x62 as link values for USBIRQ e.g. So there is no simple
+ * "register = offset + pirq" relation.
+ * Currently we support PCI INTA..D and USBIRQ and try our best to handle
+ * both link mappings.
+ * IDE/ACPI/DAQ mapping is currently unsupported (left untouched as set by BIOS).
+ */
+
static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
u8 x;
- int reg = 0x41 + (pirq - 'A') ;
-
- pci_read_config_byte(router, reg, &x);
+ int reg = pirq;
+
+ switch(pirq) {
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ reg += 0x40;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x62:
+ pci_read_config_byte(router, reg, &x);
+ if (reg != 0x62)
+ break;
+ if (!(x & 0x40))
+ return 0;
+ break;
+ case 0x61:
+ case 0x6a:
+ case 0x7e:
+ printk("SiS pirq: advanced IDE/ACPI/DAQ mapping not yet implemented\n");
+ return 0;
+ default:
+ printk("SiS router pirq escape (%d)\n", pirq);
+ return 0;
+ }
return (x & 0x80) ? 0 : (x & 0x0f);
}
static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
u8 x;
- int reg = 0x41 + (pirq - 'A') ;
-
- pci_read_config_byte(router, reg, &x);
- x = (pirq & 0x20) ? 0 : (irq & 0x0f);
+ int reg = pirq;
+
+ switch(pirq) {
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ reg += 0x40;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x62:
+ x = (irq&0x0f) ? (irq&0x0f) : 0x80;
+ if (reg != 0x62)
+ break;
+ /* always mark OHCI enabled, as nothing else knows about this */
+ x |= 0x40;
+ break;
+ case 0x61:
+ case 0x6a:
+ case 0x7e:
+ printk("advanced SiS pirq mapping not yet implemented\n");
+ return 0;
+ default:
+ printk("SiS router pirq escape (%d)\n", pirq);
+ return 0;
+ }
pci_write_config_byte(router, reg, x);
return 1;
@@ -462,18 +547,9 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
irq = pirq & 0xf;
DBG(" -> hardcoded IRQ %d\n", irq);
msg = "Hardcoded";
- if (dev->irq && dev->irq != irq) {
- printk("IRQ routing conflict in pirq table! Try 'pci=autoirq'\n");
- return 0;
- }
} else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq))) {
DBG(" -> got IRQ %d\n", irq);
msg = "Found";
- /* We refuse to override the dev->irq information. Give a warning! */
- if (dev->irq && dev->irq != irq) {
- printk("IRQ routing conflict in pirq table! Try 'pci=autoirq'\n");
- return 0;
- }
} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
DBG(" -> assigning IRQ %d", newirq);
if (r->set(pirq_router_dev, dev, pirq, newirq)) {
@@ -504,6 +580,11 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
if (!info)
continue;
if (info->irq[pin].link == pirq) {
+ /* We refuse to override the dev->irq information. Give a warning! */
+ if (dev2->irq && dev2->irq != irq) {
+ printk("IRQ routing conflict in pirq table for device %s\n", dev2->slot_name);
+ continue;
+ }
dev2->irq = irq;
pirq_penalty[irq]++;
if (dev != dev2)
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 09aa4a08c..2c3f61f7f 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -147,6 +147,7 @@ extern char _text, _etext, _edata, _end;
extern unsigned long cpu_khz;
static int disable_x86_serial_nr __initdata = 1;
+static int disable_x86_fxsr __initdata = 0;
/*
* This is set up by the setup-routine at boot-time
@@ -518,7 +519,7 @@ void __init setup_memory_region(void)
e820.nr_map = 0;
add_memory_region(0, LOWMEMSIZE(), E820_RAM);
- add_memory_region(HIGH_MEMORY, (mem_size << 10) - HIGH_MEMORY, E820_RAM);
+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
}
printk("BIOS-provided physical RAM map:\n");
print_memory_map(who);
@@ -1400,9 +1401,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
wrmsr (0x1107, lo, hi);
set_bit(X86_FEATURE_CX8, &c->x86_capability);
- rdmsr (0x80000001, lo, hi);
- if (hi & (1<<31))
- set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
get_model_name(c);
display_cacheinfo(c);
@@ -1796,6 +1794,13 @@ int __init x86_serial_nr_setup(char *s)
}
__setup("serialnumber", x86_serial_nr_setup);
+int __init x86_fxsr_setup(char * s)
+{
+ disable_x86_fxsr = 1;
+ return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
@@ -1866,7 +1871,8 @@ static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
/* Detect Cyrix with disabled CPUID */
if ( c->x86 == 4 && test_cyrix_52div() ) {
strcpy(c->x86_vendor_id, "CyrixInstead");
- }
+ c->x86_vendor = X86_VENDOR_CYRIX;
+ } else
/* Detect NexGen with old hypercode */
if ( deep_magic_nexgen_probe() ) {
@@ -2000,11 +2006,17 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
*/
/* TSC disabled? */
-#ifdef CONFIG_TSC
+#ifndef CONFIG_X86_TSC
if ( tsc_disable )
clear_bit(X86_FEATURE_TSC, &c->x86_capability);
#endif
+ /* FXSR disabled? */
+ if (disable_x86_fxsr) {
+ clear_bit(X86_FEATURE_FXSR, &c->x86_capability);
+ clear_bit(X86_FEATURE_XMM, &c->x86_capability);
+ }
+
/* Disable the PN if appropriate */
squash_the_stupid_serial_number(c);
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 2423a8b06..7df9c875c 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -26,8 +26,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 158d183b8..7c02813af 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/highmem.h>
#ifdef CONFIG_MCA
#include <linux/mca.h>
diff --git a/arch/i386/lib/mmx.c b/arch/i386/lib/mmx.c
index 4aca07e72..dfbf7ba8e 100644
--- a/arch/i386/lib/mmx.c
+++ b/arch/i386/lib/mmx.c
@@ -2,6 +2,8 @@
#include <linux/string.h>
#include <linux/sched.h>
+#include <asm/i387.h>
+
/*
* MMX 3DNow! library helper functions
*
@@ -26,13 +28,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
void *p=to;
int i= len >> 6; /* len/64 */
- if (!(current->flags & PF_USEDFPU))
- clts();
- else
- {
- __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387));
- current->flags &= ~PF_USEDFPU;
- }
+ kernel_fpu_begin();
__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */
@@ -88,20 +84,15 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
* Now do the tail of the block
*/
__memcpy(to, from, len&63);
- stts();
+ kernel_fpu_end();
return p;
}
static void fast_clear_page(void *page)
{
int i;
- if (!(current->flags & PF_USEDFPU))
- clts();
- else
- {
- __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387));
- current->flags &= ~PF_USEDFPU;
- }
+
+ kernel_fpu_begin();
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
@@ -127,19 +118,14 @@ static void fast_clear_page(void *page)
__asm__ __volatile__ (
" sfence \n" : :
);
- stts();
+ kernel_fpu_end();
}
static void fast_copy_page(void *to, void *from)
{
int i;
- if (!(current->flags & PF_USEDFPU))
- clts();
- else
- {
- __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387));
- current->flags &= ~PF_USEDFPU;
- }
+
+ kernel_fpu_begin();
/* maybe the prefetch stuff can go before the expensive fnsave...
* but that is for later. -AV
@@ -199,7 +185,7 @@ static void fast_copy_page(void *to, void *from)
__asm__ __volatile__ (
" sfence \n" : :
);
- stts();
+ kernel_fpu_end();
}
/*
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index b2e5a1bfc..6414b508c 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -317,7 +317,7 @@ static void __init pagetable_init (void)
pgd_t *pgd, *pgd_base;
int i, j, k;
pmd_t *pmd;
- pte_t *pte;
+ pte_t *pte, *pte_base;
/*
* This can be zero as well - no problem, in that case we exit
@@ -366,11 +366,7 @@ static void __init pagetable_init (void)
continue;
}
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
-
- if (pte != pte_offset(pmd, 0))
- BUG();
+ pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
@@ -378,6 +374,10 @@ static void __init pagetable_init (void)
break;
*pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
}
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
+ if (pte_base != pte_offset(pmd, 0))
+ BUG();
+
}
}
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index f7b857b4c..25007aa43 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1957,9 +1957,6 @@ put_rusage (struct rusage32 *ru, struct rusage *r)
return err;
}
-extern asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr,
- int options, struct rusage * ru);
-
asmlinkage long
sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options,
struct rusage32 *ru)
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 9d754f9b8..e9c41780c 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -47,8 +47,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
const int frame_extra_sizes[16] = {
diff --git a/arch/mips/defconfig b/arch/mips/defconfig
index e17cb4e09..924a848c7 100644
--- a/arch/mips/defconfig
+++ b/arch/mips/defconfig
@@ -115,7 +115,6 @@ CONFIG_SYSCTL=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -249,6 +248,8 @@ CONFIG_FONT_8x16=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-atlas b/arch/mips/defconfig-atlas
index 56a5cbd26..73d74c356 100644
--- a/arch/mips/defconfig-atlas
+++ b/arch/mips/defconfig-atlas
@@ -111,7 +111,6 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -377,6 +376,8 @@ CONFIG_RTC=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-cp7000 b/arch/mips/defconfig-cp7000
index 1221ed1bf..60cb12ad0 100644
--- a/arch/mips/defconfig-cp7000
+++ b/arch/mips/defconfig-cp7000
@@ -113,7 +113,6 @@ CONFIG_SYSCTL=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -352,6 +351,8 @@ CONFIG_SERIAL_EXTENDED=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-ddb5476 b/arch/mips/defconfig-ddb5476
index 2f947f254..3c54bca48 100644
--- a/arch/mips/defconfig-ddb5476
+++ b/arch/mips/defconfig-ddb5476
@@ -115,7 +115,6 @@ CONFIG_SYSCTL=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -415,6 +414,8 @@ CONFIG_PSMOUSE=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-decstation b/arch/mips/defconfig-decstation
index ac2e8d9e4..d633b27a2 100644
--- a/arch/mips/defconfig-decstation
+++ b/arch/mips/defconfig-decstation
@@ -109,7 +109,6 @@ CONFIG_TC=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -243,6 +242,8 @@ CONFIG_RTC=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-ev64120 b/arch/mips/defconfig-ev64120
index 1a24e68e5..78f51045a 100644
--- a/arch/mips/defconfig-ev64120
+++ b/arch/mips/defconfig-ev64120
@@ -117,7 +117,6 @@ CONFIG_SYSCTL=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -351,6 +350,8 @@ CONFIG_SERIAL=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-ev96100 b/arch/mips/defconfig-ev96100
index c04d274b0..48b2bcd61 100644
--- a/arch/mips/defconfig-ev96100
+++ b/arch/mips/defconfig-ev96100
@@ -113,7 +113,6 @@ CONFIG_NET=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -342,6 +341,8 @@ CONFIG_SERIAL_CONSOLE=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-ip22 b/arch/mips/defconfig-ip22
index e17cb4e09..924a848c7 100644
--- a/arch/mips/defconfig-ip22
+++ b/arch/mips/defconfig-ip22
@@ -115,7 +115,6 @@ CONFIG_SYSCTL=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -249,6 +248,8 @@ CONFIG_FONT_8x16=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-malta b/arch/mips/defconfig-malta
index 43b031607..66125a703 100644
--- a/arch/mips/defconfig-malta
+++ b/arch/mips/defconfig-malta
@@ -110,7 +110,6 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -376,6 +375,8 @@ CONFIG_RTC=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/defconfig-rm200 b/arch/mips/defconfig-rm200
index 0964d3957..a04708b93 100644
--- a/arch/mips/defconfig-rm200
+++ b/arch/mips/defconfig-rm200
@@ -119,7 +119,6 @@ CONFIG_BLK_DEV_FD=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -266,6 +265,8 @@ CONFIG_RTC=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 022bb1f77..900e1004f 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -16,8 +16,6 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
extern asmlinkage void syscall_trace(void);
#undef DEBUG_SIG
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a1e26db87..ff7c8d5a8 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -30,8 +30,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
extern asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
extern asmlinkage int (*save_fp_context)(struct sigcontext *sc);
diff --git a/arch/mips64/defconfig b/arch/mips64/defconfig
index 1636cb6cb..8f8b780b2 100644
--- a/arch/mips64/defconfig
+++ b/arch/mips64/defconfig
@@ -91,7 +91,6 @@ CONFIG_PCI_NAMES=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -351,6 +350,8 @@ CONFIG_SERIAL_CONSOLE=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips64/defconfig-ip22 b/arch/mips64/defconfig-ip22
index d047a87c6..39518cb59 100644
--- a/arch/mips64/defconfig-ip22
+++ b/arch/mips64/defconfig-ip22
@@ -81,7 +81,6 @@ CONFIG_BINFMT_ELF=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -267,6 +266,8 @@ CONFIG_VT_CONSOLE=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips64/defconfig-ip27 b/arch/mips64/defconfig-ip27
index 1636cb6cb..8f8b780b2 100644
--- a/arch/mips64/defconfig-ip27
+++ b/arch/mips64/defconfig-ip27
@@ -91,7 +91,6 @@ CONFIG_PCI_NAMES=y
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
#
# Networking options
@@ -351,6 +350,8 @@ CONFIG_SERIAL_CONSOLE=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
diff --git a/arch/mips64/kernel/linux32.c b/arch/mips64/kernel/linux32.c
index 2e64aa465..f1a6084e5 100644
--- a/arch/mips64/kernel/linux32.c
+++ b/arch/mips64/kernel/linux32.c
@@ -585,9 +585,6 @@ put_rusage (struct rusage32 *ru, struct rusage *r)
return err;
}
-extern asmlinkage int sys_wait4(pid_t pid, unsigned int * stat_addr,
- int options, struct rusage * ru);
-
asmlinkage int
sys32_wait4(__kernel_pid_t32 pid, unsigned int * stat_addr, int options,
struct rusage32 * ru)
diff --git a/arch/mips64/kernel/signal.c b/arch/mips64/kernel/signal.c
index 15b2fc60b..f30df53a3 100644
--- a/arch/mips64/kernel/signal.c
+++ b/arch/mips64/kernel/signal.c
@@ -31,8 +31,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
extern asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
extern asmlinkage int save_fp_context(struct sigcontext *sc);
extern asmlinkage int restore_fp_context(struct sigcontext *sc);
diff --git a/arch/mips64/kernel/signal32.c b/arch/mips64/kernel/signal32.c
index 548f271b5..6dad16160 100644
--- a/arch/mips64/kernel/signal32.c
+++ b/arch/mips64/kernel/signal32.c
@@ -30,8 +30,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
extern asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs *regs);
extern asmlinkage int save_fp_context(struct sigcontext *sc);
extern asmlinkage int restore_fp_context(struct sigcontext *sc);
diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
index e1ff3c092..77b0eb279 100644
--- a/arch/ppc/8260_io/enet.c
+++ b/arch/ppc/8260_io/enet.c
@@ -633,6 +633,9 @@ int __init scc_enet_init(void)
/* Allocate some private information.
*/
cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
+ if (cep == NULL)
+ return -ENOMEM;
+
__clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index da3991ae3..c8c31bd42 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -1082,6 +1082,9 @@ int __init fec_enet_init(void)
*/
cep = (struct fcc_enet_private *)
kmalloc(sizeof(*cep), GFP_KERNEL);
+ if (cep == NULL)
+ return -ENOMEM;
+
__clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
cep->fip = fip;
diff --git a/arch/ppc/8xx_io/enet.c b/arch/ppc/8xx_io/enet.c
index b77cf32da..01eb2758b 100644
--- a/arch/ppc/8xx_io/enet.c
+++ b/arch/ppc/8xx_io/enet.c
@@ -652,7 +652,9 @@ int __init scc_enet_init(void)
/* Allocate some private information.
*/
cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
- /*memset(cep, 0, sizeof(*cep));*/
+ if (cep == NULL)
+ return -ENOMEM;
+
__clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 659576e8c..6e5e9e686 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -1459,6 +1459,9 @@ int __init fec_enet_init(void)
/* Allocate some private information.
*/
fep = (struct fec_enet_private *)kmalloc(sizeof(*fep), GFP_KERNEL);
+ if (fep == NULL)
+ return -ENOMEM;
+
__clear_user(fep,sizeof(*fep));
/* Create an Ethernet device instance.
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index d4172ed4a..488766003 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -145,9 +145,6 @@ endif
clean_config:
rm -f .config arch/ppc/defconfig
-gemini_config: clean_config
- cp -f arch/ppc/configs/gemini_defconfig arch/ppc/defconfig
-
pmac_config: clean_config
cp -f arch/ppc/configs/pmac_defconfig arch/ppc/defconfig
diff --git a/arch/ppc/boot/Makefile b/arch/ppc/boot/Makefile
index 26961e8ea..32c2926a1 100644
--- a/arch/ppc/boot/Makefile
+++ b/arch/ppc/boot/Makefile
@@ -25,22 +25,12 @@ ZSZ = 0
IOFF = 0
ISZ = 0
-ifeq ($(CONFIG_ALL_PPC),y)
-CONFIG_PREP=y
-endif
-
ifeq ($(CONFIG_SMP),y)
TFTPIMAGE=/tftpboot/zImage.prep.smp$(MSIZE)
else
TFTPIMAGE=/tftpboot/zImage.prep$(MSIZE)
endif
-ifeq ($(CONFIG_SMP),y)
-TFTPSIMAGE=/tftpboot/sImage.smp
-else
-TFTPSIMAGE=/tftpboot/sImage
-endif
-
ifeq ($(CONFIG_PPC64BRIDGE),y)
MSIZE=.64
else
@@ -81,8 +71,8 @@ zvmlinux.initrd: zvmlinux
zvmlinux.initrd.tmp $@
rm zvmlinux.initrd.tmp
-zImage: zvmlinux mkprep sImage
-ifdef CONFIG_PREP
+zImage: zvmlinux mkprep
+ifdef CONFIG_ALL_PPC
./mkprep -pbp zvmlinux zImage
endif
ifdef CONFIG_APUS
@@ -90,13 +80,8 @@ ifdef CONFIG_APUS
gzip $(GZIP_FLAGS) vmapus
endif
-sImage: ../../../vmlinux
-ifdef CONFIG_GEMINI
- $(OBJCOPY) -I elf32-powerpc -O binary ../../../vmlinux sImage
-endif
-
zImage.initrd: zvmlinux.initrd mkprep
-ifdef CONFIG_PREP
+ifdef CONFIG_ALL_PPC
./mkprep -pbp zvmlinux.initrd zImage.initrd
endif
@@ -128,18 +113,15 @@ mkprep : mkprep.c
$(HOSTCC) -o mkprep mkprep.c
znetboot : zImage
-ifdef CONFIG_PREP
+ifdef CONFIG_ALL_PPC
cp zImage $(TFTPIMAGE)
endif
-ifdef CONFIG_GEMINI
- cp sImage $(TFTPSIMAGE)
-endif
znetboot.initrd : zImage.initrd
cp zImage.initrd $(TFTPIMAGE)
clean:
- rm -f vmlinux* zvmlinux* mkprep zImage* sImage*
+ rm -f vmlinux* zvmlinux* mkprep zImage*
fastdep:
$(TOPDIR)/scripts/mkdep *.[Sch] > .depend
diff --git a/arch/ppc/boot/vreset.c b/arch/ppc/boot/vreset.c
index c01361dc6..dfabd72e7 100644
--- a/arch/ppc/boot/vreset.c
+++ b/arch/ppc/boot/vreset.c
@@ -19,12 +19,16 @@
*/
#include "iso_font.h"
-#include <linux/delay.h>
extern char *vidmem;
extern int lines, cols;
-/* estimate for delay */
-unsigned long loops_per_sec = 50000000;;
+
+static void mdelay(int ms)
+{
+ for (; ms > 0; --ms)
+ udelay(1000);
+}
+
/*
* VGA Register
*/
diff --git a/arch/ppc/chrpboot/Makefile b/arch/ppc/chrpboot/Makefile
index f7ea3a3fa..9dbb7909d 100644
--- a/arch/ppc/chrpboot/Makefile
+++ b/arch/ppc/chrpboot/Makefile
@@ -23,11 +23,6 @@ LD_ARGS = -Ttext 0x00400000
OBJS = crt0.o start.o main.o misc.o ../coffboot/string.o ../coffboot/zlib.o image.o
LIBS = $(TOPDIR)/lib/lib.a
-ifeq ($(CONFIG_ALL_PPC),y)
-# yes, we want to build chrp stuff
-CONFIG_CHRP = y
-endif
-
ifeq ($(CONFIG_SMP),y)
TFTPIMAGE=/tftpboot/zImage.chrp.smp$(MSIZE)
else
@@ -37,10 +32,10 @@ endif
all: $(TOPDIR)/zImage
#
-# Only build anything here if we're configured for CHRP
+# Only build anything here if we're configured for ALL_PPC
# -- cort
#
-ifeq ($(CONFIG_CHRP),y)
+ifeq ($(CONFIG_ALL_PPC),y)
znetboot: zImage
cp zImage $(TFTPIMAGE)
@@ -96,6 +91,7 @@ vmlinux.coff.initrd:
clean:
rm -f piggyback note addnote $(OBJS) zImage
+ rm -f zImage.rs6k zImage.initrd zImage.initrd.rs6k
fastdep:
$(TOPDIR)/scripts/mkdep *.[Sch] > .depend
diff --git a/arch/ppc/coffboot/Makefile b/arch/ppc/coffboot/Makefile
index 494026e42..2835e1f89 100644
--- a/arch/ppc/coffboot/Makefile
+++ b/arch/ppc/coffboot/Makefile
@@ -20,18 +20,13 @@ else
MSIZE=
endif
-ifeq ($(CONFIG_ALL_PPC),y)
-# yes, we want to build pmac stuff
-CONFIG_PMAC = y
-endif
-
ifeq ($(CONFIG_SMP),y)
TFTPIMAGE=/tftpboot/zImage.pmac.smp$(MSIZE)
else
TFTPIMAGE=/tftpboot/zImage.pmac$(MSIZE)
endif
-ifeq ($(CONFIG_PMAC),y)
+ifeq ($(CONFIG_ALL_PPC),y)
chrpmain.o: chrpmain.c
$(CC) $(CFLAGS) -DSYSMAP_OFFSET=0 -DSYSMAP_SIZE=0 -c chrpmain.c
diff --git a/arch/ppc/coffboot/coffmain.c b/arch/ppc/coffboot/coffmain.c
index b76ba5976..49e769f22 100644
--- a/arch/ppc/coffboot/coffmain.c
+++ b/arch/ppc/coffboot/coffmain.c
@@ -22,11 +22,18 @@ void gunzip(void *, int, unsigned char *, int *);
#define get_32be(x) (*(unsigned *)(x))
#define RAM_START 0xc0000000
-#define PROG_START RAM_START
#define RAM_END (RAM_START + 0x800000) /* only 8M mapped with BATs */
+#define PROG_START RAM_START
+#define PROG_SIZE 0x00400000
+
+#define SCRATCH_SIZE (128 << 10)
+
char *avail_ram;
-char *end_avail;
+char *begin_avail, *end_avail;
+char *avail_high;
+unsigned int heap_use;
+unsigned int heap_max;
extern char _start[], _end[];
extern char image_data[];
@@ -34,6 +41,7 @@ extern int image_len;
extern char initrd_data[];
extern int initrd_len;
+char heap[SCRATCH_SIZE];
boot(int a1, int a2, void *prom)
{
@@ -58,16 +66,18 @@ boot(int a1, int a2, void *prom)
im = image_data;
len = image_len;
/* claim 3MB starting at 0 */
- claim(0, 3 << 20, 0);
+ claim(0, PROG_SIZE, 0);
dst = (void *) RAM_START;
if (im[0] == 0x1f && im[1] == 0x8b) {
- /* claim 512kB for scratch space */
- avail_ram = claim(0, 512 << 10, 0x10) + RAM_START;
- end_avail = avail_ram + (512 << 10);
- printf("avail_ram = %x\n", avail_ram);
+ /* claim some memory for scratch space */
+ begin_avail = avail_high = avail_ram = heap;
+ end_avail = heap + sizeof(heap);
+ printf("heap at 0x%x\n", avail_ram);
printf("gunzipping (0x%x <- 0x%x:0x%0x)...", dst, im, im+len);
- gunzip(dst, 3 << 20, im, &len);
+ gunzip(dst, PROG_SIZE, im, &len);
printf("done %u bytes\n", len);
+ printf("%u bytes of heap consumed, max in use %u\n",
+ avail_high - begin_avail, heap_max);
} else {
memmove(dst, im, len);
}
@@ -78,9 +88,6 @@ boot(int a1, int a2, void *prom)
sa = (unsigned long)PROG_START;
printf("start address = 0x%x\n", sa);
-#if 0
- pause();
-#endif
(*(void (*)())sa)(a1, a2, prom);
printf("returned?\n");
@@ -114,13 +121,33 @@ void make_bi_recs(unsigned long addr)
rec = (struct bi_record *)((unsigned long)rec + rec->size);
}
+struct memchunk {
+ unsigned int size;
+ struct memchunk *next;
+};
+
+static struct memchunk *freechunks;
+
void *zalloc(void *x, unsigned items, unsigned size)
{
- void *p = avail_ram;
+ void *p;
+ struct memchunk **mpp, *mp;
size *= items;
size = (size + 7) & -8;
+ heap_use += size;
+ if (heap_use > heap_max)
+ heap_max = heap_use;
+ for (mpp = &freechunks; (mp = *mpp) != 0; mpp = &mp->next) {
+ if (mp->size == size) {
+ *mpp = mp->next;
+ return mp;
+ }
+ }
+ p = avail_ram;
avail_ram += size;
+ if (avail_ram > avail_high)
+ avail_high = avail_ram;
if (avail_ram > end_avail) {
printf("oops... out of memory\n");
pause();
@@ -130,6 +157,17 @@ void *zalloc(void *x, unsigned items, unsigned size)
void zfree(void *x, void *addr, unsigned nb)
{
+ struct memchunk *mp = addr;
+
+ nb = (nb + 7) & -8;
+ heap_use -= nb;
+ if (avail_ram == addr + nb) {
+ avail_ram = addr;
+ return;
+ }
+ mp->size = nb;
+ mp->next = freechunks;
+ freechunks = mp;
}
#define HEAD_CRC 2
diff --git a/arch/ppc/config.in b/arch/ppc/config.in
index 34606f99a..3b4c2c141 100644
--- a/arch/ppc/config.in
+++ b/arch/ppc/config.in
@@ -24,21 +24,24 @@ mainmenu_option next_comment
comment 'Platform support'
define_bool CONFIG_PPC y
choice 'Processor Type' \
- "6xx/7xx/7400 CONFIG_6xx \
+ "6xx/7xx/74xx/8260 CONFIG_6xx \
4xx CONFIG_4xx \
POWER3 CONFIG_POWER3 \
POWER4 CONFIG_POWER4 \
- 8260 CONFIG_8260 \
8xx CONFIG_8xx" 6xx
+if [ "$CONFIG_6xx" = "y" ]; then
+ bool 'MPC8260 CPM Support' CONFIG_8260
+fi
+
if [ "$CONFIG_POWER3" = "y" -o "$CONFIG_POWER4" = "y" ]; then
define_bool CONFIG_PPC64BRIDGE y
define_bool CONFIG_ALL_PPC y
fi
-
+
if [ "$CONFIG_8260" = "y" ]; then
- define_bool CONFIG_6xx y
define_bool CONFIG_SERIAL_CONSOLE y
+ bool 'Support for EST8260' CONFIG_EST8260
fi
if [ "$CONFIG_4xx" = "y" ]; then
@@ -59,25 +62,32 @@ if [ "$CONFIG_8xx" = "y" ]; then
TQM860 CONFIG_TQM860 \
MBX CONFIG_MBX \
WinCept CONFIG_WINCEPT" RPX-Lite
+
+ if [ "$CONFIG_TQM8xxL" = "y" ]; then
+ bool 'FPS850 Mainboard' CONFIG_FPS850
+ fi
fi
-if [ "$CONFIG_6xx" = "y" ]; then
+
+if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ]; then
choice 'Machine Type' \
"PowerMac/PReP/MTX/CHRP CONFIG_ALL_PPC \
- Gemini CONFIG_GEMINI \
- EST8260 CONFIG_EST8260 \
APUS CONFIG_APUS" PowerMac/PReP/MTX/CHRP
fi
+if [ "$CONFIG_PPC64BRIDGE" != "y" ]; then
+ bool 'Workarounds for PPC601 bugs' CONFIG_PPC601_SYNC_FIX
+fi
+
if [ "$CONFIG_8xx" = "y" -o "$CONFIG_8260" = "y" ]; then
define_bool CONFIG_ALL_PPC n
fi
-if [ "$CONFIG_TQM8xxL" = "y" ]; then
- bool 'FPS850 Mainboard' CONFIG_FPS850
+bool 'Symmetric multi-processing support' CONFIG_SMP
+if [ "$CONFIG_SMP" = "y" ]; then
+ bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
fi
-bool 'Symmetric multi-processing support' CONFIG_SMP
-if [ "$CONFIG_6xx" = "y" ];then
+if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
bool 'AltiVec Support' CONFIG_ALTIVEC
fi
@@ -106,14 +116,14 @@ define_bool CONFIG_MCA n
if [ "$CONFIG_APUS" = "y" -o "$CONFIG_4xx" = "y" -o \
"$CONFIG_8260" = "y" ]; then
- define_bool CONFIG_PCI n
+ define_bool CONFIG_PCI n
else
- if [ "$CONFIG_8xx" = "y" ]; then
- bool 'QSpan PCI' CONFIG_PCI_QSPAN
- define_bool CONFIG_PCI $CONFIG_PCI_QSPAN
- else
- define_bool CONFIG_PCI y
- fi
+ if [ "$CONFIG_8xx" = "y" ]; then
+ bool 'QSpan PCI' CONFIG_PCI_QSPAN
+ define_bool CONFIG_PCI $CONFIG_PCI_QSPAN
+ else
+ define_bool CONFIG_PCI y
+ fi
fi
bool 'Networking support' CONFIG_NET
@@ -123,40 +133,34 @@ bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
# only elf supported, a.out is not -- Cort
if [ "$CONFIG_PROC_FS" = "y" ]; then
- define_bool CONFIG_KCORE_ELF y
+ define_bool CONFIG_KCORE_ELF y
fi
define_bool CONFIG_BINFMT_ELF y
define_bool CONFIG_KERNEL_ELF y
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
source drivers/pci/Config.in
-source drivers/zorro/Config.in
bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
if [ "$CONFIG_HOTPLUG" = "y" ]; then
- source drivers/pcmcia/Config.in
+ source drivers/pcmcia/Config.in
else
- define_bool CONFIG_PCMCIA n
+ define_bool CONFIG_PCMCIA n
fi
source drivers/parport/Config.in
if [ "$CONFIG_4xx" != "y" -a "$CONFIG_8xx" != "y" ]; then
- bool 'Support for VGA Console' CONFIG_VGA_CONSOLE
- bool 'Support for frame buffer devices' CONFIG_FB
- if [ "$CONFIG_FB" = "y" ]; then
- bool 'Backward compatibility mode for Xpmac' CONFIG_FB_COMPAT_XPMAC
- fi
-
tristate 'Support for /dev/rtc' CONFIG_PPC_RTC
- bool 'Support for Open Firmware device tree in /proc' CONFIG_PROC_DEVICETREE
- bool 'Support for early boot text console (BootX only)' CONFIG_BOOTX_TEXT
- bool 'Support for Motorola Hot Swap' CONFIG_MOTOROLA_HOTSWAP
fi
-if [ "$CONFIG_PREP" = "y" -o "$CONFIG_ALL_PPC" = "y" ]; then
- bool 'PReP bootloader kernel arguments' CONFIG_CMDLINE_BOOL
+if [ "$CONFIG_ALL_PPC" = "y" ]; then
+ bool 'Support for Open Firmware device tree in /proc' CONFIG_PROC_DEVICETREE
+ bool 'Support for RTAS (RunTime Abstraction Services) in /proc' CONFIG_PPC_RTAS
+ bool 'Support for early boot text console (BootX or OpenFirmware only)' CONFIG_BOOTX_TEXT
+ bool 'Support for PReP Residual Data' CONFIG_PREP_RESIDUAL
+ bool 'Default bootloader kernel arguments' CONFIG_CMDLINE_BOOL
if [ "$CONFIG_CMDLINE_BOOL" = "y" ] ; then
string 'Initial kernel command string' CONFIG_CMDLINE "console=ttyS0,9600 console=tty0 root=/dev/sda2"
fi
@@ -181,6 +185,7 @@ if [ "$CONFIG_APUS" = "y" ]; then
fi
bool 'Use power LED as a heartbeat' CONFIG_HEARTBEAT
bool '/proc/hardware support' CONFIG_PROC_HARDWARE
+ source drivers/zorro/Config.in
fi
endmenu
@@ -254,7 +259,13 @@ endmenu
mainmenu_option next_comment
comment 'Console drivers'
-source drivers/video/Config.in
+if [ "$CONFIG_4xx" != "y" -a "$CONFIG_8xx" != "y" ]; then
+ bool 'Support for VGA Console' CONFIG_VGA_CONSOLE
+fi
+ source drivers/video/Config.in
+if [ "$CONFIG_FB" = "y" -a "$CONFIG_ALL_PPC" = "y" ]; then
+ bool 'Backward compatibility mode for Xpmac' CONFIG_FB_COMPAT_XPMAC
+fi
endmenu
source drivers/input/Config.in
@@ -267,29 +278,26 @@ if [ "$CONFIG_ALL_PPC" = "y" ]; then
bool 'Support for CUDA based PowerMacs' CONFIG_ADB_CUDA
bool 'Support for PMU based PowerMacs' CONFIG_ADB_PMU
if [ "$CONFIG_ADB_PMU" = "y" ]; then
- bool ' Power management support for PowerBooks' CONFIG_PMAC_PBOOK
- # made a separate option since backlight may end up beeing used
- # on non-powerbook machines (but only on PMU based ones AFAIK)
- bool ' Backlight control for LCD screens' CONFIG_PMAC_BACKLIGHT
+ bool ' Power management support for PowerBooks' CONFIG_PMAC_PBOOK
+ # made a separate option since backlight may end up beeing used
+ # on non-powerbook machines (but only on PMU based ones AFAIK)
+ bool ' Backlight control for LCD screens' CONFIG_PMAC_BACKLIGHT
fi
bool 'Support for PowerMac floppy' CONFIG_MAC_FLOPPY
tristate 'Support for PowerMac serial ports' CONFIG_MAC_SERIAL
if [ "$CONFIG_MAC_SERIAL" = "y" ]; then
- bool ' Support for console on serial port' CONFIG_SERIAL_CONSOLE
+ bool ' Support for console on serial port' CONFIG_SERIAL_CONSOLE
fi
bool 'Apple Desktop Bus (ADB) support' CONFIG_ADB
if [ "$CONFIG_ADB" = "y" ]; then
- bool ' Include MacIO (CHRP) ADB driver' CONFIG_ADB_MACIO
- fi
-fi
-if [ "$CONFIG_ADB" = "y" ]; then
- dep_bool ' Use input layer for ADB devices' CONFIG_INPUT_ADBHID $CONFIG_INPUT
- if [ "$CONFIG_INPUT_ADBHID" = "y" ]; then
- define_bool CONFIG_MAC_HID y
- bool ' Support for ADB raw keycodes' CONFIG_MAC_ADBKEYCODES
- bool ' Support for mouse button 2+3 emulation' CONFIG_MAC_EMUMOUSEBTN
- else
- bool ' Support for ADB keyboard (old driver)' CONFIG_ADB_KEYBOARD
+ bool ' Include MacIO (CHRP) ADB driver' CONFIG_ADB_MACIO
+ dep_bool ' Use input layer for ADB devices' CONFIG_INPUT_ADBHID $CONFIG_INPUT
+ if [ "$CONFIG_INPUT_ADBHID" = "y" ]; then
+ bool ' Support for ADB raw keycodes' CONFIG_MAC_ADBKEYCODES
+ bool ' Support for mouse button 2+3 emulation' CONFIG_MAC_EMUMOUSEBTN
+ else
+ bool ' Support for ADB keyboard (old driver)' CONFIG_ADB_KEYBOARD
+ fi
fi
fi
endmenu
diff --git a/arch/ppc/configs/apus_defconfig b/arch/ppc/configs/apus_defconfig
index ed930fff2..69e25a9b3 100644
--- a/arch/ppc/configs/apus_defconfig
+++ b/arch/ppc/configs/apus_defconfig
@@ -11,9 +11,7 @@ CONFIG_EXPERIMENTAL=y
#
# Loadable module support
#
-CONFIG_MODULES=y
-# CONFIG_MODVERSIONS is not set
-CONFIG_KMOD=y
+# CONFIG_MODULES is not set
#
# Platform support
@@ -23,14 +21,14 @@ CONFIG_6xx=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
# CONFIG_8xx is not set
-CONFIG_ALL_PPC=y
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_APUS is not set
+# CONFIG_8260 is not set
+# CONFIG_ALL_PPC is not set
+CONFIG_APUS=y
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_SMP is not set
# CONFIG_ALTIVEC is not set
+CONFIG_MACH_SPECIFIC=y
#
# General setup
@@ -38,8 +36,10 @@ CONFIG_ALL_PPC=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
-CONFIG_PCI=y
+# CONFIG_MCA is not set
+# CONFIG_PCI is not set
CONFIG_NET=y
CONFIG_SYSCTL=y
CONFIG_SYSVIPC=y
@@ -47,23 +47,35 @@ CONFIG_SYSVIPC=y
CONFIG_KCORE_ELF=y
CONFIG_BINFMT_ELF=y
CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_MISC=m
-# CONFIG_PCI_NAMES is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_HOTPLUG=y
+
+#
+# PCMCIA/CardBus support
+#
# CONFIG_PCMCIA is not set
#
# Parallel port support
#
# CONFIG_PARPORT is not set
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_PPC_RTC=y
+CONFIG_FB_CONSOLE=y
+CONFIG_AMIGA=y
+CONFIG_ZORRO=y
+CONFIG_AMIGAMOUSE=y
+CONFIG_ABSTRACT_CONSOLE=y
CONFIG_FB=y
-CONFIG_FB_COMPAT_XPMAC=y
-# CONFIG_PPC_RTC is not set
-CONFIG_PROC_DEVICETREE=y
-# CONFIG_BOOTX_TEXT is not set
-# CONFIG_MOTOROLA_HOTSWAP is not set
-# CONFIG_CMDLINE_BOOL is not set
+CONFIG_AMIGA_BUILTIN_SERIAL=y
+# CONFIG_M68K_PRINTER is not set
+CONFIG_GVPIOEXT=y
+# CONFIG_GVPIOEXT_LP is not set
+CONFIG_GVPIOEXT_PLIP=y
+CONFIG_MULTIFACE_III_TTY=y
+# CONFIG_SERIAL_CONSOLE is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_ZORRO_NAMES=y
#
# Memory Technology Devices (MTD)
@@ -80,12 +92,14 @@ CONFIG_PROC_DEVICETREE=y
# Block devices
#
# CONFIG_BLK_DEV_FD is not set
+CONFIG_AMIGA_FLOPPY=y
+CONFIG_AMIGA_Z2RAM=y
# CONFIG_BLK_DEV_XD is not set
# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -106,21 +120,17 @@ CONFIG_BLK_DEV_INITRD=y
#
# Networking options
#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_NETLINK=y
-# CONFIG_RTNETLINK is not set
-# CONFIG_NETLINK_DEV is not set
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK is not set
# CONFIG_NETFILTER is not set
# CONFIG_FILTER is not set
CONFIG_UNIX=y
CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
+# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
# CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_IPV6 is not set
@@ -131,7 +141,7 @@ CONFIG_IP_MULTICAST=y
#
#
# CONFIG_IPX is not set
-CONFIG_ATALK=m
+# CONFIG_ATALK is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
@@ -151,9 +161,48 @@ CONFIG_ATALK=m
#
# ATA/IDE/MFM/RLL support
#
-# CONFIG_IDE is not set
-# CONFIG_BLK_DEV_IDE_MODES is not set
+CONFIG_IDE=y
+
+#
+# IDE, ATA and ATAPI Block devices
+#
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_HD_IDE is not set
# CONFIG_BLK_DEV_HD is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDEDISK_VENDOR is not set
+# CONFIG_BLK_DEV_IDEDISK_FUJITSU is not set
+# CONFIG_BLK_DEV_IDEDISK_IBM is not set
+# CONFIG_BLK_DEV_IDEDISK_MAXTOR is not set
+# CONFIG_BLK_DEV_IDEDISK_QUANTUM is not set
+# CONFIG_BLK_DEV_IDEDISK_SEAGATE is not set
+# CONFIG_BLK_DEV_IDEDISK_WD is not set
+# CONFIG_BLK_DEV_COMMERIAL is not set
+# CONFIG_BLK_DEV_TIVO is not set
+# CONFIG_BLK_DEV_IDECS is not set
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_CMD640_ENHANCED is not set
+# CONFIG_BLK_DEV_ISAPNP is not set
+CONFIG_BLK_DEV_GAYLE=y
+CONFIG_BLK_DEV_IDEDOUBLER=y
+CONFIG_BLK_DEV_BUDDHA=y
+# CONFIG_IDE_CHIPSETS is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_DMA_NONPCI is not set
+# CONFIG_BLK_DEV_IDE_MODES is not set
#
# SCSI support
@@ -165,7 +214,8 @@ CONFIG_SCSI=y
#
CONFIG_BLK_DEV_SD=y
CONFIG_SD_EXTRA_DEVS=40
-CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SR_EXTRA_DEVS=2
@@ -176,29 +226,23 @@ CONFIG_SR_EXTRA_DEVS=2
#
# CONFIG_SCSI_DEBUG_QUEUES is not set
# CONFIG_SCSI_MULTI_LUN is not set
-CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
#
# SCSI low-level drivers
#
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_7000FASST is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AHA152X is not set
# CONFIG_SCSI_AHA1542 is not set
# CONFIG_SCSI_AHA1740 is not set
-CONFIG_SCSI_AIC7XXX=y
-# CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT is not set
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-CONFIG_AIC7XXX_PROC_STATS=y
-CONFIG_AIC7XXX_RESET_DELAY=15
+# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_AM53C974 is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
@@ -211,30 +255,18 @@ CONFIG_AIC7XXX_RESET_DELAY=15
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_NCR53C406A is not set
# CONFIG_SCSI_NCR53C7xx is not set
-# CONFIG_SCSI_NCR53C8XX is not set
-# CONFIG_SCSI_SYM53C8XX is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_PSI240I is not set
# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_SIM710 is not set
# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
# CONFIG_SCSI_U14_34F is not set
# CONFIG_SCSI_DEBUG is not set
-CONFIG_SCSI_MESH=y
-CONFIG_SCSI_MESH_SYNC_RATE=5
-CONFIG_SCSI_MAC53C94=y
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
+# CONFIG_SCSI_MESH is not set
+# CONFIG_SCSI_MAC53C94 is not set
#
# Network device support
@@ -245,34 +277,30 @@ CONFIG_NETDEVICES=y
# ARCnet devices
#
# CONFIG_ARCNET is not set
-
-#
-# Appletalk devices
-#
-# CONFIG_APPLETALK is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
# CONFIG_NET_SB1000 is not set
#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-CONFIG_MACE=y
-CONFIG_BMAC=y
+# CONFIG_MACE is not set
+# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
+CONFIG_ARIADNE=y
+# CONFIG_NE2K_ZORRO is not set
+CONFIG_A2065=y
+CONFIG_HYDRA=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
# CONFIG_NET_VENDOR_SMC is not set
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -287,13 +315,16 @@ CONFIG_BMAC=y
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PPP=y
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_ASYNC is not set
-# CONFIG_PPP_SYNC_TTY is not set
-# CONFIG_PPP_DEFLATE is not set
-# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
+CONFIG_SLIP=y
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
#
# Wireless LAN (non-hamradio)
@@ -336,44 +367,47 @@ CONFIG_PPP=y
#
# Console drivers
#
+# CONFIG_VGA_CONSOLE is not set
#
# Frame-buffer support
#
CONFIG_FB=y
CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_CLGEN is not set
-# CONFIG_FB_PM2 is not set
+CONFIG_FB_CLGEN=y
+CONFIG_FB_PM2=y
+CONFIG_FB_PM2_CVPPC=y
# CONFIG_FB_CYBER2000 is not set
-CONFIG_FB_OF=y
-CONFIG_FB_CONTROL=y
-CONFIG_FB_PLATINUM=y
-CONFIG_FB_VALKYRIE=y
-CONFIG_FB_CT65550=y
-CONFIG_FB_IMSTT=y
+CONFIG_FB_AMIGA=y
+CONFIG_FB_AMIGA_OCS=y
+CONFIG_FB_AMIGA_ECS=y
+CONFIG_FB_AMIGA_AGA=y
+CONFIG_FB_CYBER=y
+CONFIG_FB_VIRGE=y
+CONFIG_FB_RETINAZ3=y
+# CONFIG_FB_FM2 is not set
+# CONFIG_FB_OF is not set
+# CONFIG_FB_CONTROL is not set
+# CONFIG_FB_PLATINUM is not set
+# CONFIG_FB_VALKYRIE is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_IMSTT is not set
# CONFIG_FB_S3TRIO is not set
# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_MATROX is not set
-CONFIG_FB_ATY=y
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_SIS is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FBCON_ADVANCED is not set
+CONFIG_FBCON_MFB=y
CONFIG_FBCON_CFB8=y
CONFIG_FBCON_CFB16=y
CONFIG_FBCON_CFB24=y
CONFIG_FBCON_CFB32=y
-# CONFIG_FBCON_FONTWIDTH8_ONLY is not set
-CONFIG_FBCON_FONTS=y
-# CONFIG_FONT_8x8 is not set
+CONFIG_FBCON_AFB=y
+CONFIG_FBCON_ILBM=y
+CONFIG_FBCON_FONTWIDTH8_ONLY=y
+# CONFIG_FBCON_FONTS is not set
+CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
-CONFIG_FONT_SUN8x16=y
-CONFIG_FONT_SUN12x22=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_PEARL_8x8=y
#
# Input core support
@@ -383,23 +417,17 @@ CONFIG_FONT_SUN12x22=y
#
# Macintosh device drivers
#
-# CONFIG_ADB_CUDA is not set
-# CONFIG_ADB_PMU is not set
-CONFIG_MAC_FLOPPY=y
-CONFIG_MAC_SERIAL=y
-# CONFIG_SERIAL_CONSOLE is not set
-# CONFIG_ADB is not set
#
# Character devices
#
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
-CONFIG_SERIAL=m
+CONFIG_SERIAL=y
+# CONFIG_SERIAL_CONSOLE is not set
# CONFIG_SERIAL_EXTENDED is not set
# CONFIG_SERIAL_NONSTANDARD is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
+# CONFIG_UNIX98_PTYS is not set
#
# I2C support
@@ -427,7 +455,7 @@ CONFIG_UNIX98_PTY_COUNT=256
#
# CONFIG_WATCHDOG is not set
# CONFIG_INTEL_RNG is not set
-CONFIG_NVRAM=y
+# CONFIG_NVRAM is not set
# CONFIG_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
@@ -449,24 +477,24 @@ CONFIG_NVRAM=y
# File systems
#
# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
+# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
-# CONFIG_AFFS_FS is not set
+CONFIG_AFFS_FS=y
CONFIG_HFS_FS=y
# CONFIG_BFS_FS is not set
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
# CONFIG_UMSDOS_FS is not set
-CONFIG_VFAT_FS=m
+CONFIG_VFAT_FS=y
# CONFIG_EFS_FS is not set
# CONFIG_JFFS_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_RAMFS is not set
CONFIG_ISO9660_FS=y
# CONFIG_JOLIET is not set
-# CONFIG_MINIX_FS is not set
+CONFIG_MINIX_FS=y
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS_RW is not set
# CONFIG_HPFS_FS is not set
@@ -474,7 +502,7 @@ CONFIG_PROC_FS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVFS_MOUNT is not set
# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS=y
+# CONFIG_DEVPTS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_QNX4FS_RW is not set
# CONFIG_ROMFS_FS is not set
@@ -493,7 +521,7 @@ CONFIG_EXT2_FS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
# CONFIG_ROOT_NFS is not set
-CONFIG_NFSD=y
+# CONFIG_NFSD is not set
# CONFIG_NFSD_V3 is not set
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
@@ -505,8 +533,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -514,14 +540,15 @@ CONFIG_LOCKD=y
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+# CONFIG_SMB_NLS is not set
CONFIG_NLS=y
#
# Native Language Support
#
CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -541,7 +568,7 @@ CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -558,23 +585,7 @@ CONFIG_NLS_CODEPAGE_437=y
#
# Sound
#
-CONFIG_SOUND=y
-# CONFIG_DMASOUND_AWACS is not set
-# CONFIG_SOUND_CMPCI is not set
-# CONFIG_SOUND_EMU10K1 is not set
-# CONFIG_SOUND_FUSION is not set
-# CONFIG_SOUND_CS4281 is not set
-# CONFIG_SOUND_ES1370 is not set
-# CONFIG_SOUND_ES1371 is not set
-# CONFIG_SOUND_ESSSOLO1 is not set
-# CONFIG_SOUND_MAESTRO is not set
-# CONFIG_SOUND_SONICVIBES is not set
-# CONFIG_SOUND_TRIDENT is not set
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
-# CONFIG_SOUND_VIA82CXXX is not set
-# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_TVMIXER is not set
+# CONFIG_SOUND is not set
#
# USB support
diff --git a/arch/ppc/configs/bseip_defconfig b/arch/ppc/configs/bseip_defconfig
index f0ff611f4..82b7174bd 100644
--- a/arch/ppc/configs/bseip_defconfig
+++ b/arch/ppc/configs/bseip_defconfig
@@ -21,7 +21,6 @@ CONFIG_PPC=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
CONFIG_8xx=y
CONFIG_SERIAL_CONSOLE=y
# CONFIG_RPXLITE is not set
@@ -32,6 +31,7 @@ CONFIG_BSEIP=y
# CONFIG_TQM860 is not set
# CONFIG_MBX is not set
# CONFIG_WINCEPT is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_ALL_PPC is not set
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
@@ -43,7 +43,9 @@ CONFIG_MATH_EMULATION=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI_QSPAN is not set
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -182,7 +184,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -190,7 +191,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -377,8 +377,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -395,6 +393,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/common_defconfig b/arch/ppc/configs/common_defconfig
index 5574ecbdb..210887de7 100644
--- a/arch/ppc/configs/common_defconfig
+++ b/arch/ppc/configs/common_defconfig
@@ -23,12 +23,11 @@ CONFIG_6xx=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
# CONFIG_8xx is not set
+# CONFIG_8260 is not set
CONFIG_ALL_PPC=y
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
# CONFIG_APUS is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_SMP is not set
CONFIG_ALTIVEC=y
@@ -36,9 +35,11 @@ CONFIG_ALTIVEC=y
# General setup
#
# CONFIG_HIGHMEM is not set
-# CONFIG_MOL is not set
+CONFIG_MOL=y
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_SYSCTL=y
@@ -49,21 +50,24 @@ CONFIG_BINFMT_ELF=y
CONFIG_KERNEL_ELF=y
CONFIG_BINFMT_MISC=m
CONFIG_PCI_NAMES=y
-# CONFIG_HOTPLUG is not set
+CONFIG_HOTPLUG=y
+
+#
+# PCMCIA/CardBus support
+#
# CONFIG_PCMCIA is not set
#
# Parallel port support
#
# CONFIG_PARPORT is not set
-CONFIG_VGA_CONSOLE=y
-CONFIG_FB=y
-CONFIG_FB_COMPAT_XPMAC=y
CONFIG_PPC_RTC=y
CONFIG_PROC_DEVICETREE=y
+CONFIG_PPC_RTAS=y
CONFIG_BOOTX_TEXT=y
-# CONFIG_MOTOROLA_HOTSWAP is not set
-# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PREP_RESIDUAL=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 console=tty0 root=/dev/sda2"
#
# Memory Technology Devices (MTD)
@@ -79,7 +83,7 @@ CONFIG_BOOTX_TEXT=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_FD=m
# CONFIG_BLK_DEV_XD is not set
# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
@@ -111,7 +115,8 @@ CONFIG_PACKET=y
CONFIG_NETLINK=y
# CONFIG_RTNETLINK is not set
# CONFIG_NETLINK_DEV is not set
-# CONFIG_NETFILTER is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
# CONFIG_FILTER is not set
CONFIG_UNIX=y
CONFIG_INET=y
@@ -123,6 +128,34 @@ CONFIG_IP_MULTICAST=y
# CONFIG_IP_MROUTE is not set
# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_UNCLEAN=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_MIRROR=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_NAT_NEEDED=y
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
# CONFIG_IPV6 is not set
# CONFIG_KHTTPD is not set
# CONFIG_ATM is not set
@@ -239,6 +272,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SR_EXTRA_DEVS=2
@@ -344,7 +378,6 @@ CONFIG_NET_ETHERNET=y
CONFIG_MACE=y
CONFIG_BMAC=y
CONFIG_GMAC=y
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -357,27 +390,28 @@ CONFIG_GMAC=y
CONFIG_NET_PCI=y
CONFIG_PCNET32=y
# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
-CONFIG_DE4X5=y
-# CONFIG_TULIP is not set
+CONFIG_TULIP=y
+CONFIG_DE4X5=m
# CONFIG_DGRS is not set
# CONFIG_DM9102 is not set
# CONFIG_EEPRO100 is not set
+# CONFIG_EEPRO100_PM is not set
# CONFIG_LNE390 is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_NE3210 is not set
# CONFIG_ES3210 is not set
-# CONFIG_RTL8129 is not set
# CONFIG_8139TOO is not set
+# CONFIG_RTL8129 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_WINBOND_840 is not set
+# CONFIG_HAPPYMEAL is not set
# CONFIG_NET_POCKET is not set
#
@@ -439,6 +473,7 @@ CONFIG_PPP_DEFLATE=y
#
# Console drivers
#
+CONFIG_VGA_CONSOLE=y
#
# Frame-buffer support
@@ -460,7 +495,8 @@ CONFIG_FB_IMSTT=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G100=y
+# CONFIG_FB_MATROX_G100 is not set
+# CONFIG_FB_MATROX_G450 is not set
# CONFIG_FB_MATROX_MULTIHEAD is not set
CONFIG_FB_ATY=y
CONFIG_FB_ATY128=y
@@ -481,6 +517,7 @@ CONFIG_FONT_SUN12x22=y
# CONFIG_FONT_6x11 is not set
# CONFIG_FONT_PEARL_8x8 is not set
# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FB_COMPAT_XPMAC is not set
#
# Input core support
@@ -498,15 +535,13 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
-CONFIG_PMAC_PBOOK=y
-CONFIG_PMAC_BACKLIGHT=y
-CONFIG_MAC_FLOPPY=y
-CONFIG_MAC_SERIAL=y
-# CONFIG_SERIAL_CONSOLE is not set
+# CONFIG_PMAC_PBOOK is not set
+# CONFIG_PMAC_BACKLIGHT is not set
+# CONFIG_MAC_FLOPPY is not set
+CONFIG_MAC_SERIAL=m
CONFIG_ADB=y
CONFIG_ADB_MACIO=y
CONFIG_INPUT_ADBHID=y
-CONFIG_MAC_HID=y
CONFIG_MAC_ADBKEYCODES=y
CONFIG_MAC_EMUMOUSEBTN=y
@@ -575,17 +610,17 @@ CONFIG_NVRAM=y
# File systems
#
# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
+CONFIG_HFS_FS=m
# CONFIG_BFS_FS is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
# CONFIG_UMSDOS_FS is not set
-CONFIG_VFAT_FS=y
+CONFIG_VFAT_FS=m
# CONFIG_EFS_FS is not set
# CONFIG_JFFS_FS is not set
# CONFIG_CRAMFS is not set
@@ -631,8 +666,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -652,6 +685,7 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
CONFIG_NLS=y
#
@@ -678,7 +712,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -695,9 +729,9 @@ CONFIG_NLS_DEFAULT="iso8859-1"
#
# Sound
#
-CONFIG_SOUND=y
-CONFIG_DMASOUND_AWACS=y
-CONFIG_DMASOUND=y
+CONFIG_SOUND=m
+CONFIG_DMASOUND_AWACS=m
+CONFIG_DMASOUND=m
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
@@ -711,43 +745,14 @@ CONFIG_DMASOUND=y
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
-CONFIG_SOUND_OSS=y
-# CONFIG_SOUND_TRACEINIT is not set
-# CONFIG_SOUND_DMAP is not set
-# CONFIG_SOUND_AD1816 is not set
-# CONFIG_SOUND_SGALAXY is not set
-# CONFIG_SOUND_ADLIB is not set
-# CONFIG_SOUND_ACI_MIXER is not set
-CONFIG_SOUND_CS4232=m
-# CONFIG_SOUND_SSCAPE is not set
-# CONFIG_SOUND_GUS is not set
-# CONFIG_SOUND_ICH is not set
-# CONFIG_SOUND_VMIDI is not set
-# CONFIG_SOUND_TRIX is not set
-# CONFIG_SOUND_MSS is not set
-# CONFIG_SOUND_MPU401 is not set
-# CONFIG_SOUND_NM256 is not set
-# CONFIG_SOUND_MAD16 is not set
-# CONFIG_SOUND_PAS is not set
-# CONFIG_PAS_JOYSTICK is not set
-# CONFIG_SOUND_PSS is not set
-# CONFIG_SOUND_SB is not set
-# CONFIG_SOUND_AWE32_SYNTH is not set
-# CONFIG_SOUND_WAVEFRONT is not set
-# CONFIG_SOUND_MAUI is not set
-# CONFIG_SOUND_YM3812 is not set
-# CONFIG_SOUND_OPL3SA1 is not set
-# CONFIG_SOUND_OPL3SA2 is not set
-# CONFIG_SOUND_YMPCI is not set
-# CONFIG_SOUND_UART6850 is not set
-# CONFIG_SOUND_AEDSP16 is not set
+# CONFIG_SOUND_OSS is not set
# CONFIG_SOUND_TVMIXER is not set
#
# USB support
#
CONFIG_USB=y
-CONFIG_USB_DEBUG=y
+# CONFIG_USB_DEBUG is not set
#
# Miscellaneous USB options
@@ -763,33 +768,69 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_OHCI=y
#
-# USB Devices
+# USB Device Class drivers
#
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_BLUETOOTH is not set
+# CONFIG_USB_STORAGE is not set
+CONFIG_USB_ACM=m
# CONFIG_USB_PRINTER is not set
+
+#
+# USB Human Interface Devices (HID)
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_WACOM is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_DC2XX is not set
+# CONFIG_USB_MDC800 is not set
# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_SERIAL is not set
+
+#
+# USB Multimedia devices
+#
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
-# CONFIG_USB_DC2XX is not set
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_STORAGE is not set
-# CONFIG_USB_USS720 is not set
+# CONFIG_USB_DSBR is not set
# CONFIG_USB_DABUSB is not set
+
+#
+# USB Network adaptors
+#
# CONFIG_USB_PLUSB is not set
# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_BLUETOOTH is not set
# CONFIG_USB_NET1080 is not set
#
-# USB Human Interface Devices (HID)
+# USB port drivers
#
-CONFIG_USB_HID=y
-# CONFIG_USB_WACOM is not set
+# CONFIG_USB_USS720 is not set
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_SERIAL_DEBUG is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+CONFIG_USB_SERIAL_VISOR=m
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB misc drivers
+#
+# CONFIG_USB_RIO500 is not set
#
# Kernel hacking
diff --git a/arch/ppc/configs/est8260_defconfig b/arch/ppc/configs/est8260_defconfig
index afb972c03..a4a44f0ab 100644
--- a/arch/ppc/configs/est8260_defconfig
+++ b/arch/ppc/configs/est8260_defconfig
@@ -17,21 +17,17 @@ CONFIG_EXPERIMENTAL=y
# Platform support
#
CONFIG_PPC=y
-# CONFIG_6xx is not set
+CONFIG_6xx=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-CONFIG_8260=y
# CONFIG_8xx is not set
-CONFIG_6xx=y
+CONFIG_8260=y
CONFIG_SERIAL_CONSOLE=y
-# CONFIG_ALL_PPC is not set
-# CONFIG_GEMINI is not set
CONFIG_EST8260=y
-# CONFIG_APUS is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_ALL_PPC is not set
# CONFIG_SMP is not set
-# CONFIG_ALTIVEC is not set
CONFIG_MACH_SPECIFIC=y
#
@@ -40,7 +36,9 @@ CONFIG_MACH_SPECIFIC=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI is not set
CONFIG_NET=y
CONFIG_SYSCTL=y
@@ -57,12 +55,7 @@ CONFIG_KERNEL_ELF=y
# Parallel port support
#
# CONFIG_PARPORT is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_FB is not set
# CONFIG_PPC_RTC is not set
-# CONFIG_PROC_DEVICETREE is not set
-# CONFIG_BOOTX_TEXT is not set
-# CONFIG_MOTOROLA_HOTSWAP is not set
#
# Memory Technology Devices (MTD)
@@ -184,7 +177,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -192,7 +184,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -250,6 +241,7 @@ CONFIG_NET_ETHERNET=y
#
# Console drivers
#
+# CONFIG_VGA_CONSOLE is not set
#
# Frame-buffer support
@@ -379,8 +371,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -397,6 +387,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/ibmchrp_defconfig b/arch/ppc/configs/ibmchrp_defconfig
new file mode 100644
index 000000000..652b94209
--- /dev/null
+++ b/arch/ppc/configs/ibmchrp_defconfig
@@ -0,0 +1,636 @@
+#
+# Automatically generated by make menuconfig: don't edit
+#
+# CONFIG_UID16 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# Platform support
+#
+CONFIG_PPC=y
+CONFIG_6xx=y
+# CONFIG_4xx is not set
+# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
+# CONFIG_8260 is not set
+# CONFIG_8xx is not set
+CONFIG_ALL_PPC=y
+# CONFIG_GEMINI is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC601_SYNC_FIX is not set
+# CONFIG_SMP is not set
+# CONFIG_ALTIVEC is not set
+
+#
+# General setup
+#
+CONFIG_HIGHMEM=y
+# CONFIG_MOL is not set
+# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
+CONFIG_PCI=y
+CONFIG_NET=y
+CONFIG_SYSCTL=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_KERNEL_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PCI_NAMES=y
+# CONFIG_HOTPLUG is not set
+# CONFIG_PCMCIA is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+CONFIG_PPC_RTC=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_PPC_RTAS=y
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PREP_RESIDUAL is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Plug and Play configuration
+#
+# CONFIG_PNP is not set
+# CONFIG_ISAPNP is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID5 is not set
+# CONFIG_BLK_DEV_LVM is not set
+# CONFIG_LVM_PROC_FS is not set
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK=y
+# CONFIG_RTNETLINK is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_FILTER is not set
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_INET_ECN is not set
+CONFIG_SYN_COOKIES=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_UNCLEAN=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_MIRROR=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_NAT_NEEDED=y
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+# CONFIG_IPV6 is not set
+# CONFIG_KHTTPD is not set
+# CONFIG_ATM is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# ATA/IDE/MFM/RLL support
+#
+# CONFIG_IDE is not set
+# CONFIG_BLK_DEV_IDE_MODES is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_DEBUG_QUEUES is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_NCR53C8XX is not set
+CONFIG_SCSI_SYM53C8XX=y
+CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
+CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
+CONFIG_SCSI_NCR53C8XX_SYNC=20
+# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
+# CONFIG_SCSI_NCR53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_NCR53C8XX_PQS_PDS is not set
+# CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_MESH is not set
+# CONFIG_SCSI_MAC53C94 is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+# CONFIG_NET_SB1000 is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MACE is not set
+# CONFIG_BMAC is not set
+# CONFIG_GMAC is not set
+# CONFIG_NCR885E is not set
+# CONFIG_OAKNET is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_APRICOT is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_TULIP is not set
+CONFIG_DE4X5=y
+# CONFIG_DGRS is not set
+# CONFIG_DM9102 is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_EEPRO100_PM is not set
+# CONFIG_LNE390 is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_NE3210 is not set
+# CONFIG_ES3210 is not set
+# CONFIG_8139TOO is not set
+# CONFIG_RTL8129 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Token Ring devices
+#
+CONFIG_TR=y
+# CONFIG_IBMTR is not set
+CONFIG_IBMOL=y
+# CONFIG_IBMLS is not set
+# CONFIG_TMS380TR is not set
+# CONFIG_SMCTR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Console drivers
+#
+CONFIG_VGA_CONSOLE=y
+
+#
+# Frame-buffer support
+#
+CONFIG_FB=y
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_CLGEN is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+CONFIG_FB_OF=y
+# CONFIG_FB_CONTROL is not set
+# CONFIG_FB_PLATINUM is not set
+# CONFIG_FB_VALKYRIE is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_S3TRIO is not set
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G100=y
+# CONFIG_FB_MATROX_G450 is not set
+# CONFIG_FB_MATROX_MULTIHEAD is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_ATY128 is not set
+CONFIG_FB_3DFX=y
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FBCON_ADVANCED is not set
+CONFIG_FBCON_CFB8=y
+CONFIG_FBCON_CFB16=y
+CONFIG_FBCON_CFB24=y
+CONFIG_FBCON_CFB32=y
+# CONFIG_FBCON_FONTWIDTH8_ONLY is not set
+CONFIG_FBCON_FONTS=y
+# CONFIG_FONT_8x8 is not set
+CONFIG_FONT_8x16=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FB_COMPAT_XPMAC is not set
+
+#
+# Input core support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_KEYBDEV=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+
+#
+# Macintosh device drivers
+#
+# CONFIG_ADB_CUDA is not set
+# CONFIG_ADB_PMU is not set
+# CONFIG_MAC_FLOPPY is not set
+# CONFIG_MAC_SERIAL is not set
+# CONFIG_ADB is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+# CONFIG_SERIAL_EXTENDED is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Mice
+#
+CONFIG_BUSMOUSE=y
+# CONFIG_ATIXL_BUSMOUSE is not set
+# CONFIG_LOGIBUSMOUSE is not set
+# CONFIG_MS_BUSMOUSE is not set
+CONFIG_MOUSE=y
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
+
+#
+# Joysticks
+#
+# CONFIG_JOYSTICK is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_INTEL_RNG is not set
+CONFIG_NVRAM=y
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=m
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_RAMFS is not set
+CONFIG_ISO9660_FS=y
+# CONFIG_JOLIET is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYSV_FS_WRITE is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_NFS_FS is not set
+# CONFIG_NFS_V3 is not set
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFSD_V3 is not set
+# CONFIG_SUNRPC is not set
+# CONFIG_LOCKD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# Kernel hacking
+#
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_KGDB is not set
+CONFIG_XMON=y
diff --git a/arch/ppc/configs/mbx_defconfig b/arch/ppc/configs/mbx_defconfig
index 6230f5877..81829327a 100644
--- a/arch/ppc/configs/mbx_defconfig
+++ b/arch/ppc/configs/mbx_defconfig
@@ -21,7 +21,6 @@ CONFIG_PPC=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
CONFIG_8xx=y
CONFIG_SERIAL_CONSOLE=y
# CONFIG_RPXLITE is not set
@@ -32,6 +31,7 @@ CONFIG_SERIAL_CONSOLE=y
# CONFIG_TQM860 is not set
CONFIG_MBX=y
# CONFIG_WINCEPT is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_ALL_PPC is not set
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
@@ -43,7 +43,9 @@ CONFIG_MATH_EMULATION=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI_QSPAN is not set
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -176,7 +178,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -184,7 +185,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -370,8 +370,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -388,6 +386,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/oak_defconfig b/arch/ppc/configs/oak_defconfig
index f33966d35..d8f39c36e 100644
--- a/arch/ppc/configs/oak_defconfig
+++ b/arch/ppc/configs/oak_defconfig
@@ -12,7 +12,7 @@ CONFIG_EXPERIMENTAL=y
# Loadable module support
#
CONFIG_MODULES=y
-# CONFIG_MODVERSIONS is not set
+CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
#
@@ -23,10 +23,10 @@ CONFIG_PPC=y
CONFIG_4xx=y
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
# CONFIG_8xx is not set
CONFIG_OAK=y
# CONFIG_WALNUT is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
# CONFIG_MATH_EMULATION is not set
@@ -37,7 +37,9 @@ CONFIG_MACH_SPECIFIC=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI is not set
CONFIG_NET=y
CONFIG_SYSCTL=y
@@ -171,7 +173,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
CONFIG_OAKNET=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -179,7 +180,6 @@ CONFIG_OAKNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -366,8 +366,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -376,6 +374,7 @@ CONFIG_LOCKD=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/power3_defconfig b/arch/ppc/configs/power3_defconfig
new file mode 100644
index 000000000..0deb9b5e8
--- /dev/null
+++ b/arch/ppc/configs/power3_defconfig
@@ -0,0 +1,662 @@
+#
+# Automatically generated by make menuconfig: don't edit
+#
+# CONFIG_UID16 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# Platform support
+#
+CONFIG_PPC=y
+# CONFIG_6xx is not set
+# CONFIG_4xx is not set
+CONFIG_POWER3=y
+# CONFIG_POWER4 is not set
+# CONFIG_8xx is not set
+CONFIG_PPC64BRIDGE=y
+CONFIG_ALL_PPC=y
+CONFIG_SMP=y
+
+#
+# General setup
+#
+CONFIG_HIGHMEM=y
+# CONFIG_MOL is not set
+# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
+CONFIG_PCI=y
+CONFIG_NET=y
+CONFIG_SYSCTL=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_KERNEL_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PCI_NAMES=y
+# CONFIG_HOTPLUG is not set
+# CONFIG_PCMCIA is not set
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
+# CONFIG_PARPORT_AMIGA is not set
+# CONFIG_PARPORT_MFC3 is not set
+# CONFIG_PARPORT_ATARI is not set
+# CONFIG_PARPORT_SUNBPP is not set
+# CONFIG_PARPORT_OTHER is not set
+# CONFIG_PARPORT_1284 is not set
+# CONFIG_PPC_RTC is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_PPC_RTAS=y
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PREP_RESIDUAL is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Plug and Play configuration
+#
+# CONFIG_PNP is not set
+# CONFIG_ISAPNP is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID5=y
+# CONFIG_MD_BOOT is not set
+# CONFIG_AUTODETECT_RAID is not set
+CONFIG_BLK_DEV_LVM=y
+CONFIG_LVM_PROC_FS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK=y
+# CONFIG_RTNETLINK is not set
+# CONFIG_NETLINK_DEV is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_FILTER is not set
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_INET_ECN is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_IPV6 is not set
+# CONFIG_KHTTPD is not set
+# CONFIG_ATM is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# ATA/IDE/MFM/RLL support
+#
+# CONFIG_IDE is not set
+# CONFIG_BLK_DEV_IDE_MODES is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_DEBUG_QUEUES is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_NCR53C8XX is not set
+CONFIG_SCSI_SYM53C8XX=y
+CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
+CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
+CONFIG_SCSI_NCR53C8XX_SYNC=20
+# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
+# CONFIG_SCSI_NCR53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_NCR53C8XX_PQS_PDS is not set
+# CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_MESH is not set
+# CONFIG_SCSI_MAC53C94 is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+# CONFIG_NET_SB1000 is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MACE is not set
+# CONFIG_BMAC is not set
+# CONFIG_GMAC is not set
+# CONFIG_OAKNET is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_APRICOT is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_DGRS is not set
+# CONFIG_DM9102 is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_EEPRO100_PM is not set
+# CONFIG_LNE390 is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_NE3210 is not set
+# CONFIG_ES3210 is not set
+# CONFIG_8139TOO is not set
+# CONFIG_RTL8129 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Token Ring devices
+#
+CONFIG_TR=y
+# CONFIG_IBMTR is not set
+CONFIG_IBMOL=y
+# CONFIG_IBMLS is not set
+# CONFIG_TMS380TR is not set
+# CONFIG_SMCTR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Console drivers
+#
+# CONFIG_VGA_CONSOLE is not set
+
+#
+# Frame-buffer support
+#
+CONFIG_FB=y
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_CLGEN is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+CONFIG_FB_OF=y
+# CONFIG_FB_CONTROL is not set
+# CONFIG_FB_PLATINUM is not set
+# CONFIG_FB_VALKYRIE is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_S3TRIO is not set
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G100=y
+CONFIG_FB_MATROX_I2C=y
+# CONFIG_FB_MATROX_MAVEN is not set
+# CONFIG_FB_MATROX_G450 is not set
+CONFIG_FB_MATROX_MULTIHEAD=y
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FBCON_ADVANCED is not set
+CONFIG_FBCON_CFB8=y
+CONFIG_FBCON_CFB16=y
+CONFIG_FBCON_CFB24=y
+CONFIG_FBCON_CFB32=y
+# CONFIG_FBCON_FONTWIDTH8_ONLY is not set
+CONFIG_FBCON_FONTS=y
+# CONFIG_FONT_8x8 is not set
+CONFIG_FONT_8x16=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FB_COMPAT_XPMAC is not set
+
+#
+# Input core support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_KEYBDEV=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+
+#
+# Macintosh device drivers
+#
+# CONFIG_ADB_CUDA is not set
+# CONFIG_ADB_PMU is not set
+# CONFIG_MAC_FLOPPY is not set
+# CONFIG_MAC_SERIAL is not set
+# CONFIG_ADB is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+# CONFIG_SERIAL_EXTENDED is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=2048
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_PPDEV is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_PHILIPSPAR is not set
+# CONFIG_I2C_ELV is not set
+# CONFIG_I2C_VELLEMAN is not set
+CONFIG_I2C_ALGOPCF=y
+# CONFIG_I2C_ELEKTOR is not set
+CONFIG_I2C_CHARDEV=y
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+CONFIG_MOUSE=y
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
+
+#
+# Joysticks
+#
+# CONFIG_JOYSTICK is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_INTEL_RNG is not set
+CONFIG_NVRAM=y
+# CONFIG_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=y
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_RAMFS is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_MINIX_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYSV_FS_WRITE is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_ROOT_NFS is not set
+CONFIG_NFSD=y
+# CONFIG_NFSD_V3 is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+# CONFIG_SMB_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_SMB_NLS is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Sound
+#
+CONFIG_SOUND=y
+# CONFIG_DMASOUND_AWACS is not set
+# CONFIG_SOUND_CMPCI is not set
+# CONFIG_SOUND_EMU10K1 is not set
+# CONFIG_SOUND_FUSION is not set
+# CONFIG_SOUND_CS4281 is not set
+# CONFIG_SOUND_ES1370 is not set
+# CONFIG_SOUND_ES1371 is not set
+# CONFIG_SOUND_ESSSOLO1 is not set
+# CONFIG_SOUND_MAESTRO is not set
+# CONFIG_SOUND_SONICVIBES is not set
+# CONFIG_SOUND_TRIDENT is not set
+# CONFIG_SOUND_MSNDCLAS is not set
+# CONFIG_SOUND_MSNDPIN is not set
+# CONFIG_SOUND_VIA82CXXX is not set
+CONFIG_SOUND_OSS=y
+CONFIG_SOUND_TRACEINIT=y
+# CONFIG_SOUND_DMAP is not set
+# CONFIG_SOUND_AD1816 is not set
+# CONFIG_SOUND_SGALAXY is not set
+# CONFIG_SOUND_ADLIB is not set
+# CONFIG_SOUND_ACI_MIXER is not set
+CONFIG_SOUND_CS4232=m
+# CONFIG_SOUND_SSCAPE is not set
+# CONFIG_SOUND_GUS is not set
+# CONFIG_SOUND_ICH is not set
+# CONFIG_SOUND_VMIDI is not set
+# CONFIG_SOUND_TRIX is not set
+CONFIG_SOUND_MSS=m
+# CONFIG_SOUND_MPU401 is not set
+# CONFIG_SOUND_NM256 is not set
+# CONFIG_SOUND_MAD16 is not set
+# CONFIG_SOUND_PAS is not set
+# CONFIG_PAS_JOYSTICK is not set
+# CONFIG_SOUND_PSS is not set
+# CONFIG_SOUND_SB is not set
+# CONFIG_SOUND_AWE32_SYNTH is not set
+# CONFIG_SOUND_WAVEFRONT is not set
+# CONFIG_SOUND_MAUI is not set
+# CONFIG_SOUND_YM3812 is not set
+# CONFIG_SOUND_OPL3SA1 is not set
+# CONFIG_SOUND_OPL3SA2 is not set
+# CONFIG_SOUND_YMPCI is not set
+# CONFIG_SOUND_YMFPCI is not set
+# CONFIG_SOUND_UART6850 is not set
+# CONFIG_SOUND_AEDSP16 is not set
+CONFIG_SOUND_TVMIXER=y
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# Kernel hacking
+#
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_KGDB is not set
+CONFIG_XMON=y
diff --git a/arch/ppc/configs/rpxcllf_defconfig b/arch/ppc/configs/rpxcllf_defconfig
index 78ee82230..258d3e79f 100644
--- a/arch/ppc/configs/rpxcllf_defconfig
+++ b/arch/ppc/configs/rpxcllf_defconfig
@@ -21,7 +21,6 @@ CONFIG_PPC=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
CONFIG_8xx=y
CONFIG_SERIAL_CONSOLE=y
# CONFIG_RPXLITE is not set
@@ -32,6 +31,7 @@ CONFIG_RPXCLASSIC=y
# CONFIG_TQM860 is not set
# CONFIG_MBX is not set
# CONFIG_WINCEPT is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_ALL_PPC is not set
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
@@ -43,7 +43,9 @@ CONFIG_MATH_EMULATION=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI_QSPAN is not set
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -182,7 +184,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -190,7 +191,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -377,8 +377,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -395,6 +393,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/rpxlite_defconfig b/arch/ppc/configs/rpxlite_defconfig
index 741d9265f..df00f5ace 100644
--- a/arch/ppc/configs/rpxlite_defconfig
+++ b/arch/ppc/configs/rpxlite_defconfig
@@ -21,7 +21,6 @@ CONFIG_PPC=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
CONFIG_8xx=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_RPXLITE=y
@@ -32,6 +31,7 @@ CONFIG_RPXLITE=y
# CONFIG_TQM860 is not set
# CONFIG_MBX is not set
# CONFIG_WINCEPT is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_ALL_PPC is not set
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
@@ -43,7 +43,9 @@ CONFIG_MATH_EMULATION=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI_QSPAN is not set
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -182,7 +184,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -190,7 +191,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -377,8 +377,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -395,6 +393,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/configs/walnut_defconfig b/arch/ppc/configs/walnut_defconfig
index c3e95c6ac..eef439fc2 100644
--- a/arch/ppc/configs/walnut_defconfig
+++ b/arch/ppc/configs/walnut_defconfig
@@ -12,7 +12,7 @@ CONFIG_EXPERIMENTAL=y
# Loadable module support
#
CONFIG_MODULES=y
-# CONFIG_MODVERSIONS is not set
+CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
#
@@ -23,10 +23,10 @@ CONFIG_PPC=y
CONFIG_4xx=y
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
# CONFIG_8xx is not set
# CONFIG_OAK is not set
CONFIG_WALNUT=y
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_SMP is not set
CONFIG_MACH_SPECIFIC=y
# CONFIG_MATH_EMULATION is not set
@@ -37,7 +37,9 @@ CONFIG_MACH_SPECIFIC=y
# CONFIG_HIGHMEM is not set
# CONFIG_MOL is not set
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
# CONFIG_PCI is not set
CONFIG_NET=y
CONFIG_SYSCTL=y
@@ -171,7 +173,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
# CONFIG_GMAC is not set
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -179,7 +180,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_NET_PCI is not set
# CONFIG_NET_POCKET is not set
@@ -369,8 +369,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -379,6 +377,7 @@ CONFIG_LOCKD=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
+# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/defconfig b/arch/ppc/defconfig
index 5574ecbdb..210887de7 100644
--- a/arch/ppc/defconfig
+++ b/arch/ppc/defconfig
@@ -23,12 +23,11 @@ CONFIG_6xx=y
# CONFIG_4xx is not set
# CONFIG_POWER3 is not set
# CONFIG_POWER4 is not set
-# CONFIG_8260 is not set
# CONFIG_8xx is not set
+# CONFIG_8260 is not set
CONFIG_ALL_PPC=y
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
# CONFIG_APUS is not set
+CONFIG_PPC601_SYNC_FIX=y
# CONFIG_SMP is not set
CONFIG_ALTIVEC=y
@@ -36,9 +35,11 @@ CONFIG_ALTIVEC=y
# General setup
#
# CONFIG_HIGHMEM is not set
-# CONFIG_MOL is not set
+CONFIG_MOL=y
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_SYSCTL=y
@@ -49,21 +50,24 @@ CONFIG_BINFMT_ELF=y
CONFIG_KERNEL_ELF=y
CONFIG_BINFMT_MISC=m
CONFIG_PCI_NAMES=y
-# CONFIG_HOTPLUG is not set
+CONFIG_HOTPLUG=y
+
+#
+# PCMCIA/CardBus support
+#
# CONFIG_PCMCIA is not set
#
# Parallel port support
#
# CONFIG_PARPORT is not set
-CONFIG_VGA_CONSOLE=y
-CONFIG_FB=y
-CONFIG_FB_COMPAT_XPMAC=y
CONFIG_PPC_RTC=y
CONFIG_PROC_DEVICETREE=y
+CONFIG_PPC_RTAS=y
CONFIG_BOOTX_TEXT=y
-# CONFIG_MOTOROLA_HOTSWAP is not set
-# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PREP_RESIDUAL=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 console=tty0 root=/dev/sda2"
#
# Memory Technology Devices (MTD)
@@ -79,7 +83,7 @@ CONFIG_BOOTX_TEXT=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_FD=m
# CONFIG_BLK_DEV_XD is not set
# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
@@ -111,7 +115,8 @@ CONFIG_PACKET=y
CONFIG_NETLINK=y
# CONFIG_RTNETLINK is not set
# CONFIG_NETLINK_DEV is not set
-# CONFIG_NETFILTER is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
# CONFIG_FILTER is not set
CONFIG_UNIX=y
CONFIG_INET=y
@@ -123,6 +128,34 @@ CONFIG_IP_MULTICAST=y
# CONFIG_IP_MROUTE is not set
# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_UNCLEAN=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_MIRROR=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_NAT_NEEDED=y
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
# CONFIG_IPV6 is not set
# CONFIG_KHTTPD is not set
# CONFIG_ATM is not set
@@ -239,6 +272,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SR_EXTRA_DEVS=2
@@ -344,7 +378,6 @@ CONFIG_NET_ETHERNET=y
CONFIG_MACE=y
CONFIG_BMAC=y
CONFIG_GMAC=y
-# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
@@ -357,27 +390,28 @@ CONFIG_GMAC=y
CONFIG_NET_PCI=y
CONFIG_PCNET32=y
# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
-CONFIG_DE4X5=y
-# CONFIG_TULIP is not set
+CONFIG_TULIP=y
+CONFIG_DE4X5=m
# CONFIG_DGRS is not set
# CONFIG_DM9102 is not set
# CONFIG_EEPRO100 is not set
+# CONFIG_EEPRO100_PM is not set
# CONFIG_LNE390 is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_NE3210 is not set
# CONFIG_ES3210 is not set
-# CONFIG_RTL8129 is not set
# CONFIG_8139TOO is not set
+# CONFIG_RTL8129 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_WINBOND_840 is not set
+# CONFIG_HAPPYMEAL is not set
# CONFIG_NET_POCKET is not set
#
@@ -439,6 +473,7 @@ CONFIG_PPP_DEFLATE=y
#
# Console drivers
#
+CONFIG_VGA_CONSOLE=y
#
# Frame-buffer support
@@ -460,7 +495,8 @@ CONFIG_FB_IMSTT=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G100=y
+# CONFIG_FB_MATROX_G100 is not set
+# CONFIG_FB_MATROX_G450 is not set
# CONFIG_FB_MATROX_MULTIHEAD is not set
CONFIG_FB_ATY=y
CONFIG_FB_ATY128=y
@@ -481,6 +517,7 @@ CONFIG_FONT_SUN12x22=y
# CONFIG_FONT_6x11 is not set
# CONFIG_FONT_PEARL_8x8 is not set
# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FB_COMPAT_XPMAC is not set
#
# Input core support
@@ -498,15 +535,13 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
-CONFIG_PMAC_PBOOK=y
-CONFIG_PMAC_BACKLIGHT=y
-CONFIG_MAC_FLOPPY=y
-CONFIG_MAC_SERIAL=y
-# CONFIG_SERIAL_CONSOLE is not set
+# CONFIG_PMAC_PBOOK is not set
+# CONFIG_PMAC_BACKLIGHT is not set
+# CONFIG_MAC_FLOPPY is not set
+CONFIG_MAC_SERIAL=m
CONFIG_ADB=y
CONFIG_ADB_MACIO=y
CONFIG_INPUT_ADBHID=y
-CONFIG_MAC_HID=y
CONFIG_MAC_ADBKEYCODES=y
CONFIG_MAC_EMUMOUSEBTN=y
@@ -575,17 +610,17 @@ CONFIG_NVRAM=y
# File systems
#
# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
+CONFIG_HFS_FS=m
# CONFIG_BFS_FS is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
# CONFIG_UMSDOS_FS is not set
-CONFIG_VFAT_FS=y
+CONFIG_VFAT_FS=m
# CONFIG_EFS_FS is not set
# CONFIG_JFFS_FS is not set
# CONFIG_CRAMFS is not set
@@ -631,8 +666,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -652,6 +685,7 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_SMB_NLS is not set
CONFIG_NLS=y
#
@@ -678,7 +712,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -695,9 +729,9 @@ CONFIG_NLS_DEFAULT="iso8859-1"
#
# Sound
#
-CONFIG_SOUND=y
-CONFIG_DMASOUND_AWACS=y
-CONFIG_DMASOUND=y
+CONFIG_SOUND=m
+CONFIG_DMASOUND_AWACS=m
+CONFIG_DMASOUND=m
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
@@ -711,43 +745,14 @@ CONFIG_DMASOUND=y
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
-CONFIG_SOUND_OSS=y
-# CONFIG_SOUND_TRACEINIT is not set
-# CONFIG_SOUND_DMAP is not set
-# CONFIG_SOUND_AD1816 is not set
-# CONFIG_SOUND_SGALAXY is not set
-# CONFIG_SOUND_ADLIB is not set
-# CONFIG_SOUND_ACI_MIXER is not set
-CONFIG_SOUND_CS4232=m
-# CONFIG_SOUND_SSCAPE is not set
-# CONFIG_SOUND_GUS is not set
-# CONFIG_SOUND_ICH is not set
-# CONFIG_SOUND_VMIDI is not set
-# CONFIG_SOUND_TRIX is not set
-# CONFIG_SOUND_MSS is not set
-# CONFIG_SOUND_MPU401 is not set
-# CONFIG_SOUND_NM256 is not set
-# CONFIG_SOUND_MAD16 is not set
-# CONFIG_SOUND_PAS is not set
-# CONFIG_PAS_JOYSTICK is not set
-# CONFIG_SOUND_PSS is not set
-# CONFIG_SOUND_SB is not set
-# CONFIG_SOUND_AWE32_SYNTH is not set
-# CONFIG_SOUND_WAVEFRONT is not set
-# CONFIG_SOUND_MAUI is not set
-# CONFIG_SOUND_YM3812 is not set
-# CONFIG_SOUND_OPL3SA1 is not set
-# CONFIG_SOUND_OPL3SA2 is not set
-# CONFIG_SOUND_YMPCI is not set
-# CONFIG_SOUND_UART6850 is not set
-# CONFIG_SOUND_AEDSP16 is not set
+# CONFIG_SOUND_OSS is not set
# CONFIG_SOUND_TVMIXER is not set
#
# USB support
#
CONFIG_USB=y
-CONFIG_USB_DEBUG=y
+# CONFIG_USB_DEBUG is not set
#
# Miscellaneous USB options
@@ -763,33 +768,69 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_OHCI=y
#
-# USB Devices
+# USB Device Class drivers
#
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_BLUETOOTH is not set
+# CONFIG_USB_STORAGE is not set
+CONFIG_USB_ACM=m
# CONFIG_USB_PRINTER is not set
+
+#
+# USB Human Interface Devices (HID)
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_WACOM is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_DC2XX is not set
+# CONFIG_USB_MDC800 is not set
# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_SERIAL is not set
+
+#
+# USB Multimedia devices
+#
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
-# CONFIG_USB_DC2XX is not set
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_STORAGE is not set
-# CONFIG_USB_USS720 is not set
+# CONFIG_USB_DSBR is not set
# CONFIG_USB_DABUSB is not set
+
+#
+# USB Network adaptors
+#
# CONFIG_USB_PLUSB is not set
# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_BLUETOOTH is not set
# CONFIG_USB_NET1080 is not set
#
-# USB Human Interface Devices (HID)
+# USB port drivers
#
-CONFIG_USB_HID=y
-# CONFIG_USB_WACOM is not set
+# CONFIG_USB_USS720 is not set
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_SERIAL_DEBUG is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+CONFIG_USB_SERIAL_VISOR=m
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB misc drivers
+#
+# CONFIG_USB_RIO500 is not set
#
# Kernel hacking
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 4a9852612..545fce0cd 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_PPC) := entry.o traps.o irq.o idle.o time.o misc.o \
process.o signal.o bitops.o ptrace.o \
ppc_htab.o semaphore.o syscalls.o \
align.o setup.o
+obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_POWER4) += xics.o
obj-$(CONFIG_PCI) += pci.o pci-dma.o
obj-$(CONFIG_KGDB) += ppc-stub.o
@@ -62,11 +63,8 @@ obj-$(CONFIG_ALL_PPC) += pmac_pic.o pmac_setup.o pmac_time.o prom.o \
feature.o pmac_pci.o chrp_setup.o \
chrp_time.o chrp_pci.o open_pic.o \
indirect_pci.o i8259.o prep_pci.o \
- prep_time.o prep_nvram.o ppc_ksyms.o \
- prep_setup.o
+ prep_time.o prep_nvram.o prep_setup.o
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
-obj-$(CONFIG_GEMINI) += gemini_prom.o gemini_pci.o gemini_setup.o \
- open_pic.o
obj-$(CONFIG_8260) += m8260_setup.o ppc8260_pic.o
diff --git a/arch/ppc/kernel/apus_setup.c b/arch/ppc/kernel/apus_setup.c
index 5979c9312..70a683d20 100644
--- a/arch/ppc/kernel/apus_setup.c
+++ b/arch/ppc/kernel/apus_setup.c
@@ -647,25 +647,6 @@ apus_ide_default_io_base(int index)
return 0;
}
-int
-apus_ide_check_region(ide_ioreg_t from, unsigned int extent)
-{
- return 0;
-}
-
-void
-apus_ide_request_region(ide_ioreg_t from,
- unsigned int extent,
- const char *name)
-{
-}
-
-void
-apus_ide_release_region(ide_ioreg_t from,
- unsigned int extent)
-{
-}
-
void
apus_ide_fix_driveid(struct hd_driveid *id)
{
@@ -1131,9 +1112,6 @@ void apus_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_ide_md.outsw = apus_ide_outsw;
ppc_ide_md.default_irq = apus_ide_default_irq;
ppc_ide_md.default_io_base = apus_ide_default_io_base;
- ppc_ide_md.ide_check_region = apus_ide_check_region;
- ppc_ide_md.ide_request_region = apus_ide_request_region;
- ppc_ide_md.ide_release_region = apus_ide_release_region;
ppc_ide_md.fix_driveid = apus_ide_fix_driveid;
ppc_ide_md.ide_init_hwif = apus_ide_init_hwif_ports;
diff --git a/arch/ppc/kernel/chrp_pci.c b/arch/ppc/kernel/chrp_pci.c
index 43b678861..914ed02ff 100644
--- a/arch/ppc/kernel/chrp_pci.c
+++ b/arch/ppc/kernel/chrp_pci.c
@@ -8,8 +8,8 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/openpic.h>
#include <linux/ide.h>
+#include <linux/bootmem.h>
#include <asm/io.h>
#include <asm/pgtable.h>
@@ -19,16 +19,19 @@
#include <asm/gg2.h>
#include <asm/machdep.h>
#include <asm/init.h>
+#include <asm/pci-bridge.h>
+#include "open_pic.h"
#include "pci.h"
+
#ifdef CONFIG_POWER4
-static unsigned long pci_address_offset(int, unsigned int);
+extern unsigned long pci_address_offset(int, unsigned int);
#endif /* CONFIG_POWER4 */
/* LongTrail */
-#define pci_config_addr(bus, dev, offset) \
-(GG2_PCI_CONFIG_BASE | ((bus)<<16) | ((dev)<<8) | (offset))
+#define pci_config_addr(dev, offset) \
+(GG2_PCI_CONFIG_BASE | ((dev->bus->number)<<16) | ((dev->devfn)<<8) | (offset))
volatile struct Hydra *Hydra = NULL;
@@ -37,205 +40,127 @@ volatile struct Hydra *Hydra = NULL;
* limit the bus number to 3 bits
*/
-int __chrp gg2_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- if (bus > 7) {
- *val = 0xff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- *val = in_8((unsigned char *)pci_config_addr(bus, dev_fn, offset));
- return PCIBIOS_SUCCESSFUL;
+#define cfg_read(val, addr, type, op) *val = op((type)(addr))
+#define cfg_write(val, addr, type, op) op((type *)(addr), (val))
+
+#define cfg_read_bad(val, size) *val = bad_##size;
+#define cfg_write_bad(val, size)
+
+#define bad_byte 0xff
+#define bad_word 0xffff
+#define bad_dword 0xffffffffU
+
+#define GG2_PCI_OP(rw, size, type, op) \
+int __chrp gg2_##rw##_config_##size(struct pci_dev *dev, int off, type val) \
+{ \
+ if (dev->bus->number > 7) { \
+ cfg_##rw##_bad(val, size) \
+ return PCIBIOS_DEVICE_NOT_FOUND; \
+ } \
+ cfg_##rw(val, pci_config_addr(dev, off), type, op); \
+ return PCIBIOS_SUCCESSFUL; \
}
-int __chrp gg2_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- if (bus > 7) {
- *val = 0xffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- *val = in_le16((unsigned short *)pci_config_addr(bus, dev_fn, offset));
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-int __chrp gg2_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
-{
- if (bus > 7) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- *val = in_le32((unsigned int *)pci_config_addr(bus, dev_fn, offset));
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp gg2_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
-{
- if (bus > 7)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_8((unsigned char *)pci_config_addr(bus, dev_fn, offset), val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp gg2_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- if (bus > 7)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le16((unsigned short *)pci_config_addr(bus, dev_fn, offset), val);
- return PCIBIOS_SUCCESSFUL;
-}
+GG2_PCI_OP(read, byte, u8 *, in_8)
+GG2_PCI_OP(read, word, u16 *, in_le16)
+GG2_PCI_OP(read, dword, u32 *, in_le32)
+GG2_PCI_OP(write, byte, u8, out_8)
+GG2_PCI_OP(write, word, u16, out_le16)
+GG2_PCI_OP(write, dword, u32, out_le32)
-int __chrp gg2_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
+static struct pci_ops gg2_pci_ops =
{
- if (bus > 7)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32((unsigned int *)pci_config_addr(bus, dev_fn, offset), val);
- return PCIBIOS_SUCCESSFUL;
-}
+ gg2_read_config_byte,
+ gg2_read_config_word,
+ gg2_read_config_dword,
+ gg2_write_config_byte,
+ gg2_write_config_word,
+ gg2_write_config_dword
+};
-#define python_config_address(bus) (unsigned *)((0xfef00000+0xf8000)-(bus*0x100000))
-#define python_config_data(bus) ((0xfef00000+0xf8010)-(bus*0x100000))
-#define PYTHON_CFA(b, d, o) (0x80 | ((b<<6) << 8) | ((d) << 16) \
+/*
+ * Access functions for PCI config space on IBM "python" host bridges.
+ */
+#define PYTHON_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
| (((o) & ~3) << 24))
-unsigned int python_busnr = 0;
-
-int __chrp python_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- if (bus > python_busnr) {
- *val = 0xff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- *val = in_8((unsigned char *)python_config_data(bus) + (offset&3));
- return PCIBIOS_SUCCESSFUL;
-}
-int __chrp python_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- if (bus > python_busnr) {
- *val = 0xffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- *val = in_le16((unsigned short *)(python_config_data(bus) + (offset&3)));
- return PCIBIOS_SUCCESSFUL;
+#define PYTHON_PCI_OP(rw, size, type, op, mask) \
+int __chrp \
+python_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ struct pci_controller *hose = dev->sysdata; \
+ \
+ out_be32(hose->cfg_addr, \
+ PYTHON_CFA(dev->bus->number, dev->devfn, offset)); \
+ cfg_##rw(val, hose->cfg_data + (offset & mask), type, op); \
+ return PCIBIOS_SUCCESSFUL; \
}
+PYTHON_PCI_OP(read, byte, u8 *, in_8, 3)
+PYTHON_PCI_OP(read, word, u16 *, in_le16, 2)
+PYTHON_PCI_OP(read, dword, u32 *, in_le32, 0)
+PYTHON_PCI_OP(write, byte, u8, out_8, 3)
+PYTHON_PCI_OP(write, word, u16, out_le16, 2)
+PYTHON_PCI_OP(write, dword, u32, out_le32, 0)
-int __chrp python_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
-{
- if (bus > python_busnr) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- *val = in_le32((unsigned *)python_config_data(bus));
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp python_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
+static struct pci_ops python_pci_ops =
{
- if (bus > python_busnr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- out_8((volatile unsigned char *)python_config_data(bus) + (offset&3), val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp python_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- if (bus > python_busnr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- out_le16((volatile unsigned short *)python_config_data(bus) + (offset&3),
- val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp python_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
-{
- if (bus > python_busnr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_be32( python_config_address( bus ), PYTHON_CFA(bus,dev_fn,offset));
- out_le32((unsigned *)python_config_data(bus) + (offset&3), val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-int __chrp rtas_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- unsigned long ret;
-
- if (call_rtas( "read-pci-config", 2, 2, &ret, addr, 1) != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = ret;
- return PCIBIOS_SUCCESSFUL;
-}
-
-int __chrp rtas_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- unsigned long ret;
-
- if (call_rtas("read-pci-config", 2, 2, &ret, addr, 2) != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = ret;
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-int __chrp rtas_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
-{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- unsigned long ret;
+ python_read_config_byte,
+ python_read_config_word,
+ python_read_config_dword,
+ python_write_config_byte,
+ python_write_config_word,
+ python_write_config_dword
+};
- if (call_rtas("read-pci-config", 2, 2, &ret, addr, 4) != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = ret;
- return PCIBIOS_SUCCESSFUL;
+#ifdef CONFIG_POWER4
+/*
+ * Access functions for PCI config space using RTAS calls.
+ */
+#define RTAS_PCI_READ_OP(size, type, nbytes) \
+int __chrp \
+rtas_read_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ unsigned long addr = (offset & 0xff) | ((dev->devfn & 0xff) << 8) \
+ | ((dev->bus->number & 0xff) << 16); \
+ unsigned long ret = ~0UL; \
+ int rval; \
+ \
+ rval = call_rtas("read-pci-config", 2, 2, &ret, addr, nbytes); \
+ *val = ret; \
+ return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; \
}
-int __chrp rtas_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
-{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 1, (ulong)val ) != 0 )
- return PCIBIOS_DEVICE_NOT_FOUND;
- return PCIBIOS_SUCCESSFUL;
+#define RTAS_PCI_WRITE_OP(size, type, nbytes) \
+int __chrp \
+rtas_write_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ unsigned long addr = (offset & 0xff) | ((dev->devfn & 0xff) << 8) \
+ | ((dev->bus->number & 0xff) << 16); \
+ int rval; \
+ \
+ rval = call_rtas("write-pci-config", 3, 1, NULL, \
+ addr, nbytes, (ulong)val); \
+ return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; \
}
-int __chrp rtas_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 2, (ulong)val ) != 0 )
- return PCIBIOS_DEVICE_NOT_FOUND;
- return PCIBIOS_SUCCESSFUL;
-}
+RTAS_PCI_READ_OP(byte, u8 *, 1)
+RTAS_PCI_READ_OP(word, u16 *, 2)
+RTAS_PCI_READ_OP(dword, u32 *, 4)
+RTAS_PCI_WRITE_OP(byte, u8, 1)
+RTAS_PCI_WRITE_OP(word, u16, 2)
+RTAS_PCI_WRITE_OP(dword, u32, 4)
-int __chrp rtas_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
+static struct pci_ops rtas_pci_ops =
{
- unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 4, (ulong)val ) != 0 )
- return PCIBIOS_DEVICE_NOT_FOUND;
- return PCIBIOS_SUCCESSFUL;
-}
+ rtas_read_config_byte,
+ rtas_read_config_word,
+ rtas_read_config_dword,
+ rtas_write_config_byte,
+ rtas_write_config_word,
+ rtas_write_config_dword
+};
+#endif /* CONFIG_POWER4 */
/*
* Temporary fixes for PCI devices. These should be replaced by OF query
@@ -278,7 +203,7 @@ hydra_init(void)
HYDRA_FC_MPIC_ENABLE |
HYDRA_FC_SLOW_SCC_PCLK |
HYDRA_FC_MPIC_IS_MASTER));
- OpenPIC = (volatile struct OpenPIC *)&Hydra->OpenPIC;
+ OpenPIC_Addr = &Hydra->OpenPIC;
OpenPIC_InitSenses = hydra_openpic_initsenses;
OpenPIC_NumInitSenses = sizeof(hydra_openpic_initsenses);
return 1;
@@ -316,54 +241,25 @@ void __init
chrp_pcibios_fixup(void)
{
struct pci_dev *dev;
- int *brp;
struct device_node *np;
- extern struct pci_ops generic_pci_ops;
-
-#ifndef CONFIG_POWER4
- np = find_devices("device-tree");
- if (np != 0) {
- for (np = np->child; np != NULL; np = np->sibling) {
- if (np->type == NULL || strcmp(np->type, "pci") != 0)
- continue;
- if ((brp = (int *) get_property(np, "bus-range", NULL)) == 0)
- continue;
- if (brp[0] != 0) /* bus 0 is already done */
- pci_scan_bus(brp[0], &generic_pci_ops, NULL);
- }
- }
-#else
- /* XXX kludge for now because we can't properly handle
- physical addresses > 4GB. -- paulus */
- pci_scan_bus(0x1e, &generic_pci_ops, NULL);
-#endif /* CONFIG_POWER4 */
/* PCI interrupts are controlled by the OpenPIC */
pci_for_each_dev(dev) {
- np = find_pci_device_OFnode(dev->bus->number, dev->devfn);
+ np = pci_device_to_OF_node(dev);
if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
dev->irq = np->intrs[0].line;
- /* these need to be absolute addrs for OF and Matrox FB -- Cort */
- if ( dev->vendor == PCI_VENDOR_ID_MATROX )
- {
- if ( dev->resource[0].start < isa_mem_base )
- dev->resource[0].start += isa_mem_base;
- if ( dev->resource[1].start < isa_mem_base )
- dev->resource[1].start += isa_mem_base;
- }
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+
/* the F50 identifies the amd as a trident */
if ( (dev->vendor == PCI_VENDOR_ID_TRIDENT) &&
(dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET) )
{
dev->vendor = PCI_VENDOR_ID_AMD;
- pcibios_write_config_word(dev->bus->number,
- dev->devfn, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
+ pci_write_config_word(dev, PCI_VENDOR_ID,
+ PCI_VENDOR_ID_AMD);
}
#ifdef CONFIG_POWER4
power4_fixup_dev(dev);
-#else
- if (dev->bus->number > 0 && python_busnr > 0)
- dev->resource[0].start += dev->bus->number*0x01000000;
#endif
}
}
@@ -402,86 +298,213 @@ static void __init gg2_pcibios_fixup_bus(struct pci_bus *bus)
bus->resource[1] = &gg2_resources.pci_mem;
}
-decl_config_access_method(grackle);
-decl_config_access_method(indirect);
-decl_config_access_method(rtas);
+static void process_bridge_ranges(struct pci_controller *hose,
+ struct device_node *dev, int index)
+{
+ unsigned int *ranges;
+ int rlen = 0;
+ int memno = 0;
+ struct resource *res;
+
+ hose->io_base_phys = 0;
+ ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
+ while ((rlen -= 6 * sizeof(unsigned int)) >= 0) {
+ res = NULL;
+ switch (ranges[0] >> 24) {
+ case 1: /* I/O space */
+ if (ranges[2] != 0)
+ break;
+ hose->io_base_phys = ranges[3];
+ hose->io_base_virt = ioremap(ranges[3], ranges[5]);
+ if (index == 0) {
+ isa_io_base = (unsigned long) hose->io_base_virt;
+ printk("isa_io_base=%lx\n", isa_io_base);
+ }
+ res = &hose->io_resource;
+ res->flags = IORESOURCE_IO;
+ break;
+ case 2: /* memory space */
+ if (index == 0 && ranges[1] == 0 && ranges[2] == 0){
+ isa_mem_base = ranges[3];
+ printk("isa_mem_base=%lx\n", isa_mem_base);
+ }
+ if (memno == 0) {
+ hose->pci_mem_offset = ranges[3] - ranges[2];
+ printk("pci_mem_offset=%lx for this bridge\n",
+ hose->pci_mem_offset);
+ }
+ res = &hose->mem_resources[memno];
+ res->flags = IORESOURCE_MEM;
+ ++memno;
+ break;
+ }
+ if (res != NULL) {
+ res->name = dev->full_name;
+ res->start = ranges[3];
+ res->end = res->start + ranges[5] - 1;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ }
+ ranges += 6;
+ }
+}
+
+/* this is largely modeled and stolen after the pmac_pci code -- tgall
+ */
+
+static void __init
+ibm_add_bridges(struct device_node *dev)
+{
+ int *bus_range;
+ int len, index = 0;
+ struct pci_controller *hose;
+ volatile unsigned char *cfg;
+ unsigned int *dma;
+#ifdef CONFIG_POWER3
+ unsigned long *opprop = (unsigned long *)
+ get_property(find_path_device("/"), "platform-open-pic", NULL);
+#endif
+
+ for(; dev != NULL; dev = dev->next, ++index) {
+ if (dev->n_addrs < 1) {
+ printk(KERN_WARNING "Can't use %s: no address\n",
+ dev->full_name);
+ continue;
+ }
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s\n",
+ dev->full_name);
+ continue;
+ }
+ if (bus_range[1] == bus_range[0])
+ printk(KERN_INFO "PCI bus %d", bus_range[0]);
+ else
+ printk(KERN_INFO "PCI buses %d..%d",
+ bus_range[0], bus_range[1]);
+ printk(" controlled by %s at %x\n", dev->type,
+ dev->addrs[0].address);
+
+ hose = pcibios_alloc_controller();
+ if (!hose) {
+ printk("Can't allocate PCI controller structure for %s\n",
+ dev->full_name);
+ continue;
+ }
+ hose->arch_data = dev;
+ hose->first_busno = bus_range[0];
+ hose->last_busno = bus_range[1];
+ hose->ops = &python_pci_ops;
+
+ cfg = ioremap(dev->addrs[0].address + 0xf8000, 0x20);
+ hose->cfg_addr = (volatile unsigned int *) cfg;
+ hose->cfg_data = cfg + 0x10;
+
+ process_bridge_ranges(hose, dev, index);
+
+#ifdef CONFIG_POWER3
+ openpic_setup_ISU(index, opprop[index+1]);
+#endif /* CONFIG_POWER3 */
+
+ /* check the first bridge for a property that we can
+ use to set pci_dram_offset */
+ dma = (unsigned int *)
+ get_property(dev, "ibm,dma-ranges", &len);
+ if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) {
+ pci_dram_offset = dma[2] - dma[3];
+ printk("pci_dram_offset = %lx\n", pci_dram_offset);
+ }
+ }
+}
+
+#ifdef CONFIG_POWER4
+void __init
+power4_add_bridge(void)
+{
+ struct pci_controller* hose;
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+
+ hose->ops = &rtas_pci_ops;
+ pci_dram_offset = 0;
+}
+#endif /* CONFIG_POWER4 */
void __init
-chrp_setup_pci_ptrs(void)
+chrp_find_bridges(void)
{
struct device_node *py;
+ char *model, *name;
+ struct pci_controller* hose;
ppc_md.pcibios_fixup = chrp_pcibios_fixup;
+
#ifdef CONFIG_POWER4
- set_config_access_method(rtas);
- pci_dram_offset = 0;
+ power4_add_bridge();
#else /* CONFIG_POWER4 */
- if ( !strncmp("MOT",
- get_property(find_path_device("/"), "model", NULL),3) )
- {
- pci_dram_offset = 0;
- isa_mem_base = 0xf7000000;
- isa_io_base = 0xfe000000;
- set_config_access_method(grackle);
- }
- else
- {
- if ((py = find_compatible_devices("pci", "IBM,python")) != 0
- || (py = find_compatible_devices("pci", "IBM,python3.0")) != 0)
- {
- char *name = get_property(find_path_device("/"), "name", NULL);
-
- /* find out how many pythons */
- while ( (py = py->next) ) python_busnr++;
- set_config_access_method(python);
-
- /*
- * We base these values on the machine type but should
- * try to read them from the python controller itself.
- * -- Cort
- */
- if ( !strncmp("IBM,7025-F50", name, 12) )
- {
- pci_dram_offset = 0x80000000;
- isa_mem_base = 0xa0000000;
- isa_io_base = 0x88000000;
- } else if ( !strncmp("IBM,7043-260", name, 12)
- || !strncmp("IBM,7044-270", name, 12))
- {
- pci_dram_offset = 0x0;
- isa_mem_base = 0xc0000000;
- isa_io_base = 0xf8000000;
- }
- }
- else
- {
- if ( !strncmp("IBM,7043-150", get_property(find_path_device("/"), "name", NULL),12) ||
- !strncmp("IBM,7046-155", get_property(find_path_device("/"), "name", NULL),12) ||
- !strncmp("IBM,7046-B50", get_property(find_path_device("/"), "name", NULL),12) )
- {
- pci_dram_offset = 0;
- isa_mem_base = 0x80000000;
- isa_io_base = 0xfe000000;
- pci_config_address = (unsigned int *)0xfec00000;
- pci_config_data = (unsigned char *)0xfee00000;
- set_config_access_method(indirect);
- }
- else
- {
- /* LongTrail */
- pci_dram_offset = 0;
- isa_mem_base = 0xf7000000;
- isa_io_base = 0xf8000000;
- set_config_access_method(gg2);
- ppc_md.pcibios_fixup = gg2_pcibios_fixup;
- ppc_md.pcibios_fixup_bus = gg2_pcibios_fixup_bus;
- }
- }
+ model = get_property(find_path_device("/"), "model", NULL);
+ if (!strncmp("MOT", model, 3)) {
+ struct pci_controller* hose;
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+ /* Check that please. This must be the root of the OF
+ * PCI tree (the root host bridge
+ */
+ hose->arch_data = find_devices("pci");
+ setup_grackle(hose, 0x20000);
+ return;
}
+
+ if ((py = find_compatible_devices("pci", "IBM,python")))
+ {
+ /* XXX xmon_init_scc needs this set and the BAT
+ set up in MMU_init */
+ ibm_add_bridges(find_devices("pci"));
+ return;
+ }
+
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+ /* Check that please. This must be the root of the OF
+ * PCI tree (the root host bridge
+ */
+ hose->arch_data = find_devices("pci");
+ name = get_property(find_path_device("/"), "name", NULL);
+ if (!strncmp("IBM,7043-150", name, 12) ||
+ !strncmp("IBM,7046-155", name, 12) ||
+ !strncmp("IBM,7046-B50", name, 12) ) {
+ setup_grackle(hose, 0x01000000);
+ isa_mem_base = 0x80000000;
+ return;
+ }
+
+ /* LongTrail */
+ hose->ops = &gg2_pci_ops;
+ pci_dram_offset = 0;
+ isa_mem_base = 0xf7000000;
+ hose->io_base_phys = (unsigned long) 0xf8000000;
+ hose->io_base_virt = ioremap(hose->io_base_phys, 0x10000);
+ isa_io_base = (unsigned long) hose->io_base_virt;
+ ppc_md.pcibios_fixup = gg2_pcibios_fixup;
+ ppc_md.pcibios_fixup_bus = gg2_pcibios_fixup_bus;
#endif /* CONFIG_POWER4 */
}
#ifdef CONFIG_PPC64BRIDGE
+#ifdef CONFIG_POWER4
/*
* Hack alert!!!
* 64-bit machines like POWER3 and POWER4 have > 32 bit
@@ -490,9 +513,7 @@ chrp_setup_pci_ptrs(void)
* page table gives us into parts of the physical address
* space above 4GB so we can access the I/O devices.
*/
-
-#ifdef CONFIG_POWER4
-static unsigned long pci_address_offset(int busnr, unsigned int flags)
+unsigned long pci_address_offset(int busnr, unsigned int flags)
{
unsigned long offset = 0;
@@ -508,34 +529,5 @@ static unsigned long pci_address_offset(int busnr, unsigned int flags)
}
return offset;
}
-
-unsigned long phys_to_bus(unsigned long pa)
-{
- if (pa >= 0xf8000000)
- pa -= 0x38000000;
- else if (pa >= 0x80000000 && pa < 0xc0000000)
- pa += 0x40000000;
- return pa;
-}
-
-unsigned long bus_to_phys(unsigned int ba, int busnr)
-{
- return ba + pci_address_offset(busnr, IORESOURCE_MEM);
-}
-
-#else /* CONFIG_POWER4 */
-/*
- * For now assume I/O addresses are < 4GB and PCI bridges don't
- * remap addresses on POWER3 machines.
- */
-unsigned long phys_to_bus(unsigned long pa)
-{
- return pa;
-}
-
-unsigned long bus_to_phys(unsigned int ba, int busnr)
-{
- return ba;
-}
#endif /* CONFIG_POWER4 */
#endif /* CONFIG_PPC64BRIDGE */
diff --git a/arch/ppc/kernel/chrp_setup.c b/arch/ppc/kernel/chrp_setup.c
index ccc6621de..5682f3fad 100644
--- a/arch/ppc/kernel/chrp_setup.c
+++ b/arch/ppc/kernel/chrp_setup.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
-#include <linux/openpic.h>
#include <linux/version.h>
#include <linux/adb.h>
#include <linux/module.h>
@@ -50,21 +49,19 @@
#include <asm/hydra.h>
#include <asm/keyboard.h>
#include <asm/init.h>
-
#include <asm/time.h>
+
#include "local_irq.h"
#include "i8259.h"
#include "open_pic.h"
#include "xics.h"
-extern volatile unsigned char *chrp_int_ack_special;
-
unsigned long chrp_get_rtc_time(void);
int chrp_set_rtc_time(unsigned long nowtime);
void chrp_calibrate_decr(void);
long chrp_time_init(void);
-void chrp_setup_pci_ptrs(void);
+void chrp_find_bridges(void);
void chrp_event_scan(void);
void rtas_display_progress(char *, unsigned short);
void rtas_indicator_progress(char *, unsigned short);
@@ -92,7 +89,7 @@ kdev_t boot_dev;
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern int probingmem;
-extern unsigned long loops_per_sec;
+extern unsigned long loops_per_jiffy;
extern int bootx_text_mapped;
static int max_width;
@@ -244,7 +241,7 @@ chrp_setup_arch(void)
struct device_node *device;
/* init to some ~sane value until calibrate_delay() runs */
- loops_per_sec = 50000000;
+ loops_per_jiffy = 50000000/HZ;
#ifdef CONFIG_BLK_DEV_INITRD
/* this is fine for chrp */
@@ -257,6 +254,9 @@ chrp_setup_arch(void)
ROOT_DEV = to_kdev_t(0x0802); /* sda2 (sda1 is for the kernel) */
printk("Boot arguments: %s\n", cmd_line);
+ /* Lookup PCI host bridges */
+ chrp_find_bridges();
+
#ifndef CONFIG_PPC64BRIDGE
/* PCI bridge config space access area -
* appears to be not in devtree on longtrail. */
@@ -266,11 +266,12 @@ chrp_setup_arch(void)
* -- Geert
*/
hydra_init(); /* Mac I/O */
+
#endif /* CONFIG_PPC64BRIDGE */
#ifndef CONFIG_POWER4
/* Some IBM machines don't have the hydra -- Cort */
- if ( !OpenPIC )
+ if ( !OpenPIC_Addr )
{
unsigned long *opprop;
@@ -279,7 +280,7 @@ chrp_setup_arch(void)
if (opprop != 0) {
printk("OpenPIC addrs: %lx %lx %lx\n",
opprop[0], opprop[1], opprop[2]);
- OpenPIC = ioremap(opprop[0], sizeof(struct OpenPIC));
+ OpenPIC_Addr = ioremap(opprop[0], 0x40000);
}
}
#endif
@@ -292,23 +293,17 @@ chrp_setup_arch(void)
conswitchp = &dummy_con;
#endif
-#ifndef CONFIG_PPC64BRIDGE
- pmac_find_bridges();
-#endif /* CONFIG_PPC64BRIDGE */
-
/* Get the event scan rate for the rtas so we know how
* often it expects a heartbeat. -- Cort
*/
- if ( rtas_data )
- {
+ if ( rtas_data ) {
struct property *p;
device = find_devices("rtas");
for ( p = device->properties;
p && strncmp(p->name, "rtas-event-scan-rate", 20);
p = p->next )
/* nothing */ ;
- if ( p && *(unsigned long *)p->value )
- {
+ if ( p && *(unsigned long *)p->value ) {
ppc_md.heartbeat = chrp_event_scan;
ppc_md.heartbeat_reset = (HZ/(*(unsigned long *)p->value)*30)-1;
ppc_md.heartbeat_count = 1;
@@ -365,79 +360,44 @@ chrp_irq_cannonicalize(u_int irq)
}
}
-int __chrp chrp_get_irq( struct pt_regs *regs )
-{
- int irq;
-
- irq = openpic_irq( smp_processor_id() );
- if (irq == IRQ_8259_CASCADE)
- {
- /*
- * This magic address generates a PCI IACK cycle.
- */
- if ( chrp_int_ack_special )
- irq = *chrp_int_ack_special;
- else
- irq = i8259_irq( smp_processor_id() );
- openpic_eoi( smp_processor_id() );
- }
- if (irq == OPENPIC_VEC_SPURIOUS)
- /*
- * Spurious interrupts should never be
- * acknowledged
- */
- irq = -1;
- /*
- * I would like to openpic_eoi here but there seem to be timing problems
- * between the openpic ack and the openpic eoi.
- * -- Cort
- */
- return irq;
-}
-
-void __chrp chrp_post_irq(struct pt_regs* regs, int irq)
-{
- /*
- * If it's an i8259 irq then we've already done the
- * openpic irq. So we just check to make sure the controller
- * is an openpic and if it is then eoi
- *
- * We do it this way since our irq_desc[irq].handler can change
- * with RTL and no longer be open_pic -- Cort
- */
- if ( irq >= open_pic_irq_offset)
- openpic_eoi( smp_processor_id() );
-}
-
void __init chrp_init_IRQ(void)
{
struct device_node *np;
int i;
unsigned long *addrp;
+ unsigned char* chrp_int_ack_special = 0;
+ unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
+ int nmi_irq = -1;
+#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
+ struct device_node *kbd;
+#endif
if (!(np = find_devices("pci"))
|| !(addrp = (unsigned long *)
get_property(np, "8259-interrupt-acknowledge", NULL)))
printk("Cannot find pci to get ack address\n");
else
- chrp_int_ack_special = (volatile unsigned char *)
- ioremap(*addrp, 1);
- open_pic_irq_offset = 16;
- for ( i = 16 ; i < NR_IRQS ; i++ )
- irq_desc[i].handler = &open_pic;
- openpic_init(1);
- enable_irq(IRQ_8259_CASCADE);
- for ( i = 0 ; i < 16 ; i++ )
+ chrp_int_ack_special = (unsigned char *)ioremap(*addrp, 1);
+ /* hydra still sets OpenPIC_InitSenses to a static set of values */
+ if (OpenPIC_InitSenses == NULL) {
+ prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS);
+ OpenPIC_InitSenses = init_senses;
+ OpenPIC_NumInitSenses = NR_IRQS - NUM_8259_INTERRUPTS;
+ }
+ openpic_init(1, NUM_8259_INTERRUPTS, chrp_int_ack_special, nmi_irq);
+ for ( i = 0 ; i < NUM_8259_INTERRUPTS ; i++ )
irq_desc[i].handler = &i8259_pic;
i8259_init();
-#ifdef CONFIG_XMON
- request_irq(openpic_to_irq(HYDRA_INT_ADB_NMI),
- xmon_irq, 0, "NMI", 0);
-#endif /* CONFIG_XMON */
-#ifdef CONFIG_SMP
- request_irq(openpic_to_irq(OPENPIC_VEC_IPI),
- openpic_ipi_action, 0, "IPI0", 0);
-#endif /* CONFIG_SMP */
+#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
+ /* see if there is a keyboard in the device tree
+ with a parent of type "adb" */
+ for (kbd = find_devices("keyboard"); kbd; kbd = kbd->next)
+ if (kbd->parent && kbd->parent->type
+ && strcmp(kbd->parent->type, "adb") == 0)
+ break;
+ if (kbd)
+ request_irq( HYDRA_INT_ADB_NMI, xmon_irq, 0, "XMON break", 0);
+#endif
}
void __init
@@ -556,12 +516,6 @@ chrp_ide_release_region(ide_ioreg_t from,
}
void __chrp
-chrp_ide_fix_driveid(struct hd_driveid *id)
-{
- ppc_generic_ide_fix_driveid(id);
-}
-
-void __chrp
chrp_ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
{
ide_ioreg_t reg = data_port;
@@ -586,7 +540,6 @@ void __init
chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
- chrp_setup_pci_ptrs();
#ifdef CONFIG_BLK_DEV_INITRD
/* take care of initrd if we have one */
if ( r6 )
@@ -596,10 +549,10 @@ void __init
}
#endif /* CONFIG_BLK_DEV_INITRD */
- /* pci_dram_offset/isa_io_base/isa_mem_base set by setup_pci_ptrs() */
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
+ isa_io_base = CHRP_ISA_IO_BASE; /* default value */
ppc_md.setup_arch = chrp_setup_arch;
ppc_md.setup_residual = NULL;
@@ -607,8 +560,8 @@ void __init
ppc_md.irq_cannonicalize = chrp_irq_cannonicalize;
#ifndef CONFIG_POWER4
ppc_md.init_IRQ = chrp_init_IRQ;
- ppc_md.get_irq = chrp_get_irq;
- ppc_md.post_irq = chrp_post_irq;
+ ppc_md.get_irq = openpic_get_irq;
+ ppc_md.post_irq = NULL;
#else
ppc_md.init_IRQ = xics_init_IRQ;
ppc_md.get_irq = xics_get_irq;
@@ -669,11 +622,12 @@ void __init
ppc_ide_md.ide_check_region = chrp_ide_check_region;
ppc_ide_md.ide_request_region = chrp_ide_request_region;
ppc_ide_md.ide_release_region = chrp_ide_release_region;
- ppc_ide_md.fix_driveid = chrp_ide_fix_driveid;
+ ppc_ide_md.fix_driveid = ppc_generic_ide_fix_driveid;
ppc_ide_md.ide_init_hwif = chrp_ide_init_hwif_ports;
ppc_ide_md.io_base = _IO_BASE;
#endif
+
/*
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 89922aaab..93bb8ca00 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -234,7 +234,6 @@ _GLOBAL(_switch)
li r0,0x0ff0
stw r0,TRAP(r1)
stw r1,KSP(r3) /* Set old stack pointer */
- sync
tophys(r0,r4)
CLR_TOP32(r0)
mtspr SPRG3,r0 /* Update current THREAD phys addr */
@@ -251,7 +250,7 @@ _GLOBAL(_switch)
#endif
mtspr M_TWB,r9 /* Update MMU base address */
tlbia
- SYNC
+ sync
#endif /* CONFIG_8xx */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
@@ -342,23 +341,23 @@ lost_irq_ret:
do_bottom_half_ret:
2: lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
- beq+ restore /* if so, check need_resched and signals */
- .globl ret_to_user_hook
-ret_to_user_hook:
- nop
+ beq+ do_signal_ret /* if so, check need_resched and signals */
lwz r3,NEED_RESCHED(r2)
cmpi 0,r3,0 /* check need_resched flag */
beq+ 7f
bl schedule
7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
cmpwi 0,r5,0
- beq+ restore
+ beq+ do_signal_ret
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
MOL_HOOK_MMU(8,r8)
bl do_signal
.globl do_signal_ret
do_signal_ret:
+ .globl ret_to_user_hook
+ret_to_user_hook:
+ nop
restore:
lwz r3,_XER(r1)
mtspr XER,r3
@@ -372,7 +371,7 @@ restore:
*/
mfmsr r0 /* Get current interrupt state */
rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r0 /* Update machine state */
/* if returning to user mode, set new sprg2 and save kernel SP */
@@ -468,7 +467,7 @@ enter_rtas:
andc r0,r9,r0
li r10,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
andc r9,r0,r10
- sync /* disable interrupts so SRR0/1 */
+ SYNC /* disable interrupts so SRR0/1 */
mtmsr r0 /* don't get trashed */
mtlr r6
CLR_TOP32(r7)
diff --git a/arch/ppc/kernel/error_log.c b/arch/ppc/kernel/error_log.c
new file mode 100644
index 000000000..5ee750dda
--- /dev/null
+++ b/arch/ppc/kernel/error_log.c
@@ -0,0 +1,183 @@
+/*
+ * arch/ppc/kernel/error_log.c
+ *
+ * Copyright (c) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * Error processing of errors found by the rtas event-scan routine
+ * which is done with every heartbeat. (chrp_setup.c)
+ */
+
+#include <linux/sched.h>
+
+#include <asm/prom.h>
+
+#include "error_log.h"
+
+/* ****************************************************************** */
+/*
+ * EVENT-SCAN
+ * The whole stuff below here doesn't take any action when it found
+ * an error, it just prints as much information as possible and
+ * then it's up to the user to decide what to do.
+ *
+ * Returns 0 if no errors were found
+ * Returns 1 if there may be more errors
+ */
+int ppc_rtas_errorlog_scan(void)
+{
+const char *_errlog_severity[] = {
+#ifdef VERBOSE_ERRORS
+ "No Error\n\t\
+Should require no further information",
+ "Event\n\t\
+This is not really an error, it is an event. I use events\n\t\
+to communicate with RTAS back and forth.",
+ "Warning\n\t\
+Indicates a non-state-losing error, either fully recovered\n\t\
+by RTAS or not needing recovery. Ignore it.",
+ "Error sync\n\t\
+May only be fatal to a certain program or thread. Recovery\n\t\
+and continuation is possible, if I only had a handler for\n\t\
+this. Less serious",
+ "Error\n\t\
+Less serious, but still causing a loss of data and state.\n\t\
+I can't tell you exactly what to do, You have to decide\n\t\
+with help from the target and initiator field, what kind\n\t\
+of further actions may take place.",
+ "Fatal\n\t\
+Represent a permanent hardware failure and I believe this\n\t\
+affects my overall performance and behaviour. I would not\n\t\
+attempt to continue normal operation."
+#else
+ "No Error",
+ "Event",
+ "Warning",
+ "Error sync",
+ "Error",
+ "Fatal"
+#endif /* VERBOSE_ERRORS */
+};
+
+#if 0 /* unused?? */
+const char *_errlog_disposition[] = {
+#ifdef VERBOSE_ERRORS
+ "Fully recovered\n\t\
+There was an error, but it is fully recovered by RTAS.",
+ "Limited recovery\n\t\
+RTAS was able to recover the state of the machine, but some\n\t\
+feature of the machine has been disabled or lost (for example\n\t\
+error checking) or performance may suffer.",
+ "Not recovered\n\t\
+Whether RTAS did not try to recover anything or recovery failed:\n\t\
+HOUSTON, WE HAVE A PROBLEM!"
+#else
+ "Fully recovered",
+ "Limited recovery",
+ "Not recovered"
+#endif /* VERBOSE_ERRORS */
+};
+#endif
+
+const char *_errlog_extended[] = {
+#ifdef VERBOSE_ERRORS
+ "Not present\n\t\
+Sad, the RTAS call didn't return an extended error log.",
+ "Present\n\t\
+The extended log is present and hopefully it contains a lot of\n\t\
+useful information, which leads to the solution of the problem."
+#else
+ "Not present",
+ "Present"
+#endif /* VERBOSE_ERRORS */
+};
+
+const char *_errlog_initiator[] = {
+ "Unknown or not applicable",
+ "CPU",
+ "PCI",
+ "ISA",
+ "Memory",
+ "Power management"
+};
+
+const char *_errlog_target[] = {
+ "Unknown or not applicable",
+ "CPU",
+ "PCI",
+ "ISA",
+ "Memory",
+ "Power management"
+};
+ rtas_error_log error_log;
+ char logdata[1024];
+ int error;
+#if 0 /* unused?? */
+ int retries = 0; /* if HW error, try 10 times */
+#endif
+
+ error = call_rtas ("event-scan", 4, 1, (unsigned long *)&error_log,
+ INTERNAL_ERROR | EPOW_WARNING,
+ 0, __pa(logdata), 1024);
+
+ if (error == 1) /* no errors found */
+ return 0;
+
+ if (error == -1) {
+ printk(KERN_ERR "Unable to get errors. Do you a favor and throw this box away\n");
+ return 0;
+ }
+ if (error_log.version != 1)
+ printk(KERN_WARNING "Unknown version (%d), please implement me\n",
+ error_log.version);
+
+ switch (error_log.disposition) {
+ case DISP_FULLY_RECOVERED:
+ /* there was an error, but everything is fine now */
+ return 0;
+ case DISP_NOT_RECOVERED:
+ printk("We have a really serious Problem!\n");
+ case DISP_LIMITED_RECOVERY:
+ printk("Error classification\n");
+ printk("Severity : %s\n",
+ ppc_rtas_errorlog_check_severity (error_log));
+ printk("Initiator : %s\n",
+ ppc_rtas_errorlog_check_initiator (error_log));
+ printk("Target : %s\n",
+ ppc_rtas_errorlog_check_target (error_log));
+ printk("Type : %s\n",
+ ppc_rtas_errorlog_check_type (error_log));
+ printk("Ext. log : %s\n",
+ ppc_rtas_errorlog_check_extended (error_log));
+ if (error_log.extended)
+ ppc_rtas_errorlog_disect_extended (logdata);
+ return 1;
+ default:
+ /* nothing */
+ break;
+ }
+ return 0;
+}
+/* ****************************************************************** */
+const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log)
+{
+ const char *_errlog_type[] = {
+ "unknown type",
+ "too many tries failed",
+ "TCE error",
+ "RTAS device failed",
+ "target timed out",
+ "parity error on data", /* 5 */
+ "parity error on address",
+ "parity error on external cache",
+ "access to invalid address",
+ "uncorrectable ECC error",
+ "corrected ECC error" /* 10 */
+ };
+ if (error_log.type == TYPE_EPOW)
+ return "EPOW";
+ if (error_log.type >= TYPE_PMGM_POWER_SW_ON)
+ return "PowerMGM Event (not handled right now)";
+ return _errlog_type[error_log.type];
+}
+
diff --git a/arch/ppc/kernel/error_log.h b/arch/ppc/kernel/error_log.h
new file mode 100644
index 000000000..579fc3e0d
--- /dev/null
+++ b/arch/ppc/kernel/error_log.h
@@ -0,0 +1,95 @@
+#ifndef __ERROR_LOG_H__
+#define __ERROR_LOG_H__
+
+#define VERBOSE_ERRORS 1 /* Maybe I enlarge the kernel too much */
+#undef VERBOSE_ERRORS
+
+/* Event classes */
+/* XXX: Endianness correct? NOW */
+#define INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define EPOW_WARNING 0x40000000 /* set bit 1 */
+#define POWERMGM_EVENTS 0x20000000 /* set bit 2 */
+
+/* event-scan returns */
+#define SEVERITY_FATAL 0x5
+#define SEVERITY_ERROR 0x4
+#define SEVERITY_ERROR_SYNC 0x3
+#define SEVERITY_WARNING 0x2
+#define SEVERITY_EVENT 0x1
+#define SEVERITY_NO_ERROR 0x0
+#define DISP_FULLY_RECOVERED 0x0
+#define DISP_LIMITED_RECOVERY 0x1
+#define DISP_NOT_RECOVERED 0x2
+#define PART_PRESENT 0x0
+#define PART_NOT_PRESENT 0x1
+#define INITIATOR_UNKNOWN 0x0
+#define INITIATOR_CPU 0x1
+#define INITIATOR_PCI 0x2
+#define INITIATOR_ISA 0x3
+#define INITIATOR_MEMORY 0x4
+#define INITIATOR_POWERMGM 0x5
+#define TARGET_UNKNOWN 0x0
+#define TARGET_CPU 0x1
+#define TARGET_PCI 0x2
+#define TARGET_ISA 0x3
+#define TARGET_MEMORY 0x4
+#define TARGET_POWERMGM 0x5
+#define TYPE_RETRY 0x01
+#define TYPE_TCE_ERR 0x02
+#define TYPE_INTERN_DEV_FAIL 0x03
+#define TYPE_TIMEOUT 0x04
+#define TYPE_DATA_PARITY 0x05
+#define TYPE_ADDR_PARITY 0x06
+#define TYPE_CACHE_PARITY 0x07
+#define TYPE_ADDR_INVALID 0x08
+#define TYPE_ECC_UNCORR 0x09
+#define TYPE_ECC_CORR 0x0a
+#define TYPE_EPOW 0x40
+/* I don't add PowerMGM events right now, this is a different topic */
+#define TYPE_PMGM_POWER_SW_ON 0x60
+#define TYPE_PMGM_POWER_SW_OFF 0x61
+#define TYPE_PMGM_LID_OPEN 0x62
+#define TYPE_PMGM_LID_CLOSE 0x63
+#define TYPE_PMGM_SLEEP_BTN 0x64
+#define TYPE_PMGM_WAKE_BTN 0x65
+#define TYPE_PMGM_BATTERY_WARN 0x66
+#define TYPE_PMGM_BATTERY_CRIT 0x67
+#define TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define TYPE_PMGM_SWITCH_TO_AC 0x69
+#define TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define TYPE_PMGM_RING_INDICATE 0x6d
+#define TYPE_PMGM_LAN_ATTENTION 0x6e
+#define TYPE_PMGM_TIME_ALARM 0x6f
+#define TYPE_PMGM_CONFIG_CHANGE 0x70
+#define TYPE_PMGM_SERVICE_PROC 0x71
+
+typedef struct _rtas_error_log {
+ unsigned long version:8; /* Architectural version */
+ unsigned long severity:3; /* Severity level of error */
+ unsigned long disposition:2; /* Degree of recovery */
+ unsigned long extended:1; /* extended log present? */
+ unsigned long /* reserved */ :2; /* Reserved for future use */
+ unsigned long initiator:4; /* Initiator of event */
+ unsigned long target:4; /* Target of failed operation */
+ unsigned long type:8; /* General event or error*/
+ unsigned long extended_log_length:32; /* length in bytes */
+} rtas_error_log;
+
+/* ****************************************************************** */
+#define ppc_rtas_errorlog_check_severity(x) \
+ (_errlog_severity[x.severity])
+#define ppc_rtas_errorlog_check_target(x) \
+ (_errlog_target[x.target])
+#define ppc_rtas_errorlog_check_initiator(x) \
+ (_errlog_initiator[x.initiator])
+#define ppc_rtas_errorlog_check_extended(x) \
+ (_errlog_extended[x.extended])
+#define ppc_rtas_errorlog_disect_extended(x) \
+ do { /* implement me */ } while(0)
+extern const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log);
+extern int ppc_rtas_errorlog_scan(void);
+
+
+#endif /* __ERROR_LOG_H__ */
diff --git a/arch/ppc/kernel/feature.c b/arch/ppc/kernel/feature.c
index 57599917a..f22f4b163 100644
--- a/arch/ppc/kernel/feature.c
+++ b/arch/ppc/kernel/feature.c
@@ -238,12 +238,15 @@ feature_add_controller(struct device_node *controller_device, fbit* bits);
static struct feature_controller*
feature_lookup_controller(struct device_node *device);
+#ifdef CONFIG_PMAC_PBOOK
static void heathrow_prepare_for_sleep(struct feature_controller* ctrler);
static void heathrow_wakeup(struct feature_controller* ctrler);
-static void keylargo_init(void);
-static void uninorth_init(void);
static void core99_prepare_for_sleep(struct feature_controller* ctrler);
static void core99_wake_up(struct feature_controller* ctrler);
+#endif /* CONFIG_PMAC_PBOOK */
+
+static void keylargo_init(void);
+static void uninorth_init(void);
/* static variables */
static struct feature_controller controllers[MAX_FEATURE_CONTROLLERS];
@@ -255,6 +258,10 @@ static volatile u32* keylargo_base = NULL;
static int uninorth_rev;
static int keylargo_rev;
+/*
+ * WARNING ! This function is called early in setup_arch, neither the IO base
+ * nor the udelay calibration have been done yet
+ */
void
feature_init(void)
{
@@ -527,14 +534,31 @@ feature_set_usb_power(struct device_node* device, int power)
void
feature_set_firewire_power(struct device_node* device, int power)
{
+ if (!uninorth_base)
+ return;
+ if (power)
+ UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+ else
+ UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+ udelay(20);
+}
+
+#ifdef CONFIG_SMP
+void
+feature_core99_kick_cpu1(void)
+{
+ out_8((volatile u8 *)KL_FCR(KL_GPIO_KICK_CPU1), KL_GPIO_KICK_CPU1_UP);
+ udelay(1);
+ out_8((volatile u8 *)KL_FCR(KL_GPIO_KICK_CPU1), KL_GPIO_KICK_CPU1_DOWN);
}
+#endif /* CONFIG_SMP */
/* Initialize the Core99 UniNorth host bridge and memory controller
*/
static void
uninorth_init(void)
{
- struct device_node* gmac;
+ struct device_node* gmac, *fw;
unsigned long actrl;
/* Set the arbitrer QAck delay according to what Apple does
@@ -564,6 +588,11 @@ uninorth_init(void)
}
if (gmac)
feature_set_gmac_power(gmac, 0);
+
+ /* Kludge (enable FW before PCI probe) */
+ fw = find_devices("firewire");
+ if (fw && device_is_compatible(fw, "pci106b,18"))
+ feature_set_firewire_power(fw, 1);
}
/* Initialize the Core99 KeyLargo ASIC. Currently, we just make sure
@@ -576,6 +605,43 @@ keylargo_init(void)
}
#ifdef CONFIG_PMAC_PBOOK
+
+static u32 save_fcr[5];
+static u32 save_mbcr;
+
+static void
+heathrow_prepare_for_sleep(struct feature_controller* ctrler)
+{
+ save_mbcr = in_le32(FREG(ctrler, 0x34));
+ save_fcr[0] = in_le32(FREG(ctrler, 0x38));
+ save_fcr[1] = in_le32(FREG(ctrler, 0x3c));
+
+ out_le32(FREG(ctrler, 0x38), save_fcr[0] & ~HRW_IOBUS_ENABLE);
+}
+
+static void
+heathrow_wakeup(struct feature_controller* ctrler)
+{
+ out_le32(FREG(ctrler, 0x38), save_fcr[0]);
+ out_le32(FREG(ctrler, 0x3c), save_fcr[1]);
+ out_le32(FREG(ctrler, 0x34), save_mbcr);
+ mdelay(1);
+ out_le32(FREG(ctrler, 0x38), save_fcr[0] | HRW_IOBUS_ENABLE);
+ mdelay(1);
+}
+
+static void
+core99_prepare_for_sleep(struct feature_controller* ctrler)
+{
+ /* Not yet implemented */
+}
+
+static void
+core99_wake_up(struct feature_controller* ctrler)
+{
+ /* Not yet implemented */
+}
+
void
feature_prepare_for_sleep(void)
{
@@ -599,7 +665,6 @@ feature_prepare_for_sleep(void)
}
}
-
void
feature_wake_up(void)
{
@@ -622,39 +687,4 @@ feature_wake_up(void)
}
}
-static u32 save_fcr[5];
-static u32 save_mbcr;
-
-static void
-heathrow_prepare_for_sleep(struct feature_controller* ctrler)
-{
- save_mbcr = in_le32(FREG(ctrler, 0x34));
- save_fcr[0] = in_le32(FREG(ctrler, 0x38));
- save_fcr[1] = in_le32(FREG(ctrler, 0x3c));
-
- out_le32(FREG(ctrler, 0x38), save_fcr[0] & ~HRW_IOBUS_ENABLE);
-}
-
-static void
-heathrow_wakeup(struct feature_controller* ctrler)
-{
- out_le32(FREG(ctrler, 0x38), save_fcr[0]);
- out_le32(FREG(ctrler, 0x3c), save_fcr[1]);
- out_le32(FREG(ctrler, 0x34), save_mbcr);
- mdelay(1);
- out_le32(FREG(ctrler, 0x38), save_fcr[0] | HRW_IOBUS_ENABLE);
- mdelay(1);
-}
-
-static void
-core99_prepare_for_sleep(struct feature_controller* ctrler)
-{
- /* Not yet implemented */
-}
-
-static void
-core99_wake_up(struct feature_controller* ctrler)
-{
- /* Not yet implemented */
-}
#endif /* CONFIG_PMAC_PBOOK */
diff --git a/arch/ppc/kernel/galaxy_pci.c b/arch/ppc/kernel/galaxy_pci.c
index aeddd9a0e..36e7e14fb 100644
--- a/arch/ppc/kernel/galaxy_pci.c
+++ b/arch/ppc/kernel/galaxy_pci.c
@@ -36,67 +36,84 @@
/* Function Prototypes */
-decl_config_access_method(galaxy);
-
-
void __init
galaxy_pcibios_fixup(void)
{
}
-void __init
-galaxy_setup_pci_ptrs(void)
+static int
+galaxy_pcibios_read_config_byte(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u8 *val)
{
- set_config_access_method(galaxy);
- ppc_md.pcibios_fixup = galaxy_pcibios_fixup;
+ return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
+static int
+galaxy_pcibios_read_config_word(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u16 *val)
{
return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
+static int
+galaxy_pcibios_read_config_dword(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u32 *val)
{
return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
+static int
+galaxy_pcibios_write_config_byte(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u8 val)
{
return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
+static int
+galaxy_pcibios_write_config_word(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u16 val)
{
return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
+static int
+galaxy_pcibios_write_config_dword(struct pci_controller* hose,
+ u8 bus, u8 dev, u8 offset, u32 val)
{
return (PCIBIOS_SUCCESSFUL);
}
-int
-galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
+static struct pci_controller_ops galaxy_pci_ops =
{
+ galaxy_pcibios_read_config_byte,
+ galaxy_pcibios_read_config_word,
+ galaxy_pcibios_read_config_dword,
+ galaxy_pcibios_write_config_byte,
+ galaxy_pcibios_write_config_word,
+ galaxy_pcibios_write_config_dword
+};
- return (PCIBIOS_SUCCESSFUL);
+void __init
+galaxy_find_bridges(void)
+{
+ struct pci_controller* hose;
+
+ set_config_access_method(galaxy);
+
+ ppc_md.pcibios_fixup = galaxy_pcibios_fixup;
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+ hose->ops = &galaxy_pci_ops;
+ /* Todo ...
+ hose->cfg_data = ioremap(PCICFGDATA, ...);
+ hose->cfg_addr = ioremap(PCICFGADDR, ...);
+ */
}
diff --git a/arch/ppc/kernel/hashtable.S b/arch/ppc/kernel/hashtable.S
index 06b7c6f1d..8ea3cdc7a 100644
--- a/arch/ppc/kernel/hashtable.S
+++ b/arch/ppc/kernel/hashtable.S
@@ -56,7 +56,6 @@ hash_page:
#ifdef CONFIG_PPC64BRIDGE
mfmsr r0
clrldi r0,r0,1 /* make sure it's in 32-bit mode */
- sync
MTMSRD(r0)
isync
#endif
@@ -112,23 +111,31 @@ hash_page:
#endif
tophys(r2,r5)
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r6,0(r2) /* get linux-style pte */
ori r4,r4,1 /* set _PAGE_PRESENT bit in access */
+ rlwinm r5,r4,5,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ rlwimi r5,r4,7,22,22 /* _PAGE_RW -> _PAGE_HWWRITE */
+ ori r5,r5,0x100 /* set _PAGE_ACCESSED */
+retry:
+ lwz r6,0(r2) /* get linux-style pte */
andc. r0,r4,r6 /* check access & ~permission */
#ifdef CONFIG_SMP
bne- hash_page_out /* return if access not permitted */
#else
bnelr-
#endif
+ andc. r0,r5,r6 /* any bits not yet set? */
+ beq 2f
- ori r6,r6,0x100 /* set _PAGE_ACCESSED in pte */
- rlwinm r5,r4,5,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
- rlwimi r5,r4,7,22,22 /* _PAGE_RW -> _PAGE_HWWRITE */
- or r6,r6,r5
- stw r6,0(r2) /* update PTE (accessed/dirty bits) */
+ /* Update the linux PTE atomically */
+ lwarx r0,0,r2 /* refetch the pte and check */
+ cmpw 0,r0,r6 /* that it hasn't been changed */
+ bne- retry /* retry if it has */
+ or r6,r6,r5 /* set accessed/dirty bits */
+ stwcx. r6,0,r2 /* attempt to update PTE */
+ bne- retry /* retry if someone got there first */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwinm r4,r6,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
+2: rlwinm r4,r6,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
rlwimi r6,r6,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
ori r4,r4,0xe04 /* clear out reserved bits */
andc r6,r6,r4 /* PP=2 or 0, when _PAGE_HWWRITE */
@@ -166,12 +173,16 @@ hash_page_patch_A:
rlwimi r4,r5,32-5,25-Hash_bits,24 /* (VSID & hash_mask) << 7 */
rlwinm r0,r3,32-5,25-Hash_bits,24 /* (PI & hash_mask) << 7 */
xor r4,r4,r0 /* make primary hash */
+ li r2,8 /* PTEs/group */
+#ifndef CONFIG_SMP
+ /* We don't do this for SMP - another cpu could have put in
+ the appropriate PTE since we took the exception. -- paulus. */
/* See whether it was a PTE not found exception or a
protection violation. */
andis. r0,r20,0x4000
- li r2,8 /* PTEs/group */
bne 10f /* no PTE: go look for an empty slot */
+#endif /* CONFIG_SMP */
tlbie r3 /* invalidate TLB entry */
/* Search the primary PTEG for a PTE whose 1st dword matches r5 */
@@ -263,7 +274,6 @@ found_empty:
std r5,0(r3)
found_slot:
std r6,8(r3)
- sync
#else /* CONFIG_SMP */
/*
@@ -311,12 +321,16 @@ hash_page_patch_A:
rlwimi r4,r5,32-1,26-Hash_bits,25 /* (VSID & hash_mask) << 6 */
rlwinm r0,r3,32-6,26-Hash_bits,25 /* (PI & hash_mask) << 6 */
xor r4,r4,r0 /* make primary hash */
+ li r2,8 /* PTEs/group */
+#ifndef CONFIG_SMP
+ /* We don't do this for SMP - another cpu could have put in
+ the appropriate PTE since we took the exception. -- paulus. */
/* See whether it was a PTE not found exception or a
protection violation. */
andis. r0,r20,0x4000
- li r2,8 /* PTEs/group */
bne 10f /* no PTE: go look for an empty slot */
+#endif /* CONFIG_SMP */
tlbie r3 /* invalidate TLB entry */
/* Search the primary PTEG for a PTE whose 1st word matches r5 */
@@ -394,7 +408,6 @@ found_empty:
stw r5,0(r3)
found_slot:
stw r6,4(r3)
- sync
#else /* CONFIG_SMP */
/*
@@ -428,6 +441,8 @@ found_slot:
#endif /* CONFIG_SMP */
#endif /* CONFIG_PPC64BRIDGE */
+ sync /* make sure pte updates get to memory */
+
/*
* Update the hash table miss count. We only want misses here
* that _are_ valid addresses and have a pte otherwise we don't
@@ -517,7 +532,7 @@ _GLOBAL(flush_hash_segments)
a hash table miss while we have the hash table locked,
or we'll get a deadlock. -paulus */
mfmsr r10
- sync
+ SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
@@ -616,7 +631,7 @@ _GLOBAL(flush_hash_page)
a hash table miss while we have the hash table locked,
or we'll get a deadlock. -paulus */
mfmsr r10
- sync
+ SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
@@ -628,10 +643,14 @@ _GLOBAL(flush_hash_page)
oris r8,r8,9
10: lwarx r7,0,r9
cmpi 0,r7,0
- bne- 10b
+ bne- 11f
stwcx. r8,0,r9
- bne- 10b
- eieio
+ beq+ 12f
+11: lwz r7,0(r9)
+ cmpi 0,r7,0
+ beq 10b
+ b 11b
+12: eieio
#endif
#ifndef CONFIG_PPC64BRIDGE
rlwinm r3,r3,11,1,20 /* put context into vsid */
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 71b5a96c5..f296d1606 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -149,6 +149,8 @@ __start:
mr r28,r6
mr r27,r7
li r24,0 /* cpu # */
+ /* N.B. prom_init clears the BSS even if it doesn't do
+ * anything else -- paulus. */
bl prom_init
#ifdef CONFIG_APUS
@@ -159,7 +161,6 @@ __start:
bl fix_mem_constants
#endif /* CONFIG_APUS */
-#ifndef CONFIG_GEMINI
/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
* the physical address we are running at, returned by prom_init()
*/
@@ -167,7 +168,6 @@ __start:
__after_mmu_off:
bl clear_bats
bl flush_tlbs
-#endif
#ifndef CONFIG_POWER4
/* POWER4 doesn't have BATs */
@@ -290,6 +290,7 @@ label: \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,MSR_KERNEL; \
bl transfer_to_handler; \
+i##n: \
.long hdlr; \
.long ret_from_except
@@ -301,17 +302,13 @@ label: \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,MSR_KERNEL; \
bl transfer_to_handler; \
+i##n: \
.long hdlr; \
.long ret_from_except
/* System reset */
-#ifdef CONFIG_SMP /* MVME/MTX and gemini start the secondary here */
-#ifdef CONFIG_GEMINI
- . = 0x100
- b __secondary_start_gemini
-#else /* CONFIG_GEMINI */
+#ifdef CONFIG_SMP /* MVME/MTX start the secondary here */
STD_EXCEPTION(0x100, Reset, __secondary_start_psurge)
-#endif /* CONFIG_GEMINI */
#else
STD_EXCEPTION(0x100, Reset, UnknownException)
#endif
@@ -344,6 +341,7 @@ DataAccess:
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
+i0x300:
.long do_page_fault
.long ret_from_except
@@ -384,6 +382,7 @@ InstructionAccess:
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
+i0x400:
.long do_page_fault
.long ret_from_except
@@ -429,6 +428,7 @@ Alignment:
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
+i0x600:
.long AlignmentException
.long ret_from_except
@@ -441,6 +441,7 @@ ProgramCheck:
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
+i0x700:
.long ProgramCheckException
.long ret_from_except
@@ -452,6 +453,7 @@ FPUnavailable:
bne load_up_fpu /* if from user, just load it up */
li r20,MSR_KERNEL
bl transfer_to_handler /* if from kernel, take a trap */
+i0x800:
.long KernelFP
.long ret_from_except
@@ -575,7 +577,7 @@ InstructionAddressInvalid:
mfmsr r0 /* Restore "normal" registers */
xoris r0,r0,MSR_TGPR>>16
mtcrf 0x80,r3 /* Restore CR0 */
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r0
b InstructionAccess
@@ -646,7 +648,7 @@ DataAddressInvalid:
mfmsr r0 /* Restore "normal" registers */
xoris r0,r0,MSR_TGPR>>16
mtcrf 0x80,r3 /* Restore CR0 */
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r0
b DataAccess
@@ -843,7 +845,7 @@ load_up_fpu:
#endif /* CONFIG_PPC64BRIDGE */
SYNC
MTMSRD(r5) /* enable use of fpu now */
- SYNC
+ isync
/*
* For SMP, we don't do lazy FPU switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
@@ -929,7 +931,7 @@ load_up_altivec:
oris r5,r5,MSR_VEC@h
SYNC
mtmsr r5 /* enable use of AltiVec now */
- SYNC
+ isync
/*
* For SMP, we don't do lazy AltiVec switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
@@ -1023,7 +1025,7 @@ giveup_altivec:
oris r5,r5,MSR_VEC@h
SYNC
mtmsr r5 /* enable use of AltiVec now */
- SYNC
+ isync
cmpi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
@@ -1064,7 +1066,7 @@ giveup_fpu:
ori r5,r5,MSR_FP
SYNC
mtmsr r5 /* enable use of fpu now */
- SYNC
+ isync
cmpi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
@@ -1163,6 +1165,7 @@ fix_mem_constants:
icbi r0,r14 /* flush the icache line */
cmpw r12,r13
bne 1b
+ isync
/*
* Map the memory where the exception handlers will
@@ -1208,9 +1211,9 @@ apus_interrupt_entry:
mfmsr 20
xori r20,r20,MSR_DR
- sync
+ SYNC
mtmsr r20
- sync
+ isync
lis r4,APUS_IPL_EMU@h
@@ -1243,9 +1246,9 @@ apus_interrupt_entry:
mfmsr r20
xori r20,r20,MSR_DR
- sync
+ SYNC
mtmsr r20
- sync
+ isync
stw r3,(_CCR+4)(r21);
@@ -1263,28 +1266,24 @@ apus_interrupt_entry:
#endif /* CONFIG_APUS */
#ifdef CONFIG_SMP
-#ifdef CONFIG_GEMINI
- .globl __secondary_start_gemini
-__secondary_start_gemini:
- mfspr r4,HID0
- ori r4,r4,HID0_ICFI
- li r3,0
- ori r3,r3,HID0_ICE
- andc r4,r4,r3
- mtspr HID0,r4
- sync
- bl prom_init
- b __secondary_start
-#endif /* CONFIG_GEMINI */
-
.globl __secondary_start_psurge
__secondary_start_psurge:
li r24,1 /* cpu # */
+ b __secondary_start_psurge99
+ .globl __secondary_start_psurge2
+__secondary_start_psurge2:
+ li r24,2 /* cpu # */
+ b __secondary_start_psurge99
+ .globl __secondary_start_psurge3
+__secondary_start_psurge3:
+ li r24,3 /* cpu # */
+ b __secondary_start_psurge99
+__secondary_start_psurge99:
/* we come in here with IR=0 and DR=1, and DBAT 0
set to map the 0xf0000000 - 0xffffffff region */
mfmsr r0
rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
- sync
+ SYNC
mtmsr r0
isync
@@ -1293,7 +1292,7 @@ __secondary_start:
#ifdef CONFIG_PPC64BRIDGE
mfmsr r0
clrldi r0,r0,1 /* make sure it's in 32-bit mode */
- sync
+ SYNC
MTMSRD(r0)
isync
#else
@@ -1445,21 +1444,6 @@ start_here:
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
- /* Clear out the BSS */
- lis r11,_end@ha
- addi r11,r11,_end@l
- lis r8,__bss_start@ha
- addi r8,r8,__bss_start@l
- subf r11,r8,r11
- addi r11,r11,3
- rlwinm. r11,r11,30,2,31
- beq 2f
- addi r8,r8,-4
- mtctr r11
- li r0,0
-3: stwu r0,4(r8)
- bdnz 3b
-2:
/* stack */
addi r1,r2,TASK_UNION_SIZE
li r0,0
@@ -1504,7 +1488,7 @@ start_here:
RFI
/* Load up the kernel context */
2:
- SYNC /* Force all PTE updates to finish */
+ sync /* Force all PTE updates to finish */
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
#ifdef CONFIG_SMP
@@ -1552,7 +1536,6 @@ _GLOBAL(set_context)
* -- Cort
*/
clear_bats:
-#if !defined(CONFIG_GEMINI)
li r20,0
mfspr r9,PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
@@ -1576,10 +1559,8 @@ clear_bats:
mtspr IBAT2L,r20
mtspr IBAT3U,r20
mtspr IBAT3L,r20
-#endif /* !defined(CONFIG_GEMINI) */
blr
-#ifndef CONFIG_GEMINI
flush_tlbs:
lis r20, 0x40
1: addic. r20, r20, -0x1000
@@ -1598,7 +1579,6 @@ mmu_off:
mtspr SRR1,r3
sync
RFI
-#endif
#ifndef CONFIG_POWER4
/*
@@ -1745,3 +1725,12 @@ swapper_pg_dir:
.globl cmd_line
cmd_line:
.space 512
+
+ .globl intercept_table
+intercept_table:
+ .long 0, i0x100, i0x200, i0x300, i0x400, 0, i0x600, i0x700
+ .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
+ .long 0, 0, 0, i0x1300, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, 0, 0
diff --git a/arch/ppc/kernel/i8259.c b/arch/ppc/kernel/i8259.c
index bdb6ec844..7690d9091 100644
--- a/arch/ppc/kernel/i8259.c
+++ b/arch/ppc/kernel/i8259.c
@@ -10,12 +10,15 @@ unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])
+spinlock_t i8259_lock = SPIN_LOCK_UNLOCKED;
+
int i8259_pic_irq_offset;
int i8259_irq(int cpu)
{
int irq;
+ spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
/*
* Perform an interrupt acknowledge cycle on controller 1
*/
@@ -40,14 +43,20 @@ int i8259_irq(int cpu)
* interrupt
*/
outb(0x0b, 0x20);
- if(~inb(0x20)&0x80)
+ if(~inb(0x20)&0x80) {
+ spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
return -1;
+ }
}
+ spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
return irq;
}
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8259_lock, flags);
if ( irq_nr >= i8259_pic_irq_offset )
irq_nr -= i8259_pic_irq_offset;
@@ -63,6 +72,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
outb(cached_21,0x21);
outb(0x20,0x20); /* Non-specific EOI */
}
+ spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_set_irq_mask(int irq_nr)
@@ -73,6 +83,9 @@ static void i8259_set_irq_mask(int irq_nr)
static void i8259_mask_irq(unsigned int irq_nr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8259_lock, flags);
if ( irq_nr >= i8259_pic_irq_offset )
irq_nr -= i8259_pic_irq_offset;
if ( irq_nr < 8 )
@@ -80,11 +93,14 @@ static void i8259_mask_irq(unsigned int irq_nr)
else
cached_A1 |= 1 << (irq_nr-8);
i8259_set_irq_mask(irq_nr);
+ spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_unmask_irq(unsigned int irq_nr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&i8259_lock, flags);
if ( irq_nr >= i8259_pic_irq_offset )
irq_nr -= i8259_pic_irq_offset;
if ( irq_nr < 8 )
@@ -92,6 +108,13 @@ static void i8259_unmask_irq(unsigned int irq_nr)
else
cached_A1 &= ~(1 << (irq_nr-8));
i8259_set_irq_mask(irq_nr);
+ spin_unlock_irqrestore(&i8259_lock, flags);
+}
+
+static void i8259_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ i8259_unmask_irq(irq);
}
struct hw_interrupt_type i8259_pic = {
@@ -101,11 +124,15 @@ struct hw_interrupt_type i8259_pic = {
i8259_unmask_irq,
i8259_mask_irq,
i8259_mask_and_ack_irq,
- 0
+ i8259_end_irq,
+ NULL
};
void __init i8259_init(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8259_lock, flags);
/* init master interrupt controller */
outb(0x11, 0x20); /* Start init sequence */
outb(0x00, 0x21); /* Vector base */
@@ -120,7 +147,7 @@ void __init i8259_init(void)
outb(0xFF, 0xA1); /* Mask all */
outb(cached_A1, 0xA1);
outb(cached_21, 0x21);
+ spin_unlock_irqrestore(&i8259_lock, flags);
request_irq( i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
"82c59 secondary cascade", NULL );
- enable_irq(i8259_pic_irq_offset + 2); /* Enable cascade interrupt */
}
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index a363a0e34..6ca616979 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -47,24 +47,35 @@ atomic_t zeropage_calls; /* # zero'd pages request that've been made */
int idled(void)
{
+ int do_power_save = 0;
+
+ /* only sleep on the 603-family/750 processors */
+ switch (_get_PVR() >> 16) {
+ case 3: /* 603 */
+ case 6: /* 603e */
+ case 7: /* 603ev */
+ case 8: /* 750 */
+ case 12: /* 7400 */
+ do_power_save = 1;
+ }
+
/* endless loop with no priority at all */
current->nice = 20;
current->counter = -100;
init_idle();
for (;;)
{
- __sti();
-
- check_pgt_cache();
-
- /*if ( !current->need_resched && zero_paged_on ) zero_paged();*/
- if ( !current->need_resched && htab_reclaim_on ) htab_reclaim();
- if ( !current->need_resched ) power_save();
+ /*if ( !current->need_resched && zero_paged_on )
+ zero_paged();*/
+ if (!current->need_resched && htab_reclaim_on)
+ htab_reclaim();
+ if (do_power_save && !current->need_resched)
+ power_save();
-#ifdef CONFIG_SMP
- if (current->need_resched)
-#endif
+ if (current->need_resched) {
schedule();
+ check_pgt_cache();
+ }
}
return 0;
}
@@ -278,31 +289,31 @@ void zero_paged(void)
void power_save(void)
{
- unsigned long msr, hid0;
-
- /* only sleep on the 603-family/750 processors */
- switch (_get_PVR() >> 16) {
- case 3: /* 603 */
- case 6: /* 603e */
- case 7: /* 603ev */
- case 8: /* 750 */
- case 12: /* 7400 */
- save_flags(msr);
- __cli();
- if (!current->need_resched) {
- asm("mfspr %0,1008" : "=r" (hid0) :);
- hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
- hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
- asm("mtspr 1008,%0" : : "r" (hid0));
+ unsigned long hid0;
+ /*
+ * Disable interrupts to prevent a lost wakeup
+ * when going to sleep. This is necessary even with
+ * RTLinux since we are not guaranteed an interrupt
+ * didn't come in and is waiting for a __sti() before
+ * emulating one. This way, we really do hard disable.
+ *
+ * We assume that we're sti-ed when we come in here. We
+ * are in the idle loop so if we're cli-ed then it's a bug
+ * anyway.
+ * -- Cort
+ */
+ _nmask_and_or_msr(MSR_EE, 0);
+ if (!current->need_resched)
+ {
+ asm("mfspr %0,1008" : "=r" (hid0) :);
+ hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
+ hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
+ asm("mtspr 1008,%0" : : "r" (hid0));
- /* set the POW bit in the MSR, and enable interrupts
- * so we wake up sometime! */
- __sti(); /* this keeps rtl from getting confused -- Cort */
- _nmask_and_or_msr(0, MSR_POW | MSR_EE);
- }
- restore_flags(msr);
- default:
- return;
+ /* set the POW bit in the MSR, and enable interrupts
+ * so we wake up sometime! */
+ _nmask_and_or_msr(0, MSR_POW | MSR_EE);
}
+ _nmask_and_or_msr(0, MSR_EE);
}
diff --git a/arch/ppc/kernel/indirect_pci.c b/arch/ppc/kernel/indirect_pci.c
index 552c552dc..3ba335d1f 100644
--- a/arch/ppc/kernel/indirect_pci.c
+++ b/arch/ppc/kernel/indirect_pci.c
@@ -9,113 +9,59 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/kernel.h>
#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-unsigned int * pci_config_address;
-unsigned char * pci_config_data;
-
-int indirect_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- unsigned long flags;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
- *val= in_8(pci_config_data + (offset&3));
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
+#include <asm/init.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+
+#include "pci.h"
+
+#define cfg_read(val, addr, type, op) *val = op((type)(addr))
+#define cfg_write(val, addr, type, op) op((type *)(addr), (val))
+
+#define INDIRECT_PCI_OP(rw, size, type, op, mask) \
+static int \
+indirect_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ struct pci_controller *hose = dev->sysdata; \
+ \
+ out_be32(hose->cfg_addr, \
+ ((offset & 0xfc) << 24) | (dev->devfn << 16) \
+ | (dev->bus->number << 8) | 0x80); \
+ cfg_##rw(val, hose->cfg_data + (offset & mask), type, op); \
+ return PCIBIOS_SUCCESSFUL; \
}
-int indirect_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- unsigned long flags;
-
- if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
+INDIRECT_PCI_OP(read, byte, u8 *, in_8, 3)
+INDIRECT_PCI_OP(read, word, u16 *, in_le16, 2)
+INDIRECT_PCI_OP(read, dword, u32 *, in_le32, 0)
+INDIRECT_PCI_OP(write, byte, u8, out_8, 3)
+INDIRECT_PCI_OP(write, word, u16, out_le16, 2)
+INDIRECT_PCI_OP(write, dword, u32, out_le32, 0)
- *val= in_le16((unsigned short *)(pci_config_data + (offset&3)));
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int indirect_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
+static struct pci_ops indirect_pci_ops =
{
- unsigned long flags;
-
- if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
-
- *val= in_le32((unsigned *)pci_config_data);
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int indirect_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
+ indirect_read_config_byte,
+ indirect_read_config_word,
+ indirect_read_config_dword,
+ indirect_write_config_byte,
+ indirect_write_config_word,
+ indirect_write_config_dword
+};
+
+void __init
+setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
{
- unsigned long flags;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
-
- out_8(pci_config_data + (offset&3), val);
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
-}
-
-int indirect_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- unsigned long flags;
-
- if (offset&1) return PCIBIOS_BAD_REGISTER_NUMBER;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
-
- out_le16((unsigned short *)(pci_config_data + (offset&3)), val);
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
+ hose->ops = &indirect_pci_ops;
+ hose->cfg_addr = (unsigned int *) ioremap(cfg_addr, 4);
+ hose->cfg_data = (unsigned char *) ioremap(cfg_data, 4);
}
-int indirect_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
-{
- unsigned long flags;
-
- if (offset&3) return PCIBIOS_BAD_REGISTER_NUMBER;
-
- save_flags(flags); cli();
-
- out_be32(pci_config_address,
- ((offset&0xfc)<<24) | (dev_fn<<16) | (bus<<8) | 0x80);
-
- out_le32((unsigned *)pci_config_data, val);
-
- restore_flags(flags);
- return PCIBIOS_SUCCESSFUL;
-}
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index a0caa4a4c..02ff4d964 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -41,11 +41,11 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/malloc.h>
-#include <linux/openpic.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
@@ -64,19 +64,22 @@
#include "local_irq.h"
-extern volatile unsigned long ipi_count;
+atomic_t ipi_recv;
+atomic_t ipi_sent;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);
-volatile unsigned char *chrp_int_ack_special;
+static void register_irq_proc (unsigned int irq);
#define MAXCOUNT 10000000
-irq_desc_t irq_desc[NR_IRQS];
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
+
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
-unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
-unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
+unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
/* nasty hack for shared irq's since we need to do kmalloc calls but
@@ -115,52 +118,136 @@ void irq_kfree(void *ptr)
kfree(ptr);
}
+int
+setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = irq_desc + irq;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler, but is this really a problem,
+ * only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
+ unmask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ register_irq_proc(irq);
+ return 0;
+}
+
+/* This could be promoted to a real free_irq() ... */
+static int
+do_free_irq(int irq, void* dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifdef CONFIG_SMP
+ /* Wait to make sure it's not being used on another CPU */
+ while (desc->status & IRQ_INPROGRESS)
+ barrier();
+#endif
+ irq_kfree(action);
+ return 0;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return -ENOENT;
+}
+
#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
/* Name change so we can catch standard drivers that potentially mess up
* the internal interrupt controller on 8xx and 8260. Just bear with me,
* I don't like this either and I am searching a better solution. For
* now, this is what I need. -- Dan
*/
-int request_8xxirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+#define request_irq request_8xxirq
#elif defined(CONFIG_APUS)
-int request_sysirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
-#else
-int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+#define request_irq request_sysirq
+#define free_irq sys_free_irq
#endif
+
+int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
- struct irqaction *old, **p, *action;
- unsigned long flags;
+ struct irqaction *action;
+ int retval;
if (irq >= NR_IRQS)
return -EINVAL;
if (!handler)
- {
- /* Free */
- p = &irq_desc[irq].action;
- while ((action = *p) != NULL && action->dev_id != dev_id)
- p = &action->next;
- if (action == NULL)
- return -ENOENT;
-
- /* Found it - now free it */
- save_flags(flags);
- cli();
- *p = action->next;
- if (irq_desc[irq].action == NULL)
- disable_irq(irq);
- restore_flags(flags);
- irq_kfree(action);
- return 0;
- }
+ /* We could implement really free_irq() instead of that... */
+ return do_free_irq(irq, dev_id);
action = (struct irqaction *)
irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- if (!action)
+ if (!action) {
+ printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
return -ENOMEM;
-
- save_flags(flags);
- cli();
+ }
action->handler = handler;
action->flags = irqflags;
@@ -168,57 +255,109 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
action->name = devname;
action->dev_id = dev_id;
action->next = NULL;
- enable_irq(irq);
- p = &irq_desc[irq].action;
-
- if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & action->flags & SA_SHIRQ))
- return -EBUSY;
- /* add new interrupt at end of irq queue */
- do {
- p = &old->next;
- old = *p;
- } while (old);
- }
- *p = action;
-
- restore_flags(flags);
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+
return 0;
}
-#ifdef CONFIG_APUS
-void sys_free_irq(unsigned int irq, void *dev_id)
-{
- sys_request_irq(irq, NULL, 0, NULL, dev_id);
-}
-#else
void free_irq(unsigned int irq, void *dev_id)
{
-#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
- request_8xxirq(irq, NULL, 0, NULL, dev_id);
-#else
request_irq(irq, NULL, 0, NULL, dev_id);
-#endif
}
-#endif
-/* XXX should implement irq disable depth like on intel */
-void disable_irq_nosync(unsigned int irq_nr)
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+ void disable_irq_nosync(unsigned int irq)
{
- mask_irq(irq_nr);
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ if (!(desc->status & IRQ_PER_CPU))
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
}
-void disable_irq(unsigned int irq_nr)
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. That is for two disables you need two enables. This
+ * function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
{
- mask_irq(irq_nr);
- synchronize_irq();
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count(smp_processor_id())) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
}
-void enable_irq(unsigned int irq_nr)
+/**
+ * enable_irq - enable interrupt handling on an irq
+ * @irq: Interrupt to enable
+ *
+ * Re-enables the processing of interrupts on this IRQ line
+ * providing no disable_irq calls are now in effect.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
{
- unmask_irq(irq_nr);
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ unmask_irq(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk("enable_irq(%u) unbalanced\n", irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
}
int get_irq_list(char *buf)
@@ -257,15 +396,41 @@ int get_irq_list(char *buf)
}
len += sprintf(buf+len, "\n");
}
+#ifdef CONFIG_TAU
+ len += sprintf(buf+len, "TAU: ");
+ for (j = 0; j < smp_num_cpus; j++)
+ len += sprintf(buf+len, "%10u ",
+ tau_interrupts(j));
+ len += sprintf(buf+len, "\n");
+#endif
#ifdef CONFIG_SMP
/* should this be per processor send/receive? */
- len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
+ len += sprintf(buf+len, "IPI (recv/sent): %10u/%u\n",
+ atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
return len;
#endif /* CONFIG_APUS */
}
+static inline void
+handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
+{
+ int status = 0;
+
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+}
+
/*
* Eventually, this should take an array of interrupts and an array size
* so it can dispatch multiple interrupts.
@@ -275,33 +440,87 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
int status;
struct irqaction *action;
int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
- mask_and_ack_irq(irq);
- status = 0;
- action = irq_desc[irq].action;
kstat.irqs[cpu][irq]++;
- if (action && action->handler) {
- if (!(action->flags & SA_INTERRUPT))
- __sti();
- do {
- status |= action->flags;
- action->handler(irq, action->dev_id, regs);
- action = action->next;
- } while ( action );
- __cli();
- if (irq_desc[irq].handler) {
- if (irq_desc[irq].handler->end)
- irq_desc[irq].handler->end(irq);
- else if (irq_desc[irq].handler->enable)
- irq_desc[irq].handler->enable(irq);
+ spin_lock(&desc->lock);
+ ack_irq(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ if (!(status & IRQ_PER_CPU))
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ if (!action || !action->handler) {
+ ppc_spurious_interrupts++;
+ printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
+ /* We can't call disable_irq here, it would deadlock */
+ if (!desc->depth)
+ desc->depth = 1;
+ desc->status |= IRQ_DISABLED;
+ /* This is not a real spurrious interrupt, we
+ * have to eoi it, so we jump to out
+ */
+ mask_irq(irq);
+ goto out;
}
- } else {
- ppc_spurious_interrupts++;
- printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
- disable_irq(irq);
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ if (!(status & IRQ_PER_CPU))
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (!action)
+ goto out;
+
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_irq_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ if (irq_desc[irq].handler) {
if (irq_desc[irq].handler->end)
irq_desc[irq].handler->end(irq);
+ else if (irq_desc[irq].handler->enable)
+ irq_desc[irq].handler->enable(irq);
}
+ spin_unlock(&desc->lock);
}
int do_IRQ(struct pt_regs *regs, int isfake)
@@ -320,6 +539,7 @@ int do_IRQ(struct pt_regs *regs, int isfake)
{
printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
irq, regs->nip);
+ /* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
}
goto out;
@@ -362,7 +582,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile int global_irq_lock;
+unsigned volatile long global_irq_lock; /* pendantic :long for set_bit--RR*/
atomic_t global_irq_count;
atomic_t global_bh_count;
@@ -634,7 +854,11 @@ static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+#ifdef CONFIG_IRQ_ALL_CPUS
unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
+#else /* CONFIG_IRQ_ALL_CPUS */
+unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
+#endif /* CONFIG_IRQ_ALL_CPUS */
#define HEX_DIGITS 8
@@ -694,6 +918,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
err = parse_hex_value(buffer, count, &new_value);
+/* Why is this disabled ? --BenH */
#if 0/*CONFIG_SMP*/
/*
* Do not allow disabling IRQs completely - it's a too easy
diff --git a/arch/ppc/kernel/local_irq.h b/arch/ppc/kernel/local_irq.h
index 5c616bbbd..319e3377c 100644
--- a/arch/ppc/kernel/local_irq.h
+++ b/arch/ppc/kernel/local_irq.h
@@ -15,8 +15,5 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
extern int ppc_spurious_interrupts;
extern int ppc_second_irq;
extern struct irqaction *ppc_irq_action[NR_IRQS];
-extern unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
-extern unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
-extern atomic_t ppc_n_lost_interrupts;
#endif /* _PPC_KERNEL_LOCAL_IRQ_H */
diff --git a/arch/ppc/kernel/m8260_setup.c b/arch/ppc/kernel/m8260_setup.c
index 6e006a867..a55f5235d 100644
--- a/arch/ppc/kernel/m8260_setup.c
+++ b/arch/ppc/kernel/m8260_setup.c
@@ -62,7 +62,7 @@ extern void mackbd_leds(unsigned char leds);
extern void mackbd_init_hw(void);
#endif
-extern unsigned long loops_per_sec;
+extern unsigned long loops_per_jiffy;
unsigned char __res[sizeof(bd_t)];
unsigned long empty_zero_page[1024];
@@ -286,10 +286,7 @@ m8260_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_ide_md.outsw = m8xx_ide_outsw;
ppc_ide_md.default_irq = m8xx_ide_default_irq;
ppc_ide_md.default_io_base = m8xx_ide_default_io_base;
- ppc_ide_md.check_region = m8xx_ide_check_region;
- ppc_ide_md.request_region = m8xx_ide_request_region;
- ppc_ide_md.release_region = m8xx_ide_release_region;
- ppc_ide_md.fix_driveid = m8xx_ide_fix_driveid;
+ ppc_ide_md.fix_driveid = ppc_generic_ide_fix_driveid;
ppc_ide_md.ide_init_hwif = m8xx_ide_init_hwif_ports;
ppc_ide_md.ide_request_irq = m8xx_ide_request_irq;
@@ -297,13 +294,39 @@ m8260_init(unsigned long r3, unsigned long r4, unsigned long r5,
#endif
}
-void
+/*
+ * Copied from prom.c so I don't have include all of that crap.
+ * -- Dan
+ *
+ * prom_init() is called very early on, before the kernel text
+ * and data have been mapped to KERNELBASE. At this point the code
+ * is running at whatever address it has been loaded at, so
+ * references to extern and static variables must be relocated
+ * explicitly. The procedure reloc_offset() returns the address
+ * we're currently running at minus the address we were linked at.
+ * (Note that strings count as static variables.)
+ */
+extern unsigned long reloc_offset(void);
+#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
+
+__init
+unsigned long
prom_init(uint r3, uint r4, uint r5, uint r6)
{
- /* Nothing to do now, but we are called immediatedly upon
- * kernel start up with MMU disabled, so if there is
- * anything we need to do......
- */
+ unsigned long offset = reloc_offset();
+ unsigned long phys;
+ extern char __bss_start, _end;
+
+ /* First zero the BSS -- use memset, some arches don't have
+ * caches on yet */
+ memset_io(PTRRELOC(&__bss_start),0 , &_end - &__bss_start);
+
+ /* Default */
+ phys = offset + KERNELBASE;
+
+ /* We are done.
+ */
+ return phys;
}
/* Mainly for ksyms.
diff --git a/arch/ppc/kernel/m8xx_setup.c b/arch/ppc/kernel/m8xx_setup.c
index 7dc408a13..f9813fa75 100644
--- a/arch/ppc/kernel/m8xx_setup.c
+++ b/arch/ppc/kernel/m8xx_setup.c
@@ -63,8 +63,6 @@ extern void mackbd_leds(unsigned char leds);
extern void mackbd_init_hw(void);
#endif
-extern unsigned long loops_per_sec;
-
unsigned char __res[sizeof(bd_t)];
unsigned long empty_zero_page[1024];
@@ -344,25 +342,6 @@ m8xx_ide_default_io_base(int index)
}
int
-m8xx_ide_check_region(ide_ioreg_t from, unsigned int extent)
-{
- return 0;
-}
-
-void
-m8xx_ide_request_region(ide_ioreg_t from,
- unsigned int extent,
- const char *name)
-{
-}
-
-void
-m8xx_ide_release_region(ide_ioreg_t from,
- unsigned int extent)
-{
-}
-
-int
m8xx_ide_request_irq(unsigned int irq,
void (*handler)(int, void *, struct pt_regs *),
unsigned long flags,
@@ -376,12 +355,6 @@ m8xx_ide_request_irq(unsigned int irq,
#endif
}
-void
-m8xx_ide_fix_driveid(struct hd_driveid *id)
-{
- ppc_generic_ide_fix_driveid(id);
-}
-
/* We can use an external IDE controller or wire the IDE interface to
* the internal PCMCIA controller.
*/
@@ -515,10 +488,7 @@ m8xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_ide_md.outsw = m8xx_ide_outsw;
ppc_ide_md.default_irq = m8xx_ide_default_irq;
ppc_ide_md.default_io_base = m8xx_ide_default_io_base;
- ppc_ide_md.check_region = m8xx_ide_check_region;
- ppc_ide_md.request_region = m8xx_ide_request_region;
- ppc_ide_md.release_region = m8xx_ide_release_region;
- ppc_ide_md.fix_driveid = m8xx_ide_fix_driveid;
+ ppc_ide_md.fix_driveid = ppc_generic_ide_fix_driveid;
ppc_ide_md.ide_init_hwif = m8xx_ide_init_hwif_ports;
ppc_ide_md.ide_request_irq = m8xx_ide_request_irq;
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 94e1cd277..3f54003c7 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -37,6 +37,14 @@
.text
+ .align 5
+_GLOBAL(__delay)
+ cmpwi 0,r3,0
+ mtctr r3
+ beqlr
+1: bdnz 1b
+ blr
+
/*
* Returns (address we're running at) - (address we were linked at)
* for use before the text and data are mapped to KERNELBASE.
@@ -82,16 +90,16 @@ _GLOBAL(__no_use_restore_flags)
lwz r7,ppc_n_lost_interrupts@l(r7)
cmpi 0,r7,0 /* lost interrupts to process first? */
bne- do_lost_interrupts
-1: sync
+1: SYNC
mtmsr r3
- isync
+ SYNC
blr
_GLOBAL(__no_use_cli)
mfmsr r0 /* Get current interrupt state */
rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r0 /* Update machine state */
blr /* Done */
@@ -102,7 +110,7 @@ _GLOBAL(__no_use_sti)
ori r3,r3,MSR_EE /* Turn on 'EE' bit */
cmpi 0,r4,0 /* lost interrupts to process first? */
bne- do_lost_interrupts
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r3 /* Update machine state */
blr
@@ -121,7 +129,7 @@ _GLOBAL(do_lost_interrupts)
cmpi 0,r4,0
bne- 1b
lwz r3,8(r1)
- sync
+ SYNC
mtmsr r3
lwz r0,20(r1)
mtlr r0
@@ -137,8 +145,9 @@ _GLOBAL(do_lost_interrupts)
mfmsr r0 /* Get current msr */
andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
or r0,r0,r4 /* Or on the bits in r4 (second parm) */
- sync /* Some chip revs have problems here... */
+ SYNC /* Some chip revs have problems here... */
mtmsr r0 /* Update machine state */
+ isync
blr /* Done */
@@ -148,7 +157,7 @@ _GLOBAL(do_lost_interrupts)
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
mfmsr r10
- sync
+ SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
@@ -161,7 +170,6 @@ _GLOBAL(_tlbia)
bne- 10b
stwcx. r8,0,r9
bne- 10b
- eieio
#endif /* CONFIG_SMP */
sync
tlbia
@@ -182,7 +190,7 @@ _GLOBAL(_tlbia)
_GLOBAL(_tlbie)
#if defined(CONFIG_SMP)
mfmsr r10
- sync
+ SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
@@ -228,7 +236,7 @@ _GLOBAL(flush_instruction_cache)
ori r3,r3,HID0_ICFI
mtspr HID0,r3
#endif /* CONFIG_8xx */
- SYNC
+ isync
blr
/*
@@ -259,7 +267,6 @@ _GLOBAL(flush_icache_range)
2: icbi 0,r6
addi r6,r6,CACHE_LINE_SIZE
bdnz 2b
- sync
isync
blr
@@ -717,6 +724,8 @@ _GLOBAL(_get_SP)
mr r3,r1 /* Close enough */
blr
+#if 0
+/* isn't it just easier to use the mtspr/mfspr inline macros?? --Troy */
_GLOBAL(_get_THRM1)
mfspr r3,THRM1
blr
@@ -740,6 +749,7 @@ _GLOBAL(_set_THRM2)
_GLOBAL(_set_THRM3)
mtspr THRM3,r3
blr
+#endif
_GLOBAL(_get_PVR)
mfspr r3,PVR
@@ -755,6 +765,12 @@ _GLOBAL(_get_HID0)
mfspr r3,HID0
blr
+_GLOBAL(_set_HID0)
+ sync
+ mtspr HID0, r3
+ SYNC /* Handle erratas in some cases */
+ blr
+
_GLOBAL(_get_ICTC)
mfspr r3,ICTC
blr
@@ -763,7 +779,6 @@ _GLOBAL(_set_ICTC)
mtspr ICTC,r3
blr
-
/*
L2CR functions
Copyright © 1997-1998 by PowerLogix R & D, Inc.
@@ -785,6 +800,17 @@ _GLOBAL(_set_ICTC)
/*
Thur, Dec. 12, 1998.
- First public release, contributed by PowerLogix.
+ ***********
+ Sat, Aug. 7, 1999.
+ - Terry: Made sure code disabled interrupts before running. (Previously
+ it was assumed interrupts were already disabled).
+ - Terry: Updated for tentative G4 support. 4MB of memory is now flushed
+ instead of 2MB. (Prob. only 3 is necessary).
+ - Terry: Updated for workaround to HID0[DPM] processor bug
+ during global invalidates.
+ ***********
+ Thu, July 13, 2000.
+ - Terry: Added isync to correct for an errata.
Author: Terry Greeniaus (tgree@phys.ualberta.ca)
Please e-mail updates to this file to me, thanks!
@@ -823,82 +849,94 @@ _GLOBAL(_set_ICTC)
causes cache pushes from the L1 cache to go to the L2 cache
instead of to main memory.
*/
-
+/*
+ * Summary: this procedure ignores the L2I bit in the value passed in,
+ * flushes the cache if it was already enabled, always invalidates the
+ * cache, then enables the cache if the L2E bit is set in the value
+ * passed in.
+ * -- paulus.
+ */
_GLOBAL(_set_L2CR)
- /* Make sure this is a 750 chip */
+ /* Make sure this is a 750 or 7400 chip */
mfspr r4,PVR
rlwinm r4,r4,16,16,31
- cmplwi r4,0x0008
- beq thisIs750
- cmplwi r4,0x000c
- beq thisIs750
- li r3,-1
- blr
-
-thisIs750:
- /* Get the current enable bit of the L2CR into r4 */
- mfspr r4,L2CR
- mfmsr r7
-
- /* See if we want to perform a global inval this time. */
- rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
- rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
- rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
- rlwimi r3,r4,0,0,0 /* Keep the enable bit the same as it was. */
- bne dontDisableCache /* Only disable the cache if L2CRApply
- has the enable bit off */
-
-disableCache:
- /* Disable the cache. First, we turn off interrupts.
- An interrupt while we are flushing the cache could bring
- in data which may not get properly flushed. */
- rlwinm r4,r7,0,17,15 /* Turn off EE bit */
+ cmpwi r4,0x0008
+ cmpwi cr1,r4,0x000c
+ cror 2,2,4*cr1+2
+ bne 99f
+
+ /* Turn off interrupts and data relocation. */
+ mfmsr r7 /* Save MSR in r7 */
+ rlwinm r4,r7,0,17,15
+ rlwinm r4,r4,0,28,26 /* Turn off DR bit */
sync
mtmsr r4
- sync
+ isync
+
+ /* Get the current enable bit of the L2CR into r4 */
+ mfspr r4,L2CR
-/*
- Now, read the first 2MB of memory to put new data in the cache.
- (Actually we only need the size of the L2 cache plus the size
- of the L1 cache, but 2MB will cover everything just to be safe).
-*/
- lis r4,0x0001
+ /* Tweak some bits */
+ rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
+ rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
+ rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
+
+ /* Check to see if we need to flush */
+ rlwinm. r4,r4,0,0,0
+ beq 2f
+
+ /* Flush the cache. First, read the first 4MB of memory (physical) to
+ * put new data in the cache. (Actually we only need
+ * the size of the L2 cache plus the size of the L1 cache, but 4MB will
+ * cover everything just to be safe).
+ */
+
+ /**** Might be a good idea to set L2DO here - to prevent instructions
+ from getting into the cache. But since we invalidate
+ the next time we enable the cache it doesn't really matter.
+ ****/
+
+ lis r4,0x0002
mtctr r4
- lis r4,KERNELBASE@h
-1: lwzx r0,r0,r4
- addi r4,r4,0x0020 /* Go to start of next cache line */
+ li r4,0
+1:
+ lwzx r0,r0,r4
+ addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
- /* Now, flush the first 2MB of memory */
- lis r4,0x0001
+ /* Now, flush the first 4MB of memory */
+ lis r4,0x0002
mtctr r4
- lis r4,KERNELBASE@h
+ li r4,0
sync
-2: dcbf r0,r4
- addi r4,r4,0x0020 /* Go to start of next cache line */
- bdnz 2b
-
- /* Turn off the L2CR enable bit. */
- rlwinm r3,r3,0,1,31
-
-dontDisableCache:
- /* Set up the L2CR configuration bits */
+1:
+ dcbf r0,r4
+ addi r4,r4,32 /* Go to start of next cache line */
+ bdnz 1b
+
+2:
+ /* Set up the L2CR configuration bits (and switch L2 off) */
sync
mtspr L2CR,r3
sync
- /* Reenable interrupts if necessary. */
- mtmsr r7
+ /* Before we perform the global invalidation, we must disable dynamic
+ * power management via HID0[DPM] to work around a processor bug where
+ * DPM can possibly interfere with the state machine in the processor
+ * that invalidates the L2 cache tags.
+ */
+ mfspr r8,HID0 /* Save HID0 in r8 */
+ rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
-
- cmplwi r6,0
- beq noInval
-
+ mtspr HID0,r4 /* Disable DPM */
+ sync
+
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
mtspr L2CR,r3
sync
+ isync /* For errata */
/* Wait for the invalidation to complete */
3: mfspr r3,L2CR
@@ -910,27 +948,38 @@ dontDisableCache:
mtspr L2CR,r3
sync
-noInval:
+ /* Restore HID0[DPM] to whatever it was before */
+ sync
+ mtspr 1008,r8
+ sync
+
/* See if we need to enable the cache */
cmplwi r5,0
- beqlr
+ beq 4f
/* Enable the cache */
oris r3,r3,0x8000
mtspr L2CR,r3
sync
+
+ /* Restore MSR (restores EE and DR bits to original state) */
+4: SYNC
+ mtmsr r7
+ isync
+ blr
+
+99: li r3,-1
blr
_GLOBAL(_get_L2CR)
/* Make sure this is a 750 chip */
mfspr r3,PVR
- rlwinm r3,r3,16,16,31
- cmplwi r3,0x0008
- beq 1f
- cmplwi r3,0x000c
+ srwi r3,r3,16
+ cmpwi r3,0x0008
+ cmpwi cr1,r3,0x000c
li r3,0
+ cror 2,2,4*cr1+2
bnelr
-1:
/* Return the L2CR contents */
mfspr r3,L2CR
blr
@@ -986,15 +1035,6 @@ _GLOBAL(cvt_df)
blr
#endif
-_GLOBAL(__clear_msr_me)
- mfmsr r0 /* Get current interrupt state */
- lis r3,0
- ori r3,r3,MSR_ME
- andc r0,r0,r3 /* Clears bit in (r4) */
- sync /* Some chip revs have problems here */
- mtmsr r0 /* Update machine state */
- blr
-
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
@@ -1244,16 +1284,20 @@ _GLOBAL(sys_call_table)
.long sys_getrlimit /* 190 */
.long sys_ni_syscall /* 191 */ /* Unused */
.long sys_ni_syscall /* 192 - reserved - mmap2 */
- .long sys_ni_syscall /* 193 - reserved - truncate64 */
- .long sys_ni_syscall /* 194 - reserved - ftruncate64 */
- .long sys_ni_syscall /* 195 - reserved - stat64 */
- .long sys_ni_syscall /* 196 - reserved - lstat64 */
- .long sys_ni_syscall /* 197 - reserved - fstat64 */
+ .long sys_truncate64 /* 193 */
+ .long sys_ftruncate64 /* 194 */
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64 /* 196 */
+ .long sys_fstat64 /* 197 */
.long sys_pciconfig_read /* 198 */
.long sys_pciconfig_write /* 199 */
.long sys_pciconfig_iobase /* 200 */
.long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
.long sys_getdents64 /* 202 */
+ .long sys_pivot_root /* 203 */
+ .long sys_fcntl64 /* 204 */
+ .long sys_madvise /* 205 */
+ .long sys_mincore /* 206 */
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
.endr
diff --git a/arch/ppc/kernel/open_pic.c b/arch/ppc/kernel/open_pic.c
index 5ca365b1c..0cbd4f553 100644
--- a/arch/ppc/kernel/open_pic.c
+++ b/arch/ppc/kernel/open_pic.c
@@ -1,5 +1,5 @@
/*
- * arch/ppc/kernel/openpic.c -- OpenPIC Interrupt Handling
+ * arch/ppc/kernel/open_pic.c -- OpenPIC Interrupt Handling
*
* Copyright (C) 1997 Geert Uytterhoeven
*
@@ -13,35 +13,38 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/openpic.h>
+#include <linux/irq.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
+
#include "local_irq.h"
+#include "open_pic.h"
+#include "open_pic_defs.h"
-volatile struct OpenPIC *OpenPIC = NULL;
+void* OpenPIC_Addr;
+static volatile struct OpenPIC *OpenPIC = NULL;
u_int OpenPIC_NumInitSenses __initdata = 0;
u_char *OpenPIC_InitSenses __initdata = NULL;
-int open_pic_irq_offset;
extern int use_of_interrupt_tree;
-void chrp_mask_irq(unsigned int);
-void chrp_unmask_irq(unsigned int);
void find_ISUs(void);
static u_int NumProcessors;
static u_int NumSources;
-OpenPIC_Source *ISU;
-/*
- * We should use this if we have > 1 ISU.
- * We can just point each entry to the
- * appropriate source regs but it wastes a lot of space
- * so until we have >1 ISU I'll leave it unimplemented.
- * -- Cort
-OpenPIC_Source ISU[128];
-*/
+#ifdef CONFIG_POWER3
+static int NumISUs;
+#endif
+static int open_pic_irq_offset;
+static volatile unsigned char* chrp_int_ack_special;
+
+OpenPIC_SourcePtr ISU[OPENPIC_MAX_ISU];
+
+static void openpic_end_irq(unsigned int irq_nr);
+static void openpic_ack_irq(unsigned int irq_nr);
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask);
struct hw_interrupt_type open_pic = {
" OpenPIC ",
@@ -49,45 +52,69 @@ struct hw_interrupt_type open_pic = {
NULL,
openpic_enable_irq,
openpic_disable_irq,
- 0,
+ openpic_ack_irq,
+ openpic_end_irq,
+ openpic_set_affinity
+};
+
+#ifdef CONFIG_SMP
+static void openpic_end_ipi(unsigned int irq_nr);
+static void openpic_ack_ipi(unsigned int irq_nr);
+static void openpic_enable_ipi(unsigned int irq_nr);
+static void openpic_disable_ipi(unsigned int irq_nr);
+
+struct hw_interrupt_type open_pic_ipi = {
+ " OpenPIC ",
+ NULL,
+ NULL,
+ openpic_enable_ipi,
+ openpic_disable_ipi,
+ openpic_ack_ipi,
+ openpic_end_ipi,
0
};
+#endif /* CONFIG_SMP */
/*
- * Accesses to the current processor's registers
+ * Accesses to the current processor's openpic registers
*/
-#ifndef __powerpc__
-#define THIS_CPU Private
-#define CHECK_THIS_CPU do {} while (0)
-#else
+#ifdef CONFIG_SMP
#define THIS_CPU Processor[cpu]
+#define DECL_THIS_CPU int cpu = smp_hw_index[smp_processor_id()]
#define CHECK_THIS_CPU check_arg_cpu(cpu)
-#endif
+#else
+#define THIS_CPU Processor[0]
+#define DECL_THIS_CPU
+#define CHECK_THIS_CPU
+#endif /* CONFIG_SMP */
#if 1
#define check_arg_ipi(ipi) \
if (ipi < 0 || ipi >= OPENPIC_NUM_IPI) \
- printk("openpic.c:%d: illegal ipi %d\n", __LINE__, ipi);
+ printk("open_pic.c:%d: illegal ipi %d\n", __LINE__, ipi);
#define check_arg_timer(timer) \
if (timer < 0 || timer >= OPENPIC_NUM_TIMERS) \
- printk("openpic.c:%d: illegal timer %d\n", __LINE__, timer);
+ printk("open_pic.c:%d: illegal timer %d\n", __LINE__, timer);
#define check_arg_vec(vec) \
if (vec < 0 || vec >= OPENPIC_NUM_VECTORS) \
- printk("openpic.c:%d: illegal vector %d\n", __LINE__, vec);
+ printk("open_pic.c:%d: illegal vector %d\n", __LINE__, vec);
#define check_arg_pri(pri) \
if (pri < 0 || pri >= OPENPIC_NUM_PRI) \
- printk("openpic.c:%d: illegal priority %d\n", __LINE__, pri);
+ printk("open_pic.c:%d: illegal priority %d\n", __LINE__, pri);
/*
- * I changed this to return to keep us from from trying to use irq #'s
- * that we're using for IPI's.
- * -- Cort
- */
+ * Print out a backtrace if it's out of range, since if it's larger than NR_IRQ's
+ * data has probably been corrupted and we're going to panic or deadlock later
+ * anyway --Troy
+ */
+extern unsigned long* _get_SP(void);
#define check_arg_irq(irq) \
- /*if (irq < 0 || irq >= (NumSources+open_pic_irq_offset)) \
- printk("openpic.c:%d: illegal irq %d\n", __LINE__, irq);*/
+ if (irq < open_pic_irq_offset || irq >= (NumSources+open_pic_irq_offset)){ \
+ printk("open_pic.c:%d: illegal irq %d\n", __LINE__, irq); \
+ print_backtrace(_get_SP()); }
#define check_arg_cpu(cpu) \
- if (cpu < 0 || cpu >= NumProcessors) \
- printk("openpic.c:%d: illegal cpu %d\n", __LINE__, cpu);
+ if (cpu < 0 || cpu >= NumProcessors){ \
+ printk("open_pic.c:%d: illegal cpu %d\n", __LINE__, cpu); \
+ print_backtrace(_get_SP()); }
#else
#define check_arg_ipi(ipi) do {} while (0)
#define check_arg_timer(timer) do {} while (0)
@@ -97,23 +124,10 @@ struct hw_interrupt_type open_pic = {
#define check_arg_cpu(cpu) do {} while (0)
#endif
-#ifdef CONFIG_SMP
-void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs)
-{
- smp_message_recv(cpl-OPENPIC_VEC_IPI, regs);
-}
-#endif /* CONFIG_SMP */
-
-#ifdef __i386__
-static inline u_int in_le32(volatile u_int *addr)
-{
- return *addr;
-}
-
-static inline void out_le32(volatile u_int *addr, u_int val)
-{
- *addr = val;
-}
+#ifdef CONFIG_POWER3
+ #define GET_ISU(source) ISU[(source) >> 4][(source) & 0xf]
+#else
+ #define GET_ISU(source) ISU[0][(source)]
#endif
u_int openpic_read(volatile u_int *addr)
@@ -156,22 +170,66 @@ static void openpic_safe_writefield(volatile u_int *addr, u_int mask,
u_int field)
{
openpic_setfield(addr, OPENPIC_MASK);
- /* wait until it's not in use */
- /* BenH: Is this code really enough ? I would rather check the result
- * and eventually retry ...
- */
while (openpic_read(addr) & OPENPIC_ACTIVITY);
openpic_writefield(addr, mask | OPENPIC_MASK, field | OPENPIC_MASK);
}
-void __init openpic_init(int main_pic)
+#ifdef CONFIG_SMP
+/* yes this is right ... bug, feature, you decide! -- tgall */
+u_int openpic_read_IPI(volatile u_int* addr)
+{
+ u_int val = 0;
+#ifdef CONFIG_POWER3
+ val = in_be32(addr);
+#else
+ val = in_le32(addr);
+#endif
+ return val;
+}
+
+/* because of the power3 be / le above, this is needed */
+inline void openpic_writefield_IPI(volatile u_int* addr, u_int mask, u_int field)
+{
+ u_int val = openpic_read_IPI(addr);
+ openpic_write(addr, (val & ~mask) | (field & mask));
+}
+
+static inline void openpic_clearfield_IPI(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield_IPI(addr, mask, 0);
+}
+
+static inline void openpic_setfield_IPI(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield_IPI(addr, mask, mask);
+}
+
+static void openpic_safe_writefield_IPI(volatile u_int *addr, u_int mask, u_int field)
+{
+ openpic_setfield_IPI(addr, OPENPIC_MASK);
+
+ /* wait until it's not in use */
+ /* BenH: Is this code really enough ? I would rather check the result
+ * and eventually retry ...
+ */
+ while(openpic_read_IPI(addr) & OPENPIC_ACTIVITY);
+
+ openpic_writefield_IPI(addr, mask | OPENPIC_MASK, field | OPENPIC_MASK);
+}
+#endif /* CONFIG_SMP */
+
+void __init openpic_init(int main_pic, int offset, unsigned char* chrp_ack,
+ int programmer_switch_irq)
{
u_int t, i;
u_int timerfreq;
const char *version;
- if (!OpenPIC)
- panic("No OpenPIC found");
+ if (!OpenPIC_Addr) {
+ printk("No OpenPIC found !\n");
+ return;
+ }
+ OpenPIC = (volatile struct OpenPIC *)OpenPIC_Addr;
if ( ppc_md.progress ) ppc_md.progress("openpic enter",0x122);
@@ -194,179 +252,180 @@ void __init openpic_init(int main_pic)
OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >>
OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1;
- if ( _machine != _MACH_Pmac )
- {
- printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version,
- NumProcessors, NumSources, OpenPIC);
- timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
- printk("OpenPIC timer frequency is ");
- if (timerfreq)
- printk("%d MHz\n", timerfreq>>20);
- else
- printk("not set\n");
+ printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n",
+ version, NumProcessors, NumSources, OpenPIC);
+ timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
+ if (timerfreq)
+ printk("OpenPIC timer frequency is %d.%06d MHz\n",
+ timerfreq / 1000000, timerfreq % 1000000);
+
+ if (!main_pic)
+ return;
+
+ open_pic_irq_offset = offset;
+ chrp_int_ack_special = (volatile unsigned char*)chrp_ack;
+
+ /* Initialize timer interrupts */
+ if ( ppc_md.progress ) ppc_md.progress("openpic timer",0x3ba);
+ for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
+ /* Disabled, Priority 0 */
+ openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i+offset);
+ /* No processor */
+ openpic_maptimer(i, 0);
}
-
- if ( main_pic )
- {
- /* Initialize timer interrupts */
- if ( ppc_md.progress ) ppc_md.progress("openpic timer",0x3ba);
- for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
- /* Disabled, Priority 0 */
- openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i);
- /* No processor */
- openpic_maptimer(i, 0);
- }
-
- /* Initialize IPI interrupts */
- if ( ppc_md.progress ) ppc_md.progress("openpic ipi",0x3bb);
- for (i = 0; i < OPENPIC_NUM_IPI; i++) {
- /* Disabled, Priority 8 */
- openpic_initipi(i, 8, OPENPIC_VEC_IPI+i);
- }
- find_ISUs();
- if ( _machine != _MACH_Pmac )
- {
- /* Initialize external interrupts */
- if ( ppc_md.progress ) ppc_md.progress("openpic ext",0x3bc);
- /* SIOint (8259 cascade) is special */
- openpic_initirq(0, 8, open_pic_irq_offset, 1, 1);
- openpic_mapirq(0, 1<<0);
- for (i = 1; i < NumSources; i++) {
- /* Enabled, Priority 8 */
- openpic_initirq(i, 8, open_pic_irq_offset+i, 0,
- i < OpenPIC_NumInitSenses ? OpenPIC_InitSenses[i] : 1);
- /* Processor 0 */
- openpic_mapirq(i, 1<<0);
- }
- }
- else
- {
- /* Prevent any interrupt from occuring during initialisation.
- * Hum... I believe this is not necessary, Apple does that in
- * Darwin's PowerExpress code.
- */
- openpic_set_priority(0, 0xf);
-
- /* First disable all interrupts and map them to CPU 0 */
- for (i = 0; i < NumSources; i++) {
- openpic_disable_irq(i);
- openpic_mapirq(i, 1<<0);
- }
-
- /* If we use the device tree, then lookup all interrupts and
- * initialize them according to sense infos found in the tree
- */
- if (use_of_interrupt_tree) {
- struct device_node* np = find_all_nodes();
- while(np) {
- int j, pri;
- pri = strcmp(np->name, "programmer-switch") ? 2 : 7;
- for (j=0;j<np->n_intrs;j++) {
- openpic_initirq(np->intrs[j].line,
- pri,
- np->intrs[j].line,
- 0,
- np->intrs[j].sense);
- if (np->intrs[j].sense)
- irq_desc[np->intrs[j].line].status = IRQ_LEVEL;
- }
- np = np->next;
- }
- }
- }
-
- /* Initialize the spurious interrupt */
- if ( ppc_md.progress ) ppc_md.progress("openpic spurious",0x3bd);
- openpic_set_spurious(OPENPIC_VEC_SPURIOUS);
- if ( !(_machine & (_MACH_gemini|_MACH_Pmac)) )
- {
- if (request_irq(IRQ_8259_CASCADE, no_action, SA_INTERRUPT,
- "82c59 cascade", NULL))
- printk("Unable to get OpenPIC IRQ 0 for cascade\n");
- }
- openpic_set_priority(0, 0);
- openpic_disable_8259_pass_through();
+
+#ifdef CONFIG_SMP
+ /* Initialize IPI interrupts */
+ if ( ppc_md.progress ) ppc_md.progress("openpic ipi",0x3bb);
+ for (i = 0; i < OPENPIC_NUM_IPI; i++) {
+ /* Disabled, Priority 10..13 */
+ openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+offset);
+ /* IPIs are per-CPU */
+ irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
+ irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
+ }
+#endif
+
+ find_ISUs();
+
+ /* Initialize external interrupts */
+ if (ppc_md.progress) ppc_md.progress("openpic ext",0x3bc);
+
+ openpic_set_priority(0xf);
+
+ /* SIOint (8259 cascade) is special */
+ if (offset) {
+ openpic_initirq(0, 8, offset, 1, 1);
+ openpic_mapirq(0, 1<<0);
+ }
+
+ /* Init all external sources */
+ for (i = 1; i < NumSources; i++) {
+ int pri, sense;
+
+ /* the bootloader may have left it enabled (bad !) */
+ openpic_disable_irq(i+offset);
+
+ pri = (i == programmer_switch_irq)? 9: 8;
+ sense = (i < OpenPIC_NumInitSenses)? OpenPIC_InitSenses[i]: 1;
+ if (sense)
+ irq_desc[i+offset].status = IRQ_LEVEL;
+
+ /* Enabled, Priority 8 or 9 */
+ openpic_initirq(i, pri, i+offset, !sense, sense);
+ /* Processor 0 */
+ openpic_mapirq(i, 1<<0);
}
- if ( ppc_md.progress ) ppc_md.progress("openpic exit",0x222);
+
+ /* Init descriptors */
+ for (i = offset; i < NumSources + offset; i++)
+ irq_desc[i].handler = &open_pic;
+
+ /* Initialize the spurious interrupt */
+ if (ppc_md.progress) ppc_md.progress("openpic spurious",0x3bd);
+ openpic_set_spurious(OPENPIC_VEC_SPURIOUS+offset);
+
+ /* Initialize the cascade */
+ if (offset) {
+ if (request_irq(offset, no_action, SA_INTERRUPT,
+ "82c59 cascade", NULL))
+ printk("Unable to get OpenPIC IRQ 0 for cascade\n");
+ }
+ openpic_set_priority(0);
+ openpic_disable_8259_pass_through();
+
+ if (ppc_md.progress) ppc_md.progress("openpic exit",0x222);
+}
+
+#ifdef CONFIG_POWER3
+void openpic_setup_ISU(int isu_num, unsigned long addr)
+{
+ if (isu_num >= OPENPIC_MAX_ISU)
+ return;
+ ISU[isu_num] = (OpenPIC_SourcePtr) ioremap(addr, 0x400);
+ if (isu_num >= NumISUs)
+ NumISUs = isu_num + 1;
}
+#endif
void find_ISUs(void)
{
-#ifdef CONFIG_PPC64BRIDGE
- /* hardcode this for now since the IBM 260 is the only thing with
- * a distributed openpic right now. -- Cort
+#ifdef CONFIG_POWER3
+ /* Use /interrupt-controller/reg and
+ * /interrupt-controller/interrupt-ranges from OF device tree
+ * the ISU array is setup in chrp_pci.c in ibm_add_bridges
+ * as a result
+ * -- tgall
+ */
+
+ /* basically each ISU is a bus, and this assumes that
+ * open_pic_isu_count interrupts per bus are possible
+ * ISU == Interrupt Source
*/
- ISU = (OpenPIC_Source *)0xfeff7c00;
- NumSources = 0x10;
+ NumSources = NumISUs * 0x10;
+
#else
/* for non-distributed OpenPIC implementations it's in the IDU -- Cort */
- ISU = (OpenPIC_Source *)OpenPIC->Source;
+ ISU[0] = (OpenPIC_Source *)OpenPIC->Source;
#endif
}
-void openpic_reset(void)
+static inline void openpic_reset(void)
{
openpic_setfield(&OpenPIC->Global.Global_Configuration0,
OPENPIC_CONFIG_RESET);
}
-void openpic_enable_8259_pass_through(void)
+static inline void openpic_enable_8259_pass_through(void)
{
openpic_clearfield(&OpenPIC->Global.Global_Configuration0,
OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
}
-void openpic_disable_8259_pass_through(void)
+static void openpic_disable_8259_pass_through(void)
{
openpic_setfield(&OpenPIC->Global.Global_Configuration0,
OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
}
-#ifndef __i386__
/*
* Find out the current interrupt
*/
-u_int openpic_irq(u_int cpu)
+static u_int openpic_irq(void)
{
u_int vec;
+ DECL_THIS_CPU;
- check_arg_cpu(cpu);
+ CHECK_THIS_CPU;
vec = openpic_readfield(&OpenPIC->THIS_CPU.Interrupt_Acknowledge,
OPENPIC_VECTOR_MASK);
return vec;
}
-#endif
-#ifndef __powerpc__
-void openpic_eoi(void)
-#else
-void openpic_eoi(u_int cpu)
-#endif
+static void openpic_eoi(void)
{
- check_arg_cpu(cpu);
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
openpic_write(&OpenPIC->THIS_CPU.EOI, 0);
/* Handle PCI write posting */
(void)openpic_read(&OpenPIC->THIS_CPU.EOI);
}
-#ifndef __powerpc__
-u_int openpic_get_priority(void)
-#else
-u_int openpic_get_priority(u_int cpu)
-#endif
+static inline u_int openpic_get_priority(void)
{
+ DECL_THIS_CPU;
+
CHECK_THIS_CPU;
return openpic_readfield(&OpenPIC->THIS_CPU.Current_Task_Priority,
OPENPIC_CURRENT_TASK_PRIORITY_MASK);
}
-#ifndef __powerpc__
-void openpic_set_priority(u_int pri)
-#else
-void openpic_set_priority(u_int cpu, u_int pri)
-#endif
+static void openpic_set_priority(u_int pri)
{
+ DECL_THIS_CPU;
+
CHECK_THIS_CPU;
check_arg_pri(pri);
openpic_writefield(&OpenPIC->THIS_CPU.Current_Task_Priority,
@@ -376,24 +435,43 @@ void openpic_set_priority(u_int cpu, u_int pri)
/*
* Get/set the spurious vector
*/
-u_int openpic_get_spurious(void)
+static inline u_int openpic_get_spurious(void)
{
return openpic_readfield(&OpenPIC->Global.Spurious_Vector,
OPENPIC_VECTOR_MASK);
}
-void openpic_set_spurious(u_int vec)
+static void openpic_set_spurious(u_int vec)
{
check_arg_vec(vec);
openpic_writefield(&OpenPIC->Global.Spurious_Vector, OPENPIC_VECTOR_MASK,
vec);
}
+#ifdef CONFIG_SMP
+/*
+ * Convert a cpu mask from logical to physical cpu numbers.
+ */
+static inline u32 physmask(u32 cpumask)
+{
+ int i;
+ u32 mask = 0;
+
+ for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1)
+ mask |= (cpumask & 1) << smp_hw_index[i];
+ return mask;
+}
+#else
+#define physmask(cpumask) (cpumask)
+#endif
+
void openpic_init_processor(u_int cpumask)
{
- openpic_write(&OpenPIC->Global.Processor_Initialization, cpumask);
+ openpic_write(&OpenPIC->Global.Processor_Initialization,
+ physmask(cpumask));
}
+#ifdef CONFIG_SMP
/*
* Initialize an interprocessor interrupt (and disable it)
*
@@ -401,35 +479,55 @@ void openpic_init_processor(u_int cpumask)
* pri: interrupt source priority
* vec: the vector it will produce
*/
-void openpic_initipi(u_int ipi, u_int pri, u_int vec)
+static void __init openpic_initipi(u_int ipi, u_int pri, u_int vec)
{
- check_arg_timer(ipi);
+ check_arg_ipi(ipi);
check_arg_pri(pri);
check_arg_vec(vec);
- openpic_safe_writefield(&OpenPIC->Global.IPI_Vector_Priority(ipi),
+ openpic_safe_writefield_IPI(&OpenPIC->Global.IPI_Vector_Priority(ipi),
OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK,
(pri << OPENPIC_PRIORITY_SHIFT) | vec);
}
/*
* Send an IPI to one or more CPUs
+ *
+ * Externally called, however, it takes an IPI number (0...OPENPIC_NUM_IPI)
+ * and not a system-wide interrupt number
*/
-#ifndef __powerpc__
void openpic_cause_IPI(u_int ipi, u_int cpumask)
-#else
-void openpic_cause_IPI(u_int cpu, u_int ipi, u_int cpumask)
-#endif
{
+ DECL_THIS_CPU;
+
CHECK_THIS_CPU;
check_arg_ipi(ipi);
- openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi), cpumask);
+ openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
+ physmask(cpumask));
}
-void openpic_enable_IPI(u_int ipi)
+void openpic_request_IPIs(void)
{
- check_arg_ipi(ipi);
- openpic_clearfield(&OpenPIC->Global.IPI_Vector_Priority(ipi),
- OPENPIC_MASK);
+ int i;
+
+ /*
+ * Make sure this matches what is defined in smp.c for
+ * smp_message_{pass|recv}() or what shows up in
+ * /proc/interrupts will be wrong!!! --Troy */
+
+ if (OpenPIC == NULL)
+ return;
+
+ request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset,
+ openpic_ipi_action, 0, "IPI0 (call function)", 0);
+ request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+1,
+ openpic_ipi_action, 0, "IPI1 (reschedule)", 0);
+ request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+2,
+ openpic_ipi_action, 0, "IPI2 (invalidate tlb)", 0);
+ request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+3,
+ openpic_ipi_action, 0, "IPI3 (xmon break)", 0);
+
+ for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
+ openpic_enable_ipi(OPENPIC_VEC_IPI+open_pic_irq_offset+i);
}
/*
@@ -437,21 +535,31 @@ void openpic_enable_IPI(u_int ipi)
*
* Get IPI's working and start taking interrupts.
* -- Cort
- */
-void do_openpic_setup_cpu(void)
+ */
+static spinlock_t openpic_setup_lock __initdata = SPIN_LOCK_UNLOCKED;
+
+void __init do_openpic_setup_cpu(void)
{
int i;
-
- for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
- openpic_enable_IPI(i);
-#if 0
- /* let the openpic know we want intrs */
- for ( i = 0; i < NumSources ; i++ )
- openpic_mapirq(i, openpic_read(ISU[i].Destination)
- | (1<<smp_processor_id()) );
-#endif
- openpic_set_priority(smp_processor_id(), 0);
+ u32 msk = 1 << smp_hw_index[smp_processor_id()];
+
+ spin_lock(&openpic_setup_lock);
+
+#ifdef CONFIG_IRQ_ALL_CPUS
+ /* let the openpic know we want intrs. default affinity
+ * is 0xffffffff until changed via /proc
+ * That's how it's done on x86. If we want it differently, then
+ * we should make sure we also change the default values of irq_affinity
+ * in irq.c.
+ */
+ for (i = 0; i < NumSources ; i++)
+ openpic_mapirq(i, openpic_read(&GET_ISU(i).Destination) | msk);
+#endif /* CONFIG_IRQ_ALL_CPUS */
+ openpic_set_priority(0);
+
+ spin_unlock(&openpic_setup_lock);
}
+#endif /* CONFIG_SMP */
/*
* Initialize a timer interrupt (and disable it)
@@ -460,7 +568,7 @@ void do_openpic_setup_cpu(void)
* pri: interrupt source priority
* vec: the vector it will produce
*/
-void openpic_inittimer(u_int timer, u_int pri, u_int vec)
+static void __init openpic_inittimer(u_int timer, u_int pri, u_int vec)
{
check_arg_timer(timer);
check_arg_pri(pri);
@@ -473,37 +581,99 @@ void openpic_inittimer(u_int timer, u_int pri, u_int vec)
/*
* Map a timer interrupt to one or more CPUs
*/
-void openpic_maptimer(u_int timer, u_int cpumask)
+static void __init openpic_maptimer(u_int timer, u_int cpumask)
{
check_arg_timer(timer);
- openpic_write(&OpenPIC->Global.Timer[timer].Destination, cpumask);
+ openpic_write(&OpenPIC->Global.Timer[timer].Destination,
+ physmask(cpumask));
}
+
+/*
+ *
+ * All functions below take an offset'ed irq argument
+ *
+ */
+
+
/*
- * Enable/disable an interrupt source
+ * Enable/disable an external interrupt source
+ *
+ * Externally called, irq is an offset'ed system-wide interrupt number
*/
-void openpic_enable_irq(u_int irq)
+static void openpic_enable_irq(u_int irq)
{
check_arg_irq(irq);
- openpic_clearfield(&ISU[irq - open_pic_irq_offset].Vector_Priority, OPENPIC_MASK);
+
+ /*
+ * Never want to disable a timer or ipi irq
+ * (only want to disable irqs within an ISU).
+ */
+ if (((irq >= OPENPIC_VEC_IPI+open_pic_irq_offset) &&
+ (irq < OPENPIC_VEC_IPI+open_pic_irq_offset+OPENPIC_NUM_IPI)) ||
+ ((irq >= OPENPIC_VEC_TIMER+open_pic_irq_offset) &&
+ (irq < OPENPIC_VEC_TIMER+open_pic_irq_offset+OPENPIC_NUM_TIMERS)))
+ {
+ /* silently ignore the enable of the timer or ipi irq. */
+ return;
+ }
+
+
+ openpic_clearfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority, OPENPIC_MASK);
/* make sure mask gets to controller before we return to user */
do {
mb(); /* sync is probably useless here */
- } while(openpic_readfield(&ISU[irq - open_pic_irq_offset].Vector_Priority,
+ } while(openpic_readfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority,
OPENPIC_MASK));
}
-void openpic_disable_irq(u_int irq)
+static void openpic_disable_irq(u_int irq)
{
+ u32 vp;
+
check_arg_irq(irq);
- openpic_setfield(&ISU[irq - open_pic_irq_offset].Vector_Priority, OPENPIC_MASK);
+ /*
+ * Never want to disable a timer or ipi irq
+ * (only want to disable irqs within an ISU).
+ */
+ if (((irq >= OPENPIC_VEC_IPI+open_pic_irq_offset) &&
+ (irq < OPENPIC_VEC_IPI+open_pic_irq_offset+OPENPIC_NUM_IPI)) ||
+ ((irq >= OPENPIC_VEC_TIMER+open_pic_irq_offset) &&
+ (irq < OPENPIC_VEC_TIMER+open_pic_irq_offset+OPENPIC_NUM_TIMERS)))
+ {
+ panic("openpic_disable_irq - disabling non-ISU irq");
+ }
+
+
+ openpic_setfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority, OPENPIC_MASK);
/* make sure mask gets to controller before we return to user */
do {
mb(); /* sync is probably useless here */
- } while(!openpic_readfield(&ISU[irq - open_pic_irq_offset].Vector_Priority,
- OPENPIC_MASK));
+ vp = openpic_readfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority,
+ OPENPIC_MASK | OPENPIC_ACTIVITY);
+ } while((vp & OPENPIC_ACTIVITY) && !(vp & OPENPIC_MASK));
}
+#ifdef CONFIG_SMP
+/*
+ * Enable/disable an IPI interrupt source
+ *
+ * Externally called, irq is an offset'ed system-wide interrupt number
+ */
+void openpic_enable_ipi(u_int irq)
+{
+ irq -= (OPENPIC_VEC_IPI+open_pic_irq_offset);
+ check_arg_ipi(irq);
+ openpic_clearfield_IPI(&OpenPIC->Global.IPI_Vector_Priority(irq), OPENPIC_MASK);
+
+}
+void openpic_disable_ipi(u_int irq)
+{
+ /* NEVER disable an IPI... that's just plain wrong! */
+}
+
+#endif
+
/*
* Initialize an interrupt source (and disable it!)
*
@@ -513,12 +683,9 @@ void openpic_disable_irq(u_int irq)
* pol: polarity (1 for positive, 0 for negative)
* sense: 1 for level, 0 for edge
*/
-void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense)
+static void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense)
{
- check_arg_irq(irq);
- check_arg_pri(pri);
- check_arg_vec(vec);
- openpic_safe_writefield(&ISU[irq].Vector_Priority,
+ openpic_safe_writefield(&GET_ISU(irq).Vector_Priority,
OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK |
OPENPIC_SENSE_MASK | OPENPIC_POLARITY_MASK,
(pri << OPENPIC_PRIORITY_SHIFT) | vec |
@@ -530,10 +697,9 @@ void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense)
/*
* Map an interrupt source to one or more CPUs
*/
-void openpic_mapirq(u_int irq, u_int cpumask)
+static void openpic_mapirq(u_int irq, u_int physmask)
{
- check_arg_irq(irq);
- openpic_write(&ISU[irq].Destination, cpumask);
+ openpic_write(&GET_ISU(irq).Destination, physmask);
}
/*
@@ -541,10 +707,92 @@ void openpic_mapirq(u_int irq, u_int cpumask)
*
* sense: 1 for level, 0 for edge
*/
-void openpic_set_sense(u_int irq, int sense)
+static inline void openpic_set_sense(u_int irq, int sense)
{
- check_arg_irq(irq);
- openpic_safe_writefield(&ISU[irq].Vector_Priority,
+ openpic_safe_writefield(&GET_ISU(irq).Vector_Priority,
OPENPIC_SENSE_LEVEL,
(sense ? OPENPIC_SENSE_LEVEL : 0));
}
+
+/* No spinlocks, should not be necessary with the OpenPIC
+ * (1 register = 1 interrupt and we have the desc lock).
+ */
+static void openpic_ack_irq(unsigned int irq_nr)
+{
+#if 1 /* masking should be unnecessary, but I still get spurious */
+ openpic_disable_irq(irq_nr);
+#endif
+ if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+ openpic_eoi();
+}
+
+static void openpic_end_irq(unsigned int irq_nr)
+{
+ if ((irq_desc[irq_nr].status & IRQ_LEVEL) != 0)
+ openpic_eoi();
+
+#if 1 /* masking should be unnecessary, but I still get spurious */
+ if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ openpic_enable_irq(irq_nr);
+#endif
+}
+
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask)
+{
+ openpic_mapirq(irq_nr - open_pic_irq_offset, physmask(cpumask));
+}
+
+#ifdef CONFIG_SMP
+static void openpic_ack_ipi(unsigned int irq_nr)
+{
+}
+
+static void openpic_end_ipi(unsigned int irq_nr)
+{
+ /* IPIs are marked IRQ_PER_CPU. This has the side effect of
+ * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
+ * applying to them. We EOI them late to avoid re-entering.
+ * however, I'm wondering if we could simply let them have the
+ * SA_INTERRUPT flag and let them execute with all interrupts OFF.
+ * This would have the side effect of either running cross-CPU
+ * functions with interrupts off, or we can re-enable them explicitly
+ * with a __sti() in smp_call_function_interrupt(), since
+ * smp_call_function() is protected by a spinlock.
+ * Or maybe we shouldn't set the IRQ_PER_CPU flag on cross-CPU
+ * function calls IPI at all but that would make a special case.
+ */
+ openpic_eoi();
+}
+
+static void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ smp_message_recv(cpl-OPENPIC_VEC_IPI-open_pic_irq_offset, regs);
+}
+
+#endif /* CONFIG_SMP */
+
+/* This one may be merged with PReP and CHRP */
+int
+openpic_get_irq(struct pt_regs *regs)
+{
+ extern int i8259_irq(int cpu);
+
+ int irq = openpic_irq();
+
+ /* Management of the cascade should be moved out of here */
+ if (open_pic_irq_offset && irq == open_pic_irq_offset)
+ {
+ /*
+ * This magic address generates a PCI IACK cycle.
+ */
+ if ( chrp_int_ack_special )
+ irq = *chrp_int_ack_special;
+ else
+ irq = i8259_irq( smp_processor_id() );
+ openpic_eoi();
+ }
+ if (irq == OPENPIC_VEC_SPURIOUS + open_pic_irq_offset)
+ irq = -1;
+ return irq;
+}
+
diff --git a/arch/ppc/kernel/open_pic.h b/arch/ppc/kernel/open_pic.h
index 3e51ffba3..cf924ba3a 100644
--- a/arch/ppc/kernel/open_pic.h
+++ b/arch/ppc/kernel/open_pic.h
@@ -1,11 +1,45 @@
+/*
+ * arch/ppc/kernel/open_pic.h -- OpenPIC Interrupt Handling
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
#ifndef _PPC_KERNEL_OPEN_PIC_H
#define _PPC_KERNEL_OPEN_PIC_H
+#include <linux/config.h>
+
+#define OPENPIC_SIZE 0x40000
+
+/* OpenPIC IRQ controller structure */
extern struct hw_interrupt_type open_pic;
-void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs);
-void openpic_enable_IPI(u_int ipi);
-void do_openpic_setup_cpu(void);
+/* OpenPIC IPI controller structure */
+#ifdef CONFIG_SMP
+extern struct hw_interrupt_type open_pic_ipi;
+#endif /* CONFIG_SMP */
+
+extern u_int OpenPIC_NumInitSenses;
+extern u_char *OpenPIC_InitSenses;
+extern void* OpenPIC_Addr;
+
+/* Exported functions */
+extern void openpic_init(int, int, unsigned char *, int);
+extern void openpic_request_IPIs(void);
+extern void do_openpic_setup_cpu(void);
+extern int openpic_get_irq(struct pt_regs *regs);
+extern void openpic_init_processor(u_int cpumask);
+extern void openpic_setup_ISU(int isu_num, unsigned long addr);
+extern void openpic_cause_IPI(u_int ipi, u_int cpumask);
-extern int open_pic_irq_offset;
+extern inline int openpic_to_irq(int irq)
+{
+ return irq += NUM_8259_INTERRUPTS;
+}
+/*extern int open_pic_irq_offset;*/
#endif /* _PPC_KERNEL_OPEN_PIC_H */
diff --git a/arch/ppc/kernel/open_pic_defs.h b/arch/ppc/kernel/open_pic_defs.h
new file mode 100644
index 000000000..fed4dd1cc
--- /dev/null
+++ b/arch/ppc/kernel/open_pic_defs.h
@@ -0,0 +1,328 @@
+/*
+ * linux/openpic.h -- OpenPIC definitions
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is based on the following documentation:
+ *
+ * The Open Programmable Interrupt Controller (PIC)
+ * Register Interface Specification Revision 1.2
+ *
+ * Issue Date: October 1995
+ *
+ * Issued jointly by Advanced Micro Devices and Cyrix Corporation
+ *
+ * AMD is a registered trademark of Advanced Micro Devices, Inc.
+ * Copyright (C) 1995, Advanced Micro Devices, Inc. and Cyrix, Inc.
+ * All Rights Reserved.
+ *
+ * To receive a copy of this documentation, send an email to openpic@amd.com.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_OPENPIC_H
+#define _LINUX_OPENPIC_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+ /*
+ * OpenPIC supports up to 2048 interrupt sources and up to 32 processors
+ */
+
+#define OPENPIC_MAX_SOURCES 2048
+#define OPENPIC_MAX_PROCESSORS 32
+#define OPENPIC_MAX_ISU 16
+
+#define OPENPIC_NUM_TIMERS 4
+#define OPENPIC_NUM_IPI 4
+#define OPENPIC_NUM_PRI 16
+#define OPENPIC_NUM_VECTORS 256
+
+
+ /*
+ * Non-offset'ed vector numbers
+ */
+
+#define OPENPIC_VEC_TIMER 64 /* and up */
+#define OPENPIC_VEC_IPI 72 /* and up */
+#define OPENPIC_VEC_SPURIOUS 127
+
+
+ /*
+ * OpenPIC Registers are 32 bits and aligned on 128 bit boundaries
+ */
+
+typedef struct _OpenPIC_Reg {
+ u_int Reg; /* Little endian! */
+ char Pad[0xc];
+} OpenPIC_Reg;
+
+
+ /*
+ * Per Processor Registers
+ */
+
+typedef struct _OpenPIC_Processor {
+ /*
+ * Private Shadow Registers (for SLiC backwards compatibility)
+ */
+ u_int IPI0_Dispatch_Shadow; /* Write Only */
+ char Pad1[0x4];
+ u_int IPI0_Vector_Priority_Shadow; /* Read/Write */
+ char Pad2[0x34];
+ /*
+ * Interprocessor Interrupt Command Ports
+ */
+ OpenPIC_Reg _IPI_Dispatch[OPENPIC_NUM_IPI]; /* Write Only */
+ /*
+ * Current Task Priority Register
+ */
+ OpenPIC_Reg _Current_Task_Priority; /* Read/Write */
+ char Pad3[0x10];
+ /*
+ * Interrupt Acknowledge Register
+ */
+ OpenPIC_Reg _Interrupt_Acknowledge; /* Read Only */
+ /*
+ * End of Interrupt (EOI) Register
+ */
+ OpenPIC_Reg _EOI; /* Read/Write */
+ char Pad5[0xf40];
+} OpenPIC_Processor;
+
+
+ /*
+ * Timer Registers
+ */
+
+typedef struct _OpenPIC_Timer {
+ OpenPIC_Reg _Current_Count; /* Read Only */
+ OpenPIC_Reg _Base_Count; /* Read/Write */
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Timer;
+
+
+ /*
+ * Global Registers
+ */
+
+typedef struct _OpenPIC_Global {
+ /*
+ * Feature Reporting Registers
+ */
+ OpenPIC_Reg _Feature_Reporting0; /* Read Only */
+ OpenPIC_Reg _Feature_Reporting1; /* Future Expansion */
+ /*
+ * Global Configuration Registers
+ */
+ OpenPIC_Reg _Global_Configuration0; /* Read/Write */
+ OpenPIC_Reg _Global_Configuration1; /* Future Expansion */
+ /*
+ * Vendor Specific Registers
+ */
+ OpenPIC_Reg _Vendor_Specific[4];
+ /*
+ * Vendor Identification Register
+ */
+ OpenPIC_Reg _Vendor_Identification; /* Read Only */
+ /*
+ * Processor Initialization Register
+ */
+ OpenPIC_Reg _Processor_Initialization; /* Read/Write */
+ /*
+ * IPI Vector/Priority Registers
+ */
+ OpenPIC_Reg _IPI_Vector_Priority[OPENPIC_NUM_IPI]; /* Read/Write */
+ /*
+ * Spurious Vector Register
+ */
+ OpenPIC_Reg _Spurious_Vector; /* Read/Write */
+ /*
+ * Global Timer Registers
+ */
+ OpenPIC_Reg _Timer_Frequency; /* Read/Write */
+ OpenPIC_Timer Timer[OPENPIC_NUM_TIMERS];
+ char Pad1[0xee00];
+} OpenPIC_Global;
+
+
+ /*
+ * Interrupt Source Registers
+ */
+
+typedef struct _OpenPIC_Source {
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Source, *OpenPIC_SourcePtr;
+
+
+ /*
+ * OpenPIC Register Map
+ */
+
+struct OpenPIC {
+ char Pad1[0x1000];
+ /*
+ * Global Registers
+ */
+ OpenPIC_Global Global;
+ /*
+ * Interrupt Source Configuration Registers
+ */
+ OpenPIC_Source Source[OPENPIC_MAX_SOURCES];
+ /*
+ * Per Processor Registers
+ */
+ OpenPIC_Processor Processor[OPENPIC_MAX_PROCESSORS];
+};
+
+extern volatile struct OpenPIC *OpenPIC;
+
+
+ /*
+ * Current Task Priority Register
+ */
+
+#define OPENPIC_CURRENT_TASK_PRIORITY_MASK 0x0000000f
+
+ /*
+ * Who Am I Register
+ */
+
+#define OPENPIC_WHO_AM_I_ID_MASK 0x0000001f
+
+ /*
+ * Feature Reporting Register 0
+ */
+
+#define OPENPIC_FEATURE_LAST_SOURCE_MASK 0x07ff0000
+#define OPENPIC_FEATURE_LAST_SOURCE_SHIFT 16
+#define OPENPIC_FEATURE_LAST_PROCESSOR_MASK 0x00001f00
+#define OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT 8
+#define OPENPIC_FEATURE_VERSION_MASK 0x000000ff
+
+ /*
+ * Global Configuration Register 0
+ */
+
+#define OPENPIC_CONFIG_RESET 0x80000000
+#define OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE 0x20000000
+#define OPENPIC_CONFIG_BASE_MASK 0x000fffff
+
+ /*
+ * Vendor Identification Register
+ */
+
+#define OPENPIC_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define OPENPIC_VENDOR_ID_STEPPING_SHIFT 16
+#define OPENPIC_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define OPENPIC_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define OPENPIC_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+
+ /*
+ * Vector/Priority Registers
+ */
+
+#define OPENPIC_MASK 0x80000000
+#define OPENPIC_ACTIVITY 0x40000000 /* Read Only */
+#define OPENPIC_PRIORITY_MASK 0x000f0000
+#define OPENPIC_PRIORITY_SHIFT 16
+#define OPENPIC_VECTOR_MASK 0x000000ff
+
+
+ /*
+ * Interrupt Source Registers
+ */
+
+#define OPENPIC_POLARITY_POSITIVE 0x00800000
+#define OPENPIC_POLARITY_NEGATIVE 0x00000000
+#define OPENPIC_POLARITY_MASK 0x00800000
+#define OPENPIC_SENSE_LEVEL 0x00400000
+#define OPENPIC_SENSE_EDGE 0x00000000
+#define OPENPIC_SENSE_MASK 0x00400000
+
+
+ /*
+ * Timer Registers
+ */
+
+#define OPENPIC_COUNT_MASK 0x7fffffff
+#define OPENPIC_TIMER_TOGGLE 0x80000000
+#define OPENPIC_TIMER_COUNT_INHIBIT 0x80000000
+
+
+ /*
+ * Aliases to make life simpler
+ */
+
+/* Per Processor Registers */
+#define IPI_Dispatch(i) _IPI_Dispatch[i].Reg
+#define Current_Task_Priority _Current_Task_Priority.Reg
+#define Interrupt_Acknowledge _Interrupt_Acknowledge.Reg
+#define EOI _EOI.Reg
+
+/* Global Registers */
+#define Feature_Reporting0 _Feature_Reporting0.Reg
+#define Feature_Reporting1 _Feature_Reporting1.Reg
+#define Global_Configuration0 _Global_Configuration0.Reg
+#define Global_Configuration1 _Global_Configuration1.Reg
+#define Vendor_Specific(i) _Vendor_Specific[i].Reg
+#define Vendor_Identification _Vendor_Identification.Reg
+#define Processor_Initialization _Processor_Initialization.Reg
+#define IPI_Vector_Priority(i) _IPI_Vector_Priority[i].Reg
+#define Spurious_Vector _Spurious_Vector.Reg
+#define Timer_Frequency _Timer_Frequency.Reg
+
+/* Timer Registers */
+#define Current_Count _Current_Count.Reg
+#define Base_Count _Base_Count.Reg
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+/* Interrupt Source Registers */
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+ /*
+ * Local (static) OpenPIC Operations
+ */
+
+
+/* Global Operations */
+static void openpic_reset(void);
+static void openpic_enable_8259_pass_through(void);
+static void openpic_disable_8259_pass_through(void);
+static u_int openpic_irq(void);
+static void openpic_eoi(void);
+static u_int openpic_get_priority(void);
+static void openpic_set_priority(u_int pri);
+static u_int openpic_get_spurious(void);
+static void openpic_set_spurious(u_int vector);
+
+#ifdef CONFIG_SMP
+/* Interprocessor Interrupts */
+static void openpic_initipi(u_int ipi, u_int pri, u_int vector);
+static void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs);
+#endif
+
+/* Timer Interrupts */
+static void openpic_inittimer(u_int timer, u_int pri, u_int vector);
+static void openpic_maptimer(u_int timer, u_int cpumask);
+
+/* Interrupt Sources */
+static void openpic_enable_irq(u_int irq);
+static void openpic_disable_irq(u_int irq);
+static void openpic_initirq(u_int irq, u_int pri, u_int vector, int polarity,
+ int is_level);
+static void openpic_mapirq(u_int irq, u_int cpumask);
+static void openpic_set_sense(u_int irq, int sense);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_OPENPIC_H */
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 6d6f775f2..1e503fae4 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -3,15 +3,16 @@
* Common pmac/prep/chrp pci routines. -- Cort
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/openpic.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -25,7 +26,7 @@
#include "pci.h"
-#undef DEBUG
+#define DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
@@ -37,54 +38,44 @@ unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
unsigned long pci_dram_offset = 0;
-struct pci_fixup pcibios_fixups[] = {
- { 0 }
-};
+static u8* pci_to_OF_bus_map;
-int generic_pcibios_read_byte(struct pci_dev *dev, int where, u8 *val)
-{
- return ppc_md.pcibios_read_config_byte(dev->bus->number,dev->devfn,where,val);
-}
-int generic_pcibios_read_word(struct pci_dev *dev, int where, u16 *val)
-{
- return ppc_md.pcibios_read_config_word(dev->bus->number,dev->devfn,where,val);
-}
-int generic_pcibios_read_dword(struct pci_dev *dev, int where, u32 *val)
-{
- return ppc_md.pcibios_read_config_dword(dev->bus->number,dev->devfn,where,val);
-}
-int generic_pcibios_write_byte(struct pci_dev *dev, int where, u8 val)
-{
- return ppc_md.pcibios_write_config_byte(dev->bus->number,dev->devfn,where,val);
-}
-int generic_pcibios_write_word(struct pci_dev *dev, int where, u16 val)
-{
- return ppc_md.pcibios_write_config_word(dev->bus->number,dev->devfn,where,val);
-}
-int generic_pcibios_write_dword(struct pci_dev *dev, int where, u32 val)
-{
- return ppc_md.pcibios_write_config_dword(dev->bus->number,dev->devfn,where,val);
-}
+static void pcibios_fixup_resources(struct pci_dev* dev);
+#ifdef CONFIG_ALL_PPC
+static void pcibios_fixup_cardbus(struct pci_dev* dev);
+#endif
-struct pci_ops generic_pci_ops =
-{
- generic_pcibios_read_byte,
- generic_pcibios_read_word,
- generic_pcibios_read_dword,
- generic_pcibios_write_byte,
- generic_pcibios_write_word,
- generic_pcibios_write_dword
-};
+/* By default, we don't re-assign bus numbers. We do this only on
+ * some pmacs
+ */
+int pci_assign_all_busses;
+
+struct pci_controller* hose_head;
+struct pci_controller** hose_tail = &hose_head;
+static int pci_bus_count;
+struct pci_fixup pcibios_fixups[] = {
+ { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
+#ifdef CONFIG_ALL_PPC
+ /* We should add per-machine fixup support in xxx_setup.c or xxx_pci.c */
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1211, pcibios_fixup_cardbus },
+#endif /* CONFIG_ALL_PPC */
+ { 0 }
+};
-void pcibios_update_resource(struct pci_dev *dev, struct resource *root,
+void
+pcibios_update_resource(struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
u32 new, check;
int reg;
-
- new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
+ struct pci_controller* hose = dev->sysdata;
+
+ new = res->start;
+ if (hose && res->flags & IORESOURCE_MEM)
+ new -= hose->pci_mem_offset;
+ new |= (res->flags & PCI_REGION_FLAG_MASK);
if (resource < 6) {
reg = PCI_BASE_ADDRESS_0 + 4*resource;
} else if (resource == PCI_ROM_RESOURCE) {
@@ -104,6 +95,62 @@ void pcibios_update_resource(struct pci_dev *dev, struct resource *root,
}
}
+static void
+pcibios_fixup_resources(struct pci_dev* dev)
+{
+ struct pci_controller* hose =
+ (struct pci_controller *)dev->sysdata;
+ int i;
+ if (!hose) {
+ printk("No hose for PCI dev %x.%x !\n", dev->bus->number, dev->devfn >> 3);
+ return;
+ }
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ struct resource *res = dev->resource + i;
+ if (!res->start)
+ continue;
+ if (res->flags & IORESOURCE_MEM) {
+ res->start += hose->pci_mem_offset;
+ res->end += hose->pci_mem_offset;
+#ifdef DEBUG
+ printk("Fixup mem res, dev: %x.%x, res_start: %lx->%lx\n",
+ dev->bus->number, dev->devfn>>3, res->start-hose->pci_mem_offset,
+ res->start);
+#endif
+ }
+
+ if ((res->flags & IORESOURCE_IO)
+ && (unsigned long) hose->io_base_virt != isa_io_base) {
+ unsigned long offs = (unsigned long) hose->io_base_virt - isa_io_base;
+ res->start += offs;
+ res->end += offs;
+ printk("Fixup IO res, dev: %x.%x, res_start: %lx->%lx\n",
+ dev->bus->number, dev->devfn>>3,
+ res->start - offs, res->start);
+ }
+ }
+}
+
+#ifdef CONFIG_ALL_PPC
+static void
+pcibios_fixup_cardbus(struct pci_dev* dev)
+{
+ /*
+ * Fix the interrupt routing on the TI1211 chip on the 1999
+ * G3 powerbook, which doesn't get initialized properly by OF.
+ */
+ if (dev->vendor == PCI_VENDOR_ID_TI
+ && dev->device == PCI_DEVICE_ID_TI_1211) {
+ u32 val;
+ /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
+ signal out the MFUNC0 pin */
+ if (pci_read_config_dword(dev, 0x8c, &val) == 0
+ && val == 0)
+ pci_write_config_dword(dev, 0x8c, 2);
+ }
+}
+#endif /* CONFIG_ALL_PPC */
+
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
@@ -172,7 +219,8 @@ pcibios_align_resource(void *data, struct resource *res, unsigned long size)
* as well.
*/
-static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
+static void __init
+pcibios_allocate_bus_resources(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
@@ -197,7 +245,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
}
}
-static void __init pcibios_allocate_resources(int pass)
+static void __init
+pcibios_allocate_resources(int pass)
{
struct pci_dev *dev;
int idx, disabled;
@@ -250,7 +299,8 @@ static void __init pcibios_allocate_resources(int pass)
}
}
-static void __init pcibios_assign_resources(void)
+static void __init
+pcibios_assign_resources(void)
{
struct pci_dev *dev;
int idx;
@@ -278,7 +328,9 @@ static void __init pcibios_assign_resources(void)
* the BIOS forgot to do so or because we have decided the old
* address was unusable for some reason.
*/
- if (!r->start && r->end)
+ if (!r->start && r->end &&
+ (!ppc_md.pcibios_enable_device_hook ||
+ !ppc_md.pcibios_enable_device_hook(dev, 1)))
pci_assign_resource(dev, idx);
}
@@ -293,7 +345,8 @@ static void __init pcibios_assign_resources(void)
}
-int pcibios_enable_resources(struct pci_dev *dev)
+int
+pcibios_enable_resources(struct pci_dev *dev)
{
u16 cmd, old_cmd;
int idx;
@@ -321,18 +374,267 @@ int pcibios_enable_resources(struct pci_dev *dev)
return 0;
}
+struct pci_controller * __init
+pcibios_alloc_controller(void)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
+ memset(hose, 0, sizeof(struct pci_controller));
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+
+ return hose;
+}
+static void
+make_one_node_map(struct device_node* node, u8 pci_bus)
+{
+ int *bus_range;
+ int len;
+
+ if (pci_bus >= pci_bus_count)
+ return;
+ bus_range = (int *) get_property(node, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s\n",
+ node->full_name);
+ return;
+ }
+ pci_to_OF_bus_map[pci_bus] = bus_range[0];
+
+ for (node=node->child; node != 0;node = node->sibling) {
+ struct pci_dev* dev;
+ unsigned int *class_code, *reg;
+
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
+ if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+ (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+ continue;
+ reg = (unsigned int *)get_property(node, "reg", 0);
+ if (!reg)
+ continue;
+ dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
+ if (!dev || !dev->subordinate)
+ continue;
+ make_one_node_map(node, dev->subordinate->number);
+ }
+}
+
+void
+pcibios_make_OF_bus_map(void)
+{
+ int i;
+ struct pci_controller* hose;
+ u8* of_prop_map;
+
+ pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
+ if (!pci_to_OF_bus_map) {
+ printk(KERN_ERR "Can't allocate OF bus map !\n");
+ return;
+ }
+
+ /* We fill the bus map with invalid values, that helps
+ * debugging.
+ */
+ for (i=0; i<pci_bus_count; i++)
+ pci_to_OF_bus_map[i] = 0xff;
+
+ /* For each hose, we begin searching bridges */
+ for(hose=hose_head; hose; hose=hose->next) {
+ struct device_node* node;
+ node = (struct device_node *)hose->arch_data;
+ if (!node)
+ continue;
+ make_one_node_map(node, hose->first_busno);
+ }
+ of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", 0);
+ if (of_prop_map)
+ memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
+#ifdef DEBUG
+ printk("PCI->OF bus map:\n");
+ for (i=0; i<pci_bus_count; i++) {
+ if (pci_to_OF_bus_map[i] == 0xff)
+ continue;
+ printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
+ }
+#endif
+}
-void __init pcibios_init(void)
+static struct device_node*
+scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
{
+ struct device_node* sub_node;
+
+ for (; node != 0;node = node->sibling) {
+ unsigned int *class_code, *reg;
+
+ reg = (unsigned int *) get_property(node, "reg", 0);
+ if (reg && ((reg[0] >> 8) & 0xff) == dev_fn
+ && ((reg[0] >> 16) & 0xff) == bus)
+ return node;
+
+ /* For PCI<->PCI bridges or CardBus bridges, we go down */
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
+ if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+ (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+ continue;
+ sub_node = scan_OF_childs_for_device(node->child, bus, dev_fn);
+ if (sub_node)
+ return sub_node;
+ }
+ return NULL;
+}
+
+/*
+ * Scans the OF tree for a device node matching a PCI device
+ */
+struct device_node*
+pci_device_to_OF_node(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+ struct device_node *node;
+ int bus;
+
+ if (!have_of)
+ return NULL;
+
+ /* Lookup the hose */
+ bus = dev->bus->number;
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
+ return NULL;
+
+ /* Check it has an OF node associated */
+ node = (struct device_node *) hose->arch_data;
+ if (!node)
+ return NULL;
+
+ /* Fixup bus number according to what OF think it is. */
+ if (pci_to_OF_bus_map)
+ bus = pci_to_OF_bus_map[bus];
+ if (bus == 0xff)
+ return NULL;
+
+ /* Now, look up children of the hose */
+ return scan_OF_childs_for_device(node->child, bus, dev->devfn);
+}
+
+/* This routine is meant to be used early during boot, when the
+ * PCI bus numbers have not yet been assigned, and you need to
+ * issue PCI config cycles to an OF device.
+ * It could also be used to "fix" RTAS config cycles if you want
+ * to set pci_assign_all_busses to 1 and still use RTAS for PCI
+ * config cycles.
+ */
+struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node)
+{
+ if (!have_of)
+ return NULL;
+ while(node) {
+ struct pci_controller* hose;
+ for (hose=hose_head;hose;hose=hose->next)
+ if (hose->arch_data == node)
+ return hose;
+ node=node->parent;
+ }
+ return NULL;
+}
+
+/*
+ * Returns the PCI device matching a given OF node
+ */
+int
+pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+{
+ unsigned int *reg;
+ int i;
+
+ if (!have_of)
+ return -ENODEV;
+ reg = (unsigned int *) get_property(node, "reg", 0);
+ if (!reg)
+ return -ENODEV;
+ *bus = (reg[0] >> 16) & 0xff;
+ for (i=0; pci_to_OF_bus_map && i<pci_bus_count; i++)
+ if (pci_to_OF_bus_map[i] == *bus) {
+ *bus = i;
+ break;
+ }
+ *devfn = ((reg[0] >> 8) & 0xff);
+ return 0;
+}
+
+void __init
+pcibios_init(void)
+{
+ struct pci_controller *hose;
+ struct pci_bus *bus;
+ int next_busno;
+
printk("PCI: Probing PCI hardware\n");
- pci_scan_bus(0, &generic_pci_ops, NULL);
+
+ /* Scan all of the recorded PCI controllers. */
+ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
+ if (pci_assign_all_busses)
+ hose->first_busno = next_busno;
+ hose->last_busno = 0xff;
+ bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
+ hose->bus = bus;
+ hose->last_busno = bus->subordinate;
+ if (pci_assign_all_busses || next_busno <= hose->last_busno)
+ next_busno = hose->last_busno+1;
+ }
+ pci_bus_count = next_busno;
+
+ /* OpenFirmware based machines need a map of OF bus
+ * numbers vs. kernel bus numbers since we may have to
+ * remap them.
+ */
+ if (pci_assign_all_busses && have_of)
+ pcibios_make_OF_bus_map();
+
+ /* Call machine dependent fixup */
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
+
+ /* Allocate and assign resources */
pcibios_allocate_bus_resources(&pci_root_buses);
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
pcibios_assign_resources();
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /* OF fails to initialize IDE controllers on macs
+ * (and maybe other machines)
+ *
+ * This late fixup is done here since I want it to happen after
+ * resource assignment, and there's no "late-init" arch hook
+ *
+ * Ideally, this should be moved to the IDE layer, but we need
+ * to check specifically with Andre Hedrick how to do it cleanly
+ * since the common IDE code seem to care about the fact that the
+ * BIOS may have disabled a controller.
+ *
+ * -- BenH
+ */
+ if (_machine == _MACH_Pmac) {
+ struct pci_dev *dev;
+ pci_for_each_dev(dev)
+ {
+ if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
+ pci_enable_device(dev);
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+}
+
+int __init
+pcibios_assign_all_busses(void)
+{
+ return pci_assign_all_busses;
}
void __init
@@ -344,9 +646,16 @@ pcibios_fixup_pbus_ranges(struct pci_bus * bus, struct pbus_set_ranges_data * ra
ranges->mem_end -= bus->resource[1]->start;
}
+unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
+ unsigned long start, unsigned long size)
+{
+ return start;
+}
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
+ pci_read_bridge_bases(bus);
+
if ( ppc_md.pcibios_fixup_bus )
ppc_md.pcibios_fixup_bus(bus);
}
@@ -370,6 +679,10 @@ int pcibios_enable_device(struct pci_dev *dev)
int idx;
struct resource *r;
+ if (ppc_md.pcibios_enable_device_hook)
+ if (ppc_md.pcibios_enable_device_hook(dev, 0))
+ return -EINVAL;
+
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx=0; idx<6; idx++) {
@@ -391,37 +704,99 @@ int pcibios_enable_device(struct pci_dev *dev)
return 0;
}
-void *
-pci_dev_io_base(unsigned char bus, unsigned char devfn, int physical)
+struct pci_controller*
+pci_bus_to_hose(int bus)
{
- if (!ppc_md.pci_dev_io_base) {
- /* Please, someone fix this for non-pmac machines, we
- * need either the virtual or physical PCI IO base
- */
- return 0;
- }
- return ppc_md.pci_dev_io_base(bus, devfn, physical);
+ struct pci_controller* hose = hose_head;
+
+ for (; hose; hose = hose->next)
+ if (bus >= hose->first_busno && bus <= hose->last_busno)
+ return hose;
+ return NULL;
}
-void *
-pci_dev_mem_base(unsigned char bus, unsigned char devfn)
+void*
+pci_bus_io_base(unsigned int bus)
{
- /* Default memory base is 0 (1:1 mapping) */
- if (!ppc_md.pci_dev_mem_base) {
- /* Please, someone fix this for non-pmac machines.*/
+ struct pci_controller *hose;
+
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
+ return NULL;
+ return hose->io_base_virt;
+}
+
+unsigned long
+pci_bus_io_base_phys(unsigned int bus)
+{
+ struct pci_controller *hose;
+
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
return 0;
- }
- return ppc_md.pci_dev_mem_base(bus, devfn);
+ return hose->io_base_phys;
}
-/* Returns the root-bridge number (Uni-N number) of a device */
-int
-pci_dev_root_bridge(unsigned char bus, unsigned char devfn)
+unsigned long
+pci_bus_mem_base_phys(unsigned int bus)
{
- /* Defaults to 0 */
- if (!ppc_md.pci_dev_root_bridge)
+ struct pci_controller *hose;
+
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
return 0;
- return ppc_md.pci_dev_root_bridge(bus, devfn);
+ return hose->pci_mem_offset;
+}
+
+#ifdef CONFIG_POWER4
+extern unsigned long pci_address_offset(int, unsigned int);
+#endif /* CONFIG_POWER4 */
+
+unsigned long
+pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
+{
+ /* Hack alert again ! See comments in chrp_pci.c
+ */
+#ifdef CONFIG_POWER4
+ unsigned long offset = pci_address_offset(pdev->bus->number, res->flags);
+ return res->start - offset;
+#else /* CONFIG_POWER4 */
+ struct pci_controller* hose =
+ (struct pci_controller *)pdev->sysdata;
+ if (hose && res->flags & IORESOURCE_MEM)
+ return res->start - hose->pci_mem_offset;
+ /* We may want to do something with IOs here... */
+ return res->start;
+#endif
+}
+
+/* Obsolete functions. Should be removed once the symbios driver
+ * is fixed
+ */
+unsigned long
+pci_phys_to_bus(unsigned long pa, int busnr)
+{
+#ifdef CONFIG_POWER4
+ return pa - pci_address_offset(busnr, IORESOURCE_MEM);
+#else /* CONFIG_POWER4 */
+ struct pci_controller* hose = pci_bus_to_hose(busnr);
+ if (!hose)
+ return pa;
+ return pa - hose->pci_mem_offset;
+#endif
+}
+
+unsigned long
+pci_bus_to_phys(unsigned int ba, int busnr)
+{
+#ifdef CONFIG_POWER4
+	return ba + pci_address_offset(busnr, IORESOURCE_MEM);
+#else /* CONFIG_POWER4 */
+ struct pci_controller* hose = pci_bus_to_hose(busnr);
+ if (!hose)
+ return ba;
+ return ba + hose->pci_mem_offset;
+#endif
}
/* Provide information on locations of various I/O regions in physical
@@ -430,23 +805,93 @@ pci_dev_root_bridge(unsigned char bus, unsigned char devfn)
* Note that the returned IO or memory base is a physical address
*/
-asmlinkage long
+long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
+ struct pci_controller* hose = pci_bus_to_hose(bus);
long result = -EOPNOTSUPP;
+
+ if (!hose)
+ return -ENODEV;
switch (which) {
case IOBASE_BRIDGE_NUMBER:
- return (long)pci_dev_root_bridge(bus, devfn);
+ return (long)hose->first_busno;
case IOBASE_MEMORY:
- return (long)pci_dev_mem_base(bus, devfn);
+ return (long)hose->pci_mem_offset;
case IOBASE_IO:
- result = (long)pci_dev_io_base(bus, devfn, 1);
- if (result == 0)
- result = -EOPNOTSUPP;
- break;
+ return (long)hose->io_base_phys;
+ case IOBASE_ISA_IO:
+ return (long)isa_io_base;
+ case IOBASE_ISA_MEM:
+ return (long)isa_mem_base;
}
return result;
}
+/*
+ * Null PCI config access functions, for the case when we can't
+ * find a hose.
+ */
+#define NULL_PCI_OP(rw, size, type) \
+static int \
+null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ return PCIBIOS_DEVICE_NOT_FOUND; \
+}
+
+NULL_PCI_OP(read, byte, u8 *)
+NULL_PCI_OP(read, word, u16 *)
+NULL_PCI_OP(read, dword, u32 *)
+NULL_PCI_OP(write, byte, u8)
+NULL_PCI_OP(write, word, u16)
+NULL_PCI_OP(write, dword, u32)
+
+static struct pci_ops null_pci_ops =
+{
+ null_read_config_byte,
+ null_read_config_word,
+ null_read_config_dword,
+ null_write_config_byte,
+ null_write_config_word,
+ null_write_config_dword
+};
+
+/*
+ * These functions are used early on before PCI scanning is done
+ * and all of the pci_dev and pci_bus structures have been created.
+ */
+static struct pci_dev *
+fake_pci_dev(struct pci_controller *hose, int busnr, int devfn)
+{
+ static struct pci_dev dev;
+ static struct pci_bus bus;
+
+ if (hose == 0) {
+ hose = pci_bus_to_hose(busnr);
+ if (hose == 0)
+ printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
+ }
+ dev.bus = &bus;
+ dev.sysdata = hose;
+ dev.devfn = devfn;
+ bus.number = busnr;
+ bus.ops = hose? hose->ops: &null_pci_ops;
+ return &dev;
+}
+
+#define EARLY_PCI_OP(rw, size, type) \
+int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
+ int devfn, int offset, type value) \
+{ \
+ return pci_##rw##_config_##size(fake_pci_dev(hose, bus, devfn), \
+ offset, value); \
+}
+
+EARLY_PCI_OP(read, byte, u8 *)
+EARLY_PCI_OP(read, word, u16 *)
+EARLY_PCI_OP(read, dword, u32 *)
+EARLY_PCI_OP(write, byte, u8)
+EARLY_PCI_OP(write, word, u16)
+EARLY_PCI_OP(write, dword, u32)
diff --git a/arch/ppc/kernel/pci.h b/arch/ppc/kernel/pci.h
index d79eb0f4a..07a64d9f3 100644
--- a/arch/ppc/kernel/pci.h
+++ b/arch/ppc/kernel/pci.h
@@ -2,47 +2,23 @@
#ifndef __PPC_KERNEL_PCI_H__
#define __PPC_KERNEL_PCI_H__
+/* Configure those in your xxx_init() or xxx_setup_arch() function */
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;
-extern unsigned int *pci_config_address;
-extern unsigned char *pci_config_data;
+/* Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers
+ */
+extern int pci_assign_all_busses;
-void fix_intr(struct device_node *node, struct pci_dev *dev);
-#if 0
-#define decl_config_access_method(name) \
-struct pci_ops name##_pci_ops = { \
- name##_pcibios_read_config_byte, \
- name##_pcibios_read_config_word, \
- name##_pcibios_read_config_dword, \
- name##_pcibios_write_config_byte, \
- name##_pcibios_write_config_word, \
- name##_pcibios_write_config_dword \
-}
-#endif
+extern struct pci_controller* pcibios_alloc_controller(void);
+extern struct pci_controller* pci_find_hose_for_OF_device(
+ struct device_node* node);
-#define decl_config_access_method(name) \
-extern int name##_pcibios_read_config_byte(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned char *val); \
-extern int name##_pcibios_read_config_word(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned short *val); \
-extern int name##_pcibios_read_config_dword(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned int *val); \
-extern int name##_pcibios_write_config_byte(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned char val); \
-extern int name##_pcibios_write_config_word(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned short val); \
-extern int name##_pcibios_write_config_dword(unsigned char bus, \
- unsigned char dev_fn, unsigned char offset, unsigned int val)
-
-#define set_config_access_method(name) \
- ppc_md.pcibios_read_config_byte = name##_pcibios_read_config_byte; \
- ppc_md.pcibios_read_config_word = name##_pcibios_read_config_word; \
- ppc_md.pcibios_read_config_dword = name##_pcibios_read_config_dword; \
- ppc_md.pcibios_write_config_byte = name##_pcibios_write_config_byte; \
- ppc_md.pcibios_write_config_word = name##_pcibios_write_config_word; \
- ppc_md.pcibios_write_config_dword = name##_pcibios_write_config_dword
+extern void setup_indirect_pci(struct pci_controller* hose,
+ u32 cfg_addr, u32 cfg_data);
+extern void setup_grackle(struct pci_controller *hose, unsigned io_space_size);
#endif /* __PPC_KERNEL_PCI_H__ */
diff --git a/arch/ppc/kernel/pmac_pci.c b/arch/ppc/kernel/pmac_pci.c
index 8f7b3d7c2..9e1fffb49 100644
--- a/arch/ppc/kernel/pmac_pci.c
+++ b/arch/ppc/kernel/pmac_pci.c
@@ -27,23 +27,14 @@
#include "pci.h"
-struct bridge_data **bridges, *bridge_list;
-static int max_bus;
-
-struct uninorth_data {
- struct device_node* node;
- volatile unsigned int* cfg_addr;
- volatile unsigned int* cfg_data;
- void* iobase;
- unsigned long iobase_phys;
-};
-
-static struct uninorth_data uninorth_bridges[3];
-static int uninorth_count;
-static int uninorth_default = -1;
+#undef DEBUG
static void add_bridges(struct device_node *dev);
+/* XXX Could be per-controller, but I don't think we risk anything by
+ * assuming we won't have both UniNorth and Bandit */
+static int has_uninorth;
+
/*
* Magic constants for enabling cache coherency in the bandit/PSX bridge.
*/
@@ -56,477 +47,201 @@ static void add_bridges(struct device_node *dev);
#define BANDIT_MAGIC 0x50
#define BANDIT_COHERENT 0x40
-/* Obsolete, should be replaced by pmac_pci_dev_io_base() (below) */
-__pmac
-void *pci_io_base(unsigned int bus)
-{
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return 0;
- return bp->io_base;
-}
-
-__pmac
-int pci_device_loc(struct device_node *dev, unsigned char *bus_ptr,
- unsigned char *devfn_ptr)
+static int __init
+fixup_one_level_bus_range(struct device_node *node, int higher)
{
- unsigned int *reg;
- int len;
+ for (; node != 0;node = node->sibling) {
+ int * bus_range;
+ unsigned int *class_code;
+ int len;
- reg = (unsigned int *) get_property(dev, "reg", &len);
- if (reg == 0 || len < 5 * sizeof(unsigned int)) {
- /* doesn't look like a PCI device */
- *bus_ptr = 0xff;
- *devfn_ptr = 0xff;
- return -1;
+ /* For PCI<->PCI bridges or CardBus bridges, we go down */
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
+ if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+ (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+ continue;
+ bus_range = (int *) get_property(node, "bus-range", &len);
+ if (bus_range != NULL && len > 2 * sizeof(int)) {
+ if (bus_range[1] > higher)
+ higher = bus_range[1];
+ }
+ higher = fixup_one_level_bus_range(node->child, higher);
}
- *bus_ptr = reg[0] >> 16;
- *devfn_ptr = reg[0] >> 8;
- return 0;
+ return higher;
}
-/* This routines figures out on which root bridge a given PCI device
- * is attached.
+/* This routine fixes the "bus-range" property of all bridges in the
+ * system since they tend to have their "last" member wrong on macs
+ *
+ * Note that the bus numbers manipulated here are OF bus numbers, they
+ * are not Linux bus numbers.
*/
-__pmac
-int
-pmac_pci_dev_root_bridge(unsigned char bus, unsigned char dev_fn)
+static void __init
+fixup_bus_range(struct device_node *bridge)
{
- struct device_node *node, *bridge_node;
- int bridge = uninorth_default;
-
- if (uninorth_count == 0)
- return 0;
- if (bus == 0 && PCI_SLOT(dev_fn) < 11)
- return 0;
+ int * bus_range;
+ int len;
- /* We look for the OF device corresponding to this bus/devfn pair. If we
- * don't find it, we default to the external PCI */
- bridge_node = NULL;
- node = find_pci_device_OFnode(bus, dev_fn & 0xf8);
- if (node) {
- /* note: we don't stop on the first occurence since we need to go
- * up to the root bridge */
- do {
- if (node->type && !strcmp(node->type, "pci")
- && device_is_compatible(node, "uni-north"))
- bridge_node = node;
- node=node->parent;
- } while (node);
- }
- if (bridge_node) {
- int i;
- for (i=0;i<uninorth_count;i++)
- if (uninorth_bridges[i].node == bridge_node) {
- bridge = i;
- break;
- }
- }
-
- if (bridge == -1) {
- printk(KERN_WARNING "pmac_pci: no default bridge !\n");
- return 0;
- }
-
- return bridge;
-}
-
-__pmac
-void *
-pmac_pci_dev_io_base(unsigned char bus, unsigned char devfn, int physical)
-{
- int bridge = -1;
- if (uninorth_count != 0)
- bridge = pmac_pci_dev_root_bridge(bus, devfn);
- if (bridge == -1) {
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return 0;
- return physical ? (void *) bp->io_base_phys : bp->io_base;
+ /* Lookup the "bus-range" property for the hose */
+ bus_range = (int *) get_property(bridge, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s\n",
+ bridge->full_name);
+ return;
}
- return physical ? (void *) uninorth_bridges[bridge].iobase_phys
- : uninorth_bridges[bridge].iobase;
-}
-
-__pmac
-void *
-pmac_pci_dev_mem_base(unsigned char bus, unsigned char devfn)
-{
- return 0;
+ bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}
-/* This function only works for bus 0, uni-N uses a different mecanism for
- * other busses (see below)
+/*
+ * Apple MacRISC (UniNorth, Bandit) PCI controllers.
+ *
+ * The "Bandit" version is present in all early PCI PowerMacs,
+ * and up to the first ones using Grackle. Some machines may
+ * have 2 bandit controllers (2 PCI busses).
+ *
+ * The "UniNorth" version is present in all Core99 machines
+ * (iBook, G4, new IMacs, and all the recent Apple machines).
+ * It contains 3 controllers in one ASIC.
*/
-#define UNI_N_CFA0(devfn, off) \
+
+#define MACRISC_CFA0(dev_fn, off) \
((1 << (unsigned long)PCI_SLOT(dev_fn)) \
| (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
| (((unsigned long)(off)) & 0xFCUL))
-/* This one is for type 1 config accesses */
-#define UNI_N_CFA1(bus, devfn, off) \
+#define MACRISC_CFA1(bus, devfn, off) \
((((unsigned long)(bus)) << 16) \
|(((unsigned long)(devfn)) << 8) \
|(((unsigned long)(off)) & 0xFCUL) \
|1UL)
-__pmac static
-unsigned int
-uni_north_access_data(unsigned char bus, unsigned char dev_fn,
- unsigned char offset)
+static unsigned int __pmac
+macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
{
- int bridge;
unsigned int caddr;
-
- bridge = pmac_pci_dev_root_bridge(bus, dev_fn);
- if (bus == 0)
- caddr = UNI_N_CFA0(dev_fn, offset);
- else
- caddr = UNI_N_CFA1(bus, dev_fn, offset);
-
- if (bridge == -1) {
- printk(KERN_WARNING "pmac_pci: no default bridge !\n");
- return 0;
- }
-
- /* Uninorth will return garbage if we don't read back the value ! */
- out_le32(uninorth_bridges[bridge].cfg_addr, caddr);
- (void)in_le32(uninorth_bridges[bridge].cfg_addr);
- /* Yes, offset is & 7, not & 3 ! */
- return (unsigned int)(uninorth_bridges[bridge].cfg_data) + (offset & 0x07);
-}
-
-__pmac
-int uni_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- unsigned int addr;
-
- *val = 0xff;
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = in_8((volatile unsigned char*)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int uni_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- unsigned int addr;
-
- *val = 0xffff;
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = in_le16((volatile unsigned short*)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int uni_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
-{
- unsigned int addr;
- *val = 0xffff;
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- *val = in_le32((volatile unsigned int*)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int uni_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
-{
- unsigned int addr;
-
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_8((volatile unsigned char *)addr, val);
- (void)in_8((volatile unsigned char *)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int uni_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- unsigned int addr;
-
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le16((volatile unsigned short *)addr, val);
- (void)in_le16((volatile unsigned short *)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int uni_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
-{
- unsigned int addr;
-
- addr = uni_north_access_data(bus, dev_fn, offset);
- if (!addr)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32((volatile unsigned int *)addr, val);
- (void)in_le32((volatile unsigned int *)addr);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int pmac_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
-{
- struct bridge_data *bp;
-
- *val = 0xff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (bus == bp->bus_number) {
- if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + (offset & ~3));
- } else {
- /* Bus number once again taken into consideration.
- * Change applied from 2.1.24. This makes devices located
- * behind PCI-PCI bridges visible.
- * -Ranjit Deshpande, 01/20/99
- */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + (offset & ~3) + 1);
- }
- udelay(2);
- *val = in_8(bp->cfg_data + (offset & 3));
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int pmac_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
-{
- struct bridge_data *bp;
-
- *val = 0xffff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 1) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- if (bus == bp->bus_number) {
- if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + (offset & ~3));
- } else {
- /* See pci_read_config_byte */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + (offset & ~3) + 1);
- }
- udelay(2);
- *val = in_le16((volatile unsigned short *)(bp->cfg_data + (offset & 3)));
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int pmac_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
-{
- struct bridge_data *bp;
-
- *val = 0xffffffff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 3) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- if (bus == bp->bus_number) {
- if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + offset);
- } else {
- /* See pci_read_config_byte */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + offset + 1);
- }
- udelay(2);
- *val = in_le32((volatile unsigned int *)bp->cfg_data);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int pmac_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
-{
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (bus == bp->bus_number) {
+#ifdef DEBUG
+//	printk("macrisc_config_access(hose: 0x%08lx, bus: 0x%x, devfn: 0x%x, offset: 0x%x)\n",
+//		hose, bus, dev_fn, offset);
+#endif
+ if (bus == hose->first_busno) {
if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + (offset & ~3));
- } else {
- /* See pci_read_config_byte */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + (offset & ~3) + 1);
- }
- udelay(2);
- out_8(bp->cfg_data + (offset & 3), val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-__pmac
-int pmac_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
-{
- struct bridge_data *bp;
+ return 0;
+ caddr = MACRISC_CFA0(dev_fn, offset);
+ } else
+ caddr = MACRISC_CFA1(bus, dev_fn, offset);
+
+ /* Uninorth will return garbage if we don't read back the value ! */
+ do {
+ out_le32(hose->cfg_addr, caddr);
+ } while(in_le32(hose->cfg_addr) != caddr);
+
+ offset &= has_uninorth ? 0x07 : 0x03;
+ return (unsigned int)(hose->cfg_data) + (unsigned int)offset;
+}
+
+#define cfg_read(val, addr, type, op, op2) \
+ *val = op((type)(addr))
+#define cfg_write(val, addr, type, op, op2) \
+ op((type *)(addr), (val)); (void) op2((type *)(addr))
+
+#define cfg_read_bad(val, size) *val = bad_##size;
+#define cfg_write_bad(val, size)
+
+#define bad_byte 0xff
+#define bad_word 0xffff
+#define bad_dword 0xffffffffU
+
+#define MACRISC_PCI_OP(rw, size, type, op, op2) \
+static int __pmac \
+macrisc_##rw##_config_##size(struct pci_dev *dev, int off, type val) \
+{ \
+ struct pci_controller *hose = dev->sysdata; \
+ unsigned int addr; \
+ \
+ addr = macrisc_cfg_access(hose, dev->bus->number, dev->devfn, off); \
+ if (!addr) { \
+ cfg_##rw##_bad(val, size) \
+ return PCIBIOS_DEVICE_NOT_FOUND; \
+ } \
+ cfg_##rw(val, addr, type, op, op2); \
+ return PCIBIOS_SUCCESSFUL; \
+}
+
+MACRISC_PCI_OP(read, byte, u8 *, in_8, x)
+MACRISC_PCI_OP(read, word, u16 *, in_le16, x)
+MACRISC_PCI_OP(read, dword, u32 *, in_le32, x)
+MACRISC_PCI_OP(write, byte, u8, out_8, in_8)
+MACRISC_PCI_OP(write, word, u16, out_le16, in_le16)
+MACRISC_PCI_OP(write, dword, u32, out_le32, in_le32)
+
+static struct pci_ops macrisc_pci_ops =
+{
+ macrisc_read_config_byte,
+ macrisc_read_config_word,
+ macrisc_read_config_dword,
+ macrisc_write_config_byte,
+ macrisc_write_config_word,
+ macrisc_write_config_dword
+};
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 1) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- if (bus == bp->bus_number) {
- if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + (offset & ~3));
- } else {
- /* See pci_read_config_byte */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + (offset & ~3) + 1);
- }
- udelay(2);
- out_le16((volatile unsigned short *)(bp->cfg_data + (offset & 3)), val);
- return PCIBIOS_SUCCESSFUL;
-}
-__pmac
-int pmac_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
+/*
+ * Apple "Chaos" PCI controller.
+ *
+ * This controller is present on some first generation "PowerSurge"
+ * machines (8500, 8600, ...). It's a very weird beast and will die
+ * in flames if we try to probe the config space.
+ * The long-term solution is to provide a config space "emulation"
+ * based on what we find in OF device tree
+ */
+
+static int chaos_config_read_byte(struct pci_dev *dev, int offset, u8 *val)
{
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 3) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- if (bus == bp->bus_number) {
- if (dev_fn < (11 << 3))
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_le32(bp->cfg_addr,
- (1UL << (dev_fn >> 3)) + ((dev_fn & 7) << 8)
- + offset);
- } else {
- /* See pci_read_config_byte */
- out_le32(bp->cfg_addr, (bus << 16) + (dev_fn << 8) + (offset & ~3) + 1);
- }
- udelay(2);
- out_le32((volatile unsigned int *)bp->cfg_data, val);
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
- | (((o) & ~3) << 24))
-
-int grackle_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char *val)
+static int chaos_config_read_word(struct pci_dev *dev, int offset, u16 *val)
{
- struct bridge_data *bp;
-
- *val = 0xff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- *val = in_8(bp->cfg_data + (offset & 3));
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-int grackle_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val)
+static int chaos_config_read_dword(struct pci_dev *dev, int offset, u32 *val)
{
- struct bridge_data *bp;
-
- *val = 0xffff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 1) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- *val = in_le16((volatile unsigned short *)(bp->cfg_data + (offset&3)));
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-int grackle_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int *val)
+static int chaos_config_write_byte(struct pci_dev *dev, int offset, u8 val)
{
- struct bridge_data *bp;
-
- *val = 0xffffffff;
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 3) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- *val = in_le32((volatile unsigned int *)bp->cfg_data);
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-int grackle_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned char val)
+static int chaos_config_write_word(struct pci_dev *dev, int offset, u16 val)
{
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- out_8(bp->cfg_data + (offset & 3), val);
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-int grackle_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val)
+static int chaos_config_write_dword(struct pci_dev *dev, int offset, u32 val)
{
- struct bridge_data *bp;
-
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 1) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- out_le16((volatile unsigned short *)(bp->cfg_data + (offset&3)), val);
- return PCIBIOS_SUCCESSFUL;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
-int grackle_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned int val)
+static struct pci_ops chaos_pci_ops =
{
- struct bridge_data *bp;
+ chaos_config_read_byte,
+ chaos_config_read_word,
+ chaos_config_read_dword,
+ chaos_config_write_byte,
+ chaos_config_write_word,
+ chaos_config_write_dword
+};
- if (bus > max_bus || (bp = bridges[bus]) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((offset & 1) != 0)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- out_be32(bp->cfg_addr, GRACKLE_CFA(bus, dev_fn, offset));
- out_le32((volatile unsigned int *)bp->cfg_data, val);
- return PCIBIOS_SUCCESSFUL;
-}
/*
* For a bandit bridge, turn on cache coherency if necessary.
- * N.B. we can't use pcibios_*_config_* here because bridges[]
- * is not initialized yet.
+ * N.B. we could clean this up using the hose ops directly.
*/
-static void __init init_bandit(struct bridge_data *bp)
+static void __init init_bandit(struct pci_controller *bp)
{
unsigned int vendev, magic;
int rev;
@@ -543,8 +258,8 @@ static void __init init_bandit(struct bridge_data *bp)
rev = in_8(bp->cfg_data);
if (rev != BANDIT_REVID)
printk(KERN_WARNING
- "Unknown revision %d for bandit at %p\n",
- rev, bp->io_base);
+ "Unknown revision %d for bandit at %08lx\n",
+ rev, bp->io_base_phys);
} else if (vendev != (BANDIT_DEVID_2 << 16) + APPLE_VENDID) {
printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
return;
@@ -555,8 +270,8 @@ static void __init init_bandit(struct bridge_data *bp)
udelay(2);
rev = in_8(bp->cfg_data);
if (rev != BANDIT_REVID)
- printk(KERN_WARNING "Unknown revision %d for bandit at %p\n",
- rev, bp->io_base);
+ printk(KERN_WARNING "Unknown revision %d for bandit at %08lx\n",
+ rev, bp->io_base_phys);
/* read the word at offset 0x50 */
out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
@@ -567,16 +282,71 @@ static void __init init_bandit(struct bridge_data *bp)
magic |= BANDIT_COHERENT;
udelay(2);
out_le32((volatile unsigned int *)bp->cfg_data, magic);
- printk(KERN_INFO "Cache coherency enabled for bandit/PSX at %p\n",
- bp->io_base);
+ printk(KERN_INFO "Cache coherency enabled for bandit/PSX at %08lx\n",
+ bp->io_base_phys);
+}
+
+
+/*
+ * Tweak the PCI-PCI bridge chip on the blue & white G3s.
+ */
+static void __init
+init_p2pbridge(void)
+{
+ struct device_node *p2pbridge;
+ struct pci_controller* hose;
+ u8 bus, devfn;
+ u16 val;
+
+ /* XXX it would be better here to identify the specific
+ PCI-PCI bridge chip we have. */
+ if ((p2pbridge = find_devices("pci-bridge")) == 0
+ || p2pbridge->parent == NULL
+ || strcmp(p2pbridge->parent->name, "pci") != 0)
+ return;
+ if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
+#ifdef DEBUG
+ printk("Can't find PCI infos for PCI<->PCI bridge\n");
+#endif
+ return;
+ }
+ /* Warning: At this point, we have not yet renumbered all busses.
+ * So we must use OF walking to find out hose
+ */
+ hose = pci_find_hose_for_OF_device(p2pbridge);
+ if (!hose) {
+#ifdef DEBUG
+ printk("Can't find hose for PCI<->PCI bridge\n");
+#endif
+ return;
+ }
+ if (early_read_config_word(hose, bus, devfn,
+ PCI_BRIDGE_CONTROL, &val) < 0) {
+ printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
+ return;
+ }
+ val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
+ early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
}
+void __init
+pmac_find_bridges(void)
+{
+ add_bridges(find_devices("bandit"));
+ add_bridges(find_devices("chaos"));
+ add_bridges(find_devices("pci"));
+ init_p2pbridge();
+}
+
+#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
+ | (((o) & ~3) << 24))
+
#define GRACKLE_PICR1_STG 0x00000040
#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
/* N.B. this is called before bridges is initialized, so we can't
use grackle_pcibios_{read,write}_config_dword. */
-static inline void grackle_set_stg(struct bridge_data *bp, int enable)
+static inline void grackle_set_stg(struct pci_controller* bp, int enable)
{
unsigned int val;
@@ -586,9 +356,10 @@ static inline void grackle_set_stg(struct bridge_data *bp, int enable)
(val & ~GRACKLE_PICR1_STG);
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
out_le32((volatile unsigned int *)bp->cfg_data, val);
+ (void)in_le32((volatile unsigned int *)bp->cfg_data);
}
-static inline void grackle_set_loop_snoop(struct bridge_data *bp, int enable)
+static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
{
unsigned int val;
@@ -598,25 +369,68 @@ static inline void grackle_set_loop_snoop(struct bridge_data *bp, int enable)
(val & ~GRACKLE_PICR1_LOOPSNOOP);
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
out_le32((volatile unsigned int *)bp->cfg_data, val);
+ (void)in_le32((volatile unsigned int *)bp->cfg_data);
}
+static void __init
+setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
+{
+ pci_assign_all_busses = 1;
+ has_uninorth = 1;
+ hose->ops = &macrisc_pci_ops;
+ hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+ hose->io_base_phys = addr->address;
+ /* is 0x10000 enough for io space ? */
+ hose->io_base_virt = (void *)ioremap(addr->address, 0x10000);
+
+ /* XXX This is the bridge with the PCI expansion bus. We route
+ * legacy IOs to it.
+ */
+ if (addr->address == 0xf2000000)
+ isa_io_base = (unsigned long)hose->io_base_virt;
+}
-void __init pmac_find_bridges(void)
+static void __init
+setup_bandit(struct pci_controller* hose, struct reg_property* addr)
{
- int bus;
- struct bridge_data *bridge;
+ hose->ops = &macrisc_pci_ops;
+ hose->cfg_addr = (volatile unsigned int *)
+ ioremap(addr->address + 0x800000, 0x1000);
+ hose->cfg_data = (volatile unsigned char *)
+ ioremap(addr->address + 0xc00000, 0x1000);
+ hose->io_base_phys = addr->address;
+ hose->io_base_virt = (void *) ioremap(addr->address, 0x10000);
+ init_bandit(hose);
+}
- bridge_list = 0;
- max_bus = 0;
- add_bridges(find_devices("bandit"));
- add_bridges(find_devices("chaos"));
- add_bridges(find_devices("pci"));
- bridges = (struct bridge_data **)
- alloc_bootmem((max_bus + 1) * sizeof(struct bridge_data *));
- memset(bridges, 0, (max_bus + 1) * sizeof(struct bridge_data *));
- for (bridge = bridge_list; bridge != NULL; bridge = bridge->next)
- for (bus = bridge->bus_number; bus <= bridge->max_bus; ++bus)
- bridges[bus] = bridge;
+static void __init
+setup_chaos(struct pci_controller* hose, struct reg_property* addr)
+{
+ /* assume a `chaos' bridge */
+ hose->ops = &chaos_pci_ops;
+ hose->cfg_addr = (volatile unsigned int *)
+ ioremap(addr->address + 0x800000, 0x1000);
+ hose->cfg_data = (volatile unsigned char *)
+ ioremap(addr->address + 0xc00000, 0x1000);
+ hose->io_base_phys = addr->address;
+ hose->io_base_virt = (void *) ioremap(addr->address, 0x10000);
+}
+
+void __init
+setup_grackle(struct pci_controller *hose, unsigned io_space_size)
+{
+ setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+ hose->io_base_phys = 0xfe000000;
+ hose->io_base_virt = (void *) ioremap(0xfe000000, io_space_size);
+ pci_dram_offset = 0;
+ isa_mem_base = 0xfd000000;
+ isa_io_base = (unsigned long) hose->io_base_virt;
+ if (machine_is_compatible("AAPL,PowerBook1998"))
+ grackle_set_loop_snoop(hose, 1);
+#if 0 /* Disabled for now, HW problems ??? */
+ grackle_set_stg(hose, 1);
+#endif
}
/*
@@ -626,11 +440,12 @@ void __init pmac_find_bridges(void)
*/
static void __init add_bridges(struct device_node *dev)
{
- int *bus_range;
int len;
- struct bridge_data *bp;
+ struct pci_controller *hose;
struct reg_property *addr;
-
+ char* disp_name;
+ int *bus_range;
+
for (; dev != NULL; dev = dev->next) {
addr = (struct reg_property *) get_property(dev, "reg", &len);
if (addr == NULL || len < sizeof(*addr)) {
@@ -640,108 +455,52 @@ static void __init add_bridges(struct device_node *dev)
}
bus_range = (int *) get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s\n",
- dev->full_name);
- continue;
- }
- if (bus_range[1] == bus_range[0])
- printk(KERN_INFO "PCI bus %d", bus_range[0]);
- else
- printk(KERN_INFO "PCI buses %d..%d", bus_range[0],
- bus_range[1]);
- printk(" controlled by %s at %x\n", dev->name, addr->address);
- if (device_is_compatible(dev, "uni-north")) {
- int i = uninorth_count++;
- uninorth_bridges[i].cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
- uninorth_bridges[i].cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
- uninorth_bridges[i].node = dev;
- uninorth_bridges[i].iobase_phys = addr->address;
- /* is 0x10000 enough for io space ? */
- uninorth_bridges[i].iobase = (void *)ioremap(addr->address, 0x10000);
- /* XXX This is the bridge with the PCI expansion bus. This is also the
- * address of the bus that will receive type 1 config accesses and io
- * accesses. Appears to be correct for iMac DV and G4 Sawtooth too.
- * That means that we cannot do io cycles on the AGP bus nor the internal
- * ethernet/fw bus. Fortunately, they appear not to be needed on iMac DV
- * and G4 neither.
- */
- if (addr->address == 0xf2000000)
- uninorth_default = i;
- else
- continue;
+ printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
+ dev->full_name);
}
- bp = (struct bridge_data *) alloc_bootmem(sizeof(*bp));
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ continue;
+ hose->arch_data = dev;
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+ disp_name = NULL;
if (device_is_compatible(dev, "uni-north")) {
- bp->cfg_addr = 0;
- bp->cfg_data = 0;
- bp->io_base = uninorth_bridges[uninorth_count-1].iobase;
- bp->io_base_phys = uninorth_bridges[uninorth_count-1].iobase_phys;
+ setup_uninorth(hose, addr);
+ disp_name = "UniNorth";
} else if (strcmp(dev->name, "pci") == 0) {
/* XXX assume this is a mpc106 (grackle) */
- bp->cfg_addr = (volatile unsigned int *)
- ioremap(0xfec00000, 0x1000);
- bp->cfg_data = (volatile unsigned char *)
- ioremap(0xfee00000, 0x1000);
- bp->io_base_phys = 0xfe000000;
- bp->io_base = (void *) ioremap(0xfe000000, 0x20000);
- if (machine_is_compatible("AAPL,PowerBook1998"))
- grackle_set_loop_snoop(bp, 1);
-#if 0 /* Disabled for now, HW problems ??? */
- grackle_set_stg(bp, 1);
-#endif
- } else {
- /* a `bandit' or `chaos' bridge */
- bp->cfg_addr = (volatile unsigned int *)
- ioremap(addr->address + 0x800000, 0x1000);
- bp->cfg_data = (volatile unsigned char *)
- ioremap(addr->address + 0xc00000, 0x1000);
- bp->io_base_phys = addr->address;
- bp->io_base = (void *) ioremap(addr->address, 0x10000);
+ setup_grackle(hose, 0x20000);
+ disp_name = "Grackle (MPC106)";
+ } else if (strcmp(dev->name, "bandit") == 0) {
+ setup_bandit(hose, addr);
+ disp_name = "Bandit";
+ } else if (strcmp(dev->name, "chaos") == 0) {
+ setup_chaos(hose, addr);
+ disp_name = "Chaos";
}
+ printk(KERN_INFO "Found %s PCI host bridge at 0x%08x. Firmware bus number: %d->%d\n",
+ disp_name, addr->address, hose->first_busno, hose->last_busno);
+#ifdef DEBUG
+ printk(" ->Hose at 0x%08lx, cfg_addr=0x%08lx,cfg_data=0x%08lx\n",
+ hose, hose->cfg_addr, hose->cfg_data);
+#endif
+
+ /* Setup a default isa_io_base */
if (isa_io_base == 0)
- isa_io_base = (unsigned long) bp->io_base;
- bp->bus_number = bus_range[0];
- bp->max_bus = bus_range[1];
- bp->next = bridge_list;
- bp->node = dev;
- bridge_list = bp;
- if (bp->max_bus > max_bus)
- max_bus = bp->max_bus;
-
- if (strcmp(dev->name, "bandit") == 0)
- init_bandit(bp);
- }
-}
-
-/* Recursively searches any node that is of type PCI-PCI bridge. Without
- * this, the old code would miss children of P2P bridges and hence not
- * fix IRQ's for cards located behind P2P bridges.
- * - Ranjit Deshpande, 01/20/99
- */
-void __init
-fix_intr(struct device_node *node, struct pci_dev *dev)
-{
- unsigned int *reg, *class_code;
+ isa_io_base = (unsigned long)hose->io_base_virt;
- for (; node != 0;node = node->sibling) {
- class_code = (unsigned int *) get_property(node, "class-code", 0);
- if(class_code && (*class_code >> 8) == PCI_CLASS_BRIDGE_PCI)
- fix_intr(node->child, dev);
- reg = (unsigned int *) get_property(node, "reg", 0);
- if (reg == 0 || ((reg[0] >> 8) & 0xff) != dev->devfn)
- continue;
- /* this is the node, see if it has interrupts */
- if (node->n_intrs > 0)
- dev->irq = node->intrs[0].line;
- break;
+ /* Fixup "bus-range" OF property */
+ fixup_bus_range(dev);
}
}
-void __init
-pmac_pcibios_fixup(void)
-{
- struct pci_dev *dev;
+static void
+pcibios_fixup_OF_interrupts(void)
+{
+ struct pci_dev* dev;
/*
* FIXME: This is broken: We should not assign IRQ's to IRQless
@@ -757,44 +516,43 @@ pmac_pcibios_fixup(void)
* should find the device node and se if it has an
* AAPL,interrupts property.
*/
- struct bridge_data *bp = bridges[dev->bus->number];
unsigned char pin;
+ struct device_node* node;
- if (pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin) ||
- !pin)
+ if (pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin) || !pin)
continue; /* No interrupt generated -> no fixup */
- /* We iterate all instances of uninorth for now */
- if (uninorth_count && dev->bus->number == 0) {
- int i;
- for (i=0;i<uninorth_count;i++)
- fix_intr(uninorth_bridges[i].node->child, dev);
- } else
- fix_intr(bp->node->child, dev);
+ node = pci_device_to_OF_node(dev);
+ if (!node) {
+ printk("No OF node for device %x:%x\n", dev->bus->number, dev->devfn >> 3);
+ continue;
+ }
+ /* this is the node, see if it has interrupts */
+ if (node->n_intrs > 0)
+ dev->irq = node->intrs[0].line;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
}
}
void __init
-pmac_setup_pci_ptrs(void)
+pmac_pcibios_fixup(void)
{
- struct device_node* np;
+ /* Fixup interrupts according to OF tree */
+ pcibios_fixup_OF_interrupts();
+}
- np = find_devices("pci");
- if (np != 0)
- {
- if (device_is_compatible(np, "uni-north"))
- {
- /* looks like an Core99 powermac */
- set_config_access_method(uni);
- } else
- {
- /* looks like a G3 powermac */
- set_config_access_method(grackle);
- }
- } else
- {
- set_config_access_method(pmac);
+/* We don't want to enable USB controllers absent from the OF tree
+ * (iBook second controller)
+ */
+int
+pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
+{
+ if (dev->vendor == PCI_VENDOR_ID_APPLE
+ && dev->device == PCI_DEVICE_ID_APPLE_KL_USB) {
+ struct device_node* node;
+ node = pci_device_to_OF_node(dev);
+ if (!node)
+ return -EINVAL;
}
-
- ppc_md.pcibios_fixup = pmac_pcibios_fixup;
+ return 0;
}
diff --git a/arch/ppc/kernel/pmac_pic.c b/arch/ppc/kernel/pmac_pic.c
index efd767482..a9a1777ca 100644
--- a/arch/ppc/kernel/pmac_pic.c
+++ b/arch/ppc/kernel/pmac_pic.c
@@ -4,14 +4,15 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
-#include <linux/openpic.h>
#include <asm/init.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
+
#include "pmac_pic.h"
+#include "open_pic.h"
/* pmac */struct pmac_irq_hw {
unsigned int flag;
@@ -30,16 +31,14 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
static int max_irqs;
static int max_real_irqs;
-static int has_openpic = 0;
+static int pmac_has_openpic;
+
+spinlock_t pmac_pic_lock = SPIN_LOCK_UNLOCKED;
+
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
-extern int pmac_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short *val);
-extern int pmac_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
- unsigned char offset, unsigned short val);
-
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).
@@ -51,48 +50,11 @@ void __pmac __no_use_set_lost(unsigned long irq_nr)
atomic_inc(&ppc_n_lost_interrupts);
}
-static void pmac_openpic_mask_irq(unsigned int irq_nr)
-{
- openpic_disable_irq(irq_nr);
-}
-
-static void pmac_openpic_unmask_irq(unsigned int irq_nr)
-{
- openpic_enable_irq(irq_nr);
-}
-
-static void pmac_openpic_ack_irq(unsigned int irq_nr)
-{
- if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
- openpic_eoi(smp_processor_id());
- openpic_disable_irq(irq_nr);
-}
-
-static void pmac_openpic_end_irq(unsigned int irq_nr)
-{
- if ((irq_desc[irq_nr].status & IRQ_LEVEL) != 0)
- openpic_eoi(smp_processor_id());
- openpic_enable_irq(irq_nr);
-}
-
-struct hw_interrupt_type pmac_open_pic = {
- " OpenPIC ",
- NULL,
- NULL,
- pmac_openpic_unmask_irq,
- pmac_openpic_mask_irq,
- /* Theorically, the mask&ack should be NULL for OpenPIC. However, doing
- * so shows tons of bogus interrupts coming in.
- */
- pmac_openpic_ack_irq,
- pmac_openpic_end_irq,
- NULL
-};
-
static void __pmac pmac_mask_and_ack_irq(unsigned int irq_nr)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
+ unsigned long flags;
if ((unsigned)irq_nr >= max_irqs)
return;
@@ -100,6 +62,7 @@ static void __pmac pmac_mask_and_ack_irq(unsigned int irq_nr)
clear_bit(irq_nr, ppc_cached_irq_mask);
if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
+ spin_lock_irqsave(&pmac_pic_lock, flags);
out_le32(&pmac_irq_hw[i]->ack, bit);
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
out_le32(&pmac_irq_hw[i]->ack, bit);
@@ -108,16 +71,19 @@ static void __pmac pmac_mask_and_ack_irq(unsigned int irq_nr)
interrupts */
mb();
} while(in_le32(&pmac_irq_hw[i]->flag) & bit);
+ spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void __pmac pmac_set_irq_mask(unsigned int irq_nr)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
+ unsigned long flags;
if ((unsigned)irq_nr >= max_irqs)
return;
+ spin_lock_irqsave(&pmac_pic_lock, flags);
/* enable unmasked interrupts */
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
@@ -137,6 +103,7 @@ static void __pmac pmac_set_irq_mask(unsigned int irq_nr)
&& (ld_le32(&pmac_irq_hw[i]->level) & bit)
&& !(ld_le32(&pmac_irq_hw[i]->flag) & bit))
__set_lost((ulong)irq_nr);
+ spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void __pmac pmac_mask_irq(unsigned int irq_nr)
@@ -152,6 +119,15 @@ static void __pmac pmac_unmask_irq(unsigned int irq_nr)
pmac_set_irq_mask(irq_nr);
}
+static void __pmac pmac_end_irq(unsigned int irq_nr)
+{
+ if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
+ set_bit(irq_nr, ppc_cached_irq_mask);
+ pmac_set_irq_mask(irq_nr);
+ }
+}
+
+
struct hw_interrupt_type pmac_pic = {
" PMAC-PIC ",
NULL,
@@ -159,7 +135,7 @@ struct hw_interrupt_type pmac_pic = {
pmac_unmask_irq,
pmac_mask_irq,
pmac_mask_and_ack_irq,
- pmac_unmask_irq,
+ pmac_end_irq,
NULL
};
@@ -170,7 +146,7 @@ struct hw_interrupt_type gatwick_pic = {
pmac_unmask_irq,
pmac_mask_irq,
pmac_mask_and_ack_irq,
- pmac_unmask_irq,
+ pmac_end_irq,
NULL
};
@@ -204,35 +180,22 @@ pmac_get_irq(struct pt_regs *regs)
unsigned long bits = 0;
#ifdef CONFIG_SMP
- void pmac_smp_message_recv(struct pt_regs *);
+ void psurge_smp_message_recv(struct pt_regs *);
- /* IPI's are a hack on the powersurge -- Cort */
- if ( smp_processor_id() != 0 )
- {
- pmac_smp_message_recv(regs);
+ /* IPI's are a hack on the powersurge -- Cort */
+ if ( smp_processor_id() != 0 ) {
+ psurge_smp_message_recv(regs);
return -2; /* ignore, already handled */
}
#endif /* CONFIG_SMP */
-
- if (has_openpic) {
- irq = openpic_irq(smp_processor_id());
- if (irq == OPENPIC_VEC_SPURIOUS)
- /* We get those when doing polled ADB requests,
- * using -2 is a temp hack to disable the printk
- */
- irq = -2; /*-1; */
- }
- else
- {
- for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
- int i = irq >> 5;
- bits = ld_le32(&pmac_irq_hw[i]->flag)
- | ppc_lost_interrupts[i];
- if (bits == 0)
- continue;
- irq += __ilog2(bits);
- break;
- }
+ for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
+ int i = irq >> 5;
+ bits = ld_le32(&pmac_irq_hw[i]->flag)
+ | ppc_lost_interrupts[i];
+ if (bits == 0)
+ continue;
+ irq += __ilog2(bits);
+ break;
}
return irq;
@@ -336,11 +299,16 @@ static void __init enable_second_ohare(void)
addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
max_irqs = 64;
- if (pci_device_loc(irqctrler, &bus, &devfn) == 0) {
- pmac_pcibios_read_config_word(bus, devfn, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- cmd &= ~PCI_COMMAND_IO;
- pmac_pcibios_write_config_word(bus, devfn, PCI_COMMAND, cmd);
+ if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
+ struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
+ if (!hose)
+ printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+ else {
+ early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ cmd &= ~PCI_COMMAND_IO;
+ early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+ }
}
second_irq = irqctrler->intrs[0].line;
@@ -378,20 +346,26 @@ pmac_pic_init(void)
printk("PowerMac using OpenPIC irq controller\n");
if (irqctrler->n_addrs > 0)
{
+ int nmi_irq = -1;
+ unsigned char senses[NR_IRQS];
#ifdef CONFIG_XMON
struct device_node* pswitch;
-#endif /* CONFIG_XMON */
- OpenPIC = (volatile struct OpenPIC *)
- ioremap(irqctrler->addrs[0].address,
- irqctrler->addrs[0].size);
- for ( i = 0 ; i < NR_IRQS ; i++ )
- irq_desc[i].handler = &pmac_open_pic;
- openpic_init(1);
- has_openpic = 1;
-#ifdef CONFIG_XMON
+
pswitch = find_devices("programmer-switch");
if (pswitch && pswitch->n_intrs)
- request_irq(pswitch->intrs[0].line, xmon_irq, 0,
+ nmi_irq = pswitch->intrs[0].line;
+#endif /* CONFIG_XMON */
+ prom_get_irq_senses(senses, 0, NR_IRQS);
+ OpenPIC_InitSenses = senses;
+ OpenPIC_NumInitSenses = NR_IRQS;
+ ppc_md.get_irq = openpic_get_irq;
+ OpenPIC_Addr = ioremap(irqctrler->addrs[0].address,
+ irqctrler->addrs[0].size);
+ openpic_init(1, 0, 0, nmi_irq);
+ pmac_has_openpic = 1;
+#ifdef CONFIG_XMON
+ if (nmi_irq >= 0)
+ request_irq(nmi_irq, xmon_irq, 0,
"NMI - XMON", 0);
#endif /* CONFIG_XMON */
return;
diff --git a/arch/ppc/kernel/pmac_setup.c b/arch/ppc/kernel/pmac_setup.c
index b5bf03abc..e7be1114e 100644
--- a/arch/ppc/kernel/pmac_setup.c
+++ b/arch/ppc/kernel/pmac_setup.c
@@ -47,6 +47,7 @@
#include <linux/cuda.h>
#include <linux/pmu.h>
+#include <asm/processor.h>
#include <asm/init.h>
#include <asm/prom.h>
#include <asm/system.h>
@@ -73,7 +74,8 @@ extern unsigned long pmac_get_rtc_time(void);
extern int pmac_set_rtc_time(unsigned long nowtime);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
-extern void pmac_setup_pci_ptrs(void);
+extern void pmac_pcibios_fixup(void);
+extern void pmac_find_bridges(void);
extern int mackbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int mackbd_getkeycode(unsigned int scancode);
@@ -99,9 +101,7 @@ extern char pckbd_unexpected_up(unsigned char keycode);
extern int keyboard_sends_linux_keycodes;
extern void pmac_nvram_update(void);
-extern void *pmac_pci_dev_io_base(unsigned char bus, unsigned char devfn, int physical);
-extern void *pmac_pci_dev_mem_base(unsigned char bus, unsigned char devfn);
-extern int pmac_pci_dev_root_bridge(unsigned char bus, unsigned char devfn);
+extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
unsigned char drive_info;
@@ -119,13 +119,34 @@ extern int pmac_newworld;
extern void zs_kgdb_hook(int tty_num);
static void ohare_init(void);
-static void init_p2pbridge(void);
#ifdef CONFIG_BOOTX_TEXT
void pmac_progress(char *s, unsigned short hex);
#endif
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
+#ifdef CONFIG_SMP
+volatile static long int core99_l2_cache;
+void core99_init_l2(void)
+{
+ int cpu = smp_processor_id();
+
+ if ( (_get_PVR() >> 16) != 8 && (_get_PVR() >> 16) != 12 )
+ return;
+
+ if (cpu == 0){
+ core99_l2_cache = _get_L2CR();
+ printk("CPU0: L2CR is %lx\n", core99_l2_cache);
+ } else {
+ printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
+ _set_L2CR(0);
+ _set_L2CR(core99_l2_cache);
+ printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
+ }
+}
+#endif /* CONFIG_SMP */
+
+
__pmac
int
pmac_get_cpuinfo(char *buffer)
@@ -250,7 +271,7 @@ pmac_setup_arch(void)
struct device_node *cpu;
int *fp;
- /* Set loops_per_sec to a half-way reasonable value,
+ /* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
cpu = find_type_devices("cpu");
if (cpu != 0) {
@@ -263,13 +284,13 @@ pmac_setup_arch(void)
case 10: /* mach V (604ev5) */
case 12: /* G4 */
case 20: /* 620 */
- loops_per_sec = *fp;
+ loops_per_jiffy = *fp / HZ;
break;
default: /* 601, 603, etc. */
- loops_per_sec = *fp / 2;
+ loops_per_jiffy = *fp / (2*HZ);
}
} else
- loops_per_sec = 50000000;
+ loops_per_jiffy = 50000000 / HZ;
}
/* this area has the CPU identification register
@@ -278,8 +299,8 @@ pmac_setup_arch(void)
__ioremap(0xffc00000, 0x400000, pgprot_val(PAGE_READONLY));
ohare_init();
+ /* Lookup PCI hosts */
pmac_find_bridges();
- init_p2pbridge();
/* Checks "l2cr-value" property in the registry */
if ( (_get_PVR() >> 16) == 8 || (_get_PVR() >> 16) == 12 ) {
@@ -303,6 +324,11 @@ pmac_setup_arch(void)
ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000)
? "enabled" : "disabled");
+#ifdef CONFIG_SMP
+ /* somewhat of a hack */
+ core99_init_l2();
+#endif
+
#ifdef CONFIG_KGDB
zs_kgdb_hook(0);
#endif
@@ -330,32 +356,6 @@ pmac_setup_arch(void)
ROOT_DEV = to_kdev_t(DEFAULT_ROOT_DEVICE);
}
-/*
- * Tweak the PCI-PCI bridge chip on the blue & white G3s.
- */
-static void __init init_p2pbridge(void)
-{
- struct device_node *p2pbridge;
- unsigned char bus, devfn;
- unsigned short val;
-
- /* XXX it would be better here to identify the specific
- PCI-PCI bridge chip we have. */
- if ((p2pbridge = find_devices("pci-bridge")) == 0
- || p2pbridge->parent == NULL
- || strcmp(p2pbridge->parent->name, "pci") != 0)
- return;
- if (pci_device_loc(p2pbridge, &bus, &devfn) < 0)
- return;
- if (ppc_md.pcibios_read_config_word(bus, devfn, PCI_BRIDGE_CONTROL, &val) < 0) {
- printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
- return;
- }
- val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
- ppc_md.pcibios_write_config_word(bus, devfn, PCI_BRIDGE_CONTROL, val);
- ppc_md.pcibios_read_config_word(bus, devfn, PCI_BRIDGE_CONTROL, &val);
-}
-
static void __init ohare_init(void)
{
/*
@@ -448,7 +448,7 @@ kdev_t __init find_ide_boot(void)
void __init find_boot_device(void)
{
-#ifdef CONFIG_SCSI
+#if defined(CONFIG_SCSI) && defined(CONFIG_BLK_DEV_SD)
if (boot_host != NULL) {
boot_dev = sd_find_target(boot_host, boot_target);
if (boot_dev != 0)
@@ -493,7 +493,9 @@ pmac_restart(char *cmd)
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_NVRAM
pmac_nvram_update();
+#endif
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
@@ -509,7 +511,7 @@ pmac_restart(char *cmd)
pmu_restart();
break;
#endif /* CONFIG_ADB_PMU */
- default:
+ default: ;
}
}
@@ -520,7 +522,9 @@ pmac_power_off(void)
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_NVRAM
pmac_nvram_update();
+#endif
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
@@ -536,7 +540,7 @@ pmac_power_off(void)
pmu_shutdown();
break;
#endif /* CONFIG_ADB_PMU */
- default:
+ default: ;
}
}
@@ -566,17 +570,19 @@ pmac_ide_outsw(ide_ioreg_t port, void *buf, int ns)
int
pmac_ide_default_irq(ide_ioreg_t base)
{
- return 0;
-}
-
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
-extern ide_ioreg_t pmac_ide_get_base(int index);
+ extern int pmac_ide_get_irq(ide_ioreg_t base);
+ return pmac_ide_get_irq(base);
+#else
+ return 0;
#endif
+}
ide_ioreg_t
pmac_ide_default_io_base(int index)
{
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
+ extern ide_ioreg_t pmac_ide_get_base(int index);
return pmac_ide_get_base(index);
#else
return 0;
@@ -586,7 +592,14 @@ pmac_ide_default_io_base(int index)
int
pmac_ide_check_region(ide_ioreg_t from, unsigned int extent)
{
- return 0;
+ /*
+ * We only do the check_region if `from' looks like a genuine
+ * I/O port number. If it actually refers to a memory-mapped
+ * register, it should be OK.
+ */
+ if (from < ~_IO_BASE)
+ return 0;
+ return check_region(from, extent);
}
void
@@ -594,24 +607,16 @@ pmac_ide_request_region(ide_ioreg_t from,
unsigned int extent,
const char *name)
{
+ if (from < ~_IO_BASE)
+ request_region(from, extent, name);
}
void
pmac_ide_release_region(ide_ioreg_t from,
unsigned int extent)
{
-}
-
-/* Convert the shorts/longs in hd_driveid from little to big endian;
- * chars are endian independant, of course, but strings need to be flipped.
- * (Despite what it says in drivers/block/ide.h, they come up as little
- * endian...)
- *
- * Changes to linux/hdreg.h may require changes here. */
-void
-pmac_ide_fix_driveid(struct hd_driveid *id)
-{
- ppc_generic_ide_fix_driveid(id);
+ if (from < ~_IO_BASE)
+ release_region(from, extent);
}
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
@@ -632,8 +637,6 @@ void __init
pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
- pmac_setup_pci_ptrs();
-
/* isa_io_base gets set in pmac_find_bridges */
isa_mem_base = PMAC_ISA_MEM_BASE;
pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
@@ -646,8 +649,11 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.get_cpuinfo = pmac_get_cpuinfo;
ppc_md.irq_cannonicalize = NULL;
ppc_md.init_IRQ = pmac_pic_init;
- ppc_md.get_irq = pmac_get_irq;
+ ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
ppc_md.init = pmac_init2;
+
+ ppc_md.pcibios_fixup = pmac_pcibios_fixup;
+ ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook;
ppc_md.restart = pmac_restart;
ppc_md.power_off = pmac_power_off;
@@ -658,12 +664,8 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.get_rtc_time = pmac_get_rtc_time;
ppc_md.calibrate_decr = pmac_calibrate_decr;
- ppc_md.pci_dev_io_base = pmac_pci_dev_io_base;
- ppc_md.pci_dev_mem_base = pmac_pci_dev_mem_base;
- ppc_md.pci_dev_root_bridge = pmac_pci_dev_root_bridge;
-
#ifdef CONFIG_VT
-#ifdef CONFIG_INPUT_ADBHID
+#ifdef CONFIG_INPUT
ppc_md.kbd_init_hw = mac_hid_init_hw;
ppc_md.kbd_translate = mac_hid_kbd_translate;
ppc_md.kbd_unexpected_up = mac_hid_kbd_unexpected_up;
@@ -682,35 +684,33 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
}
#endif /* CONFIG_MAGIC_SYSRQ */
#elif defined(CONFIG_ADB_KEYBOARD)
- ppc_md.kbd_setkeycode = mackbd_setkeycode;
- ppc_md.kbd_getkeycode = mackbd_getkeycode;
- ppc_md.kbd_translate = mackbd_translate;
- ppc_md.kbd_unexpected_up = mackbd_unexpected_up;
- ppc_md.kbd_leds = mackbd_leds;
- ppc_md.kbd_init_hw = mackbd_init_hw;
+ ppc_md.kbd_setkeycode = mackbd_setkeycode;
+ ppc_md.kbd_getkeycode = mackbd_getkeycode;
+ ppc_md.kbd_translate = mackbd_translate;
+ ppc_md.kbd_unexpected_up = mackbd_unexpected_up;
+ ppc_md.kbd_leds = mackbd_leds;
+ ppc_md.kbd_init_hw = mackbd_init_hw;
#ifdef CONFIG_MAGIC_SYSRQ
- ppc_md.ppc_kbd_sysrq_xlate = mackbd_sysrq_xlate;
+ ppc_md.ppc_kbd_sysrq_xlate = mackbd_sysrq_xlate;
SYSRQ_KEY = 0x69;
#endif /* CONFIG_MAGIC_SYSRQ */
#endif /* CONFIG_INPUT_ADBHID/CONFIG_ADB_KEYBOARD */
#endif /* CONFIG_VT */
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
- ppc_ide_md.insw = pmac_ide_insw;
- ppc_ide_md.outsw = pmac_ide_outsw;
- ppc_ide_md.default_irq = pmac_ide_default_irq;
- ppc_ide_md.default_io_base = pmac_ide_default_io_base;
- ppc_ide_md.ide_check_region = pmac_ide_check_region;
- ppc_ide_md.ide_request_region = pmac_ide_request_region;
- ppc_ide_md.ide_release_region = pmac_ide_release_region;
- ppc_ide_md.fix_driveid = pmac_ide_fix_driveid;
- ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
-
- ppc_ide_md.io_base = _IO_BASE; /* actually too early for this :-( */
-#endif
+ ppc_ide_md.insw = pmac_ide_insw;
+ ppc_ide_md.outsw = pmac_ide_outsw;
+ ppc_ide_md.default_irq = pmac_ide_default_irq;
+ ppc_ide_md.default_io_base = pmac_ide_default_io_base;
+ ppc_ide_md.ide_check_region = pmac_ide_check_region;
+ ppc_ide_md.ide_request_region = pmac_ide_request_region;
+ ppc_ide_md.ide_release_region = pmac_ide_release_region;
+ ppc_ide_md.fix_driveid = ppc_generic_ide_fix_driveid;
+ ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
+#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
#ifdef CONFIG_BOOTX_TEXT
ppc_md.progress = pmac_progress;
-#endif
+#endif /* CONFIG_BOOTX_TEXT */
if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
}
@@ -727,5 +727,5 @@ pmac_progress(char *s, unsigned short hex)
prom_drawstring(s);
prom_drawchar('\n');
}
-#endif CONFIG_BOOTX_TEXT
+#endif /* CONFIG_BOOTX_TEXT */
diff --git a/arch/ppc/kernel/pmac_time.c b/arch/ppc/kernel/pmac_time.c
index 00b6302a7..46d067e81 100644
--- a/arch/ppc/kernel/pmac_time.c
+++ b/arch/ppc/kernel/pmac_time.c
@@ -114,7 +114,7 @@ unsigned long pmac_get_rtc_time(void)
+ (req.reply[3] << 8) + req.reply[4];
return now - RTC_OFFSET;
#endif /* CONFIG_ADB_PMU */
- default:
+ default: ;
}
return 0;
}
@@ -135,7 +135,7 @@ int pmac_set_rtc_time(unsigned long nowtime)
return 0;
while (!req.complete)
cuda_poll();
-// if (req.reply_len != 7)
+ if ((req.reply_len != 3) && (req.reply_len != 7))
printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
req.reply_len);
return 1;
diff --git a/arch/ppc/kernel/ppc_asm.h b/arch/ppc/kernel/ppc_asm.h
index 42b8c9c39..59c377c4d 100644
--- a/arch/ppc/kernel/ppc_asm.h
+++ b/arch/ppc/kernel/ppc_asm.h
@@ -50,7 +50,7 @@
*/
#define LVX(r,a,b) .long (31<<26)+((r)<<21)+((a)<<16)+((b)<<11)+(103<<1)
#define STVX(r,a,b) .long (31<<26)+((r)<<21)+((a)<<16)+((b)<<11)+(231<<1)
-#define MFVSCR(r) .long (4<<26)+((r)<<21)+(1540<<1)
+#define MFVSCR(r) .long (4<<26)+((r)<<21)+(770<<1)
#define MTVSCR(r) .long (4<<26)+((r)<<11)+(802<<1)
#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); STVX(n,b,base)
@@ -66,9 +66,13 @@
#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base)
#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base)
+#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC \
sync; \
isync
+#else
+#define SYNC
+#endif
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index 32f99ce0e..9768f2890 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -555,10 +555,7 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
break;
buffer += len;
left -= len;
- _set_L2CR(0);
_set_L2CR(val);
- while ( _get_L2CR() & 0x1 )
- /* wait for invalidate to finish */;
} else {
p = buf;
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 827e37540..6de2d94fe 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -12,6 +12,7 @@
#include <linux/console.h>
#include <linux/irq.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <asm/page.h>
#include <asm/semaphore.h>
@@ -40,6 +41,7 @@
#include <asm/backlight.h>
#ifdef CONFIG_SMP
#include <asm/smplock.h>
+#include <asm/smp.h>
#endif /* CONFIG_SMP */
#include <asm/time.h>
@@ -53,9 +55,10 @@ extern void MachineCheckException(struct pt_regs *regs);
extern void AlignmentException(struct pt_regs *regs);
extern void ProgramCheckException(struct pt_regs *regs);
extern void SingleStepException(struct pt_regs *regs);
-extern int sys_sigreturn(struct pt_regs *regs);
extern void do_lost_interrupts(unsigned long);
extern int do_signal(sigset_t *, struct pt_regs *);
+extern int pmac_newworld;
+extern int sys_sigreturn(struct pt_regs *regs);
long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
@@ -98,10 +101,6 @@ EXPORT_SYMBOL(_prep_type);
EXPORT_SYMBOL(ucSystemType);
#endif
#endif
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_dev_io_base);
-EXPORT_SYMBOL(pci_dev_mem_base);
-#endif
#if !__INLINE_BITOPS
EXPORT_SYMBOL(set_bit);
@@ -125,6 +124,7 @@ EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strcasecmp);
/* EXPORT_SYMBOL(csum_partial); already in net/netsyms.c */
EXPORT_SYMBOL(csum_partial_copy_generic);
@@ -202,6 +202,10 @@ EXPORT_SYMBOL(_read_lock);
EXPORT_SYMBOL(_read_unlock);
EXPORT_SYMBOL(_write_lock);
EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_hw_index);
+EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(synchronize_irq);
#endif
#ifndef CONFIG_MACH_SPECIFIC
@@ -234,12 +238,14 @@ EXPORT_SYMBOL(pmu_enable_irled);
#ifdef CONFIG_PMAC_BACKLIGHT
EXPORT_SYMBOL(get_backlight_level);
EXPORT_SYMBOL(set_backlight_level);
+EXPORT_SYMBOL(set_backlight_enable);
+EXPORT_SYMBOL(register_backlight_controller);
#endif /* CONFIG_PMAC_BACKLIGHT */
-#if defined(CONFIG_ALL_PPC)
EXPORT_SYMBOL_NOVERS(sys_ctrler);
#ifndef CONFIG_MACH_SPECIFIC
EXPORT_SYMBOL_NOVERS(have_of);
#endif /* CONFIG_MACH_SPECIFIC */
+#if defined(CONFIG_ALL_PPC)
EXPORT_SYMBOL(find_devices);
EXPORT_SYMBOL(find_type_devices);
EXPORT_SYMBOL(find_compatible_devices);
@@ -247,18 +253,29 @@ EXPORT_SYMBOL(find_path_device);
EXPORT_SYMBOL(find_phandle);
EXPORT_SYMBOL(device_is_compatible);
EXPORT_SYMBOL(machine_is_compatible);
-EXPORT_SYMBOL(find_pci_device_OFnode);
EXPORT_SYMBOL(find_all_nodes);
EXPORT_SYMBOL(get_property);
-EXPORT_SYMBOL(pci_io_base);
-EXPORT_SYMBOL(pci_device_loc);
+EXPORT_SYMBOL(pci_bus_io_base);
+EXPORT_SYMBOL(pci_bus_io_base_phys);
+EXPORT_SYMBOL(pci_bus_mem_base_phys);
+EXPORT_SYMBOL(pci_device_to_OF_node);
+EXPORT_SYMBOL(pci_device_from_OF_node);
+EXPORT_SYMBOL(pci_bus_to_hose);
+EXPORT_SYMBOL(pci_resource_to_bus);
+EXPORT_SYMBOL(pci_phys_to_bus);
+EXPORT_SYMBOL(pci_bus_to_phys);
+EXPORT_SYMBOL(pmac_newworld);
EXPORT_SYMBOL(feature_set);
EXPORT_SYMBOL(feature_clear);
EXPORT_SYMBOL(feature_test);
EXPORT_SYMBOL(feature_set_gmac_power);
+EXPORT_SYMBOL(feature_set_gmac_phy_reset);
EXPORT_SYMBOL(feature_set_usb_power);
EXPORT_SYMBOL(feature_set_firewire_power);
#endif /* defined(CONFIG_ALL_PPC) */
+#if defined(CONFIG_BOOTX_TEXT)
+EXPORT_SYMBOL(bootx_update_display);
+#endif
#if defined(CONFIG_SCSI) && defined(CONFIG_ALL_PPC)
EXPORT_SYMBOL(note_scsi_host);
#endif
@@ -286,6 +303,7 @@ EXPORT_SYMBOL(abs);
EXPORT_SYMBOL(screen_info);
#endif
+EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(int_control);
EXPORT_SYMBOL(timer_interrupt_intercept);
EXPORT_SYMBOL(timer_interrupt);
@@ -300,6 +318,10 @@ EXPORT_SYMBOL(console_lock);
#ifdef CONFIG_XMON
EXPORT_SYMBOL(xmon);
#endif
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(down_read_failed);
EXPORT_SYMBOL(down_write_failed);
@@ -324,6 +346,12 @@ EXPORT_SYMBOL(do_softirq);
EXPORT_SYMBOL(next_mmu_context);
EXPORT_SYMBOL(set_context);
EXPORT_SYMBOL(mmu_context_overflow);
+#if !defined(CONFIG_8xx) && !defined(CONFIG_4xx)
+extern long *intercept_table;
+EXPORT_SYMBOL(intercept_table);
+#endif
+extern long *ret_from_intercept;
+EXPORT_SYMBOL(ret_from_intercept);
#ifdef CONFIG_MOL
extern ulong mol_interface[];
diff --git a/arch/ppc/kernel/prep_nvram.c b/arch/ppc/kernel/prep_nvram.c
index c3dcdea07..3d34a853f 100644
--- a/arch/ppc/kernel/prep_nvram.c
+++ b/arch/ppc/kernel/prep_nvram.c
@@ -16,19 +16,9 @@
#include <asm/machdep.h>
#include <asm/prep_nvram.h>
-/*
- * Allow for a maximum of 32K of PReP NvRAM data
- */
-#define MAX_PREP_NVRAM 0x8000
static char nvramData[MAX_PREP_NVRAM];
static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
-#define PREP_NVRAM_AS0 0x74
-#define PREP_NVRAM_AS1 0x75
-#define PREP_NVRAM_DATA 0x77
-
-unsigned char *rs_pcNvRAM;
-
unsigned char __prep prep_nvram_read_val(int addr)
{
outb(addr, PREP_NVRAM_AS0);
@@ -44,20 +34,6 @@ void __prep prep_nvram_write_val(int addr,
outb(val, PREP_NVRAM_DATA);
}
-/*
- * Most Radstone boards have NvRAM memory mapped at offset 8M in ISA space
- */
-unsigned char __prep rs_nvram_read_val(int addr)
-{
- return rs_pcNvRAM[addr];
-}
-
-void __prep rs_nvram_write_val(int addr,
- unsigned char val)
-{
- rs_pcNvRAM[addr]=val;
-}
-
void __init init_prep_nvram(void)
{
unsigned char *nvp;
@@ -65,16 +41,6 @@ void __init init_prep_nvram(void)
int nvramSize;
/*
- * I'm making the assumption that 32k will always cover the
- * nvramsize. If this isn't the case please let me know and we can
- * map the header, then get the size from the header, then map
- * the whole size. -- Cort
- */
- if ( _prep_type == _PREP_Radstone )
- rs_pcNvRAM = (unsigned char *)ioremap(_ISA_MEM_BASE+0x00800000,
- 32<<10);
- request_region(PREP_NVRAM_AS0, 0x8, "PReP NVRAM");
- /*
* The following could fail if the NvRAM were corrupt but
* we expect the boot firmware to have checked its checksum
* before boot
diff --git a/arch/ppc/kernel/prep_pci.c b/arch/ppc/kernel/prep_pci.c
index fd14fc483..2f556d83d 100644
--- a/arch/ppc/kernel/prep_pci.c
+++ b/arch/ppc/kernel/prep_pci.c
@@ -7,11 +7,11 @@
* The motherboard routes/maps will disappear shortly. -- Cort
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/openpic.h>
#include <asm/init.h>
#include <asm/byteorder.h>
@@ -25,6 +25,7 @@
#include <asm/machdep.h>
#include "pci.h"
+#include "open_pic.h"
#define MAX_DEVNR 22
@@ -39,9 +40,6 @@ unsigned char *Motherboard_routes;
/* Used for Motorola to store system config register */
static unsigned long *ProcInfo;
-extern int chrp_get_irq(struct pt_regs *);
-extern void chrp_post_irq(struct pt_regs* regs, int);
-
/* Tables for known hardware */
/* Motorola PowerStackII - Utah */
@@ -534,128 +532,45 @@ static char Nobis_pci_IRQ_routes[] __prepdata = {
#define CFGPTR(dev) (0x80800000 | (1<<(dev>>3)) | ((dev&7)<<8) | offset)
#define DEVNO(dev) (dev>>3)
-__prep
-int
-prep_pcibios_read_config_dword (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned int *val)
-{
- unsigned long _val;
- unsigned long *ptr;
-
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- *val = 0xFFFFFFFF;
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned long *)CFGPTR(dev);
- _val = le32_to_cpu(*ptr);
- }
- *val = _val;
- return PCIBIOS_SUCCESSFUL;
-}
-
-__prep
-int
-prep_pcibios_read_config_word (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned short *val)
-{
- unsigned short _val;
- unsigned short *ptr;
-
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- *val = 0xFFFF;
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned short *)CFGPTR(dev);
- _val = le16_to_cpu(*ptr);
- }
- *val = _val;
- return PCIBIOS_SUCCESSFUL;
-}
-
-__prep
-int
-prep_pcibios_read_config_byte (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned char *val)
-{
- unsigned char _val;
- unsigned char *ptr;
-
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- *val = 0xFF;
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned char *)CFGPTR(dev);
- _val = *ptr;
- }
- *val = _val;
- return PCIBIOS_SUCCESSFUL;
-}
-
-__prep
-int
-prep_pcibios_write_config_dword (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned int val)
-{
- unsigned long _val;
- unsigned long *ptr;
-
- _val = le32_to_cpu(val);
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned long *)CFGPTR(dev);
- *ptr = _val;
- }
- return PCIBIOS_SUCCESSFUL;
+#define cfg_read(val, addr, type, op) *val = op((type)(addr))
+#define cfg_write(val, addr, type, op) op((type *)(addr), (val))
+
+#define cfg_read_bad(val, size) *val = bad_##size;
+#define cfg_write_bad(val, size)
+
+#define bad_byte 0xff
+#define bad_word 0xffff
+#define bad_dword 0xffffffffU
+
+#define PREP_PCI_OP(rw, size, type, op) \
+static int __prep \
+prep_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
+{ \
+ if ((dev->bus->number != 0) || (DEVNO(dev->devfn) > MAX_DEVNR)) \
+ { \
+ cfg_##rw##_bad(val, size) \
+ return PCIBIOS_DEVICE_NOT_FOUND; \
+ } \
+ cfg_##rw(val, CFGPTR(dev->devfn), type, op); \
+ return PCIBIOS_SUCCESSFUL; \
}
-__prep
-int
-prep_pcibios_write_config_word (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned short val)
-{
- unsigned short _val;
- unsigned short *ptr;
-
- _val = le16_to_cpu(val);
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned short *)CFGPTR(dev);
- *ptr = _val;
- }
- return PCIBIOS_SUCCESSFUL;
-}
+PREP_PCI_OP(read, byte, u8 *, in_8)
+PREP_PCI_OP(read, word, u16 *, in_le16)
+PREP_PCI_OP(read, dword, u32 *, in_le32)
+PREP_PCI_OP(write, byte, u8, out_8)
+PREP_PCI_OP(write, word, u16, out_le16)
+PREP_PCI_OP(write, dword, u32, out_le32)
-__prep
-int
-prep_pcibios_write_config_byte (unsigned char bus,
- unsigned char dev, unsigned char offset, unsigned char val)
+static struct pci_ops prep_pci_ops =
{
- unsigned char _val;
- unsigned char *ptr;
-
- _val = val;
- if ((bus != 0) || (DEVNO(dev) > MAX_DEVNR))
- {
- return PCIBIOS_DEVICE_NOT_FOUND;
- } else
- {
- ptr = (unsigned char *)CFGPTR(dev);
- *ptr = _val;
- }
- return PCIBIOS_SUCCESSFUL;
-}
+ prep_read_config_byte,
+ prep_read_config_word,
+ prep_read_config_dword,
+ prep_write_config_byte,
+ prep_write_config_word,
+ prep_write_config_dword
+};
#define MOTOROLA_CPUTYPE_REG 0x800
#define MOTOROLA_BASETYPE_REG 0x803
@@ -685,7 +600,8 @@ static u_char mvme2600_openpic_initsenses[] __initdata = {
#define MOT_HAWK_PRESENT 0x2
int prep_keybd_present = 1;
-int MotMPIC = 0;
+int MotMPIC;
+int mot_multi;
int __init raven_init(void)
{
@@ -695,18 +611,18 @@ int __init raven_init(void)
/* Check to see if the Raven chip exists. */
if ( _prep_type != _PREP_Motorola) {
- OpenPIC = NULL;
+ OpenPIC_Addr = NULL;
return 0;
}
/* Check to see if this board is a type that might have a Raven. */
if ((inb(MOTOROLA_CPUTYPE_REG) & 0xF0) != 0xE0) {
- OpenPIC = NULL;
+ OpenPIC_Addr = NULL;
return 0;
}
/* Check the first PCI device to see if it is a Raven. */
- pcibios_read_config_dword(0, 0, PCI_VENDOR_ID, &devid);
+ early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &devid);
switch (devid & 0xffff0000) {
case MPIC_RAVEN_ID:
@@ -716,33 +632,37 @@ int __init raven_init(void)
MotMPIC = MOT_HAWK_PRESENT;
break;
default:
- OpenPIC = NULL;
+ OpenPIC_Addr = NULL;
return 0;
}
/* Read the memory base register. */
- pcibios_read_config_dword(0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+ early_read_config_dword(0, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
if (pci_membase == 0) {
- OpenPIC = NULL;
+ OpenPIC_Addr = NULL;
return 0;
}
/* Map the Raven MPIC registers to virtual memory. */
- OpenPIC = (struct OpenPIC *)ioremap(pci_membase+0xC0000000, 0x22000);
+ OpenPIC_Addr = ioremap(pci_membase+0xC0000000, 0x22000);
OpenPIC_InitSenses = mvme2600_openpic_initsenses;
OpenPIC_NumInitSenses = sizeof(mvme2600_openpic_initsenses);
- ppc_md.get_irq = chrp_get_irq;
- ppc_md.post_irq = chrp_post_irq;
+ ppc_md.get_irq = openpic_get_irq;
/* If raven is present on Motorola store the system config register
* for later use.
*/
ProcInfo = (unsigned long *)ioremap(0xfef80400, 4);
+ /* Indicate to system if this is a multiprocessor board */
+ if (!(*ProcInfo & MOT_PROC2_BIT)) {
+ mot_multi = 1;
+ }
+
/* This is a hack. If this is a 2300 or 2400 mot board then there is
* no keyboard controller and we have to indicate that.
*/
@@ -898,72 +818,8 @@ unsigned long __init prep_route_pci_interrupts(void)
outb(pl_id|CAROLINA_IRQ_EDGE_MASK_HI, 0x04d1);
pl_id=inb(0x04d1);
/*printk("Hi mask now %#0x\n", pl_id);*/
- } else if ( _prep_type == _PREP_Radstone )
- {
- unsigned char ucElcrM, ucElcrS;
-
- /*
- * Set up edge/level
- */
- switch(ucSystemType)
- {
- case RS_SYS_TYPE_PPC1:
- {
- if(ucBoardRevMaj<5)
- {
- ucElcrS=ELCRS_INT15_LVL;
- }
- else
- {
- ucElcrS=ELCRS_INT9_LVL |
- ELCRS_INT11_LVL |
- ELCRS_INT14_LVL |
- ELCRS_INT15_LVL;
- }
- ucElcrM=ELCRM_INT5_LVL | ELCRM_INT7_LVL;
- break;
- }
-
- case RS_SYS_TYPE_PPC1a:
- {
- ucElcrS=ELCRS_INT9_LVL |
- ELCRS_INT11_LVL |
- ELCRS_INT14_LVL |
- ELCRS_INT15_LVL;
- ucElcrM=ELCRM_INT5_LVL;
- break;
- }
-
- case RS_SYS_TYPE_PPC2:
- case RS_SYS_TYPE_PPC2a:
- case RS_SYS_TYPE_PPC2ep:
- case RS_SYS_TYPE_PPC4:
- case RS_SYS_TYPE_PPC4a:
- default:
- {
- ucElcrS=ELCRS_INT9_LVL |
- ELCRS_INT10_LVL |
- ELCRS_INT11_LVL |
- ELCRS_INT14_LVL |
- ELCRS_INT15_LVL;
- ucElcrM=ELCRM_INT5_LVL |
- ELCRM_INT7_LVL;
- break;
- }
- }
-
- /*
- * Write edge/level selection
- */
- outb(ucElcrS, ISA8259_S_ELCR);
- outb(ucElcrM, ISA8259_M_ELCR);
-
- /*
- * Radstone boards have PCI interrupts all set up
- * so leave well alone
- */
- return 0;
- } else
+ }
+ else
{
printk("No known machine pci routing!\n");
return -1;
@@ -987,16 +843,10 @@ prep_pcibios_fixup(void)
extern unsigned char *Motherboard_routes;
unsigned char i;
- if ( _prep_type == _PREP_Radstone )
- {
- printk("Radstone boards require no PCI fixups\n");
- return;
- }
-
prep_route_pci_interrupts();
printk("Setting PCI interrupts for a \"%s\"\n", Motherboard_map_name);
- if (OpenPIC) {
+ if (OpenPIC_Addr) {
/* PCI interrupts are controlled by the OpenPIC */
pci_for_each_dev(dev) {
if (dev->bus->number == 0) {
@@ -1018,7 +868,12 @@ prep_pcibios_fixup(void)
for ( i = 0 ; i <= 5 ; i++ )
{
- if ( dev->resource[i].start > 0x10000000 )
+ /*
+ * Relocate PCI I/O resources if necessary so the
+ * standard 256MB BAT covers them.
+ */
+ if ( (pci_resource_flags(dev, i) & IORESOURCE_IO) &&
+ (dev->resource[i].start > 0x10000000) )
{
printk("Relocating PCI address %lx -> %lx\n",
dev->resource[i].start,
@@ -1029,6 +884,8 @@ prep_pcibios_fixup(void)
pci_write_config_dword(dev,
PCI_BASE_ADDRESS_0+(i*0x4),
dev->resource[i].start );
+ dev->resource[i].end =
+ (dev->resource[i].end & 0x00FFFFFF) | 0x01000000;
}
}
#if 0
@@ -1043,49 +900,50 @@ prep_pcibios_fixup(void)
}
}
-decl_config_access_method(indirect);
-
void __init
-prep_setup_pci_ptrs(void)
+prep_find_bridges(void)
{
- PPC_DEVICE *hostbridge;
-
- printk("PReP architecture\n");
- if ( _prep_type == _PREP_Radstone )
- {
- pci_config_address = (unsigned *)0x80000cf8;
- pci_config_data = (char *)0x80000cfc;
- set_config_access_method(indirect);
- }
- else
- {
- hostbridge = residual_find_device(PROCESSORDEVICE, NULL,
- BridgeController, PCIBridge, -1, 0);
- if (hostbridge &&
- hostbridge->DeviceId.Interface == PCIBridgeIndirect) {
- PnP_TAG_PACKET * pkt;
- set_config_access_method(indirect);
- pkt = PnP_find_large_vendor_packet(
+ struct pci_controller* hose;
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+ hose->pci_mem_offset = PREP_ISA_MEM_BASE;
+
+ printk("PReP architecture\n");
+ {
+#ifdef CONFIG_PREP_RESIDUAL
+ PPC_DEVICE *hostbridge;
+
+ hostbridge = residual_find_device(PROCESSORDEVICE, NULL,
+ BridgeController, PCIBridge, -1, 0);
+ if (hostbridge &&
+ hostbridge->DeviceId.Interface == PCIBridgeIndirect) {
+ PnP_TAG_PACKET * pkt;
+ pkt = PnP_find_large_vendor_packet(
res->DevicePnPHeap+hostbridge->AllocatedOffset,
3, 0);
- if(pkt)
+ if(pkt)
{
#define p pkt->L4_Pack.L4_Data.L4_PPCPack
- pci_config_address= (unsigned *)ld_le32((unsigned *) p.PPCData);
- pci_config_data= (unsigned char *)ld_le32((unsigned *) (p.PPCData+8));
- }
+ setup_indirect_pci(hose,
+ ld_le32((unsigned *) (p.PPCData)),
+ ld_le32((unsigned *) (p.PPCData+8)));
+ }
else
{
- pci_config_address= (unsigned *) 0x80000cf8;
- pci_config_data= (unsigned char *) 0x80000cfc;
- }
- }
+ setup_indirect_pci(hose, 0x80000cf8, 0x80000cfc);
+ }
+ }
else
+#endif /* CONFIG_PREP_RESIDUAL */
{
- set_config_access_method(prep);
- }
-
- }
+ hose->ops = &prep_pci_ops;
+ }
+ }
ppc_md.pcibios_fixup = prep_pcibios_fixup;
}
diff --git a/arch/ppc/kernel/prep_setup.c b/arch/ppc/kernel/prep_setup.c
index 6e30462aa..64ad519cb 100644
--- a/arch/ppc/kernel/prep_setup.c
+++ b/arch/ppc/kernel/prep_setup.c
@@ -32,7 +32,6 @@
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/pci.h>
-#include <linux/openpic.h>
#include <linux/ide.h>
#include <asm/init.h>
@@ -48,8 +47,9 @@
#include <asm/prep_nvram.h>
#include <asm/raven.h>
#include <asm/keyboard.h>
-
+#include <asm/vga.h>
#include <asm/time.h>
+
#include "local_irq.h"
#include "i8259.h"
#include "open_pic.h"
@@ -84,7 +84,7 @@ extern void pckbd_leds(unsigned char leds);
extern void pckbd_init_hw(void);
extern unsigned char pckbd_sysrq_xlate[128];
-extern void prep_setup_pci_ptrs(void);
+extern void prep_find_bridges(void);
extern char saved_command_line[256];
int _prep_type;
@@ -101,23 +101,23 @@ unsigned long empty_zero_page[1024];
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern int probingmem;
-extern unsigned long loops_per_sec;
+extern unsigned long loops_per_jiffy;
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
-#ifdef CONFIG_VGA_CONSOLE
-unsigned long vgacon_remap_base;
-#endif
int __prep
prep_get_cpuinfo(char *buffer)
{
extern char *Motherboard_map_name;
- int len, i;
-
+ int len;
+#ifdef CONFIG_PREP_RESIDUAL
+ int i;
+#endif
+
#ifdef CONFIG_SMP
#define CD(X) (cpu_data[n].X)
#else
@@ -190,7 +190,10 @@ prep_get_cpuinfo(char *buffer)
}
-no_l2:
+no_l2:
+#ifndef CONFIG_PREP_RESIDUAL
+ return len;
+#else
if ( res->ResidualLength == 0 )
return len;
@@ -205,8 +208,8 @@ no_l2:
res->Memories[i].SIMMSize);
}
len += sprintf(buffer+len,"\n");
-
return len;
+#endif
}
void __init
@@ -214,11 +217,16 @@ prep_setup_arch(void)
{
extern char cmd_line[];
unsigned char reg;
+#if 0 /* unused?? */
unsigned char ucMothMemType;
unsigned char ucEquipPres1;
+#endif
/* init to some ~sane value until calibrate_delay() runs */
- loops_per_sec = 50000000;
+ loops_per_jiffy = 50000000;
+
+ /* Lookup PCI host bridges */
+ prep_find_bridges();
/* Set up floppy in PS/2 mode */
outb(0x09, SIO_CONFIG_RA);
@@ -247,41 +255,6 @@ prep_setup_arch(void)
*(unsigned char *)(0x8000081c) |= 3;
ROOT_DEV = to_kdev_t(0x0802); /* sda2 */
break;
- case _PREP_Radstone:
- ROOT_DEV = to_kdev_t(0x0801); /* sda1 */
-
- /*
- * Determine system type
- */
- ucMothMemType=inb(0x866);
- ucEquipPres1=inb(0x80c);
-
- ucSystemType=((ucMothMemType&0x03)<<1) |
- ((ucEquipPres1&0x80)>>7);
- ucSystemType^=7;
-
- /*
- * Determine board revision for use by
- * rev. specific code
- */
- ucBoardRev=inb(0x854);
- ucBoardRevMaj=ucBoardRev>>5;
- ucBoardRevMin=ucBoardRev&0x1f;
-
- /*
- * Most Radstone boards have memory mapped NvRAM
- */
- if((ucSystemType==RS_SYS_TYPE_PPC1) && (ucBoardRevMaj<5))
- {
- ppc_md.nvram_read_val = prep_nvram_read_val;
- ppc_md.nvram_write_val = prep_nvram_write_val;
- }
- else
- {
- ppc_md.nvram_read_val = rs_nvram_read_val;
- ppc_md.nvram_write_val = rs_nvram_write_val;
- }
- break;
}
/* Read in NVRAM data */
@@ -341,12 +314,6 @@ prep_setup_arch(void)
#endif /* CONFIG_SOUND_CS4232 */
/*print_residual_device_info();*/
- request_region(0x20,0x20,"pic1");
- request_region(0xa0,0x20,"pic2");
- request_region(0x00,0x20,"dma1");
- request_region(0x40,0x20,"timer");
- request_region(0x80,0x10,"dma page reg");
- request_region(0xc0,0x20,"dma2");
raven_init();
@@ -365,6 +332,7 @@ prep_setup_arch(void)
*/
void __init prep_res_calibrate_decr(void)
{
+#ifdef CONFIG_PREP_RESIDUAL
unsigned long freq, divisor=4;
freq = res->VitalProductData.ProcessorBusHz;
@@ -372,6 +340,7 @@ void __init prep_res_calibrate_decr(void)
(freq/divisor)/1000000, (freq/divisor)%1000000);
tb_ticks_per_jiffy = freq / HZ / divisor;
tb_to_us = mulhwu_scale_factor(freq/divisor, 1000000);
+#endif
}
/*
@@ -585,15 +554,16 @@ prep_setup_residual(char *buffer)
{
int len = 0;
-
/* PREP's without residual data will give incorrect values here */
len += sprintf(len+buffer, "clock\t\t: ");
+#ifdef CONFIG_PREP_RESIDUAL
if ( res->ResidualLength )
len += sprintf(len+buffer, "%ldMHz\n",
(res->VitalProductData.ProcessorHz > 1024) ?
res->VitalProductData.ProcessorHz>>20 :
res->VitalProductData.ProcessorHz);
else
+#endif /* CONFIG_PREP_RESIDUAL */
len += sprintf(len+buffer, "???\n");
return len;
@@ -640,19 +610,11 @@ prep_init_IRQ(void)
{
int i;
- if (OpenPIC != NULL) {
- for ( i = 16 ; i < 36 ; i++ )
- irq_desc[i].handler = &open_pic;
- openpic_init(1);
- }
-
- for ( i = 0 ; i < 16 ; i++ )
+ if (OpenPIC_Addr != NULL)
+ openpic_init(1, NUM_8259_INTERRUPTS, 0, -1);
+ for ( i = 0 ; i < NUM_8259_INTERRUPTS ; i++ )
irq_desc[i].handler = &i8259_pic;
- i8259_init();
-#ifdef CONFIG_SMP
- request_irq(openpic_to_irq(OPENPIC_VEC_SPURIOUS), openpic_ipi_action,
- 0, "IPI0", 0);
-#endif /* CONFIG_SMP */
+ i8259_init();
}
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
@@ -708,19 +670,14 @@ prep_ide_request_region(ide_ioreg_t from,
unsigned int extent,
const char *name)
{
- request_region(from, extent, name);
+ request_region(from, extent, name);
}
void __prep
prep_ide_release_region(ide_ioreg_t from,
unsigned int extent)
{
- release_region(from, extent);
-}
-
-void __prep
-prep_ide_fix_driveid(struct hd_driveid *id)
-{
+ release_region(from, extent);
}
void __init
@@ -743,17 +700,52 @@ prep_ide_init_hwif_ports (hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl
}
#endif
+unsigned long *MotSave_SmpIar;
+unsigned char *MotSave_CpusState[2];
+
+void __init
+prep_init2(void)
+{
+#ifdef CONFIG_NVRAM
+ request_region(PREP_NVRAM_AS0, 0x8, "nvram");
+#endif
+ request_region(0x20,0x20,"pic1");
+ request_region(0xa0,0x20,"pic2");
+ request_region(0x00,0x20,"dma1");
+ request_region(0x40,0x20,"timer");
+ request_region(0x80,0x10,"dma page reg");
+ request_region(0xc0,0x20,"dma2");
+}
+
void __init
prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
+#ifdef CONFIG_PREP_RESIDUAL
+ RESIDUAL *old_res = (RESIDUAL *)(r3 + KERNELBASE);
+
/* make a copy of residual data */
if ( r3 )
{
memcpy((void *)res,(void *)(r3+KERNELBASE),
sizeof(RESIDUAL));
+
+ /* These need to be saved for the Motorola Prep
+ * MVME4600 and Dual MTX boards.
+ */
+ MotSave_SmpIar = &old_res->VitalProductData.SmpIar;
+ MotSave_CpusState[0] = &old_res->Cpus[0].CpuState;
+ MotSave_CpusState[1] = &old_res->Cpus[1].CpuState;
}
+#endif
+ /* Copy cmd_line parameters */
+ if ( r6)
+ {
+ *(char *)(r7 + KERNELBASE) = 0;
+ strcpy(cmd_line, (char *)(r6 + KERNELBASE));
+ }
+
isa_io_base = PREP_ISA_IO_BASE;
isa_mem_base = PREP_ISA_MEM_BASE;
pci_dram_offset = PREP_PCI_DRAM_OFFSET;
@@ -762,29 +754,19 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
DMA_MODE_WRITE = 0x48;
/* figure out what kind of prep workstation we are */
+#ifdef CONFIG_PREP_RESIDUAL
if ( res->ResidualLength != 0 )
{
if ( !strncmp(res->VitalProductData.PrintableModel,"IBM",3) )
_prep_type = _PREP_IBM;
- else if (!strncmp(res->VitalProductData.PrintableModel,
- "Radstone",8))
- {
- extern char *Motherboard_map_name;
-
- _prep_type = _PREP_Radstone;
- Motherboard_map_name=
- res->VitalProductData.PrintableModel;
- }
- else
- _prep_type = _PREP_Motorola;
+ _prep_type = _PREP_Motorola;
}
else /* assume motorola if no residual (netboot?) */
+#endif
{
_prep_type = _PREP_Motorola;
}
- prep_setup_pci_ptrs();
-
ppc_md.setup_arch = prep_setup_arch;
ppc_md.setup_residual = prep_setup_residual;
ppc_md.get_cpuinfo = prep_get_cpuinfo;
@@ -792,40 +774,14 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.init_IRQ = prep_init_IRQ;
/* this gets changed later on if we have an OpenPIC -- Cort */
ppc_md.get_irq = prep_get_irq;
- ppc_md.init = NULL;
+ ppc_md.init = prep_init2;
ppc_md.restart = prep_restart;
ppc_md.power_off = prep_power_off;
ppc_md.halt = prep_halt;
ppc_md.time_init = NULL;
- if (_prep_type == _PREP_Radstone) {
- /*
- * We require a direct restart as port 92 does not work on
- * all Radstone boards
- */
- ppc_md.restart = prep_direct_restart;
- /*
- * The RTC device used varies according to board type
- */
- if(((ucSystemType==RS_SYS_TYPE_PPC1) && (ucBoardRevMaj>=5)) ||
- (ucSystemType==RS_SYS_TYPE_PPC1a))
- {
- ppc_md.set_rtc_time = mk48t59_set_rtc_time;
- ppc_md.get_rtc_time = mk48t59_get_rtc_time;
- ppc_md.time_init = mk48t59_init;
- }
- else
- {
- ppc_md.set_rtc_time = mc146818_set_rtc_time;
- ppc_md.get_rtc_time = mc146818_get_rtc_time;
- }
- /*
- * Determine the decrementer rate from the residual data
- */
- ppc_md.calibrate_decr = prep_res_calibrate_decr;
- }
- else if (_prep_type == _PREP_IBM) {
+ if (_prep_type == _PREP_IBM) {
ppc_md.set_rtc_time = mc146818_set_rtc_time;
ppc_md.get_rtc_time = mc146818_get_rtc_time;
ppc_md.calibrate_decr = prep_calibrate_decr;
@@ -845,7 +801,7 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_ide_md.ide_check_region = prep_ide_check_region;
ppc_ide_md.ide_request_region = prep_ide_request_region;
ppc_ide_md.ide_release_region = prep_ide_release_region;
- ppc_ide_md.fix_driveid = prep_ide_fix_driveid;
+ ppc_ide_md.fix_driveid = NULL;
ppc_ide_md.ide_init_hwif = prep_ide_init_hwif_ports;
#endif
ppc_ide_md.io_base = _IO_BASE;
diff --git a/arch/ppc/kernel/proc_rtas.c b/arch/ppc/kernel/proc_rtas.c
new file mode 100644
index 000000000..2e8144204
--- /dev/null
+++ b/arch/ppc/kernel/proc_rtas.c
@@ -0,0 +1,784 @@
+/*
+ * arch/ppc/kernel/proc_rtas.c
+ * Copyright (C) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * RTAS (Runtime Abstraction Services) stuff
+ * Intention is to provide a clean user interface
+ * to use the RTAS.
+ *
+ * TODO:
+ * Split off a header file and maybe move it to a different
+ * location. Write Documentation on what the /proc/rtas/ entries
+ * actually do.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <linux/string.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h> /* for ppc_md */
+#include <asm/time.h>
+
+/* Token for Sensors */
+#define KEY_SWITCH 0x0001
+#define ENCLOSURE_SWITCH 0x0002
+#define THERMAL_SENSOR 0x0003
+#define LID_STATUS 0x0004
+#define POWER_SOURCE 0x0005
+#define BATTERY_VOLTAGE 0x0006
+#define BATTERY_REMAINING 0x0007
+#define BATTERY_PERCENTAGE 0x0008
+#define EPOW_SENSOR 0x0009
+#define BATTERY_CYCLESTATE 0x000a
+#define BATTERY_CHARGING 0x000b
+
+/* IBM specific sensors */
+#define IBM_SURVEILLANCE 0x2328 /* 9000 */
+#define IBM_FANRPM 0x2329 /* 9001 */
+#define IBM_VOLTAGE 0x232a /* 9002 */
+#define IBM_DRCONNECTOR 0x232b /* 9003 */
+#define IBM_POWERSUPPLY 0x232c /* 9004 */
+#define IBM_INTQUEUE 0x232d /* 9005 */
+
+/* Status return values */
+#define SENSOR_CRITICAL_HIGH 13
+#define SENSOR_WARNING_HIGH 12
+#define SENSOR_NORMAL 11
+#define SENSOR_WARNING_LOW 10
+#define SENSOR_CRITICAL_LOW 9
+#define SENSOR_SUCCESS 0
+#define SENSOR_HW_ERROR -1
+#define SENSOR_BUSY -2
+#define SENSOR_NOT_EXIST -3
+#define SENSOR_DR_ENTITY -9000
+
+/* Location Codes */
+#define LOC_SCSI_DEV_ADDR 'A'
+#define LOC_SCSI_DEV_LOC 'B'
+#define LOC_CPU 'C'
+#define LOC_DISKETTE 'D'
+#define LOC_ETHERNET 'E'
+#define LOC_FAN 'F'
+#define LOC_GRAPHICS 'G'
+/* reserved / not used 'H' */
+#define LOC_IO_ADAPTER 'I'
+/* reserved / not used 'J' */
+#define LOC_KEYBOARD 'K'
+#define LOC_LCD 'L'
+#define LOC_MEMORY 'M'
+#define LOC_NV_MEMORY 'N'
+#define LOC_MOUSE 'O'
+#define LOC_PLANAR 'P'
+#define LOC_OTHER_IO 'Q'
+#define LOC_PARALLEL 'R'
+#define LOC_SERIAL 'S'
+#define LOC_DEAD_RING 'T'
+#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
+#define LOC_VOLTAGE 'V'
+#define LOC_SWITCH_ADAPTER 'W'
+#define LOC_OTHER 'X'
+#define LOC_FIRMWARE 'Y'
+#define LOC_SCSI 'Z'
+
+/* Tokens for indicators */
+#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
+#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
+#define SYSTEM_POWER_STATE 0x0003
+#define WARNING_LIGHT 0x0004
+#define DISK_ACTIVITY_LIGHT 0x0005
+#define HEX_DISPLAY_UNIT 0x0006
+#define BATTERY_WARNING_TIME 0x0007
+#define CONDITION_CYCLE_REQUEST 0x0008
+#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
+#define DR_ACTION 0x2329 /* 9001 */
+#define DR_INDICATOR 0x232a /* 9002 */
+/* 9003 - 9004: Vendor specific */
+#define GLOBAL_INTERRUPT_QUEUE 0x232d /* 9005 */
+/* 9006 - 9999: Vendor specific */
+
+/* other */
+#define MAX_SENSORS 17 /* I only know of 17 sensors */
+#define MAX_LINELENGTH 256
+#define SENSOR_PREFIX "ibm,sensor-"
+#define cel_to_fahr(x) ((x*9/5)+32)
+
+
+/* Globals */
+static struct proc_dir_entry *proc_rtas;
+static struct rtas_sensors sensors;
+static struct device_node *rtas;
+static unsigned long power_on_time = 0; /* Save the time the user set */
+static char progress_led[MAX_LINELENGTH];
+
+static unsigned long rtas_tone_frequency = 1000;
+static unsigned long rtas_tone_volume = 0;
+
+/* ****************STRUCTS******************************************* */
+struct individual_sensor {
+ unsigned int token;
+ unsigned int quant;
+};
+
+struct rtas_sensors {
+ struct individual_sensor sensor[MAX_SENSORS];
+ unsigned int quant;
+};
+
+/* ****************************************************************** */
+/* Declarations */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+
+struct file_operations ppc_rtas_poweron_operations = {
+ read: ppc_rtas_poweron_read,
+ write: ppc_rtas_poweron_write
+};
+struct file_operations ppc_rtas_progress_operations = {
+ read: ppc_rtas_progress_read,
+ write: ppc_rtas_progress_write
+};
+
+struct file_operations ppc_rtas_clock_operations = {
+ read: ppc_rtas_clock_read,
+ write: ppc_rtas_clock_write
+};
+
+struct file_operations ppc_rtas_tone_freq_operations = {
+ read: ppc_rtas_tone_freq_read,
+ write: ppc_rtas_tone_freq_write
+};
+struct file_operations ppc_rtas_tone_volume_operations = {
+ read: ppc_rtas_tone_volume_read,
+ write: ppc_rtas_tone_volume_write
+};
+
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
+
+/* ****************************************************************** */
+/* MAIN */
+/* ****************************************************************** */
+void proc_rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ rtas = find_devices("rtas");
+ if ((rtas == 0) || (_machine != _MACH_chrp)) {
+ return;
+ }
+
+ proc_rtas = proc_mkdir("rtas", 0);
+ if (proc_rtas == 0)
+ return;
+
+ /* /proc/rtas entries */
+
+ entry = create_proc_entry("progress", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_progress_operations;
+
+ entry = create_proc_entry("clock", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_clock_operations;
+
+ entry = create_proc_entry("poweron", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_poweron_operations;
+
+ create_proc_read_entry("sensors", S_IRUGO, proc_rtas,
+ ppc_rtas_sensor_read, NULL);
+
+ entry = create_proc_entry("frequency", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_freq_operations;
+
+ entry = create_proc_entry("volume", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_volume_operations;
+}
+
+/* ****************************************************************** */
+/* POWER-ON-TIME */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
+ power_on_time = nowtime; /* save the time */
+
+ to_tm(nowtime, &tm);
+
+ error = call_rtas("set-time-for-power-on", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting poweron time returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ if (power_on_time == 0)
+ n = sprintf(buf, "Power on time not set\n");
+ else
+ n = sprintf(buf, "%lu\n", power_on_time);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* PROGRESS */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long hex;
+
+ strcpy(progress_led, buf); /* save the string */
+ /* Let's see if the user passed hexdigits */
+ hex = simple_strtoul(buf, NULL, 10);
+
+ ppc_md.progress ((char *)buf, hex);
+ return count;
+
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n = 0;
+ if (progress_led != NULL)
+ n = sprintf (buf, "%s\n", progress_led);
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* CLOCK */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
+
+ to_tm(nowtime, &tm);
+ error = call_rtas("set-time-of-day", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long *ret = kmalloc(4*8, GFP_KERNEL);
+ int n, error;
+
+ error = call_rtas("get-time-of-day", 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
+ printk(KERN_WARNING "error: reading the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ n = sprintf (buf, "0");
+ } else {
+ n = sprintf (buf, "%lu\n", mktime(year, mon, day, hour, min, sec));
+ }
+ kfree(ret);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* SENSOR STUFF */
+/* ****************************************************************** */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i,j,n;
+ unsigned long ret;
+ int state, error;
+ char buffer[MAX_LINELENGTH*MAX_SENSORS]; /* May not be enough */
+
+ if (count < 0)
+ return -EINVAL;
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
+
+ if (ppc_rtas_find_all_sensors() != 0) {
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
+ }
+
+ for (i=0; i<sensors.quant; i++) {
+ j = sensors.sensor[i].quant;
+ /* A sensor may have multiple instances */
+ while (j >= 0) {
+ error = call_rtas("get-sensor-state", 2, 2, &ret,
+ sensors.sensor[i].token, sensors.sensor[i].quant-j);
+ state = (int) ret;
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state, error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ return 0;
+ }
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ return n;
+}
+
+/* ****************************************************************** */
+
+int ppc_rtas_find_all_sensors (void)
+{
+ unsigned long *utmp;
+ int len, i, j;
+
+ utmp = (unsigned long *) get_property(rtas, "rtas-sensors", &len);
+ if (utmp == NULL) {
+ printk (KERN_ERR "error: could not get rtas-sensors\n");
+ return 1;
+ }
+
+ sensors.quant = len / 8; /* int + int */
+
+ for (i=0, j=0; j<sensors.quant; i+=2, j++) {
+ sensors.sensor[j].token = utmp[i];
+ sensors.sensor[j].quant = utmp[i+1];
+ }
+ return 0;
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string of what rtas returned
+ */
+char * ppc_rtas_process_error(int error)
+{
+ switch (error) {
+ case SENSOR_CRITICAL_HIGH:
+ return "(critical high)";
+ case SENSOR_WARNING_HIGH:
+ return "(warning high)";
+ case SENSOR_NORMAL:
+ return "(normal)";
+ case SENSOR_WARNING_LOW:
+ return "(warning low)";
+ case SENSOR_CRITICAL_LOW:
+ return "(critical low)";
+ case SENSOR_SUCCESS:
+ return "(read ok)";
+ case SENSOR_HW_ERROR:
+ return "(hardware error)";
+ case SENSOR_BUSY:
+ return "(busy)";
+ case SENSOR_NOT_EXIST:
+ return "(non existent)";
+ case SENSOR_DR_ENTITY:
+ return "(dr entity removed)";
+ default:
+ return "(UNKNOWN)";
+ }
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string out of what the sensor said
+ */
+
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
+{
+ /* Defined return values */
+ const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", "Maintenance" };
+ const char * enclosure_switch[] = { "Closed", "Open" };
+ const char * lid_status[] = { " ", "Open", "Closed" };
+ const char * power_source[] = { "AC\t", "Battery", "AC & Battery" };
+ const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
+ const char * epow_sensor[] = {
+ "EPOW Reset", "Cooling warning", "Power warning",
+ "System shutdown", "System halt", "EPOW main enclosure",
+ "EPOW power off" };
+ const char * battery_cyclestate[] = { "None", "In progress", "Requested" };
+ const char * battery_charging[] = { "Charging", "Discharging", "No current flow" };
+ const char * ibm_drconnector[] = { "Empty", "Present" };
+ const char * ibm_intqueue[] = { "Disabled", "Enabled" };
+
+ int have_strings = 0;
+ int temperature = 0;
+ int unknown = 0;
+ int n = 0;
+
+ /* What kind of sensor do we have here? */
+ switch (s.token) {
+ case KEY_SWITCH:
+ n += sprintf(buf+n, "Key switch:\t");
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
+ have_strings = 1;
+ break;
+ case ENCLOSURE_SWITCH:
+ n += sprintf(buf+n, "Enclosure switch:\t");
+ n += sprintf(buf+n, "%s\t", enclosure_switch[state]);
+ have_strings = 1;
+ break;
+ case THERMAL_SENSOR:
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
+ temperature = 1;
+ break;
+ case LID_STATUS:
+ n += sprintf(buf+n, "Lid status:\t");
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
+ have_strings = 1;
+ break;
+ case POWER_SOURCE:
+ n += sprintf(buf+n, "Power source:\t");
+ n += sprintf(buf+n, "%s\t", power_source[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_VOLTAGE:
+ n += sprintf(buf+n, "Battery voltage:\t");
+ break;
+ case BATTERY_REMAINING:
+ n += sprintf(buf+n, "Battery remaining:\t");
+ n += sprintf(buf+n, "%s\t", battery_remaining[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_PERCENTAGE:
+ n += sprintf(buf+n, "Battery percentage:\t");
+ break;
+ case EPOW_SENSOR:
+ n += sprintf(buf+n, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CYCLESTATE:
+ n += sprintf(buf+n, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "%s\t", battery_cyclestate[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CHARGING:
+ n += sprintf(buf+n, "Battery Charging:\t");
+ n += sprintf(buf+n, "%s\t", battery_charging[state]);
+ have_strings = 1;
+ break;
+ case IBM_SURVEILLANCE:
+ n += sprintf(buf+n, "Surveillance:\t");
+ break;
+ case IBM_FANRPM:
+ n += sprintf(buf+n, "Fan (rpm):\t");
+ break;
+ case IBM_VOLTAGE:
+ n += sprintf(buf+n, "Voltage (mv):\t");
+ break;
+ case IBM_DRCONNECTOR:
+ n += sprintf(buf+n, "DR connector:\t");
+ n += sprintf(buf+n, "%s\t", ibm_drconnector[state]);
+ have_strings = 1;
+ break;
+ case IBM_POWERSUPPLY:
+ n += sprintf(buf+n, "Powersupply:\t");
+ break;
+ case IBM_INTQUEUE:
+ n += sprintf(buf+n, "Interrupt queue:\t");
+ n += sprintf(buf+n, "%s\t", ibm_intqueue[state]);
+ have_strings = 1;
+ break;
+ default:
+ n += sprintf(buf+n, "Unknown sensor (type %d), ignoring it\n",
+ s.token);
+ unknown = 1;
+ have_strings = 1;
+ break;
+ }
+ if (have_strings == 0) {
+ if (temperature) {
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
+ } else
+ n += sprintf(buf+n, "%10d\t", state);
+ }
+ if (unknown == 0) {
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
+ }
+ return n;
+}
+
+/* ****************************************************************** */
+
+int check_location (char *c, int idx, char * buf)
+{
+ int n = 0;
+
+ switch (*(c+idx)) {
+ case LOC_PLANAR:
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
+ break;
+ case LOC_CPU:
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
+ break;
+ case LOC_FAN:
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
+ break;
+ case LOC_RACKMOUNTED:
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
+ break;
+ case LOC_VOLTAGE:
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
+ break;
+ case LOC_LCD:
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
+ break;
+ case '.':
+ n += sprintf ( buf, "- %c", *(c+idx+1));
+ default:
+ n += sprintf ( buf, "Unknown location");
+ break;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+/*
+ * Format:
+ * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
+ * the '.' may be an abbreviation
+ */
+int check_location_string (char *c, char *buf)
+{
+ int n=0,i=0;
+
+ while (c[i]) {
+ if (isalpha(c[i]) || c[i] == '.') {
+ n += check_location(c, i, buf+n);
+ }
+ else if (c[i] == '/' || c[i] == '-')
+ n += sprintf(buf+n, " at ");
+ i++;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+
+int get_location_code(struct individual_sensor s, char * buffer)
+{
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas, rstr, &llen);
+
+ n=0;
+ if (ret[0] == '\0')
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
+ else {
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ sprintf ( t, "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
+ }
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Frequency */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long freq;
+ char *dest;
+ int error;
+ freq = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone frequency\n");
+ return count;
+ }
+ if (freq < 0) freq = 0;
+ rtas_tone_frequency = freq; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_FREQUENCY, 0, freq);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_frequency);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Volume */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long volume;
+ char *dest;
+ int error;
+ volume = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
+
+ rtas_tone_volume = volume; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_VOLUME, 0, volume);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone volume returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_volume);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index a200b1c78..c4c76adec 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -222,16 +222,17 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
/*
- * If the previous thread 1) has some altivec regs it wants saved
- * (has bits in vrsave set) and 2) used altivec in the last quantum
+ * If the previous thread used altivec in the last quantum
* (thus changing altivec regs) then save them.
+ * We used to check the VRSAVE register but not all apps
+ * set it, so we don't rely on it now (and in fact we need
+ * to save & restore VSCR even if VRSAVE == 0). -- paulus
*
* On SMP we always save/restore altivec regs just to avoid the
* complexity of changing processors.
* -- Cort
*/
- if ( (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) &&
- prev->thread.vrsave )
+ if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
current_set[smp_processor_id()] = new;
@@ -251,13 +252,15 @@ void show_regs(struct pt_regs * regs)
{
int i;
- printk("NIP: %08lX XER: %08lX LR: %08lX REGS: %p TRAP: %04lx\n",
- regs->nip, regs->xer, regs->link, regs,regs->trap);
+ printk("NIP: %08lX XER: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx\n",
+ regs->nip, regs->xer, regs->link, regs->gpr[1], regs,regs->trap);
printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
regs->msr&MSR_IR ? 1 : 0,
regs->msr&MSR_DR ? 1 : 0);
+ if (regs->trap == 0x300 || regs->trap == 0x600)
+ printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
printk("TASK = %p[%d] '%s' ",
current, current->pid, current->comm);
printk("Last syscall: %ld ", current->thread.last_syscall);
@@ -285,7 +288,7 @@ void show_regs(struct pt_regs * regs)
printk("\n");
}
}
-out:
+out: ;
}
void exit_thread(void)
diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c
index 5494f2f52..f3b39c222 100644
--- a/arch/ppc/kernel/prom.c
+++ b/arch/ppc/kernel/prom.c
@@ -28,10 +28,11 @@
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/system.h>
-#include <asm/gemini.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/bitops.h>
+/* for openpic_to_irq */
+#include "open_pic.h"
#ifdef CONFIG_FB
#include <asm/linux_logo.h>
@@ -95,26 +96,28 @@ static interpret_func interpret_root_props;
#define FB_MAX 8
#endif
char *prom_display_paths[FB_MAX] __initdata = { 0, };
-unsigned int prom_num_displays = 0;
-char *of_stdout_device = 0;
+unsigned int prom_num_displays __initdata = 0;
+char *of_stdout_device __initdata = 0;
+ihandle prom_disp_node __initdata = 0;
-prom_entry prom = 0;
-ihandle prom_chosen = 0, prom_stdout = 0, prom_disp_node = 0;
+prom_entry prom __initdata = 0;
+ihandle prom_chosen __initdata = 0;
+ihandle prom_stdout __initdata = 0;
extern char *klimit;
-char *bootpath = 0;
-char *bootdevice = 0;
+char *bootpath;
+char *bootdevice;
-unsigned int rtas_data = 0; /* physical pointer */
-unsigned int rtas_entry = 0; /* physical pointer */
-unsigned int rtas_size = 0;
-unsigned int old_rtas = 0;
+unsigned int rtas_data; /* physical pointer */
+unsigned int rtas_entry; /* physical pointer */
+unsigned int rtas_size;
+unsigned int old_rtas;
/* Set for a newworld machine */
-int use_of_interrupt_tree = 0;
-int pmac_newworld = 0;
+int use_of_interrupt_tree;
+int pmac_newworld;
-static struct device_node *allnodes = 0;
+static struct device_node *allnodes;
#ifdef CONFIG_BOOTX_TEXT
@@ -134,13 +137,12 @@ static void draw_byte_32(unsigned char *bits, unsigned long *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned long *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned long *base, int rb);
-/* We want those in data, not BSS */
-static long g_loc_X = 0;
-static long g_loc_Y = 0;
-static long g_max_loc_X = 0;
-static long g_max_loc_Y = 0;
+static int g_loc_X;
+static int g_loc_Y;
+static int g_max_loc_X;
+static int g_max_loc_Y;
-unsigned long disp_BAT[2] = {0, 0};
+unsigned long disp_BAT[2] __initdata = {0, 0};
#define cmapsz (16*256)
@@ -173,10 +175,10 @@ extern unsigned long reloc_offset(void);
void phys_call_rtas(int, int, int, ...);
extern char cmd_line[512]; /* XXX */
-boot_infos_t *boot_infos = 0; /* init it so it's in data segment not bss */
+boot_infos_t *boot_infos;
#ifdef CONFIG_BOOTX_TEXT
-boot_infos_t *disp_bi = 0;
-boot_infos_t fake_bi = {0,};
+boot_infos_t *disp_bi;
+boot_infos_t fake_bi;
#endif
unsigned long dev_tree_size;
@@ -195,10 +197,6 @@ unsigned long dev_tree_size;
* OF calls should be done within prom_init(), and prom_init()
* and all routines called within it must be careful to relocate
* references as necessary.
- *
- * Note that the bss is cleared *after* prom_init runs, so we have
- * to make sure that any static or extern variables it accesses
- * are put in the data segment.
*/
#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
@@ -618,6 +616,11 @@ prom_init(int r3, int r4, prom_entry pp)
char *p, *d;
int prom_version = 0;
unsigned long phys;
+ extern char __bss_start, _end;
+
+ /* First zero the BSS -- use memset, some arches don't have
+ * caches on yet */
+ memset_io(PTRRELOC(&__bss_start),0 , &_end - &__bss_start);
/* Default */
phys = offset + KERNELBASE;
@@ -948,34 +951,6 @@ check_display(unsigned long mem)
if ((int) call_prom(RELOC("package-to-path"), 3, 1,
node, path, 255) < 0)
continue;
- prom_print(RELOC("opening display "));
- prom_print(path);
- ih = call_prom(RELOC("open"), 1, 1, path);
- if (ih == 0 || ih == (ihandle) -1) {
- prom_print(RELOC("... failed\n"));
- continue;
- }
- prom_print(RELOC("... ok\n"));
-
- if (RELOC(prom_disp_node) == 0)
- RELOC(prom_disp_node) = node;
-
- /* Setup a useable color table when the appropriate
- * method is available. Should update this to set-colors */
- for (i = 0; i < 32; i++)
- if (prom_set_color(ih, i, RELOC(default_colors)[i*3],
- RELOC(default_colors)[i*3+1],
- RELOC(default_colors)[i*3+2]) != 0)
- break;
-
-#ifdef CONFIG_FB
- for (i = 0; i < LINUX_LOGO_COLORS; i++)
- if (prom_set_color(ih, i + 32,
- RELOC(linux_logo_red)[i],
- RELOC(linux_logo_green)[i],
- RELOC(linux_logo_blue)[i]) != 0)
- break;
-#endif /* CONFIG_FB */
/*
* If this display is the device that OF is using for stdout,
@@ -990,9 +965,44 @@ check_display(unsigned long mem)
= RELOC(prom_display_paths[i-1]);
}
RELOC(prom_display_paths[i]) = PTRUNRELOC(path);
+ if (i == 0)
+ RELOC(prom_disp_node) = node;
if (RELOC(prom_num_displays) >= FB_MAX)
break;
}
+
+ /*
+ * Open the first display and set its colormap.
+ */
+ if (RELOC(prom_num_displays) > 0) {
+ path = PTRRELOC(RELOC(prom_display_paths[0]));
+ prom_print(RELOC("opening display "));
+ prom_print(path);
+ ih = call_prom(RELOC("open"), 1, 1, path);
+ if (ih == 0 || ih == (ihandle) -1) {
+ prom_print(RELOC("... failed\n"));
+ } else {
+ prom_print(RELOC("... ok\n"));
+
+ /* Setup a usable color table when the appropriate
+ * method is available. Should update this to set-colors */
+ for (i = 0; i < 32; i++)
+ if (prom_set_color(ih, i, RELOC(default_colors)[i*3],
+ RELOC(default_colors)[i*3+1],
+ RELOC(default_colors)[i*3+2]) != 0)
+ break;
+
+#ifdef CONFIG_FB
+ for (i = 0; i < LINUX_LOGO_COLORS; i++)
+ if (prom_set_color(ih, i + 32,
+ RELOC(linux_logo_red)[i],
+ RELOC(linux_logo_green)[i],
+ RELOC(linux_logo_blue)[i]) != 0)
+ break;
+#endif /* CONFIG_FB */
+ }
+ }
+
return ALIGN(mem);
}
@@ -1277,7 +1287,7 @@ finish_node(struct device_node *np, unsigned long mem_start,
if (!strcmp(np->name, "display"))
np->name = get_property(np, "compatible", 0);
- if (!strcmp(np->name, "device-tree"))
+ if (np->parent == NULL)
ifunc = interpret_root_props;
else if (np->type == 0)
ifunc = NULL;
@@ -1370,7 +1380,7 @@ finish_node_interrupts(struct device_node *np, unsigned long mem_start)
np->intrs[i].line = *interrupts++;
if (cvt_irq)
np->intrs[i].line = openpic_to_irq(np->intrs[i].line);
- np->intrs[i].sense = 0;
+ np->intrs[i].sense = 1;
if (isize > 1)
np->intrs[i].sense = *interrupts++;
for (j=2; j<isize; j++)
@@ -1540,7 +1550,7 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start,
for (i = 0; (ml -= cell_size) >= 0; ++i) {
if (imp->addr.a_hi == devfn) {
np->intrs[np->n_intrs].line = imp->intr;
- np->intrs[np->n_intrs].sense = 0; /* FIXME */
+ np->intrs[np->n_intrs].sense = 1; /* FIXME */
++np->n_intrs;
}
imp = (struct pci_intr_map *)(((unsigned int)imp)
@@ -1561,7 +1571,7 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start,
mem_start += np->n_intrs * sizeof(struct interrupt_info);
for (i = 0; i < np->n_intrs; ++i) {
np->intrs[i].line = *ip++;
- np->intrs[i].sense = 0;
+ np->intrs[i].sense = 1;
}
}
@@ -1614,7 +1624,7 @@ interpret_dbdma_props(struct device_node *np, unsigned long mem_start,
mem_start += np->n_intrs * sizeof(struct interrupt_info);
for (i = 0; i < np->n_intrs; ++i) {
np->intrs[i].line = *ip++;
- np->intrs[i].sense = 0;
+ np->intrs[i].sense = 1;
}
}
@@ -1675,7 +1685,7 @@ interpret_macio_props(struct device_node *np, unsigned long mem_start,
if (keylargo)
np->intrs[i].sense = *ip++;
else
- np->intrs[i].sense = 0;
+ np->intrs[i].sense = 1;
}
} else {
/* CHRP machines */
@@ -1771,7 +1781,7 @@ interpret_root_props(struct device_node *np, unsigned long mem_start,
mem_start += np->n_intrs * sizeof(struct interrupt_info);
for (i = 0; i < np->n_intrs; ++i) {
np->intrs[i].line = *ip++;
- np->intrs[i].sense = 0;
+ np->intrs[i].sense = 1;
}
}
@@ -1779,6 +1789,30 @@ interpret_root_props(struct device_node *np, unsigned long mem_start,
}
/*
+ * Work out the sense (active-low level / active-high edge)
+ * of each interrupt from the device tree.
+ */
+void __init
+prom_get_irq_senses(unsigned char *senses, int off, int max)
+{
+ struct device_node *np;
+ int i, j;
+
+ /* default to level-triggered */
+ memset(senses, 1, max - off);
+ if (!use_of_interrupt_tree)
+ return;
+
+ for (np = allnodes; np != 0; np = np->allnext) {
+ for (j = 0; j < np->n_intrs; j++) {
+ i = np->intrs[j].line;
+ if (i >= off && i < max)
+ senses[i-off] = np->intrs[j].sense;
+ }
+ }
+}
+
+/*
* Construct and return a list of the device_nodes with a given name.
*/
__openfirmware
@@ -1818,39 +1852,6 @@ find_type_devices(const char *type)
return head;
}
-/* Finds a device node given its PCI bus number, device number
- * and function number
- */
-__openfirmware
-struct device_node *
-find_pci_device_OFnode(unsigned char bus, unsigned char dev_fn)
-{
- struct device_node* np;
- unsigned int *reg;
- int l;
-
- for (np = allnodes; np != 0; np = np->allnext) {
- int in_macio = 0;
- struct device_node* parent = np->parent;
- while(parent) {
- char *pname = (char *)get_property(parent, "name", &l);
- if (pname && strcmp(pname, "mac-io") == 0) {
- in_macio = 1;
- break;
- }
- parent = parent->parent;
- }
- if (in_macio)
- continue;
- reg = (unsigned int *) get_property(np, "reg", &l);
- if (reg == 0 || l < sizeof(struct reg_property))
- continue;
- if (((reg[0] >> 8) & 0xff) == dev_fn && ((reg[0] >> 16) & 0xff) == bus)
- break;
- }
- return np;
-}
-
/*
* Returns all nodes linked together
*/
@@ -1983,6 +1984,21 @@ get_property(struct device_node *np, const char *name, int *lenp)
return 0;
}
+/*
+ * Add a property to a node
+ */
+__openfirmware
+void
+prom_add_property(struct device_node* np, struct property* prop)
+{
+ struct property **next = &np->properties;
+
+ prop->next = NULL;
+ while (*next)
+ next = &(*next)->next;
+ *next = prop;
+}
+
#if 0
__openfirmware
void
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 6bafa57c1..58fb87e9c 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/blk.h>
#include <linux/ide.h>
+#include <linux/bootmem.h>
#include <asm/init.h>
#include <asm/residual.h>
@@ -71,12 +72,6 @@ extern void apus_init(unsigned long r3,
unsigned long r6,
unsigned long r7);
-extern void gemini_init(unsigned long r3,
- unsigned long r4,
- unsigned long r5,
- unsigned long r6,
- unsigned long r7);
-
#ifdef CONFIG_XMON
extern void xmon_map_scc(void);
#endif
@@ -106,6 +101,10 @@ int have_of = 0;
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */
+#ifdef CONFIG_VGA_CONSOLE
+unsigned long vgacon_remap_base;
+#endif
+
struct machdep_calls ppc_md;
/*
@@ -377,9 +376,9 @@ int get_cpuinfo(char *buffer)
len += sprintf(len+buffer, "revision\t: %hd.%hd\n", maj, min);
len += sprintf(buffer+len, "bogomips\t: %lu.%02lu\n",
- (CD(loops_per_sec)+2500)/500000,
- (CD(loops_per_sec)+2500)/5000 % 100);
- bogosum += CD(loops_per_sec);
+ (CD(loops_per_jiffy)+2500)/(500000/HZ),
+ (CD(loops_per_jiffy)+2500)/(5000/HZ) % 100);
+ bogosum += CD(loops_per_jiffy);
}
#ifdef CONFIG_SMP
@@ -549,11 +548,6 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
apus_init(r3, r4, r5, r6, r7);
break;
#endif
-#ifdef CONFIG_GEMINI
- case _MACH_gemini:
- gemini_init(r3, r4, r5, r6, r7);
- break;
-#endif
default:
printk("Unknown machine type in identify_machine!\n");
}
@@ -673,13 +667,14 @@ __setup("l2cr=", ppc_setup_l2cr);
void __init ppc_init(void)
{
/* clear the progress line */
- if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
+ if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
if (ppc_md.init != NULL) {
ppc_md.init();
}
}
+/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
extern int panic_timeout;
@@ -688,7 +683,7 @@ void __init setup_arch(char **cmdline_p)
extern void do_init_bootmem(void);
/* so udelay does something sensible, assume <= 1000 bogomips */
- loops_per_sec = 500000000;
+ loops_per_jiffy = 500000000 / HZ;
#ifdef CONFIG_ALL_PPC
feature_init();
@@ -743,10 +738,34 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
+#ifdef CONFIG_PCI
+ /* We create the "pci-OF-bus-map" property now so it appear in the
+ * /proc device tree
+ */
+ if (have_of) {
+ struct property* of_prop;
+
+ of_prop = (struct property*)alloc_bootmem(sizeof(struct property) + 256);
+ if (of_prop && find_path_device("/")) {
+ memset(of_prop, -1, sizeof(struct property) + 256);
+ of_prop->name = "pci-OF-bus-map";
+ of_prop->length = 256;
+ of_prop->value = (unsigned char *)&of_prop[1];
+ prom_add_property(find_path_device("/"), of_prop);
+ }
+ }
+#endif /* CONFIG_PCI */
+
paging_init();
sort_exception_table();
}
+/* Convert the shorts/longs in hd_driveid from little to big endian;
+ * chars are endian independant, of course, but strings need to be flipped.
+ * (Despite what it says in drivers/block/ide.h, they come up as little
+ * endian...)
+ *
+ * Changes to linux/hdreg.h may require changes here. */
void ppc_generic_ide_fix_driveid(struct hd_driveid *id)
{
int i;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
index dd3d1ae1b..fe16fae25 100644
--- a/arch/ppc/kernel/signal.c
+++ b/arch/ppc/kernel/signal.c
@@ -53,8 +53,6 @@
#define MSR_USERCHANGE (MSR_FE0 | MSR_FE1)
int do_signal(sigset_t *oldset, struct pt_regs *regs);
-extern int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index fb7f38444..7edf7209d 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -1,6 +1,4 @@
/*
- * $Id: smp.c,v 1.68 1999/09/17 19:38:05 cort Exp $
- *
* Smp support for ppc.
*
* Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
@@ -8,8 +6,11 @@
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
- * Support for PReP (Motorola MTX/MVME) SMP by Troy Benjegerdes
- * (troy@microux.com, hozer@drgw.net)
+ * Support for PReP (Motorola MTX/MVME) and Macintosh G4 SMP
+ * by Troy Benjegerdes (hozer@drgw.net)
+ *
+ * Support for DayStar quad CPU cards
+ * Copyright (C) XLR8, Inc. 1994-2000
*/
#include <linux/config.h>
@@ -23,7 +24,6 @@
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/init.h>
-#include <linux/openpic.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
@@ -37,47 +37,97 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
-#include <asm/gemini.h>
-
+#include <asm/residual.h>
+#include <asm/feature.h>
#include <asm/time.h>
+
#include "open_pic.h"
int smp_threads_ready;
volatile int smp_commenced;
int smp_num_cpus = 1;
+int smp_tb_synchronized;
struct cpuinfo_PPC cpu_data[NR_CPUS];
struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
-volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
-volatile unsigned long ipi_count;
+atomic_t ipi_recv;
+atomic_t ipi_sent;
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time;
+static int max_cpus __initdata = NR_CPUS;
-/* this has to go in the data section because it is accessed from prom_init */
int smp_hw_index[NR_CPUS];
/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS];
+#define TB_SYNC_PASSES 4
+volatile unsigned long __initdata tb_sync_flag = 0;
+volatile unsigned long __initdata tb_offset = 0;
+
int start_secondary(void *);
extern int cpu_idle(void *unused);
-u_int openpic_read(volatile u_int *addr);
void smp_call_function_interrupt(void);
void smp_message_pass(int target, int msg, unsigned long data, int wait);
+extern void __secondary_start_psurge(void);
+extern void __secondary_start_psurge2(void); /* Temporary horrible hack */
+extern void __secondary_start_psurge3(void); /* Temporary horrible hack */
+
+/* Addresses for powersurge registers */
+#define HAMMERHEAD_BASE 0xf8000000
+#define HHEAD_CONFIG 0x90
+#define HHEAD_SEC_INTR 0xc0
+
/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
-#define PSURGE_PRI_INTR 0xf3019000
-/* register for interrupting the secondary processor on the powersurge */
-#define PSURGE_SEC_INTR 0xf80000c0
+#define PSURGE_PRI_INTR 0xf3019000
+
/* register for storing the start address for the secondary processor */
-#define PSURGE_START 0xf2800000
+/* N.B. this is the PCI config space address register for the 1st bridge */
+#define PSURGE_START 0xf2800000
+
+/* Daystar/XLR8 4-CPU card */
+#define PSURGE_QUAD_REG_ADDR 0xf8800000
+
+#define PSURGE_QUAD_IRQ_SET 0
+#define PSURGE_QUAD_IRQ_CLR 1
+#define PSURGE_QUAD_IRQ_PRIMARY 2
+#define PSURGE_QUAD_CKSTOP_CTL 3
+#define PSURGE_QUAD_PRIMARY_ARB 4
+#define PSURGE_QUAD_BOARD_ID 6
+#define PSURGE_QUAD_WHICH_CPU 7
+#define PSURGE_QUAD_CKSTOP_RDBK 8
+#define PSURGE_QUAD_RESET_CTL 11
+
+#define PSURGE_QUAD_OUT(r, v) (out_8((u8 *)(quad_base+((r)<<2)+1), (v)))
+#define PSURGE_QUAD_IN(r) (in_8((u8 *)(quad_base+((r)<<2)+1)) & 0x0f)
+#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
+#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
+
/* virtual addresses for the above */
-volatile u32 *psurge_pri_intr;
-volatile u32 *psurge_sec_intr;
-volatile u32 *psurge_start;
+static volatile u8 *hhead_base;
+static volatile u32 *quad_base;
+static volatile u32 *psurge_pri_intr;
+static volatile u8 *psurge_sec_intr;
+static volatile u32 *psurge_start;
+
+/* what sort of powersurge board we have */
+static int psurge_type;
+
+/* values for psurge_type */
+#define PSURGE_DUAL 0
+#define PSURGE_QUAD_OKEE 1
+#define PSURGE_QUAD_COTTON 2
+#define PSURGE_QUAD_ICEGRASS 3
-/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. */
+/* l2 cache stuff for dual G4 macs */
+extern void core99_init_l2(void);
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
#define PPC_MSG_INVALIDATE_TLB 2
@@ -85,10 +135,577 @@ volatile u32 *psurge_start;
static inline void set_tb(unsigned int upper, unsigned int lower)
{
+ mtspr(SPRN_TBWL, 0);
mtspr(SPRN_TBWU, upper);
mtspr(SPRN_TBWL, lower);
}
+/*
+ * Set and clear IPIs for powersurge.
+ */
+static inline void psurge_set_ipi(int cpu)
+{
+ if (cpu == 0)
+ in_be32(psurge_pri_intr);
+ else if (psurge_type == PSURGE_DUAL)
+ out_8(psurge_sec_intr, 0);
+ else
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
+}
+
+static inline void psurge_clr_ipi(int cpu)
+{
+ if (cpu > 0) {
+ if (psurge_type == PSURGE_DUAL)
+ out_8(psurge_sec_intr, ~0);
+ else
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
+ }
+}
+
+/*
+ * On powersurge (old SMP powermac architecture) we don't have
+ * separate IPIs for separate messages like openpic does. Instead
+ * we have a bitmap for each processor, where a 1 bit means that
+ * the corresponding message is pending for that processor.
+ * Ideally each cpu's entry would be in a different cache line.
+ * -- paulus.
+ */
+static unsigned long psurge_smp_message[NR_CPUS];
+
+void psurge_smp_message_recv(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ int msg;
+
+ /* clear interrupt */
+ psurge_clr_ipi(cpu);
+
+ if (smp_num_cpus < 2)
+ return;
+
+ /* make sure there is a message there */
+ for (msg = 0; msg < 4; msg++)
+ if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
+ smp_message_recv(msg, regs);
+}
+
+void
+psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+{
+ psurge_smp_message_recv(regs);
+}
+
+static void
+smp_psurge_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ int i;
+
+ if (smp_num_cpus < 2)
+ return;
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (target == MSG_ALL
+ || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
+ || target == i) {
+ set_bit(msg, &psurge_smp_message[i]);
+ psurge_set_ipi(i);
+ }
+ }
+}
+
+/*
+ * Determine a quad card presence. We read the board ID register, we
+ * for the data bus to change to something else, and we read it again.
+ * It it's stable, then the register probably exist (ugh !)
+ */
+static int __init psurge_quad_probe(void)
+{
+ int type;
+ unsigned int i;
+
+ type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
+ if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
+ || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+ return PSURGE_DUAL;
+
+ /* looks OK, try a slightly more rigorous test */
+ /* bogus is not necessarily cacheline-aligned,
+ though I don't suppose that really matters. -- paulus */
+ for (i = 0; i < 100; i++) {
+ volatile u32 bogus[8];
+ bogus[(0+i)%8] = 0x00000000;
+ bogus[(1+i)%8] = 0x55555555;
+ bogus[(2+i)%8] = 0xFFFFFFFF;
+ bogus[(3+i)%8] = 0xAAAAAAAA;
+ bogus[(4+i)%8] = 0x33333333;
+ bogus[(5+i)%8] = 0xCCCCCCCC;
+ bogus[(6+i)%8] = 0xCCCCCCCC;
+ bogus[(7+i)%8] = 0x33333333;
+ wmb();
+ asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
+ mb();
+ if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+ return PSURGE_DUAL;
+ }
+ return type;
+}
+
+static void __init psurge_quad_init(void)
+{
+ int procbits;
+
+ if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
+ procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
+ if (psurge_type == PSURGE_QUAD_ICEGRASS)
+ PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+ else
+ PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
+ mdelay(33);
+ out_8(psurge_sec_intr, ~0);
+ PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
+ PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+ if (psurge_type != PSURGE_QUAD_ICEGRASS)
+ PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
+ PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
+ mdelay(33);
+ PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
+ mdelay(33);
+ PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
+ mdelay(33);
+}
+
+static int __init smp_psurge_probe(void)
+{
+ int i, ncpus;
+
+ /* We don't do SMP on the PPC601 -- paulus */
+ if ((_get_PVR() >> 16) == 1)
+ return 1;
+
+ /*
+ * The powersurge cpu board can be used in the generation
+ * of powermacs that have a socket for an upgradeable cpu card,
+ * including the 7500, 8500, 9500, 9600.
+ * The device tree doesn't tell you if you have 2 cpus because
+ * OF doesn't know anything about the 2nd processor.
+ * Instead we look for magic bits in magic registers,
+ * in the hammerhead memory controller in the case of the
+ * dual-cpu powersurge board. -- paulus.
+ */
+ if (find_devices("hammerhead") == NULL)
+ return 1;
+
+ hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
+ quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
+ psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
+
+ psurge_type = psurge_quad_probe();
+ if (psurge_type != PSURGE_DUAL) {
+ psurge_quad_init();
+ /* I believe we could "count" CPUs by counting 1 bits
+ * in procbits on a quad board. For now, we assume 4,
+ * non-present CPUs will just be seen as "stuck".
+ * (hope they are the higher-numbered ones -- paulus)
+ */
+ ncpus = 4;
+ } else {
+ iounmap((void *) quad_base);
+ if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
+ /* not a dual-cpu card */
+ iounmap((void *) hhead_base);
+ return 1;
+ }
+ ncpus = 2;
+ }
+
+ psurge_start = ioremap(PSURGE_START, 4);
+ psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
+
+ /* this is not actually strictly necessary -- paulus. */
+ for (i = 1; i < ncpus; ++i)
+ smp_hw_index[i] = i;
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
+
+ return ncpus;
+}
+
+static void __init smp_psurge_kick_cpu(int nr)
+{
+ void (*start)(void) = __secondary_start_psurge;
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+
+ /* setup entry point of secondary processor */
+ switch (nr) {
+ case 2:
+ start = __secondary_start_psurge2;
+ break;
+ case 3:
+ start = __secondary_start_psurge3;
+ break;
+ }
+
+ out_be32(psurge_start, __pa(start));
+ mb();
+
+ psurge_set_ipi(nr);
+ udelay(10);
+ psurge_clr_ipi(nr);
+
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+}
+
+/*
+ * With the dual-cpu powersurge board, the decrementers and timebases
+ * of both cpus are frozen after the secondary cpu is started up,
+ * until we give the secondary cpu another interrupt. This routine
+ * uses this to get the timebases synchronized.
+ * -- paulus.
+ */
+static void __init psurge_dual_sync_tb(int cpu_nr)
+{
+ static volatile int sec_tb_reset = 0;
+ int t;
+
+ set_dec(tb_ticks_per_jiffy);
+ set_tb(0, 0);
+ last_jiffy_stamp(cpu_nr) = 0;
+
+ if (cpu_nr > 0) {
+ mb();
+ sec_tb_reset = 1;
+ return;
+ }
+
+ /* wait for the secondary to have reset its TB before proceeding */
+ for (t = 10000000; t > 0 && !sec_tb_reset; --t)
+ ;
+
+ /* now interrupt the secondary, starting both TBs */
+ psurge_set_ipi(1);
+
+ smp_tb_synchronized = 1;
+}
+
+static void
+smp_psurge_setup_cpu(int cpu_nr)
+{
+
+ if (cpu_nr == 0) {
+ if (smp_num_cpus < 2)
+ return;
+ /* reset the entry point so if we get another intr we won't
+ * try to startup again */
+ out_be32(psurge_start, 0x100);
+ if (request_irq(30, psurge_primary_intr, 0, "primary IPI", 0))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
+ }
+
+ if (psurge_type == PSURGE_DUAL)
+ psurge_dual_sync_tb(cpu_nr);
+}
+
+
+static void
+smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ /* make sure we're sending something that translates to an IPI */
+ if ( msg > 0x3 ){
+ printk("SMP %d: smp_message_pass: unknown msg %d\n",
+ smp_processor_id(), msg);
+ return;
+ }
+ switch ( target )
+ {
+ case MSG_ALL:
+ openpic_cause_IPI(msg, 0xffffffff);
+ break;
+ case MSG_ALL_BUT_SELF:
+ openpic_cause_IPI(msg,
+ 0xffffffff & ~(1 << smp_hw_index[smp_processor_id()]));
+
+ break;
+ default:
+ openpic_cause_IPI(msg, smp_hw_index[1<<target]);
+ break;
+ }
+}
+
+static int
+smp_core99_probe(void)
+{
+ struct device_node *cpus;
+ int *pp;
+ int i, ncpus = 1;
+
+ if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
+#if 0 /* Paulus method.. doesn't seem to work on earlier dual G4's??*/
+ cpus = find_devices("cpus");
+ if (cpus != 0) {
+ pp = (int *) get_property(cpus, "#cpus", NULL);
+ if (pp != NULL)
+ ncpus = *pp;
+ }
+#else /* My original method -- Troy <hozer@drgw.net> */
+
+ cpus = find_type_devices("cpu");
+ if (cpus){
+ for ( ncpus = 1; cpus->next; cpus = cpus->next ){
+ ncpus++;
+ }
+ }
+#endif
+ printk("smp_core99_probe: OF reports %d cpus\n", ncpus);
+ if (ncpus > 1) {
+ openpic_request_IPIs();
+ for (i = 1; i < ncpus; ++i)
+ smp_hw_index[i] = i;
+ }
+
+ return ncpus;
+}
+
+static void
+smp_core99_kick_cpu(int nr)
+{
+ unsigned long save_int;
+ unsigned long flags;
+ volatile unsigned long *vector
+ = ((volatile unsigned long *)(KERNELBASE+0x500));
+
+ if (nr != 1)
+ return;
+ if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+ local_irq_save(flags);
+ local_irq_disable();
+
+ /* Save EE vector */
+ save_int = *vector;
+
+ /* Setup fake EE vector that does
+ * b __secondary_start_psurge - KERNELBASE
+ */
+ *vector = 0x48000002 +
+ ((unsigned long)__secondary_start_psurge - KERNELBASE);
+
+ /* flush data cache and inval instruction cache */
+ flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+ /* Put some life in our friend */
+ feature_core99_kick_cpu1();
+
+ /* FIXME: We wait a bit for the CPU to take the exception, I should
+ * instead wait for the entry code to set something for me. Well,
+ * ideally, all that crap will be done in prom.c and the CPU left
+ * in a RAM-based wait loop like CHRP.
+ */
+ mdelay(1);
+
+ /* Restore our exception vector */
+ *vector = save_int;
+ flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+ local_irq_restore(flags);
+ if (ppc_md.progress) ppc_md.progress("smp_core99_probe done", 0x347);
+}
+
+static void
+smp_core99_setup_cpu(int cpu_nr)
+{
+ /* Setup openpic */
+ do_openpic_setup_cpu();
+
+ /* Setup L2 */
+ if (cpu_nr != 0)
+ core99_init_l2();
+ else
+ if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+}
+
+static int
+smp_chrp_probe(void)
+{
+ extern unsigned long smp_chrp_cpu_nr;
+
+ if (smp_chrp_cpu_nr > 1)
+ openpic_request_IPIs();
+
+ return smp_chrp_cpu_nr;
+}
+
+static void
+smp_chrp_kick_cpu(int nr)
+{
+ *(unsigned long *)KERNELBASE = nr;
+ asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
+}
+
+static void
+smp_chrp_setup_cpu(int cpu_nr)
+{
+ static atomic_t ready = ATOMIC_INIT(1);
+ static volatile int frozen = 0;
+
+ if (cpu_nr == 0) {
+ /* wait for all the others */
+ while (atomic_read(&ready) < smp_num_cpus)
+ barrier();
+ atomic_set(&ready, 1);
+ /* freeze the timebase */
+ call_rtas("freeze-time-base", 0, 1, NULL);
+ mb();
+ frozen = 1;
+ /* XXX assumes this is not a 601 */
+ set_tb(0, 0);
+ last_jiffy_stamp(0) = 0;
+ while (atomic_read(&ready) < smp_num_cpus)
+ barrier();
+ /* thaw the timebase again */
+ call_rtas("thaw-time-base", 0, 1, NULL);
+ mb();
+ frozen = 0;
+ smp_tb_synchronized = 1;
+ } else {
+ atomic_inc(&ready);
+ while (!frozen)
+ barrier();
+ set_tb(0, 0);
+ last_jiffy_stamp(0) = 0;
+ mb();
+ atomic_inc(&ready);
+ while (frozen)
+ barrier();
+ }
+
+ if (OpenPIC_Addr)
+ do_openpic_setup_cpu();
+}
+
+#ifdef CONFIG_POWER4
+static void
+smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ /* for now, only do reschedule messages
+ since we only have one IPI */
+ if (msg != PPC_MSG_RESCHEDULE)
+ return;
+ for (i = 0; i < smp_num_cpus; ++i) {
+ if (target == MSG_ALL || target == i
+ || (target == MSG_ALL_BUT_SELF
+ && i != smp_processor_id()))
+ xics_cause_IPI(i);
+ }
+}
+
+static int
+smp_xics_probe(void)
+{
+ return smp_chrp_cpu_nr;
+}
+
+static void
+smp_xics_setup_cpu(int cpu_nr)
+{
+ if (cpu_nr > 0)
+ xics_setup_cpu();
+}
+#endif /* CONFIG_POWER4 */
+
+static int
+smp_prep_probe(void)
+{
+ extern int mot_multi;
+
+ if (mot_multi) {
+ openpic_request_IPIs();
+ smp_hw_index[1] = 1;
+ return 2;
+ }
+
+ return 1;
+}
+
+static void
+smp_prep_kick_cpu(int nr)
+{
+ extern unsigned long *MotSave_SmpIar;
+ extern unsigned char *MotSave_CpusState[2];
+
+ *MotSave_SmpIar = (unsigned long)__secondary_start_psurge - KERNELBASE;
+ *MotSave_CpusState[1] = CPU_GOOD;
+ printk("CPU1 reset, waiting\n");
+}
+
+static void
+smp_prep_setup_cpu(int cpu_nr)
+{
+ if (OpenPIC_Addr)
+ do_openpic_setup_cpu();
+}
+
+static struct smp_ops_t {
+ void (*message_pass)(int target, int msg, unsigned long data, int wait);
+ int (*probe)(void);
+ void (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+
+} *smp_ops;
+
+#define smp_message_pass(t,m,d,w) \
+ do { if (smp_ops) \
+ atomic_inc(&ipi_sent); \
+ smp_ops->message_pass((t),(m),(d),(w)); \
+ } while(0)
+
+
+/* PowerSurge-style Macs */
+static struct smp_ops_t psurge_smp_ops = {
+ smp_psurge_message_pass,
+ smp_psurge_probe,
+ smp_psurge_kick_cpu,
+ smp_psurge_setup_cpu,
+};
+
+/* Core99 Macs (dual G4s) */
+static struct smp_ops_t core99_smp_ops = {
+ smp_openpic_message_pass,
+ smp_core99_probe,
+ smp_core99_kick_cpu,
+ smp_core99_setup_cpu,
+};
+
+/* CHRP with openpic */
+static struct smp_ops_t chrp_smp_ops = {
+ smp_openpic_message_pass,
+ smp_chrp_probe,
+ smp_chrp_kick_cpu,
+ smp_chrp_setup_cpu,
+};
+
+#ifdef CONFIG_POWER4
+/* CHRP with new XICS interrupt controller */
+static struct smp_ops_t xics_smp_ops = {
+ smp_xics_message_pass,
+ smp_xics_probe,
+ smp_chrp_kick_cpu,
+ smp_xics_setup_cpu,
+};
+#endif /* CONFIG_POWER4 */
+
+/* PReP (MTX) */
+static struct smp_ops_t prep_smp_ops = {
+ smp_openpic_message_pass,
+ smp_prep_probe,
+ smp_prep_kick_cpu,
+ smp_prep_setup_cpu,
+};
+
+/*
+ * Common functions
+ */
void smp_local_timer_interrupt(struct pt_regs * regs)
{
int cpu = smp_processor_id();
@@ -101,7 +718,7 @@ void smp_local_timer_interrupt(struct pt_regs * regs)
void smp_message_recv(int msg, struct pt_regs *regs)
{
- ipi_count++;
+ atomic_inc(&ipi_recv);
switch( msg ) {
case PPC_MSG_CALL_FUNCTION:
@@ -126,47 +743,6 @@ void smp_message_recv(int msg, struct pt_regs *regs)
}
/*
- * As it is now, if we're sending two message at the same time
- * we have race conditions on Pmac. The PowerSurge doesn't easily
- * allow us to send IPI messages so we put the messages in
- * smp_message[].
- *
- * This is because don't have several IPI's on the PowerSurge even though
- * we do on the chrp. It would be nice to use actual IPI's such as with
- * openpic rather than this.
- * -- Cort
- */
-int pmac_smp_message[NR_CPUS];
-void pmac_smp_message_recv(struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
- int msg;
-
- /* clear interrupt */
- if (cpu == 1)
- out_be32(psurge_sec_intr, ~0);
-
- if (smp_num_cpus < 2)
- return;
-
- /* make sure there is a message there */
- msg = pmac_smp_message[cpu];
- if (msg == 0)
- return;
-
- /* reset message */
- pmac_smp_message[cpu] = 0;
-
- smp_message_recv(msg - 1, regs);
-}
-
-void
-pmac_primary_intr(int irq, void *d, struct pt_regs *regs)
-{
- pmac_smp_message_recv(regs);
-}
-
-/*
* 750's don't broadcast tlb invalidates so
* we have to emulate that behavior.
* -- Cort
@@ -220,7 +796,7 @@ void smp_send_stop(void)
*/
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
-static volatile struct call_data_struct {
+static struct call_data_struct {
void (*func) (void *info);
void *info;
atomic_t started;
@@ -317,87 +893,9 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished);
}
-void smp_message_pass(int target, int msg, unsigned long data, int wait)
-{
- if ( !(_machine & (_MACH_Pmac|_MACH_chrp|_MACH_prep|_MACH_gemini)) )
- return;
-
- switch (_machine) {
- case _MACH_Pmac:
- /*
- * IPI's on the Pmac are a hack but without reasonable
- * IPI hardware SMP on Pmac is a hack.
- *
- * We assume here that the msg is not -1. If it is,
- * the recipient won't know the message was destined
- * for it. -- Cort
- */
- if (smp_processor_id() == 0) {
- /* primary cpu */
- if (target == 1 || target == MSG_ALL_BUT_SELF
- || target == MSG_ALL) {
- pmac_smp_message[1] = msg + 1;
- /* interrupt secondary processor */
- out_be32(psurge_sec_intr, ~0);
- out_be32(psurge_sec_intr, 0);
- }
- } else {
- /* secondary cpu */
- if (target == 0 || target == MSG_ALL_BUT_SELF
- || target == MSG_ALL) {
- pmac_smp_message[0] = msg + 1;
- /* interrupt primary processor */
- in_be32(psurge_pri_intr);
- }
- }
- if (target == smp_processor_id() || target == MSG_ALL) {
- /* sending a message to ourself */
- /* XXX maybe we shouldn't do this if ints are off */
- smp_message_recv(msg, NULL);
- }
- break;
- case _MACH_chrp:
- case _MACH_prep:
- case _MACH_gemini:
-#ifndef CONFIG_POWER4
- /* make sure we're sending something that translates to an IPI */
- if ( msg > 0x3 )
- break;
- switch ( target )
- {
- case MSG_ALL:
- openpic_cause_IPI(smp_processor_id(), msg, 0xffffffff);
- break;
- case MSG_ALL_BUT_SELF:
- openpic_cause_IPI(smp_processor_id(), msg,
- 0xffffffff & ~(1 << smp_processor_id()));
- break;
- default:
- openpic_cause_IPI(smp_processor_id(), msg, 1<<target);
- break;
- }
-#else /* CONFIG_POWER4 */
- /* for now, only do reschedule messages
- since we only have one IPI */
- if (msg != PPC_MSG_RESCHEDULE)
- break;
- for (i = 0; i < smp_num_cpus; ++i) {
- if (target == MSG_ALL || target == i
- || (target == MSG_ALL_BUT_SELF
- && i != smp_processor_id()))
- xics_cause_IPI(i);
- }
-#endif /* CONFIG_POWER4 */
- break;
- }
-}
-
void __init smp_boot_cpus(void)
{
extern struct task_struct *current_set[NR_CPUS];
- extern unsigned long smp_chrp_cpu_nr;
- extern void __secondary_start_psurge(void);
- extern void __secondary_start_chrp(void);
int i, cpu_nr;
struct task_struct *p;
unsigned long a;
@@ -411,7 +909,6 @@ void __init smp_boot_cpus(void)
* cpu 0, the master -- Cort
*/
cpu_callin_map[0] = 1;
- active_kernel_processor = 0;
current->processor = 0;
init_idle();
@@ -427,41 +924,40 @@ void __init smp_boot_cpus(void)
*/
cacheflush_time = 5 * 1024;
- if ( !(_machine & (_MACH_Pmac|_MACH_chrp|_MACH_gemini)) )
- {
- printk("SMP not supported on this machine.\n");
- return;
- }
-
- switch ( _machine )
- {
+ /* To be later replaced by some arch-specific routine */
+ switch(_machine) {
case _MACH_Pmac:
- /* assume powersurge board - 2 processors -- Cort */
- cpu_nr = 2;
- psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
- psurge_sec_intr = ioremap(PSURGE_SEC_INTR, 4);
- psurge_start = ioremap(PSURGE_START, 4);
+ /* Check for Core99 */
+ if (find_devices("uni-n"))
+ smp_ops = &core99_smp_ops;
+ else
+ smp_ops = &psurge_smp_ops;
break;
case _MACH_chrp:
- if (OpenPIC)
- for ( i = 0; i < 4 ; i++ )
- openpic_enable_IPI(i);
- cpu_nr = smp_chrp_cpu_nr;
+#ifndef CONFIG_POWER4
+ smp_ops = &chrp_smp_ops;
+#else
+ smp_ops = &xics_smp_ops;
+#endif /* CONFIG_POWER4 */
break;
- case _MACH_gemini:
- for ( i = 0; i < 4 ; i++ )
- openpic_enable_IPI(i);
- cpu_nr = (readb(GEMINI_CPUSTAT) & GEMINI_CPU_COUNT_MASK)>>2;
- cpu_nr = (cpu_nr == 0) ? 4 : cpu_nr;
+ case _MACH_prep:
+ smp_ops = &prep_smp_ops;
break;
+ default:
+ printk("SMP not supported on this machine.\n");
+ return;
}
+
+ /* Probe arch for CPUs */
+ cpu_nr = smp_ops->probe();
/*
* only check for cpus we know exist. We keep the callin map
* with cpus at the bottom -- Cort
*/
- for ( i = 1 ; i < cpu_nr; i++ )
- {
+ if (cpu_nr > max_cpus)
+ cpu_nr = max_cpus;
+ for (i = 1; i < cpu_nr; i++) {
int c;
struct pt_regs regs;
@@ -487,25 +983,7 @@ void __init smp_boot_cpus(void)
asm volatile("sync");
/* wake up cpus */
- switch ( _machine )
- {
- case _MACH_Pmac:
- /* setup entry point of secondary processor */
- out_be32(psurge_start, __pa(__secondary_start_psurge));
- /* interrupt secondary to begin executing code */
- out_be32(psurge_sec_intr, ~0);
- udelay(1);
- out_be32(psurge_sec_intr, 0);
- break;
- case _MACH_chrp:
- *(unsigned long *)KERNELBASE = i;
- asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
- break;
- case _MACH_gemini:
- openpic_init_processor( 1<<i );
- openpic_init_processor( 0 );
- break;
- }
+ smp_ops->kick_cpu(i);
/*
* wait to see if the cpu made a callin (is actually up).
@@ -517,40 +995,108 @@ void __init smp_boot_cpus(void)
if ( cpu_callin_map[i] )
{
+ char buf[32];
+ sprintf(buf, "found cpu %d", i);
+ if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
printk("Processor %d found.\n", i);
smp_num_cpus++;
} else {
+ char buf[32];
+ sprintf(buf, "didn't find cpu %d", i);
+ if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
printk("Processor %d is stuck.\n", i);
}
}
- if (OpenPIC && (_machine & (_MACH_gemini|_MACH_chrp|_MACH_prep)))
- do_openpic_setup_cpu();
+ /* Setup CPU 0 last (important) */
+ smp_ops->setup_cpu(0);
+}
- if ( _machine == _MACH_Pmac )
- {
- /* reset the entry point so if we get another intr we won't
- * try to startup again */
- out_be32(psurge_start, 0x100);
- if (request_irq(30, pmac_primary_intr, 0, "primary IPI", 0))
- printk(KERN_ERR "Couldn't get primary IPI interrupt");
- /*
- * The decrementers of both cpus are frozen at this point
- * until we give the secondary cpu another interrupt.
- * We set them both to decrementer_count and then send
- * the interrupt. This should get the decrementers
- * synchronized.
- * -- paulus.
- */
- set_dec(tb_ticks_per_jiffy);
- if ((_get_PVR() >> 16) != 1) {
- set_tb(0, 0); /* set timebase if not 601 */
- last_jiffy_stamp(0) = 0;
+void __init smp_software_tb_sync(int cpu)
+{
+#define PASSES 4 /* 4 passes.. */
+ int pass;
+ int i, j;
+
+ /* stop - start will be the number of timebase ticks it takes for cpu0
+ * to send a message to all others and the first reponse to show up.
+ *
+ * ASSUMPTION: this time is similiar for all cpus
+ * ASSUMPTION: the time to send a one-way message is ping/2
+ */
+ register unsigned long start = 0;
+ register unsigned long stop = 0;
+ register unsigned long temp = 0;
+
+ if (smp_num_cpus < 2) {
+ smp_tb_synchronized = 1;
+ return;
+ }
+
+ /* This code need fixing on >2 CPUs --BenH/paulus */
+ if (smp_num_cpus > 2) {
+ smp_tb_synchronized = 0;
+ return;
+ }
+
+ set_tb(0, 0);
+
+ /* multiple passes to get in l1 cache.. */
+ for (pass = 2; pass < 2+PASSES; pass++){
+ if (cpu == 0){
+ mb();
+ for (i = j = 1; i < smp_num_cpus; i++, j++){
+ /* skip stuck cpus */
+ while (!cpu_callin_map[j])
+ ++j;
+ while (cpu_callin_map[j] != pass)
+ barrier();
+ }
+ mb();
+ tb_sync_flag = pass;
+ start = get_tbl(); /* start timing */
+ while (tb_sync_flag)
+ mb();
+ stop = get_tbl(); /* end timing */
+ /* theoretically, the divisor should be 2, but
+ * I get better results on my dual mtx. someone
+ * please report results on other smp machines..
+ */
+ tb_offset = (stop-start)/4;
+ mb();
+ tb_sync_flag = pass;
+ udelay(10);
+ mb();
+ tb_sync_flag = 0;
+ mb();
+ set_tb(0,0);
+ mb();
+ } else {
+ cpu_callin_map[cpu] = pass;
+ mb();
+ while (!tb_sync_flag)
+ mb(); /* wait for cpu0 */
+ mb();
+ tb_sync_flag = 0; /* send response for timing */
+ mb();
+ while (!tb_sync_flag)
+ mb();
+ temp = tb_offset; /* make sure offset is loaded */
+ while (tb_sync_flag)
+ mb();
+ set_tb(0,temp); /* now, set the timebase */
+ mb();
}
- out_be32(psurge_sec_intr, ~0);
- udelay(1);
- out_be32(psurge_sec_intr, 0);
}
+ if (cpu == 0) {
+ smp_tb_synchronized = 1;
+ printk("smp_software_tb_sync: %d passes, final offset: %ld\n",
+ PASSES, tb_offset);
+ }
+ /* so time.c doesn't get confused */
+ set_dec(tb_ticks_per_jiffy);
+ last_jiffy_stamp(cpu) = 0;
+ cpu_callin_map[cpu] = 1;
}
void __init smp_commence(void)
@@ -558,8 +1104,48 @@ void __init smp_commence(void)
/*
* Lets the callin's below out of their loop.
*/
+ if (ppc_md.progress) ppc_md.progress("smp_commence", 0x370);
wmb();
smp_commenced = 1;
+ /* if the smp_ops->setup_cpu function has not already synched the
+ * timebases with a nicer hardware-based method, do so now
+ *
+ * I am open to suggestions for improvements to this method
+ * -- Troy <hozer@drgw.net>
+ *
+ * NOTE: if you are debugging, set smp_tb_synchronized for now
+ * since if this code runs pretty early and needs all cpus that
+ * reported in in smp_callin_map to be working
+ *
+ * NOTE2: this code doesn't seem to work on > 2 cpus. -- paulus
+ */
+ if (!smp_tb_synchronized) {
+ unsigned long flags;
+ __save_and_cli(flags);
+ smp_software_tb_sync(0);
+ __restore_flags(flags);
+ }
+}
+
+void __init smp_callin(void)
+{
+ int cpu = current->processor;
+
+ smp_store_cpu_info(cpu);
+ set_dec(tb_ticks_per_jiffy);
+ cpu_callin_map[cpu] = 1;
+
+ smp_ops->setup_cpu(cpu);
+
+ init_idle();
+
+ while(!smp_commenced)
+ barrier();
+ /* see smp_commence for more info */
+ if (!smp_tb_synchronized){
+ smp_software_tb_sync(cpu);
+ }
+ __sti();
}
/* intel needs this */
@@ -576,37 +1162,6 @@ int __init start_secondary(void *unused)
return cpu_idle(NULL);
}
-void __init smp_callin(void)
-{
- smp_store_cpu_info(current->processor);
- set_dec(tb_ticks_per_jiffy);
- if (_machine == _MACH_Pmac && (_get_PVR() >> 16) != 1) {
- set_tb(0, 0); /* set timebase if not 601 */
- last_jiffy_stamp(current->processor) = 0;
- }
- init_idle();
- cpu_callin_map[current->processor] = 1;
-
-#ifndef CONFIG_POWER4
- /*
- * Each processor has to do this and this is the best
- * place to stick it for now.
- * -- Cort
- */
- if (OpenPIC && _machine & (_MACH_gemini|_MACH_chrp|_MACH_prep))
- do_openpic_setup_cpu();
-#else
- xics_setup_cpu();
-#endif /* CONFIG_POWER4 */
-#ifdef CONFIG_GEMINI
- if ( _machine == _MACH_gemini )
- gemini_init_l2();
-#endif
- while(!smp_commenced)
- barrier();
- __sti();
-}
-
void __init smp_setup(char *str, int *ints)
{
}
@@ -621,6 +1176,14 @@ void __init smp_store_cpu_info(int id)
struct cpuinfo_PPC *c = &cpu_data[id];
/* assume bogomips are same for everything */
- c->loops_per_sec = loops_per_sec;
+ c->loops_per_jiffy = loops_per_jiffy;
c->pvr = _get_PVR();
}
+
+static int __init maxcpus(char *str)
+{
+ get_option(&str, &max_cpus);
+ return 1;
+}
+
+__setup("maxcpus=", maxcpus);
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index f71c8cbbf..5b36cedcc 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -67,7 +67,12 @@
#include <asm/time.h>
-void smp_local_timer_interrupt(struct pt_regs *);
+#ifdef CONFIG_SMP
+extern void smp_local_timer_interrupt(struct pt_regs *);
+extern int smp_tb_synchronized;
+#endif /* CONFIG_SMP */
+
+extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
@@ -97,6 +102,36 @@ static inline int tb_delta(unsigned *jiffy_stamp) {
return delta;
}
+extern unsigned long prof_cpu_mask;
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+extern char _stext;
+
+static inline void ppc_do_profile (unsigned long nip)
+{
+ if (!prof_buffer)
+ return;
+
+ /*
+ * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
+ * (default is all CPUs.)
+ */
+ if (!((1<<smp_processor_id()) & prof_cpu_mask))
+ return;
+
+ nip -= (unsigned long) &_stext;
+ nip >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds EIP values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (nip > prof_len-1)
+ nip = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[nip]);
+}
+
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
@@ -110,6 +145,9 @@ int timer_interrupt(struct pt_regs * regs)
hardirq_enter(cpu);
+ if (!user_mode(regs))
+ ppc_do_profile(instruction_pointer(regs));
+
do {
jiffy_stamp += tb_ticks_per_jiffy;
if (smp_processor_id()) continue;
@@ -151,7 +189,7 @@ int timer_interrupt(struct pt_regs * regs)
#ifdef CONFIG_SMP
smp_local_timer_interrupt(regs);
-#endif
+#endif /* CONFIG_SMP */
if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
ppc_md.heartbeat();
@@ -176,7 +214,7 @@ void do_gettimeofday(struct timeval *tv)
/* As long as timebases are not in sync, gettimeofday can only
* have jiffy resolution on SMP.
*/
- if (_machine != _MACH_Pmac)
+ if (!smp_tb_synchronized)
delta = 0;
#endif /* CONFIG_SMP */
lost_ticks = jiffies - wall_jiffies;
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 3b7473dda..8bd9ebded 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -65,21 +65,37 @@ int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
#endif
+
/*
* Trap & Exception support
*/
+
+spinlock_t oops_lock = SPIN_LOCK_UNLOCKED;
+
+void die(const char * str, struct pt_regs * fp, long err)
+{
+ console_verbose();
+ spin_lock_irq(&oops_lock);
+ printk("Oops: %s, sig: %ld\n", str, err);
+ show_regs(fp);
+ print_backtrace((unsigned long *)fp->gpr[1]);
+ spin_unlock_irq(&oops_lock);
+ /* do_exit() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+ do_exit(err);
+}
+
void
_exception(int signr, struct pt_regs *regs)
{
if (!user_mode(regs))
{
- show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
- print_backtrace((unsigned long *)regs->gpr[1]);
- panic("Exception in kernel pc %lx signal %d",regs->nip,signr);
+ die("Exception in kernel mode", regs, signr);
}
force_sig(signr, current);
}
@@ -98,7 +114,7 @@ MachineCheckException(struct pt_regs *regs)
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort */
- bad_page_fault(regs, regs->dar);
+ bad_page_fault(regs, regs->dar, SIGBUS);
return;
#endif
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
@@ -151,12 +167,10 @@ MachineCheckException(struct pt_regs *regs)
default:
printk("Unknown values in msr\n");
}
- show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
- print_backtrace((unsigned long *)regs->gpr[1]);
- panic("machine check");
+ die("machine check", regs, SIGBUS);
}
void
@@ -217,13 +231,13 @@ emulate_instruction(struct pt_regs *regs)
uint rd;
uint retval;
- retval = EFAULT;
+ retval = EINVAL;
if (!user_mode(regs))
return retval;
if (get_user(instword, (uint *)(regs->nip)))
- return retval;
+ return EFAULT;
/* Emulate the mfspr rD, PVR.
*/
@@ -337,12 +351,10 @@ SoftwareEmulation(struct pt_regs *regs)
int errcode;
if (!user_mode(regs)) {
- show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
- print_backtrace((unsigned long *)regs->gpr[1]);
- panic("Kernel Mode Software FPU Emulation");
+ die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
}
#ifdef CONFIG_MATH_EMULATION
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
index 1e973940c..b6f99174b 100644
--- a/arch/ppc/lib/Makefile
+++ b/arch/ppc/lib/Makefile
@@ -9,6 +9,6 @@ O_TARGET := lib.o
obj-y := checksum.o string.o strcase.o
-obj-$(CONFIG_SMP) := locks.o
+obj-$(CONFIG_SMP) += locks.o
include $(TOPDIR)/Rules.make
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
index 60d8576da..a5b0a40a8 100644
--- a/arch/ppc/lib/locks.c
+++ b/arch/ppc/lib/locks.c
@@ -58,8 +58,9 @@ void _spin_unlock(spinlock_t *lp)
{
#ifdef DEBUG_LOCKS
if ( !lp->lock )
- printk("_spin_unlock(%p): no lock cpu %d %s/%d\n", lp,
- smp_processor_id(),current->comm,current->pid);
+ printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
+ lp, smp_processor_id(), __builtin_return_address(0),
+ current->comm, current->pid);
if ( lp->owner_cpu != smp_processor_id() )
printk("_spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %lx\n",
lp, smp_processor_id(), (int)lp->owner_cpu,
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index b6da2cdfc..97bb6dbc1 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -48,7 +48,7 @@ unsigned long pte_errors = 0; /* updated by do_page_fault() */
unsigned int probingmem = 0;
extern void die_if_kernel(char *, struct pt_regs *, long);
-void bad_page_fault(struct pt_regs *, unsigned long);
+void bad_page_fault(struct pt_regs *, unsigned long, int sig);
void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
/*
@@ -96,7 +96,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
#endif /* CONFIG_XMON || CONFIG_KGDB */
if (in_interrupt() || mm == NULL) {
- bad_page_fault(regs, address);
+ bad_page_fault(regs, address, SIGSEGV);
return;
}
down(&mm->mmap_sem);
@@ -182,7 +182,7 @@ bad_area:
return;
}
- bad_page_fault(regs, address);
+ bad_page_fault(regs, address, SIGSEGV);
return;
/*
@@ -194,7 +194,7 @@ out_of_memory:
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
- bad_page_fault(regs, address);
+ bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
@@ -205,7 +205,7 @@ do_sigbus:
info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
- bad_page_fault(regs, address);
+ bad_page_fault(regs, address, SIGBUS);
}
/*
@@ -214,8 +214,10 @@ do_sigbus:
* in traps.c.
*/
void
-bad_page_fault(struct pt_regs *regs, unsigned long address)
+bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
+ extern void die(const char *,struct pt_regs *,long);
+
unsigned long fixup;
/* Are we prepared to handle this fault? */
@@ -225,14 +227,11 @@ bad_page_fault(struct pt_regs *regs, unsigned long address)
}
/* kernel has accessed a bad area */
- show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_kernel_faults)
debugger(regs);
#endif
- print_backtrace( (unsigned long *)regs->gpr[1] );
- panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
- regs->nip,regs->link,address,current->comm,current->pid);
+ die("kernel access of bad area", regs, sig);
}
#ifdef CONFIG_8xx
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index c3d77a67e..839b618d0 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -34,7 +34,6 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/openpic.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#ifdef CONFIG_BLK_DEV_INITRD
@@ -62,7 +61,6 @@
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
-#include <asm/gemini.h>
#include "mem_pieces.h"
@@ -70,15 +68,17 @@
#include "4xx_tlb.h"
#endif
-#define MAX_LOW_MEM (640 << 20)
+#define MAX_LOW_MEM (512 << 20)
#define PGTOKB(pages) (((pages) * PAGE_SIZE) >> 10)
int prom_trashed;
atomic_t next_mmu_context;
+rwlock_t context_overflow_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
unsigned long *end_of_DRAM;
unsigned long total_memory;
unsigned long total_lowmem;
+unsigned long ram_phys_base;
int mem_init_done;
int init_bootmem_done;
int boot_mapsize;
@@ -114,7 +114,6 @@ static void *MMU_get_page(void);
unsigned long prep_find_end_of_memory(void);
unsigned long pmac_find_end_of_memory(void);
unsigned long apus_find_end_of_memory(void);
-unsigned long gemini_find_end_of_memory(void);
extern unsigned long find_end_of_memory(void);
#ifdef CONFIG_8xx
unsigned long m8xx_find_end_of_memory(void);
@@ -127,14 +126,13 @@ unsigned long m8260_find_end_of_memory(void);
#endif /* CONFIG_8260 */
static void mapin_ram(void);
void map_page(unsigned long va, unsigned long pa, int flags);
-void set_phys_avail(struct mem_pieces *mp);
+void set_phys_avail(unsigned long total_ram);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern char _start[], _end[];
extern char _stext[], etext[];
extern struct task_struct *current_set[NR_CPUS];
-struct mem_pieces phys_mem;
char *klimit = _end;
struct mem_pieces phys_avail;
@@ -199,6 +197,8 @@ int __map_without_bats;
/* max amount of RAM to use */
unsigned long __max_memory;
+/* max amount of low RAM to map in */
+unsigned long __max_low_memory = MAX_LOW_MEM;
void __bad_pte(pmd_t *pmd)
{
@@ -399,8 +399,8 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
* If the address lies within the first 16 MB, assume it's in ISA
* memory space
*/
- if (p < 16*1024*1024)
- p += _ISA_MEM_BASE;
+ if ( p < 16*1024*1024 )
+ p += _ISA_MEM_BASE;
/*
* Don't allow anybody to remap normal RAM that we're using.
@@ -437,7 +437,11 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
return NULL;
v = VMALLOC_VMADDR(area->addr);
} else {
+#ifndef CONFIG_HIGHMEM
if (p >= ioremap_base)
+#else
+ if (p >= ioremap_base && p < PKMAP_BASE)
+#endif /* CONFIG_HIGHMEM */
v = p;
else
v = (ioremap_bot -= size);
@@ -491,16 +495,13 @@ unsigned long iopa(unsigned long addr)
void
map_page(unsigned long va, unsigned long pa, int flags)
{
- pmd_t *pd, oldpd;
+ pmd_t *pd;
pte_t *pg;
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
- oldpd = *pd;
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc(pd, va);
- if (pmd_none(oldpd) && mem_init_done)
- set_pgdir(va, *(pgd_t *)pd);
set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
if (mem_init_done)
flush_hash_page(0, va);
@@ -532,6 +533,8 @@ local_flush_tlb_all(void)
* 0xd0000000 on 64-bit machines. */
flush_hash_segments(0xd, 0xffffff);
#else
+ /* this could cause problems on SMP with nobats -- paulus */
+ /* XXX no hash_table_lock? interesting -- paulus */
__clear_user(Hash, Hash_size);
_tlbia();
#ifdef CONFIG_SMP
@@ -548,6 +551,27 @@ local_flush_tlb_all(void)
void
local_flush_tlb_mm(struct mm_struct *mm)
{
+ if (mm->context == 0) {
+ /* don't try to reassign a new context to the kernel */
+ /*
+ * This could cause problems on SMP if we aren't using
+ * the BATs (e.g. on POWER4 or if the nobats option is used).
+ * The problem scenario is that one cpu is doing
+ * flush_hash_page or similar when another cpu clears
+ * out the HPTEs which map the flush_hash_page text
+ * and the hash table. hash_page will then deadlock.
+ * We need some way to have "protected" HPTEs or else
+ * do all hash-table manipulation with the MMU off.
+ * -- paulus.
+ */
+#ifdef CONFIG_PPC64BRIDGE
+ flush_hash_segments(0xd, 0xf);
+#else
+ flush_hash_segments(0xc, 0xf);
+#endif CONFIG_PPC64BRIDGE
+ _tlbia();
+ return;
+ }
mm->context = NO_CONTEXT;
if (mm == current->mm)
activate_mm(mm, mm);
@@ -581,16 +605,18 @@ local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long e
{
start &= PAGE_MASK;
- if (end - start > 20 * PAGE_SIZE)
- {
- flush_tlb_mm(mm);
- return;
+ if (mm->context != 0) {
+ if (end > TASK_SIZE)
+ end = TASK_SIZE;
+ if (end - start > 20 * PAGE_SIZE) {
+ flush_tlb_mm(mm);
+ return;
+ }
}
- for (; start < end && start < TASK_SIZE; start += PAGE_SIZE)
- {
+ for (; start < end; start += PAGE_SIZE)
flush_hash_page(mm->context, start);
- }
+
#ifdef CONFIG_SMP
smp_send_tlb_invalidate(0);
#endif
@@ -608,23 +634,36 @@ mmu_context_overflow(void)
struct task_struct *tsk;
printk(KERN_DEBUG "mmu_context_overflow\n");
- read_lock(&tasklist_lock);
- for_each_task(tsk) {
- if (tsk->mm)
- tsk->mm->context = NO_CONTEXT;
- }
- read_unlock(&tasklist_lock);
- flush_hash_segments(0x10, 0xffffff);
+ /* acquire the write lock for context overflow */
+ write_lock (&context_overflow_lock);
+ /* recheck if overflow still exists */
+ if (atomic_read(&next_mmu_context) == LAST_CONTEXT) {
+ read_lock(&tasklist_lock);
+ for_each_task(tsk) {
+ if (tsk->mm)
+ tsk->mm->context = NO_CONTEXT;
+ }
+ read_unlock(&tasklist_lock);
+ flush_hash_segments(0x10, 0xffffff);
#ifdef CONFIG_SMP
- smp_send_tlb_invalidate(0);
+ smp_send_tlb_invalidate(0);
#endif
- atomic_set(&next_mmu_context, 0);
+ atomic_set(&next_mmu_context, 0);
+ }
+ write_unlock (&context_overflow_lock);
/* make sure current always has a context */
- current->mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));
- /* The PGD is only a placeholder. It is only used on
- * 8xx processors.
- */
- set_context(current->mm->context, current->mm->pgd);
+ /* need to check to assure current task has an mm */
+ /* - idle thread does not have an MM */
+ if (current->mm) {
+ current->mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));
+ set_context(current->mm->context, current->mm->pgd);
+ }
+}
+#else /* CONFIG_8xx */
+void
+mmu_context_overflow(void)
+{
+ atomic_set(&next_mmu_context, -1);
}
#endif /* CONFIG_8xx */
@@ -727,22 +766,20 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
static void __init mapin_ram(void)
{
- int i;
unsigned long v, p, s, f;
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) && !defined(CONFIG_POWER4)
if (!__map_without_bats) {
- unsigned long tot, mem_base, bl, done;
+ unsigned long tot, bl, done;
unsigned long max_size = (256<<20);
unsigned long align;
/* Set up BAT2 and if necessary BAT3 to cover RAM. */
- mem_base = __pa(KERNELBASE);
/* Make sure we don't map a block larger than the
smallest alignment of the physical address. */
- /* alignment of mem_base */
- align = ~(mem_base-1) & mem_base;
+ /* alignment of ram_phys_base */
+ align = ~(ram_phys_base-1) & ram_phys_base;
/* set BAT block size to MIN(max_size, align) */
if (align && align < max_size)
max_size = align;
@@ -753,7 +790,7 @@ static void __init mapin_ram(void)
break;
}
- setbat(2, KERNELBASE, mem_base, bl, RAM_PAGE);
+ setbat(2, KERNELBASE, ram_phys_base, bl, RAM_PAGE);
done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
@@ -761,41 +798,35 @@ static void __init mapin_ram(void)
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
- setbat(3, KERNELBASE+done, mem_base+done, bl,
+ setbat(3, KERNELBASE+done, ram_phys_base+done, bl,
RAM_PAGE);
}
}
#endif /* !CONFIG_4xx && !CONFIG_8xx && !CONFIG_POWER4 */
- for (i = 0; i < phys_mem.n_regions; ++i) {
- v = (ulong)__va(phys_mem.regions[i].address);
- p = phys_mem.regions[i].address;
- if (p >= total_lowmem)
- break;
- for (s = 0; s < phys_mem.regions[i].size; s += PAGE_SIZE) {
- /* On the MPC8xx, we want the page shared so we
- * don't get ASID compares on kernel space.
- */
- f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;
+ v = KERNELBASE;
+ p = ram_phys_base;
+ for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
+ /* On the MPC8xx, we want the page shared so we
+ * don't get ASID compares on kernel space.
+ */
+ f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
- /* Allows stub to set breakpoints everywhere */
- f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
+ /* Allows stub to set breakpoints everywhere */
+ f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#else
- if ((char *) v < _stext || (char *) v >= etext)
- f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
+ if ((char *) v < _stext || (char *) v >= etext)
+ f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#ifndef CONFIG_8xx
- else
- /* On the powerpc (not 8xx), no user access
- forces R/W kernel access */
- f |= _PAGE_USER;
+ else
+ /* On the powerpc (not 8xx), no user access
+ forces R/W kernel access */
+ f |= _PAGE_USER;
#endif /* CONFIG_8xx */
#endif /* CONFIG_KGDB */
- map_page(v, p, f);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- if (p >= total_lowmem)
- break;
- }
+ map_page(v, p, f);
+ v += PAGE_SIZE;
+ p += PAGE_SIZE;
}
}
@@ -922,27 +953,64 @@ MMU_init(void)
mtspr(SPRN_DCCR, 0x80000000); /* 128 MB of data space at 0x0. */
mtspr(SPRN_ICCR, 0x80000000); /* 128 MB of instr. space at 0x0. */
}
-#else
- /* How about ppc_md.md_find_end_of_memory instead of these
- * ifdefs? -- Dan.
- */
-#ifdef CONFIG_BOOTX_TEXT
-extern boot_infos_t *disp_bi;
+
+#elif defined(CONFIG_8xx)
+void __init MMU_init(void)
+{
+ if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
+
+ total_memory = total_lowmem = m8xx_find_end_of_memory();
+#ifdef CONFIG_HIGHMEM
+ if (total_lowmem > MAX_LOW_MEM) {
+ total_lowmem = MAX_LOW_MEM;
+ mem_pieces_remove(&phys_avail, total_lowmem,
+ total_memory - total_lowmem, 0);
+ }
+#endif /* CONFIG_HIGHMEM */
+ end_of_DRAM = __va(total_lowmem);
+ set_phys_avail(total_lowmem);
+
+ /* Map in all of RAM starting at KERNELBASE */
+ mapin_ram();
+
+ /* Now map in some of the I/O space that is generically needed
+ * or shared with multiple devices.
+ * All of this fits into the same 4Mbyte region, so it only
+ * requires one page table page.
+ */
+ ioremap(IMAP_ADDR, IMAP_SIZE);
+#ifdef CONFIG_MBX
+ ioremap(NVRAM_ADDR, NVRAM_SIZE);
+ ioremap(MBX_CSR_ADDR, MBX_CSR_SIZE);
+ ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
+
+ /* Map some of the PCI/ISA I/O space to get the IDE interface.
+ */
+ ioremap(PCI_ISA_IO_ADDR, 0x4000);
+ ioremap(PCI_IDE_ADDR, 0x4000);
+#endif
+#ifdef CONFIG_RPXLITE
+ ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
+ ioremap(HIOX_CSR_ADDR, HIOX_CSR_SIZE);
+#endif
+#ifdef CONFIG_RPXCLASSIC
+ ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
+ ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
#endif
+ if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
+}
+
+#else /* not 4xx or 8xx */
void __init MMU_init(void)
{
if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
-#ifndef CONFIG_8xx
+
if (have_of)
total_memory = pmac_find_end_of_memory();
#ifdef CONFIG_APUS
else if (_machine == _MACH_apus )
total_memory = apus_find_end_of_memory();
#endif
-#ifdef CONFIG_GEMINI
- else if ( _machine == _MACH_gemini )
- total_memory = gemini_find_end_of_memory();
-#endif /* CONFIG_GEMINI */
#if defined(CONFIG_8260)
else
total_memory = m8260_find_end_of_memory();
@@ -950,16 +1018,17 @@ void __init MMU_init(void)
else /* prep */
total_memory = prep_find_end_of_memory();
#endif
-
+ if (__max_memory && total_memory > __max_memory)
+ total_memory = __max_memory;
total_lowmem = total_memory;
-#ifdef CONFIG_HIGHMEM
- if (total_lowmem > MAX_LOW_MEM) {
- total_lowmem = MAX_LOW_MEM;
- mem_pieces_remove(&phys_avail, total_lowmem,
- total_memory - total_lowmem, 0);
- }
+ if (total_lowmem > __max_low_memory) {
+ total_lowmem = __max_low_memory;
+#ifndef CONFIG_HIGHMEM
+ total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
+ }
end_of_DRAM = __va(total_lowmem);
+ set_phys_avail(total_lowmem);
if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
hash_init();
@@ -991,12 +1060,15 @@ void __init MMU_init(void)
ioremap_base = 0xf0000000;
break;
case _MACH_chrp:
- setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE);
-#ifdef CONFIG_PPC64BRIDGE
- setbat(1, 0x80000000, 0xc0000000, 0x10000000, IO_PAGE);
-#else
- setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
- setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
+ /*
+ * The code below tends to get removed, please don't take it out.
+ * The F50 needs this mapping and it you take it out I'll track you
+ * down and slap your hands. If it causes problems please email me.
+ * -- Cort <cort@fsmlabs.com>
+ */
+#ifndef CONFIG_POWER3
+ setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
+ setbat(1, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
#endif
break;
case _MACH_Pmac:
@@ -1008,10 +1080,6 @@ void __init MMU_init(void)
/* Map chip and ZorroII memory */
setbat(1, zTwoBase, 0x00000000, 0x01000000, IO_PAGE);
break;
- case _MACH_gemini:
- setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
- setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
- break;
case _MACH_8260:
/* Map the IMMR, plus anything else we can cover
* in that upper space according to the memory controller
@@ -1025,46 +1093,7 @@ void __init MMU_init(void)
}
ioremap_bot = ioremap_base;
#endif /* CONFIG_POWER4 */
-#else /* CONFIG_8xx */
-
- total_memory = total_lowmem = m8xx_find_end_of_memory();
-#ifdef CONFIG_HIGHMEM
- if (total_lowmem > MAX_LOW_MEM) {
- total_lowmem = MAX_LOW_MEM;
- mem_pieces_remove(&phys_avail, total_lowmem,
- total_memory - total_lowmem, 0);
- }
-#endif /* CONFIG_HIGHMEM */
- end_of_DRAM = __va(total_lowmem);
-
- /* Map in all of RAM starting at KERNELBASE */
- mapin_ram();
-
- /* Now map in some of the I/O space that is generically needed
- * or shared with multiple devices.
- * All of this fits into the same 4Mbyte region, so it only
- * requires one page table page.
- */
- ioremap(IMAP_ADDR, IMAP_SIZE);
-#ifdef CONFIG_MBX
- ioremap(NVRAM_ADDR, NVRAM_SIZE);
- ioremap(MBX_CSR_ADDR, MBX_CSR_SIZE);
- ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
- /* Map some of the PCI/ISA I/O space to get the IDE interface.
- */
- ioremap(PCI_ISA_IO_ADDR, 0x4000);
- ioremap(PCI_IDE_ADDR, 0x4000);
-#endif
-#ifdef CONFIG_RPXLITE
- ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
- ioremap(HIOX_CSR_ADDR, HIOX_CSR_SIZE);
-#endif
-#ifdef CONFIG_RPXCLASSIC
- ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
- ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
-#endif
-#endif /* CONFIG_8xx */
if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
#ifdef CONFIG_BOOTX_TEXT
/* Must be done last, or ppc_md.progress will die */
@@ -1189,7 +1218,8 @@ void __init mem_init(void)
#if defined(CONFIG_ALL_PPC)
/* mark the RTAS pages as reserved */
if ( rtas_data )
- for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
+ for (addr = (ulong)__va(rtas_data);
+ addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
addr += PAGE_SIZE)
SetPageReserved(virt_to_page(addr));
#endif /* defined(CONFIG_ALL_PPC) */
@@ -1249,7 +1279,7 @@ void __init mem_init(void)
unsigned long __init pmac_find_end_of_memory(void)
{
unsigned long a, total;
- unsigned long ram_limit = 0xe0000000 - KERNELBASE;
+ struct mem_pieces phys_mem;
memory_node = find_devices("memory");
if (memory_node == NULL) {
@@ -1260,8 +1290,7 @@ unsigned long __init pmac_find_end_of_memory(void)
/*
* Find out where physical memory is, and check that it
* starts at 0 and is contiguous. It seems that RAM is
- * always physically contiguous on Power Macintoshes,
- * because MacOS can't cope if it isn't.
+ * always physically contiguous on Power Macintoshes.
*
* Supporting discontiguous physical memory isn't hard,
* it just makes the virtual <-> physical mapping functions
@@ -1274,23 +1303,14 @@ unsigned long __init pmac_find_end_of_memory(void)
a = phys_mem.regions[0].address;
if (a != 0)
panic("RAM doesn't start at physical address 0");
- if (__max_memory == 0 || __max_memory > ram_limit)
- __max_memory = ram_limit;
- if (phys_mem.regions[0].size >= __max_memory) {
- phys_mem.regions[0].size = __max_memory;
- phys_mem.n_regions = 1;
- }
total = phys_mem.regions[0].size;
-
+
if (phys_mem.n_regions > 1) {
printk("RAM starting at 0x%x is not contiguous\n",
phys_mem.regions[1].address);
printk("Using RAM from 0 to 0x%lx\n", total-1);
- phys_mem.n_regions = 1;
}
- set_phys_avail(&phys_mem);
-
return total;
}
#endif /* CONFIG_ALL_PPC */
@@ -1305,7 +1325,11 @@ unsigned long __init pmac_find_end_of_memory(void)
unsigned long __init prep_find_end_of_memory(void)
{
unsigned long total;
+#ifdef CONFIG_PREP_RESIDUAL
total = res->TotalMemory;
+#else
+ total = 0;
+#endif
if (total == 0 )
{
@@ -1317,33 +1341,11 @@ unsigned long __init prep_find_end_of_memory(void)
total = 0x02000000;
printk("Ramsize default to be %ldM\n", total>>20);
}
- mem_pieces_append(&phys_mem, 0, total);
- set_phys_avail(&phys_mem);
return (total);
}
#endif /* defined(CONFIG_ALL_PPC) */
-
-#if defined(CONFIG_GEMINI)
-unsigned long __init gemini_find_end_of_memory(void)
-{
- unsigned long total;
- unsigned char reg;
-
- reg = readb(GEMINI_MEMCFG);
- total = ((1<<((reg & 0x7) - 1)) *
- (8<<((reg >> 3) & 0x7)));
- total *= (1024*1024);
- phys_mem.regions[0].address = 0;
- phys_mem.regions[0].size = total;
- phys_mem.n_regions = 1;
-
- set_phys_avail(&phys_mem);
- return phys_mem.regions[0].size;
-}
-#endif /* defined(CONFIG_GEMINI) */
-
#ifdef CONFIG_8260
/*
* Same hack as 8xx.
@@ -1355,12 +1357,7 @@ unsigned long __init m8260_find_end_of_memory(void)
binfo = (bd_t *)__res;
- phys_mem.regions[0].address = 0;
- phys_mem.regions[0].size = binfo->bi_memsize;
- phys_mem.n_regions = 1;
-
- set_phys_avail(&phys_mem);
- return phys_mem.regions[0].size;
+ return binfo->bi_memsize;
}
#endif /* CONFIG_8260 */
@@ -1369,6 +1366,7 @@ unsigned long __init m8260_find_end_of_memory(void)
unsigned long __init apus_find_end_of_memory(void)
{
int shadow = 0;
+ unsigned long total;
/* The memory size reported by ADOS excludes the 512KB
reserved for PPC exception registers and possibly 512KB
@@ -1394,43 +1392,28 @@ unsigned long __init apus_find_end_of_memory(void)
memory[0].size = ((size+0x001fffff) & 0xffe00000);
}
- /* Now register the memory block. */
- mem_pieces_append(&phys_mem, memory[0].addr, memory[0].size);
- set_phys_avail(&phys_mem);
+ total = memory[0].size;
/* Remove the memory chunks that are controlled by special
Phase5 hardware. */
- {
- unsigned long top = memory[0].addr + memory[0].size;
- /* Remove the upper 512KB if it contains a shadow of
- the ADOS ROM. FIXME: It might be possible to
- disable this shadow HW. Check the booter
- (ppc_boot.c) */
- if (shadow)
- {
- top -= HARDWARE_MAPPED_SIZE;
- mem_pieces_remove(&phys_avail, top,
- HARDWARE_MAPPED_SIZE, 0);
- }
-
- /* Remove the upper 512KB where the PPC exception
- vectors are mapped. */
- top -= HARDWARE_MAPPED_SIZE;
-#if 0
- /* This would be neat, but it breaks on A3000 machines!? */
- mem_pieces_remove(&phys_avail, top, 16384, 0);
-#else
- mem_pieces_remove(&phys_avail, top, HARDWARE_MAPPED_SIZE, 0);
-#endif
+ /* Remove the upper 512KB if it contains a shadow of
+ the ADOS ROM. FIXME: It might be possible to
+ disable this shadow HW. Check the booter
+ (ppc_boot.c) */
+ if (shadow)
+ total -= HARDWARE_MAPPED_SIZE;
- }
+ /* Remove the upper 512KB where the PPC exception
+ vectors are mapped. */
+ total -= HARDWARE_MAPPED_SIZE;
/* Linux/APUS only handles one block of memory -- the one on
the PowerUP board. Other system memory is horrible slow in
comparison. The user can use other memory for swapping
using the z2ram device. */
- return memory[0].addr + memory[0].size;
+ ram_phys_base = memory[0].addr;
+ return total;
}
#endif /* CONFIG_APUS */
@@ -1440,12 +1423,11 @@ unsigned long __init apus_find_end_of_memory(void)
static void __init hash_init(void)
{
int Hash_bits, mb, mb2;
- unsigned int hmask, ramsize, h;
+ unsigned int hmask, h;
extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
hash_page_patch_C[], hash_page[];
- ramsize = (ulong)end_of_DRAM - KERNELBASE;
#ifdef CONFIG_PPC64BRIDGE
/* The hash table has already been allocated and initialized
in prom.c */
@@ -1464,7 +1446,7 @@ static void __init hash_init(void)
* Allow 64k of hash table for every 16MB of memory,
* up to a maximum of 2MB.
*/
- for (h = 64<<10; h < ramsize / 256 && h < (2<<20); h *= 2)
+ for (h = 64<<10; h < total_memory / 256 && h < (2<<20); h *= 2)
;
Hash_size = h;
Hash_mask = (h >> 6) - 1;
@@ -1498,8 +1480,8 @@ static void __init hash_init(void)
Hash = 0;
#endif /* CONFIG_PPC64BRIDGE */
- printk("Total memory = %dMB; using %ldkB for hash table (at %p)\n",
- ramsize >> 20, Hash_size >> 10, Hash);
+ printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
+ total_memory >> 20, Hash_size >> 10, Hash);
if ( Hash_size )
{
if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
@@ -1560,12 +1542,7 @@ unsigned long __init m8xx_find_end_of_memory(void)
binfo = (bd_t *)__res;
- phys_mem.regions[0].address = 0;
- phys_mem.regions[0].size = binfo->bi_memsize;
- phys_mem.n_regions = 1;
-
- set_phys_avail(&phys_mem);
- return phys_mem.regions[0].address + phys_mem.regions[0].size;
+ return binfo->bi_memsize;
}
#endif /* !CONFIG_4xx && !CONFIG_8xx */
@@ -1582,20 +1559,16 @@ oak_find_end_of_memory(void)
unsigned long *ret;
bd_t *bip = (bd_t *)__res;
- phys_mem.regions[0].address = 0;
- phys_mem.regions[0].size = bip->bi_memsize;
- phys_mem.n_regions = 1;
-
- set_phys_avail(&phys_mem);
- return (phys_mem.regions[0].address + phys_mem.regions[0].size);
+ return bip->bi_memsize;
}
#endif
/*
- * Set phys_avail to phys_mem less the kernel text/data/bss.
+ * Set phys_avail to the amount of physical memory,
+ * less the kernel text/data/bss.
*/
void __init
-set_phys_avail(struct mem_pieces *mp)
+set_phys_avail(unsigned long total_memory)
{
unsigned long kstart, ksize;
@@ -1604,7 +1577,9 @@ set_phys_avail(struct mem_pieces *mp)
* physical memory.
*/
- phys_avail = *mp;
+ phys_avail.regions[0].address = 0;
+ phys_avail.regions[0].size = total_memory;
+ phys_avail.n_regions = 1;
/*
* Map out the kernel text/data/bss from the available physical
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index 499c5b6c8..611b1e069 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -14,6 +14,10 @@
#include <asm/bootx.h>
#include <asm/feature.h>
#include <asm/processor.h>
+#include <asm/delay.h>
+#ifdef CONFIG_SMP
+#include <asm/bitops.h>
+#endif
static volatile unsigned char *sccc, *sccd;
unsigned long TXRDY, RXRDY;
@@ -22,10 +26,10 @@ extern void prom_drawchar(char);
extern void prom_drawstring(const char *str);
static int xmon_expect(const char *str, unsigned int timeout);
-static int console = 0;
-static int use_screen = 1; /* default */
-static int via_modem = 0;
-static int xmon_use_sccb = 0;
+static int console;
+static int use_screen;
+static int via_modem = 1;
+static int xmon_use_sccb;
static struct device_node *macio_node;
#define TB_SPEED 25000000
@@ -53,28 +57,32 @@ xmon_map_scc(void)
use_screen = 0;
- if ( _machine == _MACH_Pmac )
- {
+ if (_machine == _MACH_Pmac) {
struct device_node *np;
unsigned long addr;
#ifdef CONFIG_BOOTX_TEXT
extern boot_infos_t *disp_bi;
+ /* see if there is a keyboard in the device tree
+ with a parent of type "adb" */
+ for (np = find_devices("keyboard"); np; np = np->next)
+ if (np->parent && np->parent->type
+ && strcmp(np->parent->type, "adb") == 0)
+ break;
+
/* needs to be hacked if xmon_printk is to be used
from within find_via_pmu() */
#ifdef CONFIG_ADB_PMU
- if (!via_modem && disp_bi && find_via_pmu()) {
- prom_drawstring("xmon uses screen and keyboard\n");
+ if (np != NULL && disp_bi && find_via_pmu())
use_screen = 1;
- }
#endif
#ifdef CONFIG_ADB_CUDA
- if (!via_modem && disp_bi ) {
- prom_drawstring("xmon uses screen and keyboard\n");
+ if (np != NULL && disp_bi && find_via_cuda())
use_screen = 1;
- }
-#endif
#endif
+ if (use_screen)
+ prom_drawstring("xmon uses screen and keyboard\n");
+#endif /* CONFIG_BOOTX_TEXT */
#ifdef CHRP_ESCC
addr = 0xc1013020;
@@ -93,15 +101,6 @@ xmon_map_scc(void)
sccc = base + (addr & ~PAGE_MASK);
sccd = sccc + 0x10;
}
- else if ( _machine & _MACH_gemini )
- {
- /* should already be mapped by the kernel boot */
- sccc = (volatile unsigned char *) 0xffeffb0d;
- sccd = (volatile unsigned char *) 0xffeffb08;
- TXRDY = 0x20;
- RXRDY = 1;
- console = 1;
- }
else
{
/* should already be mapped by the kernel boot */
@@ -140,12 +139,22 @@ xmon_write(void *handle, void *ptr, int nb)
char *p = ptr;
int i, c, ct;
+#ifdef CONFIG_SMP
+ static unsigned long xmon_write_lock;
+ int lock_wait = 1000000;
+ int locked;
+
+ while ((locked = test_and_set_bit(0, &xmon_write_lock)) != 0)
+ if (--lock_wait == 0)
+ break;
+#endif
+
#ifdef CONFIG_BOOTX_TEXT
if (use_screen) {
/* write it on the screen */
for (i = 0; i < nb; ++i)
prom_drawchar(*p++);
- return nb;
+ goto out;
}
#endif
if (!scc_initialized)
@@ -166,8 +175,15 @@ xmon_write(void *handle, void *ptr, int nb)
}
buf_access();
*sccd = c;
+ eieio();
}
- return i;
+
+ out:
+#ifdef CONFIG_SMP
+ if (!locked)
+ clear_bit(0, &xmon_write_lock);
+#endif
+ return nb;
}
int xmon_wants_key;
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index 49c3be834..65082d458 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -406,6 +406,7 @@ cmds(struct pt_regs *excp)
break;
case 'M':
print_sysmap();
+ break;
case 'S':
super_regs();
break;
@@ -795,6 +796,8 @@ print_sysmap(void)
extern char *sysmap;
if ( sysmap )
printf("System.map: \n%s", sysmap);
+ else
+ printf("No System.map\n");
}
void
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index a4df59c65..7809a4a2b 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -52,8 +52,6 @@ typedef struct
struct ucontext uc;
} rt_sigframe;
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
/*
diff --git a/arch/sh/boot/compressed/head.S b/arch/sh/boot/compressed/head.S
index 75d8b4ef9..0edf49085 100644
--- a/arch/sh/boot/compressed/head.S
+++ b/arch/sh/boot/compressed/head.S
@@ -43,7 +43,7 @@ bss_start_addr:
end_addr:
.long _end
init_sr:
- .long 0x40000000 /* Privileged mode, Bank=0, Block=0, I3-I0=0 */
+ .long 0x400000F0 /* Privileged mode, Bank=0, Block=0, IMASK=0xF */
init_stack_addr:
.long stack_start
decompress_kernel_addr:
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index 37d29a6c4..f2756f72c 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -36,23 +36,23 @@
* NOTE: This code uses a convention that instructions in the delay slot
* of a transfer-control instruction are indented by an extra space, thus:
*
- * jmp @$k0 ! control-transfer instruction
- * ldc $k1, $ssr ! delay slot
+ * jmp @k0 ! control-transfer instruction
+ * ldc k1, ssr ! delay slot
*
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in ptrace.c and ptrace.h
*
- * $r0
+ * r0
* ...
- * $r15 = stack pointer
- * $spc
- * $pr
- * $ssr
- * $gbr
- * $mach
- * $macl
+ * r15 = stack pointer
+ * spc
+ * pr
+ * ssr
+ * gbr
+ * mach
+ * macl
* syscall #
*
*/
@@ -88,16 +88,16 @@ MMU_TEA = 0xff00000c ! TLB Exception Address Register
#endif
/* Offsets to the stack */
-R0 = 0 /* Return value. New ABI also arg4 */
-R1 = 4 /* New ABI: arg5 */
-R2 = 8 /* New ABI: arg6 */
-R3 = 12 /* New ABI: syscall_nr */
-R4 = 16 /* New ABI: arg0 */
-R5 = 20 /* New ABI: arg1 */
-R6 = 24 /* New ABI: arg2 */
-R7 = 28 /* New ABI: arg3 */
-SP = (15*4)
-SR = (16*4+8)
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_SR = (16*4+8)
SYSCALL_NR = (16*4+6*4)
@@ -140,117 +140,139 @@ SYSCALL_NR = (16*4+6*4)
!
#define STI() \
- mov.l __INV_IMASK, $r11; \
- stc $sr, $r10; \
- and $r11, $r10; \
- stc $k_g_imask, $r11; \
- or $r11, $r10; \
- ldc $r10, $sr
+ mov.l __INV_IMASK, r11; \
+ stc sr, r10; \
+ and r11, r10; \
+ stc k_g_imask, r11; \
+ or r11, r10; \
+ ldc r10, sr
.align 2
tlb_miss_load:
bra call_dpf
- mov #0, $r5
+ mov #0, r5
.align 2
tlb_miss_store:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
.align 2
initial_page_write:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
.align 2
tlb_protection_violation_load:
bra call_dpf
- mov #0, $r5
+ mov #0, r5
.align 2
tlb_protection_violation_store:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
call_dpf:
- mov.l 1f, $r0
- mov $r5, $r8
- mov.l @$r0, $r6
- mov $r6, $r9
- mov.l 2f, $r0
- sts $pr, $r10
- jsr @$r0
- mov $r15, $r4
+ mov.l 1f, r0
+ mov r5, r8
+ mov.l @r0, r6
+ mov r6, r9
+ mov.l 2f, r0
+ sts pr, r10
+ jsr @r0
+ mov r15, r4
!
- tst #0xff, $r0
+ tst #0xff, r0
bf/s 0f
- lds $r10, $pr
+ lds r10, pr
rts
nop
0: STI()
- mov.l 3f, $r0
- mov $r9, $r6
- mov $r8, $r5
- jmp @$r0
- mov $r15, $r4
+ mov.l 3f, r0
+ mov r9, r6
+ mov r8, r5
+ jmp @r0
+ mov r15, r4
.align 2
1: .long MMU_TEA
2: .long SYMBOL_NAME(__do_page_fault)
3: .long SYMBOL_NAME(do_page_fault)
+ .align 2
+address_error_load:
+ bra call_dae
+ mov #0,r5 ! writeaccess = 0
+
+ .align 2
+address_error_store:
+ bra call_dae
+ mov #1,r5 ! writeaccess = 1
+
+call_dae:
+ mov.l 1f, r0
+ mov.l @r0, r6 ! address
+ mov.l 2f, r0
+ jmp @r0
+ mov r15, r4 ! regs
+
+ .align 2
+1: .long MMU_TEA
+2: .long SYMBOL_NAME(do_address_error)
+
#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
.align 2
/* Unwind the stack and jmp to the debug entry */
debug_kernel:
- mov.l @$r15+, $r0
- mov.l @$r15+, $r1
- mov.l @$r15+, $r2
- mov.l @$r15+, $r3
- mov.l @$r15+, $r4
- mov.l @$r15+, $r5
- mov.l @$r15+, $r6
- mov.l @$r15+, $r7
- stc $sr, $r8
- mov.l 1f, $r9 ! BL =1, RB=1, IMASK=0x0F
- or $r9, $r8
- ldc $r8, $sr ! here, change the register bank
- mov.l @$r15+, $r8
- mov.l @$r15+, $r9
- mov.l @$r15+, $r10
- mov.l @$r15+, $r11
- mov.l @$r15+, $r12
- mov.l @$r15+, $r13
- mov.l @$r15+, $r14
- mov.l @$r15+, $k0
- ldc.l @$r15+, $spc
- lds.l @$r15+, $pr
- mov.l @$r15+, $k1
- ldc.l @$r15+, $gbr
- lds.l @$r15+, $mach
- lds.l @$r15+, $macl
- mov $k0, $r15
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+ stc sr, r8
+ mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
+ or r9, r8
+ ldc r8, sr ! here, change the register bank
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, k0
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ mov.l @r15+, k1
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+ mov k0, r15
!
- mov.l 2f, $k0
- jmp @$k0
- ldc $k1, $ssr
+ mov.l 2f, k0
+ mov.l @k0, k0
+ jmp @k0
+ ldc k1, ssr
.align 2
1: .long 0x300000f0
-2: .long CONFIG_GDB_STUB_VBR + 0x100
+2: .long SYMBOL_NAME(gdb_vbr_vector)
#endif
.align 2
debug_trap:
#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt/s debug_kernel
#endif
- mov.l @$r15, $r0
- mov.l 1f, $r8
- jmp @$r8
+ mov.l @r15, r0
+ mov.l 1f, r8
+ jmp @r8
nop
.align 2
@@ -260,8 +282,8 @@ debug_trap:
error:
!
STI()
- mov.l 1f, $r0
- jmp @$r0
+ mov.l 1f, r0
+ jmp @r0
nop
.align 2
1: .long SYMBOL_NAME(do_exception_error)
@@ -272,7 +294,7 @@ error:
!
ENTRY(ret_from_fork)
bra SYMBOL_NAME(ret_from_syscall)
- add #4, $r15 ! pop down bogus r0 (see switch_to MACRO)
+ add #4, r15 ! pop down bogus r0 (see switch_to MACRO)
/*
* Old syscall interface:
@@ -305,90 +327,90 @@ ENTRY(ret_from_fork)
*/
system_call:
- mov.l __TRA, $r9
- mov.l @$r9, $r8
+ mov.l __TRA, r9
+ mov.l @r9, r8
!
! Is the trap argument >= 0x20? (TRA will be >= 0x80)
- mov #0x20, $r9
- extu.b $r9, $r9
- shll2 $r9
- cmp/hs $r9, $r8
+ mov #0x20, r9
+ extu.b r9, r9
+ shll2 r9
+ cmp/hs r9, r8
bt debug_trap
!
- mov #SYSCALL_NR, $r14
- add $r15, $r14
+ mov #SYSCALL_NR, r14
+ add r15, r14
!
#ifdef COMPAT_OLD_SYSCALL_ABI
- mov #0x40, $r9
- cmp/hs $r9, $r8
+ mov #0x40, r9
+ cmp/hs r9, r8
bf/s old_abi_system_call
nop
#endif
! New Syscall ABI
- add #-0x40, $r8
- shlr2 $r8
- shll8 $r8
- shll8 $r8 ! $r8 = num_args<<16
- mov $r3, $r10
- or $r8, $r10 ! Encode syscall # and # of arguments
- mov.l $r10, @$r14 ! set syscall_nr
+ add #-0x40, r8
+ shlr2 r8
+ shll8 r8
+ shll8 r8 ! r8 = num_args<<16
+ mov r3, r10
+ or r8, r10 ! Encode syscall # and # of arguments
+ mov.l r10, @r14 ! set syscall_nr
STI()
!
- stc $k_current, $r11
- mov.l @(tsk_ptrace,$r11), $r10 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, $r11
- tst $r11, $r10
+ stc k_current, r11
+ mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
+ mov #PT_TRACESYS, r11
+ tst r11, r10
bt 5f
! Yes it is traced.
- mov.l __syscall_trace, $r11 ! Call syscall_trace() which notifies
- jsr @$r11 ! superior (will chomp $R[0-7])
+ mov.l __syscall_trace, r11 ! Call syscall_trace() which notifies
+ jsr @r11 ! superior (will chomp R[0-7])
nop
- ! Reload $R0-$R4 from kernel stack, where the
+ ! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
- ! ptrace(POKEUSR). (Note that $R0-$R2 are
+ ! ptrace(POKEUSR). (Note that R0-R2 are
! used by the system call handler directly
! from the kernel stack anyway, so don't need
! to be reloaded here.) This allows the parent
! to rewrite system calls and args on the fly.
- mov.l @(R4,$r15), $r4 ! arg0
- mov.l @(R5,$r15), $r5
- mov.l @(R6,$r15), $r6
- mov.l @(R7,$r15), $r7 ! arg3
- mov.l @(R3,$r15), $r3 ! syscall_nr
+ mov.l @(OFF_R4,r15), r4 ! arg0
+ mov.l @(OFF_R5,r15), r5
+ mov.l @(OFF_R6,r15), r6
+ mov.l @(OFF_R7,r15), r7 ! arg3
+ mov.l @(OFF_R3,r15), r3 ! syscall_nr
! Arrange for syscall_trace() to be called
! again as the system call returns.
- mov.l __syscall_ret_trace, $r10
+ mov.l __syscall_ret_trace, r10
bra 6f
- lds $r10, $pr
+ lds r10, pr
! No it isn't traced.
! Arrange for normal system call return.
-5: mov.l __syscall_ret, $r10
- lds $r10, $pr
+5: mov.l __syscall_ret, r10
+ lds r10, pr
! Call the system call handler through the table.
! (both normal and ptrace'd)
! First check for bad syscall number
-6: mov $r3, $r9
- mov.l __n_sys, $r10
- cmp/hs $r10, $r9
+6: mov r3, r9
+ mov.l __n_sys, r10
+ cmp/hs r10, r9
bf 2f
! Bad syscall number
rts ! go to syscall_ret or syscall_ret_trace
- mov #-ENOSYS, $r0
+ mov #-ENOSYS, r0
! Good syscall number
-2: shll2 $r9 ! x4
- mov.l __sct, $r11
- add $r11, $r9
- mov.l @$r9, $r11
- jmp @$r11 ! jump to specific syscall handler
+2: shll2 r9 ! x4
+ mov.l __sct, r11
+ add r11, r9
+ mov.l @r9, r11
+ jmp @r11 ! jump to specific syscall handler
nop
! In case of trace
syscall_ret_trace:
- mov.l $r0, @(R0,$r15) ! save the return value
- mov.l __syscall_trace, $r1
- mova SYMBOL_NAME(ret_from_syscall), $r0
- jmp @$r1 ! Call syscall_trace() which notifies superior
- lds $r0, $pr ! Then return to ret_from_syscall()
+ mov.l r0, @(OFF_R0,r15) ! save the return value
+ mov.l __syscall_trace, r1
+ mova SYMBOL_NAME(ret_from_syscall), r0
+ jmp @r1 ! Call syscall_trace() which notifies superior
+ lds r0, pr ! Then return to ret_from_syscall()
@@ -396,41 +418,41 @@ syscall_ret_trace:
! Handle old ABI system call.
! Note that ptrace(SYSCALL) is not supported for the old ABI.
! At this point:
-! $r0, $r4-7 as per ABI
-! $r8 = value of TRA register (= num_args<<2)
-! $r14 = points to SYSCALL_NR in stack frame
+! r0, r4-7 as per ABI
+! r8 = value of TRA register (= num_args<<2)
+! r14 = points to SYSCALL_NR in stack frame
old_abi_system_call:
- mov $r0, $r9 ! Save system call number in $r9
+ mov r0, r9 ! Save system call number in r9
! ! arrange for return which pops stack
- mov.l __old_abi_syscall_ret, $r10
- lds $r10, $pr
+ mov.l __old_abi_syscall_ret, r10
+ lds r10, pr
! Build the stack frame if TRA > 0
- mov $r8, $r10
- cmp/pl $r10
+ mov r8, r10
+ cmp/pl r10
bf 0f
- mov.l @(SP,$r15), $r0 ! get original user stack
-7: add #-4, $r10
-4: mov.l @($r0,$r10), $r1 ! May cause address error exception..
- mov.l $r1, @-$r15
- cmp/pl $r10
+ mov.l @(OFF_SP,r15), r0 ! get original user stack
+7: add #-4, r10
+4: mov.l @(r0,r10), r1 ! May cause address error exception..
+ mov.l r1, @-r15
+ cmp/pl r10
bt 7b
0:
- mov.l $r9, @$r14 ! set syscall_nr
+ mov.l r9, @r14 ! set syscall_nr
STI()
! Call the system call handler through the table.
! First check for bad syscall number
- mov.l __n_sys, $r10
- cmp/hs $r10, $r9
+ mov.l __n_sys, r10
+ cmp/hs r10, r9
bf 2f
! Bad syscall number
rts ! return to old_abi_syscall_ret
- mov #-ENOSYS, $r0
+ mov #-ENOSYS, r0
! Good syscall number
-2: shll2 $r9 ! x4
- mov.l __sct, $r11
- add $r11, $r9
- mov.l @$r9, $r11
- jmp @$r11 ! call specific syscall handler,
+2: shll2 r9 ! x4
+ mov.l __sct, r11
+ add r11, r9
+ mov.l @r9, r11
+ jmp @r11 ! call specific syscall handler,
nop
.align 2
@@ -440,16 +462,16 @@ __old_abi_syscall_ret:
! This code gets called on address error exception when copying
! syscall arguments from user stack to kernel stack. It is
! supposed to return -EINVAL through old_abi_syscall_ret, but it
- ! appears to have been broken for a long time in that the $r0
- ! return value will be saved into the kernel stack relative to $r15
- ! but the value of $r15 is not correct partway through the loop.
- ! So the user prog is returned its old $r0 value, not -EINVAL.
+ ! appears to have been broken for a long time in that the r0
+ ! return value will be saved into the kernel stack relative to r15
+ ! but the value of r15 is not correct partway through the loop.
+ ! So the user prog is returned its old r0 value, not -EINVAL.
! Greg Banks 28 Aug 2000.
.section .fixup,"ax"
fixup_syscall_argerr:
- ! First get $r15 back to
+ ! First get r15 back to
rts
- mov #-EINVAL, $r0
+ mov #-EINVAL, r0
.previous
.section __ex_table, "a"
@@ -473,18 +495,18 @@ __syscall_ret:
.align 2
reschedule:
- mova SYMBOL_NAME(ret_from_syscall), $r0
- mov.l 1f, $r1
- jmp @$r1
- lds $r0, $pr
+ mova SYMBOL_NAME(ret_from_syscall), r0
+ mov.l 1f, r1
+ jmp @r1
+ lds r0, pr
.align 2
1: .long SYMBOL_NAME(schedule)
ENTRY(ret_from_irq)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
!
STI()
@@ -492,10 +514,10 @@ ENTRY(ret_from_irq)
nop
ENTRY(ret_from_exception)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
!
STI()
@@ -508,38 +530,38 @@ __INV_IMASK:
.align 2
#ifdef COMPAT_OLD_SYSCALL_ABI
old_abi_syscall_ret:
- add $r8, $r15 ! pop off the arguments
+ add r8, r15 ! pop off the arguments
/* fall through */
#endif
syscall_ret:
- mov.l $r0, @(R0,$r15) ! save the return value
+ mov.l r0, @(OFF_R0,r15) ! save the return value
/* fall through */
ENTRY(ret_from_syscall)
- mov.l __irq_stat, $r0 ! softirq_active
- mov.l @$r0, $r1
- mov.l @(4,$r0), $r2 ! softirq_mask
- tst $r2, $r1
+ mov.l __irq_stat, r0 ! softirq_active
+ mov.l @r0, r1
+ mov.l @(4,r0), r2 ! softirq_mask
+ tst r2, r1
bt ret_with_reschedule
handle_softirq:
- mov.l __do_softirq, $r0
- jsr @$r0
+ mov.l __do_softirq, r0
+ jsr @r0
nop
ret_with_reschedule:
- stc $k_current, $r1
- mov.l @(need_resched,$r1), $r0
- tst #0xff, $r0
+ stc k_current, r1
+ mov.l @(need_resched,r1), r0
+ tst #0xff, r0
bf reschedule
- mov.l @(sigpending,$r1), $r0
- tst #0xff, $r0
+ mov.l @(sigpending,r1), r0
+ tst #0xff, r0
bt restore_all
signal_return:
- mov $r15, $r4
- mov #0, $r5
- mov.l __do_signal, $r1
- mova restore_all, $r0
- jmp @$r1
- lds $r0, $pr
+ mov r15, r4
+ mov #0, r5
+ mov.l __do_signal, r1
+ mova restore_all, r0
+ jmp @r1
+ lds r0, pr
.align 2
__do_signal:
.long SYMBOL_NAME(do_signal)
@@ -551,108 +573,108 @@ __do_softirq:
.align 2
restore_all:
#if defined(__SH4__)
- mov.l __fpu_prepare_fd, $r0
- jsr @$r0
- stc $sr, $r4
+ mov.l __fpu_prepare_fd, r0
+ jsr @r0
+ stc sr, r4
#endif
!
- mov.l @$r15+, $r0
- mov.l @$r15+, $r1
- mov.l @$r15+, $r2
- mov.l @$r15+, $r3
- mov.l @$r15+, $r4
- mov.l @$r15+, $r5
- mov.l @$r15+, $r6
- mov.l @$r15+, $r7
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
!
- stc $sr, $r8
- mov.l __blrb_flags, $r9 ! BL =1, RB=1
- or $r9, $r8
- ldc $r8, $sr ! here, change the register bank
+ stc sr, r8
+ mov.l __blrb_flags, r9 ! BL =1, RB=1
+ or r9, r8
+ ldc r8, sr ! here, change the register bank
!
- mov.l @$r15+, $r8
- mov.l @$r15+, $r9
- mov.l @$r15+, $r10
- mov.l @$r15+, $r11
- mov.l @$r15+, $r12
- mov.l @$r15+, $r13
- mov.l @$r15+, $r14
- mov.l @$r15+, $k4 ! original stack pointer
- ldc.l @$r15+, $spc
- lds.l @$r15+, $pr
- mov.l @$r15+, $k3 ! original SR
- ldc.l @$r15+, $gbr
- lds.l @$r15+, $mach
- lds.l @$r15+, $macl
- add #4, $r15 ! Skip syscall number
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, k4 ! original stack pointer
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ mov.l @r15+, k3 ! original SR
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+ add #4, r15 ! Skip syscall number
!
! Calculate new SR value
- mov $k3, $k2 ! original SR value
- mov.l 1f, $k1
- stc $sr, $k0
- and $k1, $k0 ! Get current FD-bit
- mov.l 2f, $k1
- and $k1, $k2 ! Mask orignal SR value
- or $k0, $k2 ! Inherit current FD-bit
+ mov k3, k2 ! original SR value
+ mov.l 1f, k1
+ stc sr, k0
+ and k1, k0 ! Get current FD-bit
+ mov.l 2f, k1
+	and	k1, k2		! Mask original SR value
+ or k0, k2 ! Inherit current FD-bit
!
- mov $k3, $k0 ! Calculate IMASK-bits
- shlr2 $k0
- and #0x3c, $k0
- cmp/eq #0x3c, $k0
+ mov k3, k0 ! Calculate IMASK-bits
+ shlr2 k0
+ and #0x3c, k0
+ cmp/eq #0x3c, k0
bt/s 7f
- shll2 $k0
- mov $g_imask, $k0
+ shll2 k0
+ mov g_imask, k0
!
-7: or $k0, $k2 ! Set the IMASK-bits
- ldc $k2, $ssr
+7: or k0, k2 ! Set the IMASK-bits
+ ldc k2, ssr
!
#if defined(__SH4__)
- shll $k2
- shll $k2
+ shll k2
+ shll k2
bf 9f ! user mode
/* Kernel to kernel transition */
- mov.l 1f, $k1
- tst $k1, $k3
+ mov.l 1f, k1
+ tst k1, k3
bf 9f ! it hadn't FPU
! Kernel to kernel and FPU was used
! There's the case we don't get FPU now
- stc $sr, $k2
- tst $k1, $k2
+ stc sr, k2
+ tst k1, k2
bt 8f
! We need to grab FPU here
- xor $k1, $k2
- ldc $k2, $sr ! Grab FPU
- mov.l __init_task_flags, $k1
- mov.l @$k1, $k2
- mov.l __PF_USEDFPU, $k0
- or $k0, $k2
- mov.l $k2, @$k1 ! Set init_task.flags |= PF_USEDFPU
+ xor k1, k2
+ ldc k2, sr ! Grab FPU
+ mov.l __init_task_flags, k1
+ mov.l @k1, k2
+ mov.l __PF_USEDFPU, k0
+ or k0, k2
+ mov.l k2, @k1 ! Set init_task.flags |= PF_USEDFPU
!
! Restoring FPU...
!
-8: mov.l 3f, $k1
- lds $k1, $fpscr
- fmov.s @$r15+, $fr0
- fmov.s @$r15+, $fr1
- fmov.s @$r15+, $fr2
- fmov.s @$r15+, $fr3
- fmov.s @$r15+, $fr4
- fmov.s @$r15+, $fr5
- fmov.s @$r15+, $fr6
- fmov.s @$r15+, $fr7
- fmov.s @$r15+, $fr8
- fmov.s @$r15+, $fr9
- fmov.s @$r15+, $fr10
- fmov.s @$r15+, $fr11
- fmov.s @$r15+, $fr12
- fmov.s @$r15+, $fr13
- fmov.s @$r15+, $fr14
- fmov.s @$r15+, $fr15
- lds.l @$r15+, $fpscr
- lds.l @$r15+, $fpul
+8: mov.l 3f, k1
+ lds k1, fpscr
+ fmov.s @r15+, fr0
+ fmov.s @r15+, fr1
+ fmov.s @r15+, fr2
+ fmov.s @r15+, fr3
+ fmov.s @r15+, fr4
+ fmov.s @r15+, fr5
+ fmov.s @r15+, fr6
+ fmov.s @r15+, fr7
+ fmov.s @r15+, fr8
+ fmov.s @r15+, fr9
+ fmov.s @r15+, fr10
+ fmov.s @r15+, fr11
+ fmov.s @r15+, fr12
+ fmov.s @r15+, fr13
+ fmov.s @r15+, fr14
+ fmov.s @r15+, fr15
+ lds.l @r15+, fpscr
+ lds.l @r15+, fpul
9:
#endif
- mov $k4, $r15
+ mov k4, r15
rte
nop
@@ -680,10 +702,10 @@ ENTRY(vbr_base)
!
.balign 256,0,256
general_exception:
- mov.l 1f, $k2
- mov.l 2f, $k3
+ mov.l 1f, k2
+ mov.l 2f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
.align 2
2: .long SYMBOL_NAME(ret_from_exception)
1: .long EXPEVT
@@ -691,17 +713,17 @@ general_exception:
!
.balign 1024,0,1024
tlb_miss:
- mov.l 1f, $k2
- mov.l 4f, $k3
+ mov.l 1f, k2
+ mov.l 4f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
!
.balign 512,0,512
interrupt:
- mov.l 2f, $k2
- mov.l 3f, $k3
+ mov.l 2f, k2
+ mov.l 3f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
.align 2
1: .long EXPEVT
@@ -715,102 +737,102 @@ handle_exception:
! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
- stc $ssr, $k0 ! from kernel space?
- shll $k0 ! Check MD bit (bit30) by shifting it into the T bit
- shll $k0
+ stc ssr, k0 ! from kernel space?
+ shll k0 ! Check MD bit (bit30) by shifting it into the T bit
+ shll k0
#if defined(__SH4__)
bf/s 8f ! it's from user to kernel transition
- mov $r15, $k0 ! save original stack to k0
+ mov r15, k0 ! save original stack to k0
/* It's a kernel to kernel transition. */
/* Is the FPU disabled? */
- mov.l 2f, $k1
- stc $ssr, $k0
- tst $k1, $k0
- mov.l 4f, $k1
+ mov.l 2f, k1
+ stc ssr, k0
+ tst k1, k0
+ mov.l 4f, k1
bf/s 9f ! FPU is not enabled, no need to save it
- mov $r15, $k0 ! save original stack to k0
+ mov r15, k0 ! save original stack to k0
! FPU is enabled, save it
! /* XXX: Need to save another bank of FPU if all FPU feature is used */
! /* Currently it's not the case for GCC (only udivsi3_i4, divsi3_i4) */
- sts.l $fpul, @-$r15
- sts.l $fpscr, @-$r15
- mov.l 6f, $k1
- lds $k1, $fpscr
- mov.l 3f, $k1
- fmov.s $fr15, @-$r15
- fmov.s $fr14, @-$r15
- fmov.s $fr13, @-$r15
- fmov.s $fr12, @-$r15
- fmov.s $fr11, @-$r15
- fmov.s $fr10, @-$r15
- fmov.s $fr9, @-$r15
- fmov.s $fr8, @-$r15
- fmov.s $fr7, @-$r15
- fmov.s $fr6, @-$r15
- fmov.s $fr5, @-$r15
- fmov.s $fr4, @-$r15
- fmov.s $fr3, @-$r15
- fmov.s $fr2, @-$r15
- fmov.s $fr1, @-$r15
+ sts.l fpul, @-r15
+ sts.l fpscr, @-r15
+ mov.l 6f, k1
+ lds k1, fpscr
+ mov.l 3f, k1
+ fmov.s fr15, @-r15
+ fmov.s fr14, @-r15
+ fmov.s fr13, @-r15
+ fmov.s fr12, @-r15
+ fmov.s fr11, @-r15
+ fmov.s fr10, @-r15
+ fmov.s fr9, @-r15
+ fmov.s fr8, @-r15
+ fmov.s fr7, @-r15
+ fmov.s fr6, @-r15
+ fmov.s fr5, @-r15
+ fmov.s fr4, @-r15
+ fmov.s fr3, @-r15
+ fmov.s fr2, @-r15
+ fmov.s fr1, @-r15
bra 9f
- fmov.s $fr0, @-$r15
+ fmov.s fr0, @-r15
#else
- mov.l 3f, $k1
+ mov.l 3f, k1
bt/s 9f ! it's a kernel to kernel transition, and skip the FPU save.
- mov $r15, $k0 ! save original stack to k0 anyway
+ mov r15, k0 ! save original stack to k0 anyway
#endif
8: /* User space to kernel */
- mov #0x20, $k1
- shll8 $k1 ! $k1 <= 8192 == THREAD_SIZE
- add $current, $k1
- mov $k1, $r15 ! change to kernel stack
+ mov #0x20, k1
+ shll8 k1 ! k1 <= 8192 == THREAD_SIZE
+ add current, k1
+ mov k1, r15 ! change to kernel stack
!
- mov.l 4f, $k1 ! let kernel release FPU
+ mov.l 4f, k1 ! let kernel release FPU
9: ! Save the user registers on the stack.
! At this point, k1 should have been set to the new SR value
- mov #-1, $k4
- mov.l $k4, @-$r15 ! syscall_nr (default: -1)
+ mov #-1, k4
+ mov.l k4, @-r15 ! syscall_nr (default: -1)
!
- sts.l $macl, @-$r15
- sts.l $mach, @-$r15
- stc.l $gbr, @-$r15
- stc.l $ssr, @-$r15
- sts.l $pr, @-$r15
- stc.l $spc, @-$r15
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ stc.l ssr, @-r15
+ sts.l pr, @-r15
+ stc.l spc, @-r15
!
- lds $k3, $pr ! Set the return address to pr
+ lds k3, pr ! Set the return address to pr
!
- mov.l $k0, @-$r15 ! save orignal stack
- mov.l $r14, @-$r15
- mov.l $r13, @-$r15
- mov.l $r12, @-$r15
- mov.l $r11, @-$r15
- mov.l $r10, @-$r15
- mov.l $r9, @-$r15
- mov.l $r8, @-$r15
+	mov.l	k0, @-r15	! save original stack
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
!
- stc $sr, $r8 ! Back to normal register bank, and
- or $k1, $r8 ! Block all interrupts, may release FPU
- mov.l 5f, $k1
- and $k1, $r8 ! ...
- ldc $r8, $sr ! ...changed here.
+ stc sr, r8 ! Back to normal register bank, and
+ or k1, r8 ! Block all interrupts, may release FPU
+ mov.l 5f, k1
+ and k1, r8 ! ...
+ ldc r8, sr ! ...changed here.
!
- mov.l $r7, @-$r15
- mov.l $r6, @-$r15
- mov.l $r5, @-$r15
- mov.l $r4, @-$r15
- mov.l $r3, @-$r15
- mov.l $r2, @-$r15
- mov.l $r1, @-$r15
- mov.l $r0, @-$r15
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
! Then, dispatch to the handler, according to the exception code.
- stc $k_ex_code, $r8
- shlr2 $r8
- shlr $r8
- mov.l 1f, $r9
- add $r8, $r9
- mov.l @$r9, $r9
- jmp @$r9
+ stc k_ex_code, r8
+ shlr2 r8
+ shlr r8
+ mov.l 1f, r9
+ add r8, r9
+ mov.l @r9, r9
+ jmp @r9
nop
.align 2
1: .long SYMBOL_NAME(exception_handling_table)
@@ -833,8 +855,8 @@ ENTRY(exception_handling_table)
.long initial_page_write
.long tlb_protection_violation_load
.long tlb_protection_violation_store
- .long error ! address_error_load (filled by trap_init)
- .long error ! address_error_store (filled by trap_init)
+ .long address_error_load
+ .long address_error_store
#if defined(__SH4__)
.long SYMBOL_NAME(do_fpu_error)
#else
diff --git a/arch/sh/kernel/fpu.c b/arch/sh/kernel/fpu.c
index cbeb60d31..9036b20c0 100644
--- a/arch/sh/kernel/fpu.c
+++ b/arch/sh/kernel/fpu.c
@@ -21,43 +21,43 @@
void
save_fpu(struct task_struct *tsk)
{
- asm volatile("sts.l $fpul, @-%0\n\t"
- "sts.l $fpscr, @-%0\n\t"
- "lds %1, $fpscr\n\t"
+ asm volatile("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "lds %1, fpscr\n\t"
"frchg\n\t"
- "fmov.s $fr15, @-%0\n\t"
- "fmov.s $fr14, @-%0\n\t"
- "fmov.s $fr13, @-%0\n\t"
- "fmov.s $fr12, @-%0\n\t"
- "fmov.s $fr11, @-%0\n\t"
- "fmov.s $fr10, @-%0\n\t"
- "fmov.s $fr9, @-%0\n\t"
- "fmov.s $fr8, @-%0\n\t"
- "fmov.s $fr7, @-%0\n\t"
- "fmov.s $fr6, @-%0\n\t"
- "fmov.s $fr5, @-%0\n\t"
- "fmov.s $fr4, @-%0\n\t"
- "fmov.s $fr3, @-%0\n\t"
- "fmov.s $fr2, @-%0\n\t"
- "fmov.s $fr1, @-%0\n\t"
- "fmov.s $fr0, @-%0\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
"frchg\n\t"
- "fmov.s $fr15, @-%0\n\t"
- "fmov.s $fr14, @-%0\n\t"
- "fmov.s $fr13, @-%0\n\t"
- "fmov.s $fr12, @-%0\n\t"
- "fmov.s $fr11, @-%0\n\t"
- "fmov.s $fr10, @-%0\n\t"
- "fmov.s $fr9, @-%0\n\t"
- "fmov.s $fr8, @-%0\n\t"
- "fmov.s $fr7, @-%0\n\t"
- "fmov.s $fr6, @-%0\n\t"
- "fmov.s $fr5, @-%0\n\t"
- "fmov.s $fr4, @-%0\n\t"
- "fmov.s $fr3, @-%0\n\t"
- "fmov.s $fr2, @-%0\n\t"
- "fmov.s $fr1, @-%0\n\t"
- "fmov.s $fr0, @-%0"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0"
: /* no output */
: "r" ((char *)(&tsk->thread.fpu.hard.status)),
"r" (FPSCR_INIT)
@@ -70,43 +70,43 @@ save_fpu(struct task_struct *tsk)
static void
restore_fpu(struct task_struct *tsk)
{
- asm volatile("lds %1, $fpscr\n\t"
- "fmov.s @%0+, $fr0\n\t"
- "fmov.s @%0+, $fr1\n\t"
- "fmov.s @%0+, $fr2\n\t"
- "fmov.s @%0+, $fr3\n\t"
- "fmov.s @%0+, $fr4\n\t"
- "fmov.s @%0+, $fr5\n\t"
- "fmov.s @%0+, $fr6\n\t"
- "fmov.s @%0+, $fr7\n\t"
- "fmov.s @%0+, $fr8\n\t"
- "fmov.s @%0+, $fr9\n\t"
- "fmov.s @%0+, $fr10\n\t"
- "fmov.s @%0+, $fr11\n\t"
- "fmov.s @%0+, $fr12\n\t"
- "fmov.s @%0+, $fr13\n\t"
- "fmov.s @%0+, $fr14\n\t"
- "fmov.s @%0+, $fr15\n\t"
+ asm volatile("lds %1, fpscr\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
"frchg\n\t"
- "fmov.s @%0+, $fr0\n\t"
- "fmov.s @%0+, $fr1\n\t"
- "fmov.s @%0+, $fr2\n\t"
- "fmov.s @%0+, $fr3\n\t"
- "fmov.s @%0+, $fr4\n\t"
- "fmov.s @%0+, $fr5\n\t"
- "fmov.s @%0+, $fr6\n\t"
- "fmov.s @%0+, $fr7\n\t"
- "fmov.s @%0+, $fr8\n\t"
- "fmov.s @%0+, $fr9\n\t"
- "fmov.s @%0+, $fr10\n\t"
- "fmov.s @%0+, $fr11\n\t"
- "fmov.s @%0+, $fr12\n\t"
- "fmov.s @%0+, $fr13\n\t"
- "fmov.s @%0+, $fr14\n\t"
- "fmov.s @%0+, $fr15\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
"frchg\n\t"
- "lds.l @%0+, $fpscr\n\t"
- "lds.l @%0+, $fpul\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
: /* no output */
: "r" (&tsk->thread.fpu), "r" (FPSCR_INIT)
: "memory");
@@ -120,41 +120,41 @@ restore_fpu(struct task_struct *tsk)
void fpu_init(void)
{
- asm volatile("lds %0, $fpul\n\t"
- "lds %1, $fpscr\n\t"
- "fsts $fpul, $fr0\n\t"
- "fsts $fpul, $fr1\n\t"
- "fsts $fpul, $fr2\n\t"
- "fsts $fpul, $fr3\n\t"
- "fsts $fpul, $fr4\n\t"
- "fsts $fpul, $fr5\n\t"
- "fsts $fpul, $fr6\n\t"
- "fsts $fpul, $fr7\n\t"
- "fsts $fpul, $fr8\n\t"
- "fsts $fpul, $fr9\n\t"
- "fsts $fpul, $fr10\n\t"
- "fsts $fpul, $fr11\n\t"
- "fsts $fpul, $fr12\n\t"
- "fsts $fpul, $fr13\n\t"
- "fsts $fpul, $fr14\n\t"
- "fsts $fpul, $fr15\n\t"
+ asm volatile("lds %0, fpul\n\t"
+ "lds %1, fpscr\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
"frchg\n\t"
- "fsts $fpul, $fr0\n\t"
- "fsts $fpul, $fr1\n\t"
- "fsts $fpul, $fr2\n\t"
- "fsts $fpul, $fr3\n\t"
- "fsts $fpul, $fr4\n\t"
- "fsts $fpul, $fr5\n\t"
- "fsts $fpul, $fr6\n\t"
- "fsts $fpul, $fr7\n\t"
- "fsts $fpul, $fr8\n\t"
- "fsts $fpul, $fr9\n\t"
- "fsts $fpul, $fr10\n\t"
- "fsts $fpul, $fr11\n\t"
- "fsts $fpul, $fr12\n\t"
- "fsts $fpul, $fr13\n\t"
- "fsts $fpul, $fr14\n\t"
- "fsts $fpul, $fr15\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
"frchg"
: /* no output */
: "r" (0), "r" (FPSCR_INIT));
@@ -192,9 +192,9 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
*
* There's race condition in __cli:
*
- * (1) $SR --> register
+ * (1) SR --> register
* (2) Set IMASK of register
- * (3) $SR <-- register
+ * (3) SR <-- register
*
* Between (1) and (2), or (2) and (3) getting
* interrupt, and interrupt handler (or
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f1ac6fd17..73c13dc7d 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -37,40 +37,40 @@ ENTRY(empty_zero_page)
*/
ENTRY(_stext)
! Initialize Status Register
- mov.l 1f, $r0 ! MD=1, RB=0, BL=0, IMASK=0xF
- ldc $r0, $sr
+ mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
+ ldc r0, sr
! Initialize global interrupt mask
- mov #0, $r0
- ldc $r0, $r6_bank
+ mov #0, r0
+ ldc r0, r6_bank
!
- mov.l 2f, $r0
- mov $r0, $r15 ! Set initial r15 (stack pointer)
- mov #0x20, $r1 !
- shll8 $r1 ! $r1 = 8192
- sub $r1, $r0 !
- ldc $r0, $r7_bank ! ... and init_task
+ mov.l 2f, r0
+ mov r0, r15 ! Set initial r15 (stack pointer)
+ mov #0x20, r1 !
+ shll8 r1 ! r1 = 8192
+ sub r1, r0 !
+ ldc r0, r7_bank ! ... and init_task
!
#if defined(__SH4__)
! Initialize fpu
- mov.l 7f, $r0
- jsr @$r0
+ mov.l 7f, r0
+ jsr @r0
nop
#endif
! Enable cache
- mov.l 6f, $r0
- jsr @$r0
+ mov.l 6f, r0
+ jsr @r0
nop
! Clear BSS area
- mov.l 3f, $r1
- add #4, $r1
- mov.l 4f, $r2
- mov #0, $r0
-9: cmp/hs $r2, $r1
+ mov.l 3f, r1
+ add #4, r1
+ mov.l 4f, r2
+ mov #0, r0
+9: cmp/hs r2, r1
bf/s 9b ! while (r1 < r2)
- mov.l $r0,@-$r2
+ mov.l r0,@-r2
! Start kernel
- mov.l 5f, $r0
- jmp @$r0
+ mov.l 5f, r0
+ jmp @r0
nop
.balign 4
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6451c4c9e..200148320 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -235,7 +235,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
unsigned int status;
/* Get IRQ number */
- asm volatile("stc $r2_bank, %0\n\t"
+ asm volatile("stc r2_bank, %0\n\t"
"shlr2 %0\n\t"
"shlr2 %0\n\t"
"shlr %0\n\t"
diff --git a/arch/sh/kernel/irq_imask.c b/arch/sh/kernel/irq_imask.c
index 27d91b372..d0b3aea9f 100644
--- a/arch/sh/kernel/irq_imask.c
+++ b/arch/sh/kernel/irq_imask.c
@@ -59,16 +59,16 @@ void static inline set_interrupt_registers(int ip)
{
unsigned long __dummy;
- asm volatile("ldc %2, $r6_bank\n\t"
- "stc $sr, %0\n\t"
+ asm volatile("ldc %2, r6_bank\n\t"
+ "stc sr, %0\n\t"
"and #0xf0, %0\n\t"
"shlr2 %0\n\t"
"cmp/eq #0x3c, %0\n\t"
"bt/s 1f ! CLI-ed\n\t"
- " stc $sr, %0\n\t"
+ " stc sr, %0\n\t"
"and %1, %0\n\t"
"or %2, %0\n\t"
- "ldc %0, $sr\n"
+ "ldc %0, sr\n"
"1:"
: "=&z" (__dummy)
: "r" (~0xf0), "r" (ip << 4)
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index c7511093b..1ce22f0fd 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -14,6 +14,8 @@
#define __KERNEL_SYSCALLS__
#include <stdarg.h>
+#include <linux/config.h>
+
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -31,6 +33,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -39,8 +42,9 @@
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
-
-#include <linux/irq.h>
+#ifdef CONFIG_SH_STANDARD_BIOS
+#include <asm/sh_bios.h>
+#endif
static int hlt_counter=0;
@@ -79,11 +83,17 @@ void cpu_idle(void *unused)
}
void machine_restart(char * __unused)
-{ /* Need to set MMU_TTB?? */
+{
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_shutdown(1);
+#endif
}
void machine_halt(void)
{
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_shutdown(0);
+#endif
}
void machine_power_off(void)
@@ -93,7 +103,7 @@ void machine_power_off(void)
void show_regs(struct pt_regs * regs)
{
printk("\n");
- printk("PC : %08lx SP : %08lx SR : %08lx TEA : %08lx\n",
+ printk("PC : %08lx SP : %08lx SR : %08lx TEA : %08x\n",
regs->pc, regs->regs[15], regs->sr, ctrl_inl(MMU_TEA));
printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
regs->regs[0],regs->regs[1],
@@ -144,12 +154,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
register unsigned long __sc9 __asm__ ("r9") = (long) fn;
__asm__("trapa #0x12\n\t" /* Linux/SH system call */
- "tst #0xff, $r0\n\t" /* child or parent? */
+ "tst #0xff, r0\n\t" /* child or parent? */
"bf 1f\n\t" /* parent - jump */
- "jsr @$r9\n\t" /* call fn */
- " mov $r8, $r4\n\t" /* push argument */
- "mov $r0, $r4\n\t" /* return value to arg of exit */
- "mov %1, $r3\n\t" /* exit */
+ "jsr @r9\n\t" /* call fn */
+ " mov r8, r4\n\t" /* push argument */
+ "mov r0, r4\n\t" /* return value to arg of exit */
+ "mov %1, r3\n\t" /* exit */
"trapa #0x11\n"
"1:"
: "=z" (__sc0)
@@ -285,7 +295,7 @@ void __switch_to(struct task_struct *prev, struct task_struct *next)
* Restore the kernel mode register
* k7 (r7_bank1)
*/
- asm volatile("ldc %0, $r7_bank"
+ asm volatile("ldc %0, r7_bank"
: /* no output */
:"r" (next));
}
@@ -376,7 +386,7 @@ unsigned long get_wchan(struct task_struct *p)
asmlinkage void print_syscall(int x)
{
unsigned long flags, sr;
- asm("stc $sr, %0": "=r" (sr));
+ asm("stc sr, %0": "=r" (sr));
save_and_cli(flags);
printk("%c: %c %c, %c: SYSCALL\n", (x&63)+32,
(current->flags&PF_USEDFPU)?'C':' ',
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8dad245df..b6fb1e9a8 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -503,8 +503,8 @@ int get_cpuinfo(char *buffer)
"cache size\t: 8K-byte/16K-byte\n");
#endif
p += sprintf(p, "bogomips\t: %lu.%02lu\n\n",
- (loops_per_jiffy+2500)/(500000/HZ),
- ((loops_per_jiffy+2500)/(5000/HZ)) % 100);
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
p += sprintf(p, "Machine: %s\n", sh_mv.mv_name);
#define PRINT_CLOCK(name, value) \
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 81a56b960..f72f71b98 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,4 +1,4 @@
-/* $Id: sh_bios.c,v 1.3 2000/09/30 03:43:30 gniibe Exp $
+/* $Id: sh_bios.c,v 1.5 2001/01/08 08:42:32 gniibe Exp $
*
* linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
@@ -7,13 +7,12 @@
*
*/
-#include <linux/config.h>
#include <asm/sh_bios.h>
-#ifdef CONFIG_SH_STANDARD_BIOS
-
#define BIOS_CALL_CONSOLE_WRITE 0
-#define BIOS_CALL_READ_BLOCK 1 /* not implemented */
+#define BIOS_CALL_READ_BLOCK 1
+#define BIOS_CALL_ETH_NODE_ADDR 10
+#define BIOS_CALL_SHUTDOWN 11
#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
#define BIOS_CALL_GDB_GET_MODE_PTR 0xfe
#define BIOS_CALL_GDB_DETACH 0xff
@@ -66,5 +65,12 @@ void sh_bios_gdb_detach(void)
sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
}
-#endif
+void sh_bios_get_node_addr (unsigned char *node_addr)
+{
+ sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
+}
+void sh_bios_shutdown(unsigned int how)
+{
+ sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
+}
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 2b0298699..2bdcd75ea 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -64,10 +64,11 @@ EXPORT_SYMBOL(get_vm_area);
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL_NOVERS(name)
/* These symbols are generated by the compiler itself */
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__sdivsi3);
+
#ifdef __SH4__
-DECLARE_EXPORT(__udivsi3_i4);
-DECLARE_EXPORT(__sdivsi3_i4);
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
DECLARE_EXPORT(__ashrdi3);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index fe2f1b319..9095c5c79 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -29,8 +29,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
@@ -433,7 +431,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
current->comm, current->pid, frame, regs->pc, regs->pr);
#endif
- flush_icache_range(regs->pr, regs->pr+4);
+ flush_cache_sigtramp(regs->pr);
return;
give_sigsegv:
@@ -507,7 +505,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc, regs->pr);
#endif
- flush_icache_range(regs->pr, regs->pr+4);
+ flush_cache_sigtramp(regs->pr);
return;
give_sigsegv:
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 4aeaf8efa..f929fba2a 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -2,13 +2,16 @@
*
* linux/arch/sh/traps.c
*
- * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf
+ * Copyright (C) 2000 David Howells
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*/
+#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -35,7 +38,7 @@ asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
{ \
unsigned long error_code; \
\
- asm volatile("stc $r2_bank, %0": "=r" (error_code)); \
+ asm volatile("stc r2_bank, %0": "=r" (error_code)); \
sti(); \
tsk->thread.error_code = error_code; \
tsk->thread.trap_no = trapnr; \
@@ -69,7 +72,16 @@ static inline void die_if_kernel(const char * str, struct pt_regs * regs, long e
die(str, regs, err);
}
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+static int handle_unaligned_notify_count = 10;
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
+ */
+static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs))
{
@@ -77,14 +89,407 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
fixup = search_exception_table(regs->pc);
if (fixup) {
regs->pc = fixup;
- return;
+ return 0;
}
die(str, regs, err);
}
+ return -EFAULT;
+}
+
+/*
+ * handle an instruction that does an unaligned memory access by emulating the
+ * desired behaviour
+ * - note that PC _may not_ point to the faulting instruction
+ * (if that instruction is in a branch delay slot)
+ * - return 0 if emulation okay, -EFAULT on existential error
+ */
+static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+{
+ int ret, index, count;
+ unsigned long *rm, *rn;
+ unsigned char *src, *dst;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rn = &regs->regs[index];
+
+ index = (instruction>>4)&15; /* 0x00F0 */
+ rm = &regs->regs[index];
+
+ count = 1<<(instruction&3);
+
+ ret = -EFAULT;
+ switch (instruction>>12) {
+ case 0: /* mov.[bwl] to/from memory via r0+rn */
+ if (instruction & 8) {
+ /* from memory */
+ src = (unsigned char*) *rm;
+ src += regs->regs[0];
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (__copy_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ } else {
+ /* to memory */
+ src = (unsigned char*) rm;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ dst = (unsigned char*) *rn;
+ dst += regs->regs[0];
+
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ }
+ ret = 0;
+ break;
+
+ case 1: /* mov.l Rm,@(disp,Rn) */
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+ dst += (instruction&0x000F)<<2;
+
+ if (copy_to_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+ if (instruction & 4)
+ *rn -= count;
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 5: /* mov.l @(disp,Rm),Rn */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<2;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+ if (copy_from_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 6: /* mov.[bwl] from memory, possibly with post-increment */
+ src = (unsigned char*) *rm;
+ if (instruction & 4)
+ *rm += count;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+
+ case 8:
+ switch ((instruction&0xFF00)>>8) {
+ case 0x81: /* mov.w R0,@(disp,Rn) */
+ src = (unsigned char*) &regs->regs[0];
+#if !defined(__LITTLE_ENDIAN__)
+ src += 2;
+#endif
+ dst = (unsigned char*) *rm; /* called Rn in the spec */
+ dst += (instruction&0x000F)<<1;
+
+ if (copy_to_user(dst, src, 2))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 0x85: /* mov.w @(disp,Rm),R0 */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<1;
+ dst = (unsigned char*) &regs->regs[0];
+ *(unsigned long*)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (copy_from_user(dst, src, 2))
+ goto fetch_fault;
+
+#ifdef __LITTLE_ENDIAN__
+ if (dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ if (dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+ }
+ break;
+ }
+ return ret;
+
+ fetch_fault:
+ /* Argh. Address not only misaligned but also non-existent.
+ * Raise an EFAULT and see if it's trapped
+ */
+ return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+}
+
+/*
+ * emulate the instruction in the delay slot
+ * - fetches the instruction from PC+2
+ */
+static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+{
+ u16 instruction;
+
+ if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+ /* the instruction-fetch faulted */
+ if (user_mode(regs))
+ return -EFAULT;
+
+ /* kernel */
+ die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+ }
+
+ return handle_unaligned_ins(instruction,regs);
+}
+
+/*
+ * handle an instruction that does an unaligned memory access
+ * - have to be careful of branch delay-slot instructions that fault
+ * - if the branch would be taken PC points to the branch
+ * - if the branch would not be taken, PC points to delay-slot
+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
+ */
+static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+{
+ u_int rm;
+ int ret, index;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rm = regs->regs[index];
+
+ /* shout about the first ten userspace fixups */
+ if (user_mode(regs) && handle_unaligned_notify_count>0) {
+ handle_unaligned_notify_count--;
+
+ printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm,current->pid,(u16*)regs->pc,instruction);
+ }
+
+ ret = -EFAULT;
+ switch (instruction&0xF000) {
+ case 0x0000:
+ if (instruction==0x000B) {
+ /* rts */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = regs->pr;
+ }
+ else if ((instruction&0x00FF)==0x0023) {
+ /* braf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += rm + 4;
+ }
+ else if ((instruction&0x00FF)==0x0003) {
+ /* bsrf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += rm + 4;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ goto simple;
+
+ case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+ goto simple;
+
+ case 0x4000:
+ if ((instruction&0x00FF)==0x002B) {
+ /* jmp @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = rm;
+ }
+ else if ((instruction&0x00FF)==0x000B) {
+ /* jsr @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc = rm;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
+
+ case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+ goto simple;
+
+ case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+ switch (instruction&0x0F00) {
+ case 0x0100: /* mov.w R0,@(disp,Rm) */
+ goto simple;
+ case 0x0500: /* mov.w @(disp,Rm),R0 */
+ goto simple;
+ case 0x0B00: /* bf lab - no delayslot*/
+ break;
+ case 0x0F00: /* bf/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x00FF)*2 + 4;
+ break;
+ case 0x0900: /* bt lab - no delayslot */
+ break;
+ case 0x0D00: /* bt/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x00FF)*2 + 4;
+ break;
+ }
+ break;
+
+ case 0xA000: /* bra label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x0FFF)*2 + 4;
+ break;
+
+ case 0xB000: /* bsr label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += (instruction&0x0FFF)*2 + 4;
+ }
+ break;
+ }
+ return ret;
+
+ /* handle non-delay-slot instruction */
+ simple:
+ ret = handle_unaligned_ins(instruction,regs);
+ if (ret==0)
+ regs->pc += 2;
+ return ret;
+}
+
+/*
+ * Handle various address error exceptions
+ */
+asmlinkage void do_address_error(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address)
+{
+ unsigned long error_code;
+ mm_segment_t oldfs;
+ u16 instruction;
+ int tmp;
+
+ asm volatile("stc r2_bank,%0": "=r" (error_code));
+
+ oldfs = get_fs();
+
+ if (user_mode(regs)) {
+ sti();
+ current->thread.error_code = error_code;
+ current->thread.trap_no = (writeaccess) ? 8 : 7;
+
+ /* bad PC is not something we can fix */
+ if (regs->pc & 1)
+ goto uspace_segv;
+
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+ This should never happen non-SMP
+ */
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+
+ tmp = handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+
+ if (tmp==0)
+ return; /* sorted */
+
+ uspace_segv:
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
+ force_sig(SIGSEGV, current);
+ } else {
+ if (regs->pc & 1)
+ die("unaligned program counter", regs, error_code);
+
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+ This should never happen non-SMP
+ */
+ set_fs(oldfs);
+ die("insn faulting in do_address_error", regs, 0);
+ }
+
+ handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+ }
}
-DO_ERROR( 7, SIGSEGV, "address error (load)", address_error_load, current)
-DO_ERROR( 8, SIGSEGV, "address error (store)", address_error_store, current)
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
@@ -93,25 +498,42 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
struct pt_regs regs)
{
long ex;
- asm volatile("stc $r2_bank, %0" : "=r" (ex));
+ asm volatile("stc r2_bank, %0" : "=r" (ex));
die_if_kernel("exception", &regs, ex);
}
+#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
+void *gdb_vbr_vector;
+#endif
+
void __init trap_init(void)
{
extern void *vbr_base;
extern void *exception_handling_table[14];
- exception_handling_table[7] = (void *)do_address_error_load;
- exception_handling_table[8] = (void *)do_address_error_store;
exception_handling_table[12] = (void *)do_reserved_inst;
exception_handling_table[13] = (void *)do_illegal_slot_inst;
+#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
+ /*
+ * Read the old value of the VBR register to initialise
+ * the vector through which debug and BIOS traps are
+ * delegated by the Linux trap handler.
+ */
+ {
+ register unsigned long vbr;
+ asm volatile("stc vbr, %0" : "=r" (vbr));
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk("Setting GDB trap vector to 0x%08lx\n",
+ (unsigned long)gdb_vbr_vector);
+ }
+#endif
+
/* NOTE: The VBR value should be at P1
(or P2, virtural "fixed" address space).
It's definitely should not in physical address. */
- asm volatile("ldc %0, $vbr"
+ asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
@@ -123,8 +545,8 @@ void dump_stack(void)
unsigned long *end;
unsigned long *p;
- asm("mov $r15, %0" : "=r" (start));
- asm("stc $r7_bank, %0" : "=r" (end));
+ asm("mov r15, %0" : "=r" (start));
+ asm("stc r7_bank, %0" : "=r" (end));
end += 8192/4;
printk("%08lx:%08lx\n", (unsigned long)start, (unsigned long)end);
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index c7e96a394..9d8e0f476 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -49,99 +49,99 @@ ENTRY(csum_partial)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
- mov $r5, $r1
- mov $r4, $r0
- tst #2, $r0 ! Check alignment.
+ mov r5, r1
+ mov r4, r0
+ tst #2, r0 ! Check alignment.
bt 2f ! Jump if alignment is ok.
!
- add #-2, $r5 ! Alignment uses up two bytes.
- cmp/pz $r5 !
+ add #-2, r5 ! Alignment uses up two bytes.
+ cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
clrt
bra 6f
- add #2, $r5 ! $r5 was < 2. Deal with it.
+ add #2, r5 ! r5 was < 2. Deal with it.
1:
- mov.w @$r4+, $r0
- extu.w $r0, $r0
- addc $r0, $r6
+ mov.w @r4+, r0
+ extu.w r0, r0
+ addc r0, r6
bf 2f
- add #1, $r6
+ add #1, r6
2:
- mov #-5, $r0
- shld $r0, $r5
- tst $r5, $r5
+ mov #-5, r0
+ shld r0, r5
+ tst r5, r5
bt/s 4f ! if it's =0, go to 4f
clrt
.align 2
3:
- mov.l @$r4+, $r0
- mov.l @$r4+, $r2
- mov.l @$r4+, $r3
- addc $r0, $r6
- mov.l @$r4+, $r0
- addc $r2, $r6
- mov.l @$r4+, $r2
- addc $r3, $r6
- mov.l @$r4+, $r3
- addc $r0, $r6
- mov.l @$r4+, $r0
- addc $r2, $r6
- mov.l @$r4+, $r2
- addc $r3, $r6
- addc $r0, $r6
- addc $r2, $r6
- movt $r0
- dt $r5
+ mov.l @r4+, r0
+ mov.l @r4+, r2
+ mov.l @r4+, r3
+ addc r0, r6
+ mov.l @r4+, r0
+ addc r2, r6
+ mov.l @r4+, r2
+ addc r3, r6
+ mov.l @r4+, r3
+ addc r0, r6
+ mov.l @r4+, r0
+ addc r2, r6
+ mov.l @r4+, r2
+ addc r3, r6
+ addc r0, r6
+ addc r2, r6
+ movt r0
+ dt r5
bf/s 3b
- cmp/eq #1, $r0
- ! here, we know $r5==0
- addc $r5, $r6 ! add carry to $r6
+ cmp/eq #1, r0
+ ! here, we know r5==0
+ addc r5, r6 ! add carry to r6
4:
- mov $r1, $r0
- and #0x1c, $r0
- tst $r0, $r0
+ mov r1, r0
+ and #0x1c, r0
+ tst r0, r0
bt/s 6f
- mov $r0, $r5
- shlr2 $r5
- mov #0, $r2
+ mov r0, r5
+ shlr2 r5
+ mov #0, r2
5:
- addc $r2, $r6
- mov.l @$r4+, $r2
- movt $r0
- dt $r5
+ addc r2, r6
+ mov.l @r4+, r2
+ movt r0
+ dt r5
bf/s 5b
- cmp/eq #1, $r0
- addc $r2, $r6
- addc $r5, $r6 ! $r5==0 here, so it means add carry-bit
+ cmp/eq #1, r0
+ addc r2, r6
+ addc r5, r6 ! r5==0 here, so it means add carry-bit
6:
- mov $r1, $r5
- mov #3, $r0
- and $r0, $r5
- tst $r5, $r5
+ mov r1, r5
+ mov #3, r0
+ and r0, r5
+ tst r5, r5
bt 9f ! if it's =0 go to 9f
- mov #2, $r1
- cmp/hs $r1, $r5
+ mov #2, r1
+ cmp/hs r1, r5
bf 7f
- mov.w @r4+, $r0
- extu.w $r0, $r0
- cmp/eq $r1, $r5
+ mov.w @r4+, r0
+ extu.w r0, r0
+ cmp/eq r1, r5
bt/s 8f
clrt
- shll16 $r0
- addc $r0, $r6
+ shll16 r0
+ addc r0, r6
7:
- mov.b @$r4+, $r0
- extu.b $r0, $r0
+ mov.b @r4+, r0
+ extu.b r0, r0
#ifndef __LITTLE_ENDIAN__
- shll8 $r0
+ shll8 r0
#endif
8:
- addc $r0, $r6
- mov #0, $r0
- addc $r0, $r6
+ addc r0, r6
+ mov #0, r0
+ addc r0, r6
9:
rts
- mov $r6, $r0
+ mov r6, r0
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
@@ -159,14 +159,14 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
* them all but there's no guarantee.
*/
-#define SRC(x,y) \
- 9999: x,y; \
+#define SRC(...) \
+ 9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(x,y) \
- 9999: x,y; \
+#define DST(...) \
+ 9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
@@ -202,7 +202,7 @@ ENTRY(csum_partial_copy_generic)
bt/s 1f
clrt
bra 4f
- add #2,r6 ! $r6 was < 2. Deal with it.
+ add #2,r6 ! r6 was < 2. Deal with it.
3: ! Handle different src and dest alinments.
! This is not common, so simple byte by byte copy will do.
@@ -211,7 +211,8 @@ ENTRY(csum_partial_copy_generic)
tst r6, r6
bt 4f
clrt
-SRC(5: mov.b @r4+,r0 )
+5:
+SRC( mov.b @r4+,r0 )
DST( mov.b r0,@r5 )
add #1, r5
SRC( mov.b @r4+,r1 )
@@ -244,7 +245,8 @@ DST( mov.b r1,@r5 )
! src and dest equally aligned, but to a two byte boundary.
! Handle first two bytes as a special case
.align 5
-SRC(1: mov.w @r4+,r0 )
+1:
+SRC( mov.w @r4+,r0 )
DST( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
@@ -258,7 +260,8 @@ DST( mov.w r0,@r5 )
tst r6,r6
bt/s 2f
clrt
-SRC(1: mov.l @r4+,r0 )
+1:
+SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
@@ -276,7 +279,7 @@ DST( mov.l r0,@r5 )
DST( mov.l r1,@r5 )
add #4,r5
-SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
@@ -307,7 +310,8 @@ DST( mov.l r1,@r5 )
bf/s 4f
clrt
shlr2 r6
-SRC(3: mov.l @r4+,r0 )
+3:
+SRC( mov.l @r4+,r0 )
addc r0,r7
DST( mov.l r0,@r5 )
add #4,r5
@@ -334,7 +338,8 @@ DST( mov.w r0,@r5 )
clrt
shll16 r0
addc r0,r7
-SRC(5: mov.b @r4+,r0 )
+5:
+SRC( mov.b @r4+,r0 )
DST( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index 790fd1808..e1fcc970f 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -21,7 +21,11 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
- xloops *= current_cpu_data.loops_per_jiffy;
+ __asm__("dmulu.l %0, %2\n\t"
+ "sts mach, %0"
+ : "=r" (xloops)
+ : "0" (xloops), "r" (current_cpu_data.loops_per_jiffy)
+ : "macl", "mach");
__delay(xloops * HZ);
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ab63535e4..2ea37e723 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -244,6 +244,22 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
/*
+ * Write back the D-cache and purge the I-cache for signal trampoline.
+ */
+void flush_cache_sigtramp(unsigned long addr)
+{
+ unsigned long v, index;
+
+ v = addr & ~(L1_CACHE_BYTES-1);
+ asm volatile("ocbwb %0"
+ : /* no output */
+ : "m" (__m(v)));
+
+ index = CACHE_IC_ADDRESS_ARRAY| (v&CACHE_IC_ENTRY_MASK);
+ ctrl_outl(0, index); /* Clear out Valid-bit */
+}
+
+/*
* Invalidate the I-cache of the page (don't need to write back D-cache).
*
* Called from kernel/ptrace.c, mm/memory.c after flush_page_to_ram is called.
diff --git a/arch/sparc/config.in b/arch/sparc/config.in
index dbe1d1fb2..cb27cf77b 100644
--- a/arch/sparc/config.in
+++ b/arch/sparc/config.in
@@ -1,4 +1,4 @@
-# $Id: config.in,v 1.106 2000/11/17 04:27:52 davem Exp $
+# $Id: config.in,v 1.107 2001/01/06 00:46:44 davem Exp $
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/config-language.txt.
#
@@ -29,6 +29,9 @@ define_bool CONFIG_VT_CONSOLE y
bool 'Symmetric multi-processing support (does not work on sun4/sun4c)' CONFIG_SMP
+# Identify this as a Sparc32 build
+define_bool CONFIG_SPARC32 y
+
# Global things across all Sun machines.
define_bool CONFIG_ISA n
define_bool CONFIG_EISA n
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
index 19c9feba0..20c0782e7 100644
--- a/arch/sparc/defconfig
+++ b/arch/sparc/defconfig
@@ -22,7 +22,10 @@ CONFIG_KMOD=y
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
# CONFIG_SMP is not set
+CONFIG_SPARC32=y
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
# CONFIG_PCMCIA is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
@@ -64,6 +67,7 @@ CONFIG_PROM_CONSOLE=y
#
CONFIG_FB=y
CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FB_CYBER2000 is not set
CONFIG_FB_SBUS=y
CONFIG_FB_CGSIX=y
CONFIG_FB_BWTWO=y
@@ -111,10 +115,18 @@ CONFIG_SUN_AURORA=m
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID5 is not set
+# CONFIG_BLK_DEV_LVM is not set
+# CONFIG_LVM_PROC_FS is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
@@ -132,10 +144,8 @@ CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
# CONFIG_IP_PNP is not set
-# CONFIG_IP_ROUTER is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_ALIAS is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
CONFIG_IPV6=m
@@ -148,7 +158,6 @@ CONFIG_IPV6=m
#
CONFIG_IPX=m
# CONFIG_IPX_INTERN is not set
-# CONFIG_SPX is not set
CONFIG_ATALK=m
CONFIG_DECNET=m
CONFIG_DECNET_SIOCGIFCONF=y
@@ -157,6 +166,7 @@ CONFIG_DECNET_SIOCGIFCONF=y
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_NET_FASTROUTE is not set
@@ -228,6 +238,7 @@ CONFIG_SCSI_FCAL=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
+# CONFIG_TUN is not set
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
@@ -302,6 +313,7 @@ CONFIG_NFSD=m
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
# CONFIG_NCP_FS is not set
# CONFIG_NCPFS_PACKET_SIGNING is not set
# CONFIG_NCPFS_IOCTL_LOCKING is not set
@@ -309,8 +321,6 @@ CONFIG_SMB_FS=m
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -320,6 +330,7 @@ CONFIG_SMB_FS=m
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
CONFIG_SUN_PARTITION=y
+CONFIG_SMB_NLS=y
CONFIG_NLS=y
#
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 70bbda891..47fcf6226 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,4 +1,4 @@
-/* $Id: entry.S,v 1.168 2001/01/01 01:46:15 davem Exp $
+/* $Id: entry.S,v 1.169 2001/01/25 21:47:20 davem Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -999,6 +999,7 @@ C_LABEL(invalid_segment_patch1):
ld [%l4 + 0x18], %l3
inc %l3 ! sun4c_kernel_ring.num_entries++
+ st %l3, [%l4 + 0x18]
b 4f
ld [%l6 + 0x08], %l5
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index c9ba85b27..9bc82a518 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -1,4 +1,4 @@
-/* $Id: pcic.c,v 1.20 2000/12/05 00:56:36 anton Exp $
+/* $Id: pcic.c,v 1.21 2001/01/18 00:23:00 davem Exp $
* pcic.c: Sparc/PCI controller support
*
* Copyright (C) 1998 V. Roganov and G. Raiko
@@ -898,93 +898,6 @@ void pcic_nmi(unsigned int pend, struct pt_regs *regs)
regs->npc += 4;
}
-/*
- * XXX Gleb wrote me that he needs this for X server (only).
- * Since we successfuly use XF86_FBDev, we do not need these anymore.
- *
- * Following code added to handle extra PCI-related system calls
- */
-asmlinkage int sys_pciconfig_read(unsigned long bus,
- unsigned long dfn,
- unsigned long off,
- unsigned long len,
- unsigned char *buf)
-{
- unsigned char ubyte;
- unsigned short ushort;
- unsigned int uint;
- int err = 0;
-
- if(!suser())
- return -EPERM;
-
- switch(len) {
- case 1:
- pcibios_read_config_byte(bus, dfn, off, &ubyte);
- put_user(ubyte, (unsigned char *)buf);
- break;
- case 2:
- pcibios_read_config_word(bus, dfn, off, &ushort);
- put_user(ushort, (unsigned short *)buf);
- break;
- case 4:
- pcibios_read_config_dword(bus, dfn, off, &uint);
- put_user(uint, (unsigned int *)buf);
- break;
-
- default:
- err = -EINVAL;
- break;
- };
-
- return err;
-}
-
-asmlinkage int sys_pciconfig_write(unsigned long bus,
- unsigned long dfn,
- unsigned long off,
- unsigned long len,
- unsigned char *buf)
-{
- unsigned char ubyte;
- unsigned short ushort;
- unsigned int uint;
- int err = 0;
-
- if(!suser())
- return -EPERM;
-
- switch(len) {
- case 1:
- err = get_user(ubyte, (unsigned char *)buf);
- if(err)
- break;
- pcibios_write_config_byte(bus, dfn, off, ubyte);
- break;
-
- case 2:
- err = get_user(ushort, (unsigned short *)buf);
- if(err)
- break;
- pcibios_write_config_byte(bus, dfn, off, ushort);
- break;
-
- case 4:
- err = get_user(uint, (unsigned int *)buf);
- if(err)
- break;
- pcibios_write_config_byte(bus, dfn, off, uint);
- break;
-
- default:
- err = -EINVAL;
- break;
-
- };
-
- return err;
-}
-
static inline unsigned long get_irqmask(int irq_nr)
{
return 1 << irq_nr;
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
index c949717b4..259a0a71b 100644
--- a/arch/sparc/kernel/semaphore.c
+++ b/arch/sparc/kernel/semaphore.c
@@ -1,4 +1,4 @@
-/* $Id: semaphore.c,v 1.5 2000/12/29 10:35:05 anton Exp $ */
+/* $Id: semaphore.c,v 1.6 2001/01/05 04:40:07 davem Exp $ */
/* sparc32 semaphore implementation, based on i386 version */
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 6ac8c4b00..957ddc17c 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.107 2000/09/05 21:44:54 davem Exp $
+/* $Id: signal.c,v 1.108 2001/01/24 21:05:12 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -28,9 +28,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
-
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index d9883654e..235d4b96e 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -1,4 +1,4 @@
-/* $Id: sparc_ksyms.c,v 1.105 2000/12/11 05:24:25 anton Exp $
+/* $Id: sparc_ksyms.c,v 1.106 2001/01/11 15:07:09 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -154,6 +154,7 @@ EXPORT_SYMBOL(__cpu_logical_map);
#endif
EXPORT_SYMBOL(udelay);
+EXPORT_SYMBOL(mostek_lock);
EXPORT_SYMBOL(mstk48t02_regs);
#if CONFIG_SUN_AUXIO
EXPORT_SYMBOL(auxio_register);
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index ac252f55a..24ae3829d 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sunos.c,v 1.130 2000/08/12 13:25:41 davem Exp $
+/* $Id: sys_sunos.c,v 1.131 2001/01/24 21:05:12 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -834,7 +834,6 @@ asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
}
/* So stupid... */
-extern asmlinkage int sys_wait4(pid_t, unsigned int *, int, struct rusage *);
asmlinkage int sunos_wait4(pid_t pid, unsigned int *stat_addr, int options, struct rusage *ru)
{
int ret;
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 980981592..37113ef38 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.57 2000/09/16 07:33:45 davem Exp $
+/* $Id: time.c,v 1.58 2001/01/11 15:07:09 davem Exp $
* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -44,6 +44,7 @@
extern rwlock_t xtime_lock;
enum sparc_clock_type sp_clock_typ;
+spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
unsigned long mstk48t02_regs = 0UL;
static struct mostek48t08 *mstk48t08_regs = 0;
static int set_rtc_mmss(unsigned long);
@@ -158,12 +159,16 @@ static void __init kick_start_clock(void)
prom_printf("CLOCK: Clock was stopped. Kick start ");
+ spin_lock_irq(&mostek_lock);
+
/* Turn on the kick start bit to start the oscillator. */
regs->creg |= MSTK_CREG_WRITE;
regs->sec &= ~MSTK_STOP;
regs->hour |= MSTK_KICK_START;
regs->creg &= ~MSTK_CREG_WRITE;
+ spin_unlock_irq(&mostek_lock);
+
/* Delay to allow the clock oscillator to start. */
sec = MSTK_REG_SEC(regs);
for (i = 0; i < 3; i++) {
@@ -175,6 +180,8 @@ static void __init kick_start_clock(void)
}
prom_printf("\n");
+ spin_lock_irq(&mostek_lock);
+
/* Turn off kick start and set a "valid" time and date. */
regs->creg |= MSTK_CREG_WRITE;
regs->hour &= ~MSTK_KICK_START;
@@ -187,12 +194,17 @@ static void __init kick_start_clock(void)
MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
regs->creg &= ~MSTK_CREG_WRITE;
+ spin_unlock_irq(&mostek_lock);
+
/* Ensure the kick start bit is off. If it isn't, turn it off. */
while (regs->hour & MSTK_KICK_START) {
prom_printf("CLOCK: Kick start still on!\n");
+
+ spin_lock_irq(&mostek_lock);
regs->creg |= MSTK_CREG_WRITE;
regs->hour &= ~MSTK_KICK_START;
regs->creg &= ~MSTK_CREG_WRITE;
+ spin_unlock_irq(&mostek_lock);
}
prom_printf("CLOCK: Kick start procedure successful.\n");
@@ -204,10 +216,12 @@ static __inline__ int has_low_battery(void)
struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
unsigned char data1, data2;
+ spin_lock_irq(&mostek_lock);
data1 = regs->eeprom[0]; /* Read some data. */
regs->eeprom[0] = ~data1; /* Write back the complement. */
data2 = regs->eeprom[0]; /* Read back the complement. */
regs->eeprom[0] = data1; /* Restore the original value. */
+ spin_unlock_irq(&mostek_lock);
return (data1 == data2); /* Was the write blocked? */
}
@@ -376,6 +390,7 @@ void __init sbus_time_init(void)
prom_printf("Something wrong, clock regs not mapped yet.\n");
prom_halt();
}
+ spin_lock_irq(&mostek_lock);
mregs->creg |= MSTK_CREG_READ;
sec = MSTK_REG_SEC(mregs);
min = MSTK_REG_MIN(mregs);
@@ -386,6 +401,7 @@ void __init sbus_time_init(void)
xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
xtime.tv_usec = 0;
mregs->creg &= ~MSTK_CREG_READ;
+ spin_unlock_irq(&mostek_lock);
#ifdef CONFIG_SUN4
} else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
/* initialise the intersil on sun4 */
@@ -520,6 +536,7 @@ static int set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes, mostek_minutes;
struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
+ unsigned long flags;
#ifdef CONFIG_SUN4
struct intersil *iregs = intersil_clock;
int temp;
@@ -557,6 +574,8 @@ static int set_rtc_mmss(unsigned long nowtime)
}
#endif
}
+
+ spin_lock_irqsave(&mostek_lock, flags);
/* Read the current RTC minutes. */
regs->creg |= MSTK_CREG_READ;
mostek_minutes = MSTK_REG_MIN(regs);
@@ -579,8 +598,10 @@ static int set_rtc_mmss(unsigned long nowtime)
MSTK_SET_REG_SEC(regs,real_seconds);
MSTK_SET_REG_MIN(regs,real_minutes);
regs->creg &= ~MSTK_CREG_WRITE;
- } else
+ spin_unlock_irqrestore(&mostek_lock, flags);
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&mostek_lock, flags);
return -1;
-
- return 0;
+ }
}
diff --git a/arch/sparc64/config.in b/arch/sparc64/config.in
index 19b05e28f..5c208915b 100644
--- a/arch/sparc64/config.in
+++ b/arch/sparc64/config.in
@@ -1,4 +1,4 @@
-# $Id: config.in,v 1.125 2000/10/10 01:05:53 davem Exp $
+# $Id: config.in,v 1.130 2001/01/18 04:47:44 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
@@ -26,6 +26,9 @@ define_bool CONFIG_VT_CONSOLE y
bool 'Symmetric multi-processing support' CONFIG_SMP
+# Identify this as a Sparc64 build
+define_bool CONFIG_SPARC64 y
+
# Global things across all Sun machines.
define_bool CONFIG_HAVE_DEC_LOCK y
define_bool CONFIG_ISA n
@@ -70,6 +73,7 @@ dep_tristate ' Parallel printer support' CONFIG_PRINTER $CONFIG_PARPORT
if [ "$CONFIG_PCI" = "y" ]; then
tristate 'SUNW, envctrl support' CONFIG_ENVCTRL
tristate '7-Segment Display support' CONFIG_DISPLAY7SEG
+ tristate 'CP1XXX Hardware Watchdog support' CONFIG_WATCHDOG_CP1XXX
fi
endmenu
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 7c2f852b3..d80f93cf6 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -20,8 +20,11 @@ CONFIG_KMOD=y
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
# CONFIG_SMP is not set
+CONFIG_SPARC64=y
CONFIG_HAVE_DEC_LOCK=y
# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
# CONFIG_PCMCIA is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
@@ -66,6 +69,7 @@ CONFIG_PARPORT_1284=y
CONFIG_PRINTER=m
CONFIG_ENVCTRL=m
CONFIG_DISPLAY7SEG=m
+CONFIG_WATCHDOG_CP1XXX=m
#
# Console drivers
@@ -122,9 +126,7 @@ CONFIG_SUN_AURORA=m
# Linux/SPARC audio subsystem (EXPERIMENTAL)
#
CONFIG_SPARCAUDIO=y
-# CONFIG_SPARCAUDIO_AMD7930 is not set
CONFIG_SPARCAUDIO_CS4231=y
-# CONFIG_SPARCAUDIO_DBRI is not set
# CONFIG_SPARCAUDIO_DUMMY is not set
#
@@ -180,7 +182,6 @@ CONFIG_IPV6=m
#
CONFIG_IPX=m
# CONFIG_IPX_INTERN is not set
-# CONFIG_SPX is not set
CONFIG_ATALK=m
CONFIG_DECNET=m
CONFIG_DECNET_SIOCGIFCONF=y
@@ -415,6 +416,8 @@ CONFIG_DRM_FFB=m
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
CONFIG_AFFS_FS=m
@@ -470,8 +473,6 @@ CONFIG_LOCKD_V4=y
# CONFIG_NCPFS_NFS_NS is not set
# CONFIG_NCPFS_OS2_NS is not set
# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_MOUNT_SUBDIR is not set
-# CONFIG_NCPFS_NDS_DOMAINS is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
@@ -481,6 +482,7 @@ CONFIG_LOCKD_V4=y
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
CONFIG_SUN_PARTITION=y
+# CONFIG_SMB_NLS is not set
CONFIG_NLS=y
#
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index 81e11af70..67c1ec6d0 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -1,4 +1,4 @@
-/* $Id: ioctl32.c,v 1.104 2001/01/03 09:28:19 anton Exp $
+/* $Id: ioctl32.c,v 1.105 2001/01/18 04:47:44 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
@@ -73,6 +73,7 @@
#include <asm/audioio.h>
#include <linux/ethtool.h>
#include <asm/display7seg.h>
+#include <asm/watchdog.h>
#include <asm/module.h>
#include <linux/soundcard.h>
@@ -3600,6 +3601,16 @@ COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
/* elevator */
COMPATIBLE_IOCTL(BLKELVGET)
COMPATIBLE_IOCTL(BLKELVSET)
+/* Big W */
+/* WIOC_GETSUPPORT not yet implemented -E */
+COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
+COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
+COMPATIBLE_IOCTL(WDIOC_GETTEMP)
+COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
+COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
+COMPATIBLE_IOCTL(WIOCSTART)
+COMPATIBLE_IOCTL(WIOCSTOP)
+COMPATIBLE_IOCTL(WIOCGSTAT)
/* And these ioctls need translation */
HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 2b697dd87..009900165 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -1,4 +1,4 @@
-/* $Id: pci.c,v 1.20 2000/12/14 22:57:25 davem Exp $
+/* $Id: pci.c,v 1.21 2001/01/10 18:22:59 davem Exp $
* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
@@ -78,6 +78,9 @@ volatile int pci_poke_faulted;
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int);
extern void psycho_init(int);
+#if 0
+extern void schizo_init(int);
+#endif
static struct {
char *model_name;
@@ -87,6 +90,10 @@ static struct {
{ "pci108e,a000", sabre_init },
{ "SUNW,psycho", psycho_init },
{ "pci108e,8000", psycho_init }
+#if 0
+ { "SUNW,schizo", schizo_init },
+ { "pci108e,8001", schizo_init }
+#endif
};
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
sizeof(pci_controller_table[0]))
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index d7267880a..08d5b8ee3 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,4 +1,4 @@
-/* $Id: pci_iommu.c,v 1.11 2000/03/10 02:42:15 davem Exp $
+/* $Id: pci_iommu.c,v 1.12 2001/01/11 16:26:45 davem Exp $
* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -187,7 +187,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
memset((char *)first_page, 0, PAGE_SIZE << order);
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_consistent_cluster(iommu, size >> PAGE_SHIFT);
@@ -241,7 +241,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
iopte = iommu->page_table +
((dvma - iommu->page_table_map_base) >> PAGE_SHIFT);
@@ -308,7 +308,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
unsigned long iopte_protection;
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
if (direction == PCI_DMA_NONE)
@@ -356,7 +356,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
BUG();
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
@@ -504,7 +504,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
}
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
if (direction == PCI_DMA_NONE)
@@ -568,7 +568,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
BUG();
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
bus_addr = sglist->dvma_address & PAGE_MASK;
@@ -639,7 +639,7 @@ void pci_dma_sync_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, i
unsigned long flags, ctx, npages;
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
if (!strbuf->strbuf_enabled)
@@ -700,7 +700,7 @@ void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
unsigned long flags, ctx;
pcp = pdev->sysdata;
- iommu = &pcp->pbm->parent->iommu;
+ iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
if (!strbuf->strbuf_enabled)
@@ -762,7 +762,7 @@ int pci_dma_supported(struct pci_dev *pdev, dma_addr_t device_mask)
if (pdev == NULL) {
dma_addr_mask = 0xffffffff;
} else {
- struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
+ struct pci_iommu *iommu = pcp->pbm->iommu;
dma_addr_mask = iommu->dma_addr_mask;
}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index aff2de594..fbd7832cd 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1,4 +1,4 @@
-/* $Id: pci_psycho.c,v 1.17 2000/09/21 06:25:14 anton Exp $
+/* $Id: pci_psycho.c,v 1.18 2001/01/11 16:26:45 davem Exp $
* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
@@ -619,20 +619,21 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
unsigned long afar,
enum psycho_error_type type)
{
+ struct pci_iommu *iommu = p->pbm_A.iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
u64 control;
int i;
- spin_lock_irqsave(&p->iommu.lock, flags);
- control = psycho_read(p->iommu.iommu_control);
+ spin_lock_irqsave(&iommu->lock, flags);
+ control = psycho_read(iommu->iommu_control);
if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
char *type_string;
/* Clear the error encountered bit. */
control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
- psycho_write(p->iommu.iommu_control, control);
+ psycho_write(iommu->iommu_control, control);
switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
case 0:
@@ -662,7 +663,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
* get as much diagnostic information to the
* console as we can.
*/
- psycho_write(p->iommu.iommu_control,
+ psycho_write(iommu->iommu_control,
control | PSYCHO_IOMMU_CTRL_DENAB);
for (i = 0; i < 16; i++) {
unsigned long base = p->controller_regs;
@@ -678,7 +679,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
}
/* Leave diagnostic mode. */
- psycho_write(p->iommu.iommu_control, control);
+ psycho_write(iommu->iommu_control, control);
for (i = 0; i < 16; i++) {
unsigned long tag, data;
@@ -717,7 +718,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
}
}
__psycho_check_stc_error(p, afsr, afar, type);
- spin_unlock_irqrestore(&p->iommu.lock, flags);
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Uncorrectable Errors. Cause of the error and the address are
@@ -1255,24 +1256,25 @@ static void __init psycho_scan_bus(struct pci_controller_info *p)
static void __init psycho_iommu_init(struct pci_controller_info *p)
{
+ struct pci_iommu *iommu = p->pbm_A.iommu;
unsigned long tsbbase, i;
u64 control;
/* Setup initial software IOMMU state. */
- spin_lock_init(&p->iommu.lock);
- p->iommu.iommu_cur_ctx = 0;
+ spin_lock_init(&iommu->lock);
+ iommu->iommu_cur_ctx = 0;
/* Register addresses. */
- p->iommu.iommu_control = p->controller_regs + PSYCHO_IOMMU_CONTROL;
- p->iommu.iommu_tsbbase = p->controller_regs + PSYCHO_IOMMU_TSBBASE;
- p->iommu.iommu_flush = p->controller_regs + PSYCHO_IOMMU_FLUSH;
+ iommu->iommu_control = p->controller_regs + PSYCHO_IOMMU_CONTROL;
+ iommu->iommu_tsbbase = p->controller_regs + PSYCHO_IOMMU_TSBBASE;
+ iommu->iommu_flush = p->controller_regs + PSYCHO_IOMMU_FLUSH;
/* PSYCHO's IOMMU lacks ctx flushing. */
- p->iommu.iommu_ctxflush = 0;
+ iommu->iommu_ctxflush = 0;
/* We use the main control register of PSYCHO as the write
* completion register.
*/
- p->iommu.write_complete_reg = p->controller_regs + PSYCHO_CONTROL;
+ iommu->write_complete_reg = p->controller_regs + PSYCHO_CONTROL;
/*
* Invalidate TLB Entries.
@@ -1298,19 +1300,19 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
prom_halt();
}
- p->iommu.page_table = (iopte_t *)tsbbase;
- p->iommu.page_table_sz_bits = 17;
- p->iommu.page_table_map_base = 0xc0000000;
- p->iommu.dma_addr_mask = 0xffffffff;
+ iommu->page_table = (iopte_t *)tsbbase;
+ iommu->page_table_sz_bits = 17;
+ iommu->page_table_map_base = 0xc0000000;
+ iommu->dma_addr_mask = 0xffffffff;
memset((char *)tsbbase, 0, PAGE_SIZE << 7);
/* We start with no consistent mappings. */
- p->iommu.lowest_consistent_map =
- 1 << (p->iommu.page_table_sz_bits - PBM_LOGCLUSTERS);
+ iommu->lowest_consistent_map =
+ 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
for (i = 0; i < PBM_NCLUSTERS; i++) {
- p->iommu.alloc_info[i].flush = 0;
- p->iommu.alloc_info[i].next = 0;
+ iommu->alloc_info[i].flush = 0;
+ iommu->alloc_info[i].next = 0;
}
psycho_write(p->controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
@@ -1515,6 +1517,7 @@ void __init psycho_init(int node)
{
struct linux_prom64_registers pr_regs[3];
struct pci_controller_info *p;
+ struct pci_iommu *iommu;
unsigned long flags;
u32 upa_portid;
int is_pbm_a, err;
@@ -1538,6 +1541,13 @@ void __init psycho_init(int node)
prom_halt();
}
memset(p, 0, sizeof(*p));
+ iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("PSYCHO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_A.iommu = p->pbm_B.iommu = iommu;
spin_lock_irqsave(&pci_controller_lock, flags);
p->next = pci_controller_root;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index f3a5adbec..ec74a3696 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1,4 +1,4 @@
-/* $Id: pci_sabre.c,v 1.20 2000/06/26 19:40:27 davem Exp $
+/* $Id: pci_sabre.c,v 1.22 2001/01/16 13:03:48 anton Exp $
* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
@@ -672,14 +672,15 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
unsigned long afsr,
unsigned long afar)
{
+ struct pci_iommu *iommu = p->pbm_A.iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
u64 control;
int i;
- spin_lock_irqsave(&p->iommu.lock, flags);
- control = sabre_read(p->iommu.iommu_control);
+ spin_lock_irqsave(&iommu->lock, flags);
+ control = sabre_read(iommu->iommu_control);
if (control & SABRE_IOMMUCTRL_ERR) {
char *type_string;
@@ -687,7 +688,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
* NOTE: On Sabre this is write 1 to clear,
* which is different from Psycho.
*/
- sabre_write(p->iommu.iommu_control, control);
+ sabre_write(iommu->iommu_control, control);
switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
case 1:
type_string = "Invalid Error";
@@ -706,7 +707,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
* entries in the IOTLB.
*/
control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
- sabre_write(p->iommu.iommu_control,
+ sabre_write(iommu->iommu_control,
(control | SABRE_IOMMUCTRL_DENAB));
for (i = 0; i < 16; i++) {
unsigned long base = p->controller_regs;
@@ -718,7 +719,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
}
- sabre_write(p->iommu.iommu_control, control);
+ sabre_write(iommu->iommu_control, control);
for (i = 0; i < 16; i++) {
unsigned long tag, data;
@@ -752,7 +753,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
((data & SABRE_IOMMUDATA_PPN) << PAGE_SHIFT));
}
}
- spin_unlock_irqrestore(&p->iommu.lock, flags);
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
static void sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -1158,20 +1159,21 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
int tsbsize, unsigned long dvma_offset,
u32 dma_mask)
{
+ struct pci_iommu *iommu = p->pbm_A.iommu;
unsigned long tsbbase, i, order;
u64 control;
/* Setup initial software IOMMU state. */
- spin_lock_init(&p->iommu.lock);
- p->iommu.iommu_cur_ctx = 0;
+ spin_lock_init(&iommu->lock);
+ iommu->iommu_cur_ctx = 0;
/* Register addresses. */
- p->iommu.iommu_control = p->controller_regs + SABRE_IOMMU_CONTROL;
- p->iommu.iommu_tsbbase = p->controller_regs + SABRE_IOMMU_TSBBASE;
- p->iommu.iommu_flush = p->controller_regs + SABRE_IOMMU_FLUSH;
- p->iommu.write_complete_reg = p->controller_regs + SABRE_WRSYNC;
+ iommu->iommu_control = p->controller_regs + SABRE_IOMMU_CONTROL;
+ iommu->iommu_tsbbase = p->controller_regs + SABRE_IOMMU_TSBBASE;
+ iommu->iommu_flush = p->controller_regs + SABRE_IOMMU_FLUSH;
+ iommu->write_complete_reg = p->controller_regs + SABRE_WRSYNC;
/* Sabre's IOMMU lacks ctx flushing. */
- p->iommu.iommu_ctxflush = 0;
+ iommu->iommu_ctxflush = 0;
/* Invalidate TLB Entries. */
control = sabre_read(p->controller_regs + SABRE_IOMMU_CONTROL);
@@ -1192,9 +1194,9 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
prom_halt();
}
- p->iommu.page_table = (iopte_t *)tsbbase;
- p->iommu.page_table_map_base = dvma_offset;
- p->iommu.dma_addr_mask = dma_mask;
+ iommu->page_table = (iopte_t *)tsbbase;
+ iommu->page_table_map_base = dvma_offset;
+ iommu->dma_addr_mask = dma_mask;
memset((char *)tsbbase, 0, PAGE_SIZE << order);
sabre_write(p->controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
@@ -1205,11 +1207,11 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
switch(tsbsize) {
case 64:
control |= SABRE_IOMMU_TSBSZ_64K;
- p->iommu.page_table_sz_bits = 16;
+ iommu->page_table_sz_bits = 16;
break;
case 128:
control |= SABRE_IOMMU_TSBSZ_128K;
- p->iommu.page_table_sz_bits = 17;
+ iommu->page_table_sz_bits = 17;
break;
default:
prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1219,12 +1221,12 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
sabre_write(p->controller_regs + SABRE_IOMMU_CONTROL, control);
/* We start with no consistent mappings. */
- p->iommu.lowest_consistent_map =
- 1 << (p->iommu.page_table_sz_bits - PBM_LOGCLUSTERS);
+ iommu->lowest_consistent_map =
+ 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
for (i = 0; i < PBM_NCLUSTERS; i++) {
- p->iommu.alloc_info[i].flush = 0;
- p->iommu.alloc_info[i].next = 0;
+ iommu->alloc_info[i].flush = 0;
+ iommu->alloc_info[i].next = 0;
}
}
@@ -1368,6 +1370,7 @@ void __init sabre_init(int pnode)
{
struct linux_prom64_registers pr_regs[2];
struct pci_controller_info *p;
+ struct pci_iommu *iommu;
unsigned long flags;
int tsbsize, err;
u32 busrange[2];
@@ -1380,10 +1383,17 @@ void __init sabre_init(int pnode)
prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
prom_halt();
}
+ memset(p, 0, sizeof(*p));
- upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
+ iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_A.iommu = p->pbm_B.iommu = iommu;
- memset(p, 0, sizeof(*p));
+ upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
spin_lock_irqsave(&pci_controller_lock, flags);
p->next = pci_controller_root;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
new file mode 100644
index 000000000..9299c2531
--- /dev/null
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -0,0 +1,169 @@
+/* $Id: pci_schizo.c,v 1.2 2001/01/12 02:43:30 davem Exp $
+ * pci_schizo.c: SCHIZO specific PCI controller support.
+ *
+ * Copyright (C) 2001 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+
+#include "pci_impl.h"
+
+static int schizo_read_byte(struct pci_dev *dev, int where, u8 *value)
+{
+ /* IMPLEMENT ME */
+}
+
+static int schizo_read_word(struct pci_dev *dev, int where, u16 *value)
+{
+ /* IMPLEMENT ME */
+}
+
+static int schizo_read_dword(struct pci_dev *dev, int where, u32 *value)
+{
+ /* IMPLEMENT ME */
+}
+
+static int schizo_write_byte(struct pci_dev *dev, int where, u8 value)
+{
+ /* IMPLEMENT ME */
+}
+
+static int schizo_write_word(struct pci_dev *dev, int where, u16 value)
+{
+ /* IMPLEMENT ME */
+}
+
+static int schizo_write_dword(struct pci_dev *dev, int where, u32 value)
+{
+ /* IMPLEMENT ME */
+}
+
+static struct pci_ops schizo_ops = {
+ schizo_read_byte,
+ schizo_read_word,
+ schizo_read_dword,
+ schizo_write_byte,
+ schizo_write_word,
+ schizo_write_dword
+};
+
+static void __init schizo_scan_bus(struct pci_controller_info *p)
+{
+ /* IMPLEMENT ME */
+}
+
+static unsigned int __init schizo_irq_build(struct pci_controller_info *p,
+ struct pci_dev *pdev,
+ unsigned int ino)
+{
+ /* IMPLEMENT ME */
+}
+
+static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
+{
+ /* IMPLEMENT ME */
+}
+
+static void __init schizo_resource_adjust(struct pci_dev *pdev,
+ struct resource *res,
+ struct resource *root)
+{
+ /* IMPLEMENT ME */
+}
+
+static void schizo_pbm_init(struct pci_controller_info *p,
+ int prom_node, int is_pbm_a)
+{
+ /* IMPLEMENT ME */
+}
+
+void __init schizo_init(int node)
+{
+ struct linux_prom64_registers pr_regs[3];
+ struct pci_controller_info *p;
+ struct pci_iommu *iommu;
+ u32 portid;
+ int is_pbm_a, err;
+
+ portid = prom_getintdefault(node, "portid", 0xff);
+
+ spin_lock_irqsave(&pci_controller_lock, flags);
+ for(p = pci_controller_root; p; p = p->next) {
+ if (p->portid == portid) {
+ spin_unlock_irqrestore(&pci_controller_lock, flags);
+ is_pbm_a = (p->pbm_A.prom_node == 0);
+ schizo_pbm_init(p, node, is_pbm_a);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&pci_controller_lock, flags);
+
+ p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+ if (!p) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(p, 0, sizeof(*p));
+
+ iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_A.iommu = iommu;
+
+ iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_B.iommu = iommu;
+
+ spin_lock_irqsave(&pci_controller_lock, flags);
+ p->next = pci_controller_root;
+ pci_controller_root = p;
+ spin_unlock_irqrestore(&pci_controller_lock, flags);
+
+ p->portid = portid;
+ p->index = pci_num_controllers++;
+ p->scan_bus = schizo_scan_bus;
+ p->irq_build = schizo_irq_build;
+ p->base_address_update = schizo_base_address_update;
+ p->resource_adjust = schizo_resource_adjust;
+ p->pci_ops = &schizo_ops;
+
+pbm_init:
+ /* Three OBP regs:
+ * 1) PBM controller regs
+ * 2) Schizo front-end controller regs (same for both PBMs)
+ * 3) Unknown... (0x7ffec000000 and 0x7ffee000000 on Excalibur)
+ */
+ err = prom_getproperty(node, "reg",
+ (char *)&pr_regs[0],
+ sizeof(pr_regs));
+ if (err == 0 || err == -1) {
+ prom_printf("SCHIZO: Fatal error, no reg property.\n");
+ prom_halt();
+ }
+
+ /* XXX Read REG base, record in controller/pbm structures. */
+
+ /* XXX Report controller to console. */
+
+ /* XXX Setup pci_memspace_mask */
+
+ /* XXX Init core controller and IOMMU */
+
+ is_pbm_a = XXX; /* Figure out this test */
+ schizo_pbm_init(p, node, is_pbm_a);
+}
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index c2a7833fb..23d0774b4 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.54 2000/09/05 21:44:54 davem Exp $
+/* $Id: signal.c,v 1.55 2001/01/24 21:05:13 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -31,9 +31,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
-
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
unsigned long orig_o0, int ret_from_syscall);
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 6d06328dd..0886d9d39 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -1,4 +1,4 @@
-/* $Id: signal32.c,v 1.67 2000/09/05 21:44:54 davem Exp $
+/* $Id: signal32.c,v 1.68 2001/01/24 21:05:13 davem Exp $
* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -29,9 +29,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
-
asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs *regs,
unsigned long orig_o0, int ret_from_syscall);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 25f6ecff3..76045d0d2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -290,8 +290,8 @@ void __init smp_boot_cpus(void)
}
printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount + 1,
- (bogosum + 2500)/500000,
- ((bogosum + 2500)/5000)%100);
+ bogosum/(500000/HZ),
+ (bogosum/(5000/HZ))%100);
smp_activated = 1;
smp_num_cpus = cpucount + 1;
}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index a0311626e..f35e38a5d 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -1,4 +1,4 @@
-/* $Id: sparc64_ksyms.c,v 1.99 2000/12/09 04:15:24 anton Exp $
+/* $Id: sparc64_ksyms.c,v 1.100 2001/01/11 15:07:09 davem Exp $
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -182,6 +182,7 @@ EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(__flush_dcache_page);
+EXPORT_SYMBOL(mostek_lock);
EXPORT_SYMBOL(mstk48t02_regs);
EXPORT_SYMBOL(request_fast_irq);
#if CONFIG_SBUS
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 775c986c8..184b4169d 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc32.c,v 1.171 2000/12/13 16:34:55 davem Exp $
+/* $Id: sys_sparc32.c,v 1.172 2001/01/24 21:05:13 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -1794,9 +1794,6 @@ static int put_rusage (struct rusage32 *ru, struct rusage *r)
return err;
}
-extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr,
- int options, struct rusage * ru);
-
asmlinkage int sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options, struct rusage32 *ru)
{
if (!ru)
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index a9d143759..58e4704c4 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sunos32.c,v 1.55 2000/11/18 02:10:59 davem Exp $
+/* $Id: sys_sunos32.c,v 1.56 2001/01/04 05:35:48 davem Exp $
* sys_sunos32.c: SunOS binary compatability layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -1047,6 +1047,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
(current->thread.kregs->u_regs[UREG_FP] & 0xffffffffUL);
if(get_user(arg5, &sp->xxargs[0])) {
rval = -EFAULT;
+ kfree(kmbuf);
break;
}
set_fs(KERNEL_DS);
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 0264f9418..a3340f54a 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.32 2000/09/22 23:02:13 davem Exp $
+/* $Id: time.c,v 1.33 2001/01/11 15:07:09 davem Exp $
* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -34,7 +34,9 @@
extern rwlock_t xtime_lock;
+spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
unsigned long mstk48t02_regs = 0UL;
+
static unsigned long mstk48t08_regs = 0UL;
static unsigned long mstk48t59_regs = 0UL;
@@ -187,6 +189,8 @@ static void __init kick_start_clock(void)
prom_printf("CLOCK: Clock was stopped. Kick start ");
+ spin_lock_irq(&mostek_lock);
+
/* Turn on the kick start bit to start the oscillator. */
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_WRITE;
@@ -201,6 +205,8 @@ static void __init kick_start_clock(void)
tmp &= ~MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
+ spin_unlock_irq(&mostek_lock);
+
/* Delay to allow the clock oscillator to start. */
sec = MSTK_REG_SEC(regs);
for (i = 0; i < 3; i++) {
@@ -212,6 +218,8 @@ static void __init kick_start_clock(void)
}
prom_printf("\n");
+ spin_lock_irq(&mostek_lock);
+
/* Turn off kick start and set a "valid" time and date. */
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_WRITE;
@@ -230,9 +238,14 @@ static void __init kick_start_clock(void)
tmp &= ~MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
+ spin_unlock_irq(&mostek_lock);
+
/* Ensure the kick start bit is off. If it isn't, turn it off. */
while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
prom_printf("CLOCK: Kick start still on!\n");
+
+ spin_lock_irq(&mostek_lock);
+
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
@@ -244,6 +257,8 @@ static void __init kick_start_clock(void)
tmp = mostek_read(regs + MOSTEK_CREG);
tmp &= ~MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
}
prom_printf("CLOCK: Kick start procedure successful.\n");
@@ -255,11 +270,15 @@ static int __init has_low_battery(void)
unsigned long regs = mstk48t02_regs;
u8 data1, data2;
+ spin_lock_irq(&mostek_lock);
+
data1 = mostek_read(regs + MOSTEK_EEPROM); /* Read some data. */
mostek_write(regs + MOSTEK_EEPROM, ~data1); /* Write back the complement. */
data2 = mostek_read(regs + MOSTEK_EEPROM); /* Read back the complement. */
mostek_write(regs + MOSTEK_EEPROM, data1); /* Restore original value. */
+ spin_unlock_irq(&mostek_lock);
+
return (data1 == data2); /* Was the write blocked? */
}
@@ -278,6 +297,8 @@ static void __init set_system_time(void)
prom_halt();
}
+ spin_lock_irq(&mostek_lock);
+
tmp = mostek_read(mregs + MOSTEK_CREG);
tmp |= MSTK_CREG_READ;
mostek_write(mregs + MOSTEK_CREG, tmp);
@@ -294,6 +315,8 @@ static void __init set_system_time(void)
tmp = mostek_read(mregs + MOSTEK_CREG);
tmp &= ~MSTK_CREG_READ;
mostek_write(mregs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
}
void __init clock_probe(void)
@@ -512,6 +535,7 @@ static int set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes, mostek_minutes;
unsigned long regs = mstk48t02_regs;
+ unsigned long flags;
u8 tmp;
/*
@@ -521,6 +545,8 @@ static int set_rtc_mmss(unsigned long nowtime)
if (!regs)
return -1;
+ spin_lock_irqsave(&mostek_lock, flags);
+
/* Read the current RTC minutes. */
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_READ;
@@ -555,8 +581,13 @@ static int set_rtc_mmss(unsigned long nowtime)
tmp = mostek_read(regs + MOSTEK_CREG);
tmp &= ~MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
- } else
- return -1;
- return 0;
+ spin_unlock_irqrestore(&mostek_lock, flags);
+
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&mostek_lock, flags);
+
+ return -1;
+ }
}
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 307cb08ad..e1e0745a4 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -4,7 +4,7 @@
O_TARGET := acpi.o
-export-objs := ksyms.o
+export-objs := acpi_ksyms.o
export ACPI_CFLAGS
ACPI_CFLAGS := -D_LINUX
@@ -20,18 +20,26 @@ EXTRA_CFLAGS += -I./include
EXTRA_CFLAGS += $(ACPI_CFLAGS)
+# genksyms only reads $(CFLAGS), it should really read $(EXTRA_CFLAGS) as well.
+# Without EXTRA_CFLAGS the gcc pass for genksyms fails, resulting in an empty
+# include/linux/modules/acpi_ksyms.ver. Changing genkyms to use EXTRA_CFLAGS
+# will hit everything, too risky in 2.4.0-prerelease. Bandaid by tweaking
+# CFLAGS only for .ver targets. Review after 2.4.0 release. KAO
+
+$(MODINCL)/%.ver: CFLAGS := -I./include $(CFLAGS)
+
acpi-subdirs := common dispatcher events hardware \
interpreter namespace parser resources tables
subdir-$(CONFIG_ACPI) += $(acpi-subdirs)
obj-$(CONFIG_ACPI) := $(patsubst %,%.o,$(acpi-subdirs))
-obj-$(CONFIG_ACPI) += os.o ksyms.o
+obj-$(CONFIG_ACPI) += os.o acpi_ksyms.o
ifdef CONFIG_ACPI_KERNEL_CONFIG
obj-$(CONFIG_ACPI) += acpiconf.o osconf.o
else
- obj-$(CONFIG_ACPI) += driver.o cmbatt.o cpu.o ec.o ksyms.o sys.o table.o power.o
+ obj-$(CONFIG_ACPI) += driver.o cmbatt.o cpu.o ec.o acpi_ksyms.o sys.o table.o power.o
endif
include $(TOPDIR)/Rules.make
diff --git a/drivers/acpi/acpi_ksyms.c b/drivers/acpi/acpi_ksyms.c
new file mode 100644
index 000000000..8e1774c9a
--- /dev/null
+++ b/drivers/acpi/acpi_ksyms.c
@@ -0,0 +1,83 @@
+/*
+ * ksyms.c - ACPI exported symbols
+ *
+ * Copyright (C) 2000 Andrew Grover
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include "acpi.h"
+#include "acdebug.h"
+
+extern int acpi_in_debugger;
+
+#define _COMPONENT OS_DEPENDENT
+ MODULE_NAME ("symbols")
+
+#ifdef ENABLE_DEBUGGER
+EXPORT_SYMBOL(acpi_in_debugger);
+EXPORT_SYMBOL(acpi_db_user_commands);
+#endif
+
+EXPORT_SYMBOL(acpi_os_free);
+EXPORT_SYMBOL(acpi_os_breakpoint);
+EXPORT_SYMBOL(acpi_os_printf);
+EXPORT_SYMBOL(acpi_os_callocate);
+EXPORT_SYMBOL(acpi_os_sleep);
+EXPORT_SYMBOL(acpi_os_sleep_usec);
+EXPORT_SYMBOL(acpi_os_in8);
+EXPORT_SYMBOL(acpi_os_out8);
+EXPORT_SYMBOL(acpi_os_queue_for_execution);
+
+EXPORT_SYMBOL(acpi_dbg_layer);
+EXPORT_SYMBOL(acpi_dbg_level);
+EXPORT_SYMBOL(function_exit);
+EXPORT_SYMBOL(function_trace);
+EXPORT_SYMBOL(function_status_exit);
+EXPORT_SYMBOL(function_value_exit);
+EXPORT_SYMBOL(debug_print_raw);
+EXPORT_SYMBOL(debug_print_prefix);
+
+EXPORT_SYMBOL(acpi_cm_strncmp);
+EXPORT_SYMBOL(acpi_cm_memcpy);
+EXPORT_SYMBOL(acpi_cm_memset);
+
+EXPORT_SYMBOL(acpi_get_handle);
+EXPORT_SYMBOL(acpi_get_parent);
+EXPORT_SYMBOL(acpi_get_type);
+EXPORT_SYMBOL(acpi_get_name);
+EXPORT_SYMBOL(acpi_get_object_info);
+EXPORT_SYMBOL(acpi_get_next_object);
+EXPORT_SYMBOL(acpi_evaluate_object);
+
+EXPORT_SYMBOL(acpi_install_notify_handler);
+EXPORT_SYMBOL(acpi_remove_notify_handler);
+EXPORT_SYMBOL(acpi_install_gpe_handler);
+EXPORT_SYMBOL(acpi_remove_gpe_handler);
+EXPORT_SYMBOL(acpi_install_address_space_handler);
+EXPORT_SYMBOL(acpi_remove_address_space_handler);
+
+EXPORT_SYMBOL(acpi_get_current_resources);
+EXPORT_SYMBOL(acpi_get_possible_resources);
+EXPORT_SYMBOL(acpi_set_current_resources);
+
+EXPORT_SYMBOL(acpi_enable_event);
+EXPORT_SYMBOL(acpi_disable_event);
+EXPORT_SYMBOL(acpi_clear_event);
diff --git a/drivers/acpi/cmbatt.c b/drivers/acpi/cmbatt.c
index c45aa810c..d5dc5977a 100644
--- a/drivers/acpi/cmbatt.c
+++ b/drivers/acpi/cmbatt.c
@@ -113,10 +113,10 @@ acpi_get_battery_status(ACPI_HANDLE handle, struct cmbatt_status *result)
obj = (ACPI_OBJECT *) buf.pointer;
objs = obj->package.elements;
- result->state = objs[0].number.value;
- result->present_rate = objs[1].number.value;
- result->remaining_capacity = objs[2].number.value;
- result->present_voltage = objs[3].number.value;
+ result->state = objs[0].integer.value;
+ result->present_rate = objs[1].integer.value;
+ result->remaining_capacity = objs[2].integer.value;
+ result->present_voltage = objs[3].integer.value;
kfree(buf.pointer);
@@ -153,15 +153,15 @@ acpi_get_battery_info(ACPI_HANDLE handle, struct cmbatt_info *result)
obj = (ACPI_OBJECT *) buf.pointer;
objs = obj->package.elements;
- result->power_unit=objs[0].number.value;
- result->design_capacity=objs[1].number.value;
- result->last_full_capacity=objs[2].number.value;
- result->battery_technology=objs[3].number.value;
- result->design_voltage=objs[4].number.value;
- result->design_capacity_warning=objs[5].number.value;
- result->design_capacity_low=objs[6].number.value;
- result->battery_capacity_granularity_1=objs[7].number.value;
- result->battery_capacity_granularity_2=objs[8].number.value;
+ result->power_unit=objs[0].integer.value;
+ result->design_capacity=objs[1].integer.value;
+ result->last_full_capacity=objs[2].integer.value;
+ result->battery_technology=objs[3].integer.value;
+ result->design_voltage=objs[4].integer.value;
+ result->design_capacity_warning=objs[5].integer.value;
+ result->design_capacity_low=objs[6].integer.value;
+ result->battery_capacity_granularity_1=objs[7].integer.value;
+ result->battery_capacity_granularity_2=objs[8].integer.value;
/* BUG: trailing NULL issue */
strncpy(result->model_number, objs[9].string.pointer, MAX_BATT_STRLEN-1);
diff --git a/drivers/acpi/common/cmalloc.c b/drivers/acpi/common/cmalloc.c
index b7a64e5b6..d8514698f 100644
--- a/drivers/acpi/common/cmalloc.c
+++ b/drivers/acpi/common/cmalloc.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmalloc - local memory allocation routines
- * $Revision: 79 $
+ * $Revision: 84 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,8 +57,6 @@ _cm_allocate (
u32 line)
{
void *address = NULL;
- DEBUG_ONLY_MEMBERS (\
- ACPI_STATUS status)
/* Check for an inadvertent size of zero bytes */
@@ -107,8 +105,6 @@ _cm_callocate (
u32 line)
{
void *address = NULL;
- DEBUG_ONLY_MEMBERS (\
- ACPI_STATUS status)
/* Check for an inadvertent size of zero bytes */
diff --git a/drivers/acpi/common/cmclib.c b/drivers/acpi/common/cmclib.c
index 5146b09c5..88e92d17f 100644
--- a/drivers/acpi/common/cmclib.c
+++ b/drivers/acpi/common/cmclib.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmclib - Local implementation of C library functions
- * $Revision: 28 $
+ * $Revision: 32 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -347,7 +347,7 @@ acpi_cm_memcpy (
void *
acpi_cm_memset (
void *dest,
- u32 value,
+ NATIVE_UINT value,
NATIVE_UINT count)
{
NATIVE_CHAR *new = (NATIVE_CHAR *) dest;
@@ -522,6 +522,7 @@ static const u8 _acpi_ctype[257] = {
#define IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
#define IS_DIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
#define IS_SPACE(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
+#define IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
/*******************************************************************************
@@ -658,7 +659,7 @@ u32
acpi_cm_strtoul (
const NATIVE_CHAR *string,
NATIVE_CHAR **terminator,
- u32 base)
+ NATIVE_UINT base)
{
u32 converted = 0;
u32 index;
diff --git a/drivers/acpi/common/cmcopy.c b/drivers/acpi/common/cmcopy.c
index 68b7bda01..da3851c17 100644
--- a/drivers/acpi/common/cmcopy.c
+++ b/drivers/acpi/common/cmcopy.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmcopy - Internal to external object translation utilities
- * $Revision: 61 $
+ * $Revision: 62 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -118,9 +118,9 @@ acpi_cm_build_external_simple_object (
break;
- case ACPI_TYPE_NUMBER:
+ case ACPI_TYPE_INTEGER:
- external_obj->number.value= internal_obj->number.value;
+ external_obj->integer.value= internal_obj->integer.value;
break;
@@ -469,11 +469,11 @@ acpi_cm_build_internal_simple_object (
break;
- case ACPI_TYPE_NUMBER:
+ case ACPI_TYPE_INTEGER:
/*
* Number is included in the object itself
*/
- internal_obj->number.value = external_obj->number.value;
+ internal_obj->integer.value = external_obj->integer.value;
break;
diff --git a/drivers/acpi/common/cmdebug.c b/drivers/acpi/common/cmdebug.c
index a55372d5c..2d0022ac3 100644
--- a/drivers/acpi/common/cmdebug.c
+++ b/drivers/acpi/common/cmdebug.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmdebug - Debug print routines
- * $Revision: 61 $
+ * $Revision: 64 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -281,7 +281,7 @@ function_value_exit (
u32 line_number,
u32 component_id,
NATIVE_CHAR *function_name,
- NATIVE_UINT value)
+ ACPI_INTEGER value)
{
debug_print (module_name, line_number, component_id, TRACE_FUNCTIONS,
diff --git a/drivers/acpi/common/cmdelete.c b/drivers/acpi/common/cmdelete.c
index a6e74c12f..b516b691c 100644
--- a/drivers/acpi/common/cmdelete.c
+++ b/drivers/acpi/common/cmdelete.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: cmdelete - object deletion and reference count utilities
- * $Revision: 60 $
+ * $Revision: 62 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/common/cmeval.c b/drivers/acpi/common/cmeval.c
index 29a5cefa6..653e26a47 100644
--- a/drivers/acpi/common/cmeval.c
+++ b/drivers/acpi/common/cmeval.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmeval - Object evaluation
- * $Revision: 19 $
+ * $Revision: 21 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -77,7 +77,7 @@ acpi_cm_evaluate_numeric_object (
/* Is the return object of the correct type? */
- if (obj_desc->common.type != ACPI_TYPE_NUMBER) {
+ if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
status = AE_TYPE;
}
else {
@@ -85,7 +85,7 @@ acpi_cm_evaluate_numeric_object (
* Since the structure is a union, setting any field will set all
* of the variables in the union
*/
- *address = obj_desc->number.value;
+ *address = obj_desc->integer.value;
}
/* On exit, we must delete the return object */
@@ -142,17 +142,17 @@ acpi_cm_execute_HID (
* a string
*/
- if ((obj_desc->common.type != ACPI_TYPE_NUMBER) &&
+ if ((obj_desc->common.type != ACPI_TYPE_INTEGER) &&
(obj_desc->common.type != ACPI_TYPE_STRING))
{
status = AE_TYPE;
}
else {
- if (obj_desc->common.type == ACPI_TYPE_NUMBER) {
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
/* Convert the Numeric HID to string */
- acpi_aml_eisa_id_to_string ((u32) obj_desc->number.value, hid->buffer);
+ acpi_aml_eisa_id_to_string ((u32) obj_desc->integer.value, hid->buffer);
}
else {
@@ -217,17 +217,17 @@ acpi_cm_execute_UID (
* a string
*/
- if ((obj_desc->common.type != ACPI_TYPE_NUMBER) &&
+ if ((obj_desc->common.type != ACPI_TYPE_INTEGER) &&
(obj_desc->common.type != ACPI_TYPE_STRING))
{
status = AE_TYPE;
}
else {
- if (obj_desc->common.type == ACPI_TYPE_NUMBER) {
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
/* Convert the Numeric UID to string */
- acpi_aml_unsigned_integer_to_string (obj_desc->number.value, uid->buffer);
+ acpi_aml_unsigned_integer_to_string (obj_desc->integer.value, uid->buffer);
}
else {
@@ -289,14 +289,14 @@ acpi_cm_execute_STA (
/* Is the return object of the correct type? */
- if (obj_desc->common.type != ACPI_TYPE_NUMBER) {
+ if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
status = AE_TYPE;
}
else {
/* Extract the status flags */
- *flags = (u32) obj_desc->number.value;
+ *flags = (u32) obj_desc->integer.value;
}
/* On exit, we must delete the return object */
diff --git a/drivers/acpi/common/cmglobal.c b/drivers/acpi/common/cmglobal.c
index 4b4460f46..a388907f3 100644
--- a/drivers/acpi/common/cmglobal.c
+++ b/drivers/acpi/common/cmglobal.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmglobal - Global variables for the ACPI subsystem
- * $Revision: 112 $
+ * $Revision: 116 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,7 +95,7 @@ PREDEFINED_NAMES acpi_gbl_pre_defined_names[] =
{"_SB_", INTERNAL_TYPE_DEF_ANY},
{"_SI_", INTERNAL_TYPE_DEF_ANY},
{"_TZ_", INTERNAL_TYPE_DEF_ANY},
- {"_REV", ACPI_TYPE_NUMBER, "2"},
+ {"_REV", ACPI_TYPE_INTEGER, "2"},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, "0"},
@@ -152,6 +152,13 @@ u8 acpi_gbl_ns_properties[] =
};
+/* Hex to ASCII conversion table */
+
+NATIVE_CHAR acpi_gbl_hex_to_ascii[] =
+ {'0','1','2','3','4','5','6','7',
+ '8','9','A','B','C','D','E','F'};
+
+
/******************************************************************************
*
* Table globals
diff --git a/drivers/acpi/common/cminit.c b/drivers/acpi/common/cminit.c
index e6cfb7655..0952720f1 100644
--- a/drivers/acpi/common/cminit.c
+++ b/drivers/acpi/common/cminit.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cminit - Common ACPI subsystem initialization
- * $Revision: 91 $
+ * $Revision: 93 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -35,6 +35,9 @@
MODULE_NAME ("cminit")
+#define ACPI_OFFSET(d,o) ((u32) &(((d *)0)->o))
+#define ACPI_FADT_OFFSET(o) ACPI_OFFSET (FADT_DESCRIPTOR, o)
+
/*******************************************************************************
*
* FUNCTION: Acpi_cm_fadt_register_error
@@ -53,12 +56,13 @@
static ACPI_STATUS
acpi_cm_fadt_register_error (
NATIVE_CHAR *register_name,
- u32 value)
+ u32 value,
+ u32 offset)
{
REPORT_ERROR (
- ("Invalid FADT register value, %s=%X (FADT=%p)\n",
- register_name, value, acpi_gbl_FADT));
+ ("Invalid FADT value %s=%lX at offset %lX FADT=%p\n",
+ register_name, value, offset, acpi_gbl_FADT));
return (AE_BAD_VALUE);
@@ -91,39 +95,42 @@ acpi_cm_validate_fadt (
if (acpi_gbl_FADT->pm1_evt_len < 4) {
status = acpi_cm_fadt_register_error ("PM1_EVT_LEN",
- (u32) acpi_gbl_FADT->pm1_evt_len);
+ (u32) acpi_gbl_FADT->pm1_evt_len,
+ ACPI_FADT_OFFSET (pm1_evt_len));
}
if (!acpi_gbl_FADT->pm1_cnt_len) {
- status = acpi_cm_fadt_register_error ("PM1_CNT_LEN",
- 0);
+ status = acpi_cm_fadt_register_error ("PM1_CNT_LEN", 0,
+ ACPI_FADT_OFFSET (pm1_cnt_len));
}
if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm1a_evt_blk.address)) {
- status = acpi_cm_fadt_register_error ("PM1a_EVT_BLK",
- 0);
+ status = acpi_cm_fadt_register_error ("X_PM1a_EVT_BLK", 0,
+ ACPI_FADT_OFFSET (Xpm1a_evt_blk.address));
}
if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm1a_cnt_blk.address)) {
- status = acpi_cm_fadt_register_error ("PM1a_CNT_BLK",
- 0);
+ status = acpi_cm_fadt_register_error ("X_PM1a_CNT_BLK", 0,
+ ACPI_FADT_OFFSET (Xpm1a_cnt_blk.address));
}
if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm_tmr_blk.address)) {
- status = acpi_cm_fadt_register_error ("PM_TMR_BLK",
- 0);
+ status = acpi_cm_fadt_register_error ("X_PM_TMR_BLK", 0,
+ ACPI_FADT_OFFSET (Xpm_tmr_blk.address));
}
if ((ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm2_cnt_blk.address) &&
!acpi_gbl_FADT->pm2_cnt_len))
{
status = acpi_cm_fadt_register_error ("PM2_CNT_LEN",
- (u32) acpi_gbl_FADT->pm2_cnt_len);
+ (u32) acpi_gbl_FADT->pm2_cnt_len,
+ ACPI_FADT_OFFSET (pm2_cnt_len));
}
if (acpi_gbl_FADT->pm_tm_len < 4) {
status = acpi_cm_fadt_register_error ("PM_TM_LEN",
- (u32) acpi_gbl_FADT->pm_tm_len);
+ (u32) acpi_gbl_FADT->pm_tm_len,
+ ACPI_FADT_OFFSET (pm_tm_len));
}
/* length of GPE blocks must be a multiple of 2 */
@@ -132,15 +139,17 @@ acpi_cm_validate_fadt (
if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) &&
(acpi_gbl_FADT->gpe0blk_len & 1))
{
- status = acpi_cm_fadt_register_error ("GPE0_BLK_LEN",
- (u32) acpi_gbl_FADT->gpe0blk_len);
+ status = acpi_cm_fadt_register_error ("(x)GPE0_BLK_LEN",
+ (u32) acpi_gbl_FADT->gpe0blk_len,
+ ACPI_FADT_OFFSET (gpe0blk_len));
}
if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) &&
(acpi_gbl_FADT->gpe1_blk_len & 1))
{
- status = acpi_cm_fadt_register_error ("GPE1_BLK_LEN",
- (u32) acpi_gbl_FADT->gpe1_blk_len);
+ status = acpi_cm_fadt_register_error ("(x)GPE1_BLK_LEN",
+ (u32) acpi_gbl_FADT->gpe1_blk_len,
+ ACPI_FADT_OFFSET (gpe1_blk_len));
}
return (status);
diff --git a/drivers/acpi/common/cmobject.c b/drivers/acpi/common/cmobject.c
index 95e70fb14..5f73abaaf 100644
--- a/drivers/acpi/common/cmobject.c
+++ b/drivers/acpi/common/cmobject.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmobject - ACPI object create/delete/size/cache routines
- * $Revision: 34 $
+ * $Revision: 35 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -434,7 +434,7 @@ acpi_cm_get_simple_object_size (
break;
- case ACPI_TYPE_NUMBER:
+ case ACPI_TYPE_INTEGER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_POWER:
diff --git a/drivers/acpi/common/cmutils.c b/drivers/acpi/common/cmutils.c
index cb3d959c8..b0ee8b4d9 100644
--- a/drivers/acpi/common/cmutils.c
+++ b/drivers/acpi/common/cmutils.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: cmutils - common utility procedures
- * $Revision: 21 $
+ * $Revision: 23 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -647,16 +647,16 @@ acpi_cm_resolve_package_references (
if (sub_object->common.type == INTERNAL_TYPE_REFERENCE) {
if (sub_object->reference.op_code == AML_ZERO_OP) {
- sub_object->common.type = ACPI_TYPE_NUMBER;
- sub_object->number.value = 0;
+ sub_object->common.type = ACPI_TYPE_INTEGER;
+ sub_object->integer.value = 0;
}
else if (sub_object->reference.op_code == AML_ONE_OP) {
- sub_object->common.type = ACPI_TYPE_NUMBER;
- sub_object->number.value = 1;
+ sub_object->common.type = ACPI_TYPE_INTEGER;
+ sub_object->integer.value = 1;
}
else if (sub_object->reference.op_code == AML_ONES_OP) {
- sub_object->common.type = ACPI_TYPE_NUMBER;
- sub_object->number.value = ACPI_INTEGER_MAX;
+ sub_object->common.type = ACPI_TYPE_INTEGER;
+ sub_object->integer.value = ACPI_INTEGER_MAX;
}
}
}
diff --git a/drivers/acpi/common/cmxface.c b/drivers/acpi/common/cmxface.c
index fc063850c..ce87b50d2 100644
--- a/drivers/acpi/common/cmxface.c
+++ b/drivers/acpi/common/cmxface.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: cmxface - External interfaces for "global" ACPI functions
- * $Revision: 55 $
+ * $Revision: 62 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -304,16 +304,22 @@ acpi_get_system_info (
out_buffer->length = sizeof (ACPI_SYSTEM_INFO);
info_ptr = (ACPI_SYSTEM_INFO *) out_buffer->pointer;
- /* TBD [Future]: need a version number, or use the version string */
- info_ptr->acpi_ca_version = 0x1234;
+ info_ptr->acpi_ca_version = ACPI_CA_VERSION;
/* System flags (ACPI capabilities) */
info_ptr->flags = acpi_gbl_system_flags;
/* Timer resolution - 24 or 32 bits */
-
- info_ptr->timer_resolution = acpi_hw_pmt_resolution ();
+ if (!acpi_gbl_FADT) {
+ info_ptr->timer_resolution = 0;
+ }
+ else if (acpi_gbl_FADT->tmr_val_ext == 0) {
+ info_ptr->timer_resolution = 24;
+ }
+ else {
+ info_ptr->timer_resolution = 32;
+ }
/* Clear the reserved fields */
diff --git a/drivers/acpi/cpu.c b/drivers/acpi/cpu.c
index 00ec59da5..3e4adcee2 100644
--- a/drivers/acpi/cpu.c
+++ b/drivers/acpi/cpu.c
@@ -39,6 +39,9 @@ static int acpi_c3_tested = 0;
static int acpi_max_c_state = 1;
static int acpi_pm_tmr_len;
+#define MAX_C2_LATENCY 100
+#define MAX_C3_LATENCY 1000
+
/*
* Clear busmaster activity flag
*/
@@ -251,10 +254,7 @@ static ACPI_STATUS
acpi_found_cpu(ACPI_HANDLE handle, u32 level, void *ctx, void **value)
{
ACPI_OBJECT obj;
- ACPI_CX_STATE lat[4];
- ACPI_CPU_THROTTLING_STATE throttle[ACPI_MAX_THROTTLE];
ACPI_BUFFER buf;
- int i, count;
buf.length = sizeof(obj);
buf.pointer = &obj;
@@ -273,41 +273,28 @@ acpi_found_cpu(ACPI_HANDLE handle, u32 level, void *ctx, void **value)
acpi_pblk = obj.processor.pblk_address;
- buf.length = sizeof(lat);
- buf.pointer = lat;
- if (!ACPI_SUCCESS(acpi_get_processor_cx_info(handle, &buf)))
- return AE_OK;
+ if (acpi_fadt.plvl2_lat
+ && acpi_fadt.plvl2_lat <= MAX_C2_LATENCY) {
+ acpi_c2_exit_latency
+ = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl2_lat);
+ acpi_c2_enter_latency
+ = ACPI_MICROSEC_TO_TMR_TICKS(ACPI_TMR_HZ / 1000);
+ acpi_max_c_state = 2;
- if (lat[2].latency < MAX_CX_STATE_LATENCY) {
printk(KERN_INFO "ACPI: System firmware supports: C2");
- acpi_c2_exit_latency = lat[2].latency;
- acpi_max_c_state = 2;
- if (lat[3].latency < MAX_CX_STATE_LATENCY) {
- printk(" C3");
- acpi_c3_exit_latency = lat[3].latency;
+ if (acpi_fadt.plvl3_lat
+ && acpi_fadt.plvl3_lat <= MAX_C3_LATENCY) {
+ acpi_c3_exit_latency
+ = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl3_lat);
+ acpi_c3_enter_latency
+ = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl3_lat * 5);
acpi_max_c_state = 3;
- }
- printk("\n");
- }
- memset(throttle, 0, sizeof(throttle));
- buf.length = sizeof(throttle);
- buf.pointer = throttle;
-
- if (!ACPI_SUCCESS(acpi_get_processor_throttling_info(handle, &buf)))
- return AE_OK;
-
- for (i = 0, count = 0; i < ACPI_MAX_THROTTLE; i++) {
- if (throttle[i].percent_of_clock)
- count++;
- }
-
- /* 0% throttled really doesn't count */
- count--;
+ printk(" C3");
+ }
- if (count > 0) {
- DEBUG_PRINT(ACPI_INFO, ("%d throttling states\n", count));
+ printk("\n");
}
return AE_OK;
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index 1ce75dbdb..897324547 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dsfield - Dispatcher field routines
- * $Revision: 29 $
+ * $Revision: 31 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index d9d73ccea..4834f47ec 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
- * $Revision: 53 $
+ * $Revision: 56 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index e6913fabf..dfaa2a103 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: dsmthdat - control method arguments and local variables
- * $Revision: 36 $
+ * $Revision: 39 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,7 +40,7 @@
*
* FUNCTION: Acpi_ds_method_data_init
*
- * PARAMETERS: *Obj_desc
+ * PARAMETERS: Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -97,7 +97,7 @@ acpi_ds_method_data_init (
*
* FUNCTION: Acpi_ds_method_data_delete_all
*
- * PARAMETERS: None
+ * PARAMETERS: Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -153,7 +153,9 @@ acpi_ds_method_data_delete_all (
*
* FUNCTION: Acpi_ds_method_data_init_args
*
- * PARAMETERS: None
+ * PARAMETERS: *Params - Pointer to a parameter list for the method
+ * Max_param_count - The arg count for this method
+ * Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -214,6 +216,7 @@ acpi_ds_method_data_init_args (
* Index - Which local_var or argument to get
* Entry - Pointer to where a pointer to the stack
* entry is returned.
+ * Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -276,6 +279,7 @@ acpi_ds_method_data_get_entry (
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument to get
* Object - Object to be inserted into the stack entry
+ * Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -320,6 +324,7 @@ acpi_ds_method_data_set_entry (
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument whose type
* to get
+ * Walk_state - Current walk state object
*
* RETURN: Data type of selected Arg or Local
* Used only in Exec_monadic2()/Type_op.
@@ -366,6 +371,7 @@ acpi_ds_method_data_get_type (
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument whose type
* to get
+ * Walk_state - Current walk state object
*
* RETURN: Get the Node associated with a local or arg.
*
@@ -418,7 +424,8 @@ acpi_ds_method_data_get_nte (
*
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument to get
- * *Dest_desc - Descriptor into which selected Arg
+ * Walk_state - Current walk state object
+ * *Dest_desc - Ptr to Descriptor into which selected Arg
* or Local value should be copied
*
* RETURN: Status
@@ -474,10 +481,12 @@ acpi_ds_method_data_get_value (
switch (type)
{
case MTH_TYPE_ARG:
+
return (AE_AML_UNINITIALIZED_ARG);
break;
case MTH_TYPE_LOCAL:
+
return (AE_AML_UNINITIALIZED_LOCAL);
break;
}
@@ -502,6 +511,7 @@ acpi_ds_method_data_get_value (
*
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument to delete
+ * Walk_state - Current walk state object
*
* RETURN: Status
*
@@ -548,7 +558,6 @@ acpi_ds_method_data_delete_value (
* Decrement the reference count by one to balance the
* increment when the object was stored in the slot.
*/
-
acpi_cm_remove_reference (object);
}
@@ -563,18 +572,14 @@ acpi_ds_method_data_delete_value (
*
* PARAMETERS: Type - Either MTH_TYPE_LOCAL or MTH_TYPE_ARG
* Index - Which local_var or argument to set
- * *Src_desc - Value to be stored
- * *Dest_desc - Descriptor into which *Src_desc
- * can be copied, or NULL if one must
- * be allocated for the purpose. If
- * provided, this descriptor will be
- * used for the new value.
+ * Src_desc - Value to be stored
+ * Walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Store a value in an Arg or Local. The Src_desc is installed
* as the new value for the Arg or Local and the reference count
- * is incremented.
+ * for Src_desc is incremented.
*
******************************************************************************/
@@ -644,7 +649,6 @@ acpi_ds_method_data_set_value (
* Store this object into the Node
* (do the indirect store)
*/
-
status = acpi_ns_attach_object ((ACPI_NAMESPACE_NODE *) *entry, src_desc,
src_desc->common.type);
return (status);
@@ -652,10 +656,18 @@ acpi_ds_method_data_set_value (
/*
- * Otherwise, just delete the existing object
- * before storing the new one
+ * Perform "Implicit conversion" of the new object to the type of the
+ * existing object
*/
+ status = acpi_aml_convert_to_target_type ((*entry)->common.type, &src_desc, walk_state);
+ if (ACPI_FAILURE (status)) {
+ goto cleanup;
+ }
+ /*
+ * Delete the existing object
+ * before storing the new one
+ */
acpi_ds_method_data_delete_value (type, index, walk_state);
}
@@ -666,7 +678,6 @@ acpi_ds_method_data_set_value (
* Install the new object in the stack entry
* (increments the object reference count by one)
*/
-
status = acpi_ds_method_data_set_entry (type, index, src_desc, walk_state);
if (ACPI_FAILURE (status)) {
goto cleanup;
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 042cc4a80..0a6c88b4d 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dsobject - Dispatcher object management routines
- * $Revision: 53 $
+ * $Revision: 56 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -241,14 +241,14 @@ acpi_ds_init_object_from_op (
/* We are expecting a number */
- if (arg_desc->common.type != ACPI_TYPE_NUMBER) {
+ if (arg_desc->common.type != ACPI_TYPE_INTEGER) {
acpi_cm_remove_reference (arg_desc);
return (AE_TYPE);
}
/* Get the value, delete the internal object */
- (*obj_desc)->buffer.length = (u32) arg_desc->number.value;
+ (*obj_desc)->buffer.length = (u32) arg_desc->integer.value;
acpi_cm_remove_reference (arg_desc);
/* Allocate the buffer */
@@ -304,8 +304,8 @@ acpi_ds_init_object_from_op (
status = acpi_ds_build_internal_object (walk_state, op, obj_desc);
break;
- case ACPI_TYPE_NUMBER:
- (*obj_desc)->number.value = op->value.integer;
+ case ACPI_TYPE_INTEGER:
+ (*obj_desc)->integer.value = op->value.integer;
break;
@@ -411,18 +411,20 @@ acpi_ds_build_internal_simple_obj (
acpi_ns_externalize_name (ACPI_UINT32_MAX, op->value.string, &length, &name);
if (name) {
- REPORT_WARNING (("Reference %s AML %X not found\n",
+ REPORT_WARNING (("Reference %s at AML %X not found\n",
name, op->aml_offset));
acpi_cm_free (name);
}
else {
- REPORT_WARNING (("Reference %s AML %X not found\n",
+ REPORT_WARNING (("Reference %s at AML %X not found\n",
op->value.string, op->aml_offset));
}
*obj_desc_ptr = NULL;
}
- return (status);
+ else {
+ return (status);
+ }
}
}
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 21e15bc26..ba06cf4db 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -2,12 +2,12 @@
*
* Module Name: dsopcode - Dispatcher Op Region support and handling of
* "control" opcodes
- * $Revision: 28 $
+ * $Revision: 30 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -355,7 +355,7 @@ acpi_ds_eval_field_unit_operands (
}
- offset = (u32) off_desc->number.value;
+ offset = (u32) off_desc->integer.value;
/*
@@ -427,7 +427,7 @@ acpi_ds_eval_field_unit_operands (
/* Offset is in bits, count is in bits */
bit_offset = offset;
- bit_count = (u16) cnt_desc->number.value;
+ bit_count = (u16) cnt_desc->integer.value;
break;
@@ -586,7 +586,7 @@ acpi_ds_eval_region_operands (
*/
operand_desc = walk_state->operands[walk_state->num_operands - 1];
- obj_desc->region.length = (u32) operand_desc->number.value;
+ obj_desc->region.length = (u32) operand_desc->integer.value;
acpi_cm_remove_reference (operand_desc);
/*
@@ -595,7 +595,7 @@ acpi_ds_eval_region_operands (
*/
operand_desc = walk_state->operands[walk_state->num_operands - 2];
- obj_desc->region.address = (ACPI_PHYSICAL_ADDRESS) operand_desc->number.value;
+ obj_desc->region.address = (ACPI_PHYSICAL_ADDRESS) operand_desc->integer.value;
acpi_cm_remove_reference (operand_desc);
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 2efa43e51..d31464840 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: dsutils - Dispatcher utilities
- * $Revision: 50 $
+ * $Revision: 52 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -566,7 +566,7 @@ acpi_ds_map_opcode_to_data_type (
case AML_WORD_OP:
case AML_DWORD_OP:
- data_type = ACPI_TYPE_NUMBER;
+ data_type = ACPI_TYPE_INTEGER;
break;
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index 1f7e329ad..a2e1b73d3 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -2,12 +2,12 @@
*
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
- * $Revision: 50 $
+ * $Revision: 55 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,7 +46,7 @@
*
* RETURN: Status
*
- * DESCRIPTION:
+ * DESCRIPTION: Get the result of a predicate evaluation
*
****************************************************************************/
@@ -93,22 +93,22 @@ acpi_ds_get_predicate_value (
* be a number
*/
- if (obj_desc->common.type != ACPI_TYPE_NUMBER) {
+ if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
- /* TBD: 64/32-bit */
+ /* Truncate the predicate to 32-bits if necessary */
- obj_desc->number.value &= (UINT64) 0x00000000FFFFFFFF;
+ acpi_aml_truncate_for32bit_table (obj_desc, walk_state);
/*
* Save the result of the predicate evaluation on
* the control stack
*/
- if (obj_desc->number.value) {
+ if (obj_desc->integer.value) {
walk_state->control_state->common.value = TRUE;
}
@@ -330,6 +330,8 @@ acpi_ds_exec_end_op (
walk_state->num_operands = 0;
walk_state->return_desc = NULL;
+ walk_state->op_info = op_info;
+ walk_state->opcode = opcode;
/* Call debugger for single step support (DEBUG build only) */
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index b3f1dc062..300d02b7f 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dswload - Dispatcher namespace load callbacks
- * $Revision: 24 $
+ * $Revision: 26 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index 11b6a6cb9..332ab4ffd 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dswscope - Scope stack manipulation
- * $Revision: 40 $
+ * $Revision: 42 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index a15a6f5f7..a436945df 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
- * $Revision: 36 $
+ * $Revision: 38 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,31 +37,6 @@
/*******************************************************************************
*
- * FUNCTION: Acpi_ds_result_stack_clear
- *
- * PARAMETERS: Walk_state - Current Walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Reset this walk's result stack pointers to zero, thus setting
- * the stack to zero.
- *
- ******************************************************************************/
-
-ACPI_STATUS
-xxx_acpi_ds_result_stack_clear (
- ACPI_WALK_STATE *walk_state)
-{
-/*
- Walk_state->Num_results = 0;
- Walk_state->Current_result = 0;
-*/
- return (AE_OK);
-}
-
-
-/*******************************************************************************
- *
* FUNCTION: Acpi_ds_result_insert
*
* PARAMETERS: Object - Object to push
@@ -135,7 +110,7 @@ acpi_ds_result_remove (
/* Check for a valid result object */
if (!state->results.obj_desc [index]) {
- return (AE_AML_NO_OPERAND);
+ return (AE_AML_NO_RETURN_VALUE);
}
/* Remove the object */
@@ -179,7 +154,7 @@ acpi_ds_result_pop (
if (!state->results.num_results) {
- return (AE_STACK_UNDERFLOW);
+ return (AE_AML_NO_RETURN_VALUE);
}
/* Remove top element */
@@ -198,12 +173,12 @@ acpi_ds_result_pop (
}
- return (AE_STACK_UNDERFLOW);
+ return (AE_AML_NO_RETURN_VALUE);
}
/*******************************************************************************
*
- * FUNCTION: Acpi_ds_result_pop
+ * FUNCTION: Acpi_ds_result_pop_from_bottom
*
* PARAMETERS: Object - Where to return the popped object
* Walk_state - Current Walk state
@@ -231,7 +206,7 @@ acpi_ds_result_pop_from_bottom (
if (!state->results.num_results) {
- return (AE_STACK_UNDERFLOW);
+ return (AE_AML_NO_RETURN_VALUE);
}
/* Remove Bottom element */
@@ -250,7 +225,7 @@ acpi_ds_result_pop_from_bottom (
/* Check for a valid result object */
if (!*object) {
- return (AE_AML_NO_OPERAND);
+ return (AE_AML_NO_RETURN_VALUE);
}
@@ -260,15 +235,14 @@ acpi_ds_result_pop_from_bottom (
/*******************************************************************************
*
- * FUNCTION: Acpi_ds_result_pop
+ * FUNCTION: Acpi_ds_result_push
*
* PARAMETERS: Object - Where to return the popped object
* Walk_state - Current Walk state
*
* RETURN: Status
*
- * DESCRIPTION: Pop an object off the bottom of this walk's result stack. In
- * other words, this is a FIFO.
+ * DESCRIPTION: Push an object onto the current result stack
*
******************************************************************************/
@@ -282,7 +256,7 @@ acpi_ds_result_push (
state = walk_state->results;
if (!state) {
- return (AE_OK);
+ return (AE_AML_INTERNAL);
}
if (state->results.num_results == OBJ_NUM_OPERANDS) {
@@ -719,6 +693,7 @@ acpi_ds_create_walk_state (
ACPI_WALK_LIST *walk_list)
{
ACPI_WALK_STATE *walk_state;
+ ACPI_STATUS status;
acpi_cm_acquire_mutex (ACPI_MTX_CACHES);
@@ -736,7 +711,7 @@ acpi_ds_create_walk_state (
acpi_gbl_walk_state_cache_depth--;
acpi_cm_release_mutex (ACPI_MTX_CACHES);
- }
+ }
else {
/* The cache is empty, create a new object */
@@ -762,6 +737,14 @@ acpi_ds_create_walk_state (
acpi_ds_method_data_init (walk_state);
#endif
+ /* Create an initial result stack entry */
+
+ status = acpi_ds_result_stack_push (walk_state);
+ if (ACPI_FAILURE (status)) {
+ return (NULL);
+ }
+
+
/* Put the new state at the head of the walk list */
acpi_ds_push_walk_state (walk_state, walk_list);
diff --git a/drivers/acpi/driver.c b/drivers/acpi/driver.c
index 222598120..241528fc6 100644
--- a/drivers/acpi/driver.c
+++ b/drivers/acpi/driver.c
@@ -55,7 +55,7 @@ struct acpi_run_entry
static spinlock_t acpi_event_lock = SPIN_LOCK_UNLOCKED;
static volatile u32 acpi_event_status = 0;
-static volatile acpi_sstate_t acpi_event_state = ACPI_S0;
+static volatile acpi_sstate_t acpi_event_state = ACPI_STATE_S0;
static DECLARE_WAIT_QUEUE_HEAD(acpi_event_wait);
static volatile int acpi_thread_pid = -1;
@@ -243,7 +243,7 @@ acpi_do_event(ctl_table * ctl,
/*
* Enter system sleep state
*/
-static int
+/*static int
acpi_do_sleep(ctl_table * ctl,
int write,
struct file *file,
@@ -257,13 +257,13 @@ acpi_do_sleep(ctl_table * ctl,
}
}
else {
- int status = acpi_enter_sx(ACPI_S1);
+ int status = acpi_enter_sx(ACPI_STATE_S1);
if (status)
return status;
}
file->f_pos += *len;
return 0;
-}
+}*/
/*
@@ -382,7 +382,8 @@ static struct ctl_table acpi_table[] =
&acpi_c3_enter_latency, sizeof(acpi_c3_enter_latency),
0644, NULL, &acpi_do_ulong},
- {ACPI_SLEEP, "sleep", NULL, 0, 0600, NULL, &acpi_do_sleep},
+/* until it actually works */
+/* {ACPI_SLEEP, "sleep", NULL, 0, 0600, NULL, &acpi_do_sleep},*/
{ACPI_EVENT, "event", NULL, 0, 0400, NULL, &acpi_do_event},
@@ -415,7 +416,9 @@ static struct ctl_table acpi_dir_table[] =
static int
acpi_thread(void *context)
{
- ACPI_PHYSICAL_ADDRESS rsdp_phys;
+ ACPI_PHYSICAL_ADDRESS rsdp_phys;
+ ACPI_BUFFER buffer;
+ ACPI_SYSTEM_INFO sys_info;
/*
* initialize
@@ -437,8 +440,6 @@ acpi_thread(void *context)
rsdp_phys = efi.acpi;
#endif
- printk(KERN_ERR "ACPI: System description tables found\n");
-
if (!ACPI_SUCCESS(acpi_find_and_load_tables(rsdp_phys)))
return -ENODEV;
@@ -448,6 +449,17 @@ acpi_thread(void *context)
return -ENODEV;
}
+ buffer.length = sizeof(sys_info);
+ buffer.pointer = &sys_info;
+
+ if (!ACPI_SUCCESS (acpi_get_system_info(&buffer))) {
+ printk(KERN_ERR "ACPI: Could not get system info\n");
+ acpi_terminate();
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "ACPI: Core Subsystem version [%x]\n", sys_info.acpi_ca_version);
+
if (!ACPI_SUCCESS(acpi_enable_subsystem(ACPI_FULL_INITIALIZATION))) {
printk(KERN_ERR "ACPI: Subsystem enable failed\n");
acpi_terminate();
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8f6f61e36..5e42683c6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -270,25 +270,6 @@ ec_transaction (
return_ACPI_STATUS(status);
}
-static ACPI_STATUS
-ec_space_setup (
- ACPI_HANDLE region_handle,
- UINT32 function,
- void *handler_context,
- void **return_context)
-{
- // TODO: What is this function for?
- /*
- * The ec object is in the handler context and is needed
- * when calling the ec_space_handler.
- */
- *return_context = handler_context;
-
- return AE_OK;
-}
-
-
-
static void
ec_query_handler (
@@ -419,8 +400,6 @@ ec_region_setup (
{
FUNCTION_TRACE("acpi_ec_region_setup");
- printk("acpi_ec_region_setup\n");
-
if (function == ACPI_REGION_DEACTIVATE)
{
if (*region_context)
@@ -556,10 +535,10 @@ found_ec(
buf.length = sizeof(obj);
buf.pointer = &obj;
if (!ACPI_SUCCESS(acpi_evaluate_object(handle, "_GPE", NULL, &buf))
- || obj.type != ACPI_TYPE_NUMBER)
+ || obj.type != ACPI_TYPE_INTEGER)
return AE_OK;
- ec_cxt->gpe_bit = obj.number.value;
+ ec_cxt->gpe_bit = obj.integer.value;
/* determine if we need the Global Lock when accessing */
buf.length = sizeof(obj);
@@ -568,12 +547,12 @@ found_ec(
status = acpi_evaluate_object(handle, "_GLK", NULL, &buf);
if (status == AE_NOT_FOUND)
ec_cxt->need_global_lock = 0;
- else if (!ACPI_SUCCESS(status) || obj.type != ACPI_TYPE_NUMBER) {
+ else if (!ACPI_SUCCESS(status) || obj.type != ACPI_TYPE_INTEGER) {
DEBUG_PRINT(ACPI_ERROR, ("_GLK failed\n"));
return AE_OK;
}
- ec_cxt->need_global_lock = obj.number.value;
+ ec_cxt->need_global_lock = obj.integer.value;
printk(KERN_INFO "ACPI: found EC @ (0x%02x,0x%02x,gpe %d GL %d)\n",
ec_cxt->data_port, ec_cxt->status_port, ec_cxt->gpe_bit,
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index d5ce143a8..afdd477db 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -2,12 +2,12 @@
*
* Module Name: evevent - Fixed and General Purpose Acpi_event
* handling and dispatch
- * $Revision: 32 $
+ * $Revision: 33 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index a52f2dc3d..cae4a44b2 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -2,12 +2,12 @@
*
* Module Name: evmisc - ACPI device notification handler dispatch
* and ACPI Global Lock support
- * $Revision: 20 $
+ * $Revision: 22 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 53cae6392..071639a27 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: evregion - ACPI Address_space (Op_region) handler dispatch
- * $Revision: 93 $
+ * $Revision: 94 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -122,8 +122,8 @@ acpi_ev_execute_reg_method (
u32 function)
{
ACPI_OPERAND_OBJECT *params[3];
- ACPI_OPERAND_OBJECT space_iD_obj;
- ACPI_OPERAND_OBJECT function_obj;
+ ACPI_OPERAND_OBJECT space_id_desc;
+ ACPI_OPERAND_OBJECT function_desc;
ACPI_STATUS status;
@@ -141,24 +141,24 @@ acpi_ev_execute_reg_method (
* Passed as a parameter
*/
- acpi_cm_init_static_object (&space_iD_obj);
- acpi_cm_init_static_object (&function_obj);
+ acpi_cm_init_static_object (&space_id_desc);
+ acpi_cm_init_static_object (&function_desc);
/*
* Method requires two parameters.
*/
- params [0] = &space_iD_obj;
- params [1] = &function_obj;
+ params [0] = &space_id_desc;
+ params [1] = &function_desc;
params [2] = NULL;
/*
* Set up the parameter objects
*/
- space_iD_obj.common.type = ACPI_TYPE_NUMBER;
- space_iD_obj.number.value = region_obj->region.space_id;
+ space_id_desc.common.type = ACPI_TYPE_INTEGER;
+ space_id_desc.integer.value = region_obj->region.space_id;
- function_obj.common.type = ACPI_TYPE_NUMBER;
- function_obj.number.value = function;
+ function_desc.common.type = ACPI_TYPE_INTEGER;
+ function_desc.integer.value = function;
/*
* Execute the method, no return value
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 92e5f198f..eb5e2033e 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: evrgnini- ACPI Address_space (Op_region) init
- * $Revision: 31 $
+ * $Revision: 33 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 02320e93c..369d2f184 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -2,12 +2,12 @@
*
* Module Name: evsci - System Control Interrupt configuration and
* legacy to ACPI mode state transition functions
- * $Revision: 67 $
+ * $Revision: 69 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index c3bbad0ff..f0e62934f 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: evxface - External interfaces for ACPI events
- * $Revision: 97 $
+ * $Revision: 101 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -187,26 +187,25 @@ acpi_install_notify_handler (
return (AE_BAD_PARAMETER);
}
- /* Convert and validate the device handle */
-
acpi_cm_acquire_mutex (ACPI_MTX_NAMESPACE);
+ /* Convert and validate the device handle */
+
device_node = acpi_ns_convert_handle_to_entry (device);
if (!device_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
-
/*
- * Support for global notify handlers. These handlers are invoked for
- * every notifiy of the type specifiec
+ * Root Object:
+ * ------------
+ * Registering a notify handler on the root object indicates that the
+ * caller wishes to receive notifications for all objects. Note that
+ * only one <external> global handler can be registered (per notify type).
*/
-
if (device == ACPI_ROOT_OBJECT) {
- /*
- * Make sure the handler is not already installed.
- */
+ /* Make sure the handler is not already installed */
if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
acpi_gbl_sys_notify.handler) ||
@@ -222,94 +221,89 @@ acpi_install_notify_handler (
acpi_gbl_sys_notify.handler = handler;
acpi_gbl_sys_notify.context = context;
}
-
- else {
+ else /* ACPI_DEVICE_NOTIFY */ {
acpi_gbl_drv_notify.node = device_node;
acpi_gbl_drv_notify.handler = handler;
acpi_gbl_drv_notify.context = context;
}
-
/* Global notify handler installed */
-
- goto unlock_and_exit;
}
-
/*
- * These are the ONLY objects that can receive ACPI notifications
+ * Other Objects:
+ * --------------
+ * Caller will only receive notifications specific to the target object.
+ * Note that only certain object types can receive notifications.
*/
-
- if ((device_node->type != ACPI_TYPE_DEVICE) &&
- (device_node->type != ACPI_TYPE_PROCESSOR) &&
- (device_node->type != ACPI_TYPE_POWER) &&
- (device_node->type != ACPI_TYPE_THERMAL))
- {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Check for an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object ((ACPI_HANDLE) device_node);
- if (obj_desc) {
+ else {
/*
- * The object exists.
- * Make sure the handler is not already installed.
+ * These are the ONLY objects that can receive ACPI notifications
*/
-
- if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
- obj_desc->device.sys_handler) ||
- ((handler_type == ACPI_DEVICE_NOTIFY) &&
- obj_desc->device.drv_handler))
+ if ((device_node->type != ACPI_TYPE_DEVICE) &&
+ (device_node->type != ACPI_TYPE_PROCESSOR) &&
+ (device_node->type != ACPI_TYPE_POWER) &&
+ (device_node->type != ACPI_TYPE_THERMAL))
{
- status = AE_EXIST;
+ status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
- }
- else {
- /* Create a new object */
+ /* Check for an existing internal object */
- obj_desc = acpi_cm_create_internal_object (device_node->type);
- if (!obj_desc) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
+ obj_desc = acpi_ns_get_attached_object ((ACPI_HANDLE) device_node);
+ if (obj_desc) {
+
+ /* Object exists - make sure there's no handler */
+
+ if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
+ obj_desc->device.sys_handler) ||
+ ((handler_type == ACPI_DEVICE_NOTIFY) &&
+ obj_desc->device.drv_handler))
+ {
+ status = AE_EXIST;
+ goto unlock_and_exit;
+ }
}
- /* Attach new object to the Node */
+ else {
+ /* Create a new object */
- status = acpi_ns_attach_object (device, obj_desc, (u8) device_node->type);
+ obj_desc = acpi_cm_create_internal_object (device_node->type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
- if (ACPI_FAILURE (status)) {
- goto unlock_and_exit;
- }
- }
+ /* Attach new object to the Node */
+ status = acpi_ns_attach_object (device, obj_desc, (u8) device_node->type);
- /*
- * If we get here, we know that there is no handler installed
- * so let's party
- */
- notify_obj = acpi_cm_create_internal_object (INTERNAL_TYPE_NOTIFY);
- if (!notify_obj) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
+ if (ACPI_FAILURE (status)) {
+ goto unlock_and_exit;
+ }
+ }
- notify_obj->notify_handler.node = device_node;
- notify_obj->notify_handler.handler = handler;
- notify_obj->notify_handler.context = context;
+ /* Install the handler */
+ notify_obj = acpi_cm_create_internal_object (INTERNAL_TYPE_NOTIFY);
+ if (!notify_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
- if (handler_type == ACPI_SYSTEM_NOTIFY) {
- obj_desc->device.sys_handler = notify_obj;
- }
+ notify_obj->notify_handler.node = device_node;
+ notify_obj->notify_handler.handler = handler;
+ notify_obj->notify_handler.context = context;
- else {
- obj_desc->device.drv_handler = notify_obj;
- }
+ if (handler_type == ACPI_SYSTEM_NOTIFY) {
+ obj_desc->device.sys_handler = notify_obj;
+ }
+ else /* ACPI_DEVICE_NOTIFY */ {
+ obj_desc->device.drv_handler = notify_obj;
+ }
+ }
unlock_and_exit:
acpi_cm_release_mutex (ACPI_MTX_NAMESPACE);
@@ -343,7 +337,6 @@ acpi_remove_notify_handler (
ACPI_NAMESPACE_NODE *device_node;
ACPI_STATUS status = AE_OK;
-
/* Parameter validation */
if ((!handler) ||
@@ -363,63 +356,92 @@ acpi_remove_notify_handler (
}
/*
- * These are the ONLY objects that can receive ACPI notifications
+ * Root Object:
+ * ------------
*/
+ if (device == ACPI_ROOT_OBJECT) {
- if ((device_node->type != ACPI_TYPE_DEVICE) &&
- (device_node->type != ACPI_TYPE_PROCESSOR) &&
- (device_node->type != ACPI_TYPE_POWER) &&
- (device_node->type != ACPI_TYPE_THERMAL))
- {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Check for an existing internal object */
+ if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
+ !acpi_gbl_sys_notify.handler) ||
+ ((handler_type == ACPI_DEVICE_NOTIFY) &&
+ !acpi_gbl_drv_notify.handler))
+ {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
- obj_desc = acpi_ns_get_attached_object ((ACPI_HANDLE) device_node);
- if (!obj_desc) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
+ if (handler_type == ACPI_SYSTEM_NOTIFY) {
+ acpi_gbl_sys_notify.node = NULL;
+ acpi_gbl_sys_notify.handler = NULL;
+ acpi_gbl_sys_notify.context = NULL;
+ }
+ else {
+ acpi_gbl_drv_notify.node = NULL;
+ acpi_gbl_drv_notify.handler = NULL;
+ acpi_gbl_drv_notify.context = NULL;
+ }
}
/*
- * The object exists.
- *
- * Make sure the handler is installed.
+ * Other Objects:
+ * --------------
*/
-
- if (handler_type == ACPI_SYSTEM_NOTIFY) {
- notify_obj = obj_desc->device.sys_handler;
- }
else {
- notify_obj = obj_desc->device.drv_handler;
- }
+ /*
+ * These are the ONLY objects that can receive ACPI notifications
+ */
+ if ((device_node->type != ACPI_TYPE_DEVICE) &&
+ (device_node->type != ACPI_TYPE_PROCESSOR) &&
+ (device_node->type != ACPI_TYPE_POWER) &&
+ (device_node->type != ACPI_TYPE_THERMAL))
+ {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
- if ((!notify_obj) ||
- (notify_obj->notify_handler.handler != handler))
- {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
+ /* Check for an existing internal object */
- /*
- * Now we can remove the handler
- */
- if (handler_type == ACPI_SYSTEM_NOTIFY) {
- obj_desc->device.sys_handler = NULL;
- }
- else {
- obj_desc->device.drv_handler = NULL;
+ obj_desc = acpi_ns_get_attached_object ((ACPI_HANDLE) device_node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Object exists - make sure there's an existing handler */
+
+ if (handler_type == ACPI_SYSTEM_NOTIFY) {
+ notify_obj = obj_desc->device.sys_handler;
+ }
+ else {
+ notify_obj = obj_desc->device.drv_handler;
+ }
+
+ if ((!notify_obj) ||
+ (notify_obj->notify_handler.handler != handler))
+ {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Remove the handler */
+
+ if (handler_type == ACPI_SYSTEM_NOTIFY) {
+ obj_desc->device.sys_handler = NULL;
+ }
+ else {
+ obj_desc->device.drv_handler = NULL;
+ }
+
+ acpi_cm_remove_reference (notify_obj);
}
- acpi_cm_remove_reference (notify_obj);
unlock_and_exit:
acpi_cm_release_mutex (ACPI_MTX_NAMESPACE);
return (status);
}
+
/******************************************************************************
*
* FUNCTION: Acpi_install_gpe_handler
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 5b7652e52..9864301a3 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
- * $Revision: 26 $
+ * $Revision: 28 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 71116cfc8..e54bbab24 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -2,12 +2,12 @@
*
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
- * $Revision: 26 $
+ * $Revision: 27 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index d2154a1a2..304d3dec6 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -1,12 +1,13 @@
+
/******************************************************************************
*
- * Module Name: hwacpi - ACPI hardware functions - mode and timer
- * $Revision: 34 $
+ * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
+ * $Revision: 36 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -304,48 +305,3 @@ acpi_hw_get_mode_capabilities (void)
}
-/******************************************************************************
- *
- * FUNCTION: Acpi_hw_pmt_ticks
- *
- * PARAMETERS: none
- *
- * RETURN: Current value of the ACPI PMT (timer)
- *
- * DESCRIPTION: Obtains current value of ACPI PMT
- *
- ******************************************************************************/
-
-u32
-acpi_hw_pmt_ticks (void)
-{
- u32 ticks;
-
- ticks = acpi_os_in32 ((ACPI_IO_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xpm_tmr_blk.address));
-
- return (ticks);
-}
-
-
-/******************************************************************************
- *
- * FUNCTION: Acpi_hw_pmt_resolution
- *
- * PARAMETERS: none
- *
- * RETURN: Number of bits of resolution in the PMT (either 24 or 32)
- *
- * DESCRIPTION: Obtains resolution of the ACPI PMT (either 24bit or 32bit)
- *
- ******************************************************************************/
-
-u32
-acpi_hw_pmt_resolution (void)
-{
- if (0 == acpi_gbl_FADT->tmr_val_ext) {
- return (24);
- }
-
- return (32);
-}
-
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 2b413fac8..b7b777df6 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -1,12 +1,13 @@
+
/******************************************************************************
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
- * $Revision: 25 $
+ * $Revision: 28 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index 77b6a1c8c..fc96c51fb 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -3,12 +3,12 @@
*
* Module Name: hwregs - Read/write access functions for the various ACPI
* control and status registers.
- * $Revision: 86 $
+ * $Revision: 88 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,7 +37,7 @@
/* This matches the #defines in actypes.h. */
NATIVE_CHAR *sleep_state_table[] = {"\\_S0_","\\_S1_","\\_S2_","\\_S3_",
- "\\_S4_","\\_S4_b","\\_S5_"};
+ "\\_S4_","\\_S5_","\\_S4_b"};
/*******************************************************************************
@@ -53,7 +53,7 @@ NATIVE_CHAR *sleep_state_table[] = {"\\_S0_","\\_S1_","\\_S2_","
*
******************************************************************************/
-static u32
+u32
acpi_hw_get_bit_shift (
u32 mask)
{
@@ -185,9 +185,9 @@ acpi_hw_obtain_sleep_type_register_data (
}
else if (((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_NUMBER) ||
+ ACPI_TYPE_INTEGER) ||
((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_NUMBER))
+ ACPI_TYPE_INTEGER))
{
/* Must have two */
@@ -199,9 +199,9 @@ acpi_hw_obtain_sleep_type_register_data (
/*
* Valid _Sx_ package size, type, and value
*/
- *slp_typ_a = (u8) (obj_desc->package.elements[0])->number.value;
+ *slp_typ_a = (u8) (obj_desc->package.elements[0])->integer.value;
- *slp_typ_b = (u8) (obj_desc->package.elements[1])->number.value;
+ *slp_typ_b = (u8) (obj_desc->package.elements[1])->integer.value;
}
@@ -581,13 +581,8 @@ acpi_hw_register_read (
case PM1_CONTROL: /* 16-bit access */
- if (register_id != SLP_TYPE_B) {
- value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
- }
-
- if (register_id != SLP_TYPE_A) {
- value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
- }
+ value = acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
+ value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
break;
@@ -696,30 +691,20 @@ acpi_hw_register_write (
case PM1_CONTROL: /* 16-bit access */
- /*
- * If SLP_TYP_A or SLP_TYP_B, only write to one reg block.
- * Otherwise, write to both.
- */
- if (register_id == SLP_TYPE_A) {
- acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
- }
- else if (register_id == SLP_TYPE_B) {
- acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
- }
- else {
- /* disable/re-enable interrupts if sleeping */
- if (register_id == SLP_EN) {
- disable();
- }
+ acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
+ acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
+ break;
- acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
- acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
- if (register_id == SLP_EN) {
- enable();
- }
- }
+ case PM1_a_CONTROL: /* 16-bit access */
+
+ acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0);
+ break;
+
+
+ case PM1_b_CONTROL: /* 16-bit access */
+ acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0);
break;
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
new file mode 100644
index 000000000..892c721c6
--- /dev/null
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -0,0 +1,189 @@
+
+/******************************************************************************
+ *
+ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
+ * $Revision: 5 $
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000, 2001 R. Byron Moore
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "acpi.h"
+#include "acnamesp.h"
+#include "achware.h"
+
+#define _COMPONENT HARDWARE
+ MODULE_NAME ("hwsleep")
+
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS: Physical_address - Physical address of ACPI real mode
+ * entry point.
+ *
+ * RETURN: AE_OK or AE_ERROR
+ *
+ * DESCRIPTION: Access function for d_firmware_waking_vector field in FACS
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_set_firmware_waking_vector (
+ ACPI_PHYSICAL_ADDRESS physical_address)
+{
+
+
+ /* Make sure that we have an FACS */
+
+ if (!acpi_gbl_FACS) {
+ return (AE_NO_ACPI_TABLES);
+ }
+
+ /* Set the vector */
+
+ if (acpi_gbl_FACS->vector_width == 32) {
+ * (u32 *) acpi_gbl_FACS->firmware_waking_vector = (u32) physical_address;
+ }
+ else {
+ *acpi_gbl_FACS->firmware_waking_vector = physical_address;
+ }
+
+ return (AE_OK);
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_get_firmware_waking_vector
+ *
+ * PARAMETERS: *Physical_address - Output buffer where contents of
+ * the Firmware_waking_vector field of
+ * the FACS will be stored.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Access function for d_firmware_waking_vector field in FACS
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_get_firmware_waking_vector (
+ ACPI_PHYSICAL_ADDRESS *physical_address)
+{
+
+
+ if (!physical_address) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Make sure that we have an FACS */
+
+ if (!acpi_gbl_FACS) {
+ return (AE_NO_ACPI_TABLES);
+ }
+
+ /* Get the vector */
+
+ if (acpi_gbl_FACS->vector_width == 32) {
+ *physical_address = * (u32 *) acpi_gbl_FACS->firmware_waking_vector;
+ }
+ else {
+ *physical_address = *acpi_gbl_FACS->firmware_waking_vector;
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_enter_sleep_state
+ *
+ * PARAMETERS: Sleep_state - Which sleep state to enter
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_enter_sleep_state (
+ u8 sleep_state)
+{
+ ACPI_STATUS status;
+ ACPI_OBJECT_LIST arg_list;
+ ACPI_OBJECT arg;
+ u8 type_a;
+ u8 type_b;
+ u16 PM1_acontrol;
+ u16 PM1_bcontrol;
+
+ /*
+ * _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
+ */
+
+ status = acpi_hw_obtain_sleep_type_register_data(sleep_state, &type_a, &type_b);
+
+ if (!ACPI_SUCCESS(status)) {
+ return status;
+ }
+
+ /* run the _PTS and _GTS methods */
+ MEMSET(&arg_list, 0, sizeof(arg_list));
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+
+ MEMSET(&arg, 0, sizeof(arg));
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = sleep_state;
+
+ acpi_evaluate_object(NULL, "\\_PTS", &arg_list, NULL);
+ acpi_evaluate_object(NULL, "\\_GTS", &arg_list, NULL);
+
+ /* clear wake status */
+ acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_LOCK, WAK_STS, 1);
+
+ PM1_acontrol = (u16) acpi_hw_register_read(ACPI_MTX_LOCK, PM1_CONTROL);
+
+ /* mask off SLP_EN and SLP_TYP fields */
+ PM1_acontrol &= 0xC3FF;
+
+ /* mask in SLP_EN */
+ PM1_acontrol |= (1 << acpi_hw_get_bit_shift (SLP_EN_MASK));
+
+ PM1_bcontrol = PM1_acontrol;
+
+ /* mask in SLP_TYP */
+ PM1_acontrol |= (type_a << acpi_hw_get_bit_shift (SLP_TYPE_X_MASK));
+ PM1_bcontrol |= (type_b << acpi_hw_get_bit_shift (SLP_TYPE_X_MASK));
+
+ /* the old version was disabling interrupts. let's try it without
+ * and see how that works
+ */
+ /*disable();*/
+
+ acpi_hw_register_write(ACPI_MTX_LOCK, PM1_a_CONTROL, PM1_acontrol);
+ acpi_hw_register_write(ACPI_MTX_LOCK, PM1_b_CONTROL, PM1_bcontrol);
+
+ /*enable();*/
+
+ return (AE_OK);
+}
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
new file mode 100644
index 000000000..b7f529ccf
--- /dev/null
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -0,0 +1,196 @@
+
+/******************************************************************************
+ *
+ * Name: hwtimer.c - ACPI Power Management Timer Interface
+ * $Revision: 4 $
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000, 2001 R. Byron Moore
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "acpi.h"
+#include "achware.h"
+
+#define _COMPONENT HARDWARE
+ MODULE_NAME ("hwtimer")
+
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_get_timer_resolution
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: Number of bits of resolution in the PM Timer (24 or 32).
+ *
+ * DESCRIPTION: Obtains resolution of the ACPI PM Timer.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_get_timer_resolution (
+ u32 *resolution)
+{
+ if (!resolution) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ *resolution = 24;
+ }
+ else {
+ *resolution = 32;
+ }
+
+ return (AE_OK);
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_get_timer
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: Current value of the ACPI PM Timer (in ticks).
+ *
+ * DESCRIPTION: Obtains current value of ACPI PM Timer.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_get_timer (
+ u32 *ticks)
+{
+ if (!ticks) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ *ticks = acpi_os_in32 ((ACPI_IO_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xpm_tmr_blk.address));
+
+ return (AE_OK);
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION: Acpi_get_timer_duration
+ *
+ * PARAMETERS: Start_ticks
+ * End_ticks
+ * Time_elapsed
+ *
+ * RETURN: Time_elapsed
+ *
+ * DESCRIPTION: Computes the time elapsed (in microseconds) between two
+ * PM Timer time stamps, taking into account the possibility of
+ * rollovers, the timer resolution, and timer frequency.
+ *
+ * The PM Timer's clock ticks at roughly 3.6 times per
+ * _microsecond_, and its clock continues through Cx state
+ * transitions (unlike many CPU timestamp counters) -- making it
+ * a versatile and accurate timer.
+ *
+ *              Note that this function accommodates only a single timer
+ * rollover. Thus for 24-bit timers, this function should only
+ * be used for calculating durations less than ~4.6 seconds
+ * (~20 hours for 32-bit timers).
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_get_timer_duration (
+ u32 start_ticks,
+ u32 end_ticks,
+ u32 *time_elapsed)
+{
+ u32 delta_ticks = 0;
+ u32 seconds = 0;
+ u32 milliseconds = 0;
+ u32 microseconds = 0;
+ u32 remainder = 0;
+
+ if (!time_elapsed) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Compute Tick Delta:
+ * -------------------
+ * Handle timer rollovers on 24- versus 32-bit timers.
+ */
+ if (start_ticks < end_ticks) {
+ delta_ticks = end_ticks - start_ticks;
+ }
+ else if (start_ticks > end_ticks) {
+ /* 24-bit Timer */
+ if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ delta_ticks = (0x00FFFFFF - start_ticks) + end_ticks;
+ }
+ /* 32-bit Timer */
+ else {
+ delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ }
+ }
+
+ /*
+ * Compute Duration:
+ * -----------------
+ * Since certain compilers (gcc/Linux, argh!) don't support 64-bit
+ * divides in kernel-space we have to do some trickery to preserve
+ * accuracy while using 32-bit math.
+ *
+ * TODO: Change to use 64-bit math when supported.
+ *
+ * The process is as follows:
+ * 1. Compute the number of seconds by dividing Delta Ticks by
+ * the timer frequency.
+ * 2. Compute the number of milliseconds in the remainder from step #1
+ * by multiplying by 1000 and then dividing by the timer frequency.
+ * 3. Compute the number of microseconds in the remainder from step #2
+ * by multiplying by 1000 and then dividing by the timer frequency.
+ * 4. Add the results from steps 1, 2, and 3 to get the total duration.
+ *
+ * Example: The time elapsed for Delta_ticks = 0xFFFFFFFF should be
+ * 1199864031 microseconds. This is computed as follows:
+ * Step #1: Seconds = 1199; Remainder = 3092840
+ * Step #2: Milliseconds = 864; Remainder = 113120
+ * Step #3: Microseconds = 31; Remainder = <don't care!>
+ */
+
+ /* Step #1 */
+ seconds = delta_ticks / PM_TIMER_FREQUENCY;
+ remainder = delta_ticks % PM_TIMER_FREQUENCY;
+
+ /* Step #2 */
+ milliseconds = (remainder * 1000) / PM_TIMER_FREQUENCY;
+ remainder = (remainder * 1000) % PM_TIMER_FREQUENCY;
+
+ /* Step #3 */
+ microseconds = (remainder * 1000) / PM_TIMER_FREQUENCY;
+
+ /* Step #4 */
+ *time_elapsed = seconds * 1000000;
+ *time_elapsed += milliseconds * 1000;
+ *time_elapsed += microseconds;
+
+ return (AE_OK);
+}
+
+
diff --git a/drivers/acpi/include/accommon.h b/drivers/acpi/include/accommon.h
index 37e13b228..61d54a220 100644
--- a/drivers/acpi/include/accommon.h
+++ b/drivers/acpi/include/accommon.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: accommon.h -- prototypes for the common (subsystem-wide) procedures
- * $Revision: 82 $
+ * $Revision: 87 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -100,6 +100,8 @@ acpi_cm_allocate_owner_id (
* Cm_clib - Local implementations of C library functions
*/
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+
NATIVE_UINT
acpi_cm_strlen (
const NATIVE_CHAR *string);
@@ -141,7 +143,7 @@ u32
acpi_cm_strtoul (
const NATIVE_CHAR *string,
NATIVE_CHAR **terminator,
- u32 base);
+ NATIVE_UINT base);
NATIVE_CHAR *
acpi_cm_strstr (
@@ -161,7 +163,7 @@ acpi_cm_memcpy (
void *
acpi_cm_memset (
void *dest,
- u32 value,
+ NATIVE_UINT value,
NATIVE_UINT count);
u32
@@ -172,6 +174,7 @@ u32
acpi_cm_to_lower (
u32 c);
+#endif /* ACPI_USE_SYSTEM_CLIBRARY */
/*
* Cm_copy - Object construction and conversion interfaces
@@ -297,7 +300,7 @@ function_value_exit (
u32 line_number,
u32 component_id,
NATIVE_CHAR *function_name,
- NATIVE_UINT value);
+ ACPI_INTEGER value);
void
function_ptr_exit (
@@ -596,7 +599,7 @@ acpi_cm_init_static_object (
#define acpi_cm_callocate(a) _cm_callocate(a, _COMPONENT,_THIS_MODULE,__LINE__)
#define acpi_cm_free(a) _cm_free(a,_COMPONENT,_THIS_MODULE,__LINE__)
-#ifndef ACPI_DEBUG
+#ifndef ACPI_DEBUG_TRACK_ALLOCATIONS
#define acpi_cm_add_element_to_alloc_list(a,b,c,d,e,f)
#define acpi_cm_delete_element_from_alloc_list(a,b,c,d)
diff --git a/drivers/acpi/include/acconfig.h b/drivers/acpi/include/acconfig.h
index 2b210339b..ea9be649d 100644
--- a/drivers/acpi/include/acconfig.h
+++ b/drivers/acpi/include/acconfig.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acconfig.h - Global configuration constants
- * $Revision: 48 $
+ * $Revision: 53 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -53,7 +53,7 @@
/* Version string */
-#define ACPI_CA_VERSION __DATE__
+#define ACPI_CA_VERSION 0x20010125
/* Maximum objects in the various object caches */
@@ -122,6 +122,10 @@
#define MTH_NUM_ARGS 7
#define MTH_MAX_ARG 6
+/* Maximum length of resulting string when converting from a buffer */
+
+#define ACPI_MAX_STRING_CONVERSION 200
+
/*
* Operand Stack (in WALK_STATE), Must be large enough to contain MTH_MAX_ARG
*/
diff --git a/drivers/acpi/include/acdebug.h b/drivers/acpi/include/acdebug.h
index 2bc9e7165..f1fa7094e 100644
--- a/drivers/acpi/include/acdebug.h
+++ b/drivers/acpi/include/acdebug.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acdebug.h - ACPI/AML debugger
- * $Revision: 37 $
+ * $Revision: 39 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acdispat.h b/drivers/acpi/include/acdispat.h
index 599e46c36..f5d52cc41 100644
--- a/drivers/acpi/include/acdispat.h
+++ b/drivers/acpi/include/acdispat.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
- * $Revision: 33 $
+ * $Revision: 35 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acenv.h b/drivers/acpi/include/acenv.h
index f867a348f..9e4e62339 100644
--- a/drivers/acpi/include/acenv.h
+++ b/drivers/acpi/include/acenv.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acenv.h - Generation environment specific items
- * $Revision: 65 $
+ * $Revision: 70 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
/*
- * Configuration for ACPI Utilities
+ * Configuration for ACPI tools and utilities
*/
#ifdef _ACPI_DUMP_APP
@@ -55,6 +55,16 @@
#define ACPI_USE_SYSTEM_CLIBRARY
#endif
+/*
+ * Memory allocation tracking. Used only if
+ * 1) This is the debug version
+ * 2) This is NOT a 16-bit version of the code (not enough real-mode memory)
+ */
+#ifdef ACPI_DEBUG
+#ifndef _IA16
+#define ACPI_DEBUG_TRACK_ALLOCATIONS
+#endif
+#endif
/*
* Environment configuration. The purpose of this file is to interface to the
@@ -154,17 +164,17 @@
#define STRUPR(s) strupr((s))
#define STRLEN(s) strlen((s))
#define STRCPY(d,s) strcpy((d), (s))
-#define STRNCPY(d,s,n) strncpy((d), (s), (n))
-#define STRNCMP(d,s,n) strncmp((d), (s), (n))
+#define STRNCPY(d,s,n) strncpy((d), (s), (NATIVE_INT)(n))
+#define STRNCMP(d,s,n) strncmp((d), (s), (NATIVE_INT)(n))
#define STRCMP(d,s) strcmp((d), (s))
#define STRCAT(d,s) strcat((d), (s))
-#define STRNCAT(d,s,n) strncat((d), (s), (n))
-#define STRTOUL(d,s,n) strtoul((d), (s), (n))
-#define MEMCPY(d,s,n) memcpy((d), (s), (n))
-#define MEMSET(d,s,n) memset((d), (s), (n))
+#define STRNCAT(d,s,n) strncat((d), (s), (NATIVE_INT)(n))
+#define STRTOUL(d,s,n) strtoul((d), (s), (NATIVE_INT)(n))
+#define MEMCPY(d,s,n) memcpy((d), (s), (NATIVE_INT)(n))
+#define MEMSET(d,s,n) memset((d), (s), (NATIVE_INT)(n))
#define TOUPPER toupper
#define TOLOWER tolower
-
+#define IS_XDIGIT isxdigit
/******************************************************************************
*
diff --git a/drivers/acpi/include/acevents.h b/drivers/acpi/include/acevents.h
index 3e76370bf..b7d335451 100644
--- a/drivers/acpi/include/acevents.h
+++ b/drivers/acpi/include/acevents.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acevents.h - Event subcomponent prototypes and defines
- * $Revision: 62 $
+ * $Revision: 63 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acexcep.h b/drivers/acpi/include/acexcep.h
index 1629a0934..8fb267585 100644
--- a/drivers/acpi/include/acexcep.h
+++ b/drivers/acpi/include/acexcep.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
- * $Revision: 37 $
+ * $Revision: 41 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -116,8 +116,10 @@
#define AE_AML_NAME_NOT_FOUND (ACPI_STATUS) (0x0010 | AE_CODE_AML)
#define AE_AML_INTERNAL (ACPI_STATUS) (0x0011 | AE_CODE_AML)
#define AE_AML_INVALID_SPACE_ID (ACPI_STATUS) (0x0012 | AE_CODE_AML)
+#define AE_AML_STRING_LIMIT (ACPI_STATUS) (0x0013 | AE_CODE_AML)
+#define AE_AML_NO_RETURN_VALUE (ACPI_STATUS) (0x0014 | AE_CODE_AML)
-#define AE_CODE_AML_MAX 0x0012
+#define AE_CODE_AML_MAX 0x0014
/*
* Internal exceptions used for control
@@ -202,6 +204,8 @@ static NATIVE_CHAR *acpi_gbl_exception_names_aml[] =
"AE_AML_NAME_NOT_FOUND",
"AE_AML_INTERNAL",
"AE_AML_INVALID_SPACE_ID",
+ "AE_AML_STRING_LIMIT",
+ "AE_AML_NO_RETURN_VALUE",
};
static NATIVE_CHAR *acpi_gbl_exception_names_ctrl[] =
diff --git a/drivers/acpi/include/acgcc.h b/drivers/acpi/include/acgcc.h
index 5992f493d..82b1e5139 100644
--- a/drivers/acpi/include/acgcc.h
+++ b/drivers/acpi/include/acgcc.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acgcc.h - GCC specific defines, etc.
- * $Revision: 2 $
+ * $Revision: 4 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acglobal.h b/drivers/acpi/include/acglobal.h
index 248f72c9d..36444038c 100644
--- a/drivers/acpi/include/acglobal.h
+++ b/drivers/acpi/include/acglobal.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acglobal.h - Declarations for global variables
- * $Revision: 92 $
+ * $Revision: 96 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -154,7 +154,6 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_set; /* TBD: [Restr
ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-
ACPI_EXTERN ACPI_OBJECT_NOTIFY_HANDLER acpi_gbl_drv_notify;
ACPI_EXTERN ACPI_OBJECT_NOTIFY_HANDLER acpi_gbl_sys_notify;
@@ -162,7 +161,8 @@ ACPI_EXTERN ACPI_OBJECT_NOTIFY_HANDLER acpi_gbl_sys_notify;
extern u8 acpi_gbl_shutdown;
extern u32 acpi_gbl_system_flags;
extern u32 acpi_gbl_startup_flags;
-extern u8 acpi_gbl_decode_to8bit[];
+extern u8 acpi_gbl_decode_to8bit[8];
+extern NATIVE_CHAR acpi_gbl_hex_to_ascii[];
/*****************************************************************************
diff --git a/drivers/acpi/include/achware.h b/drivers/acpi/include/achware.h
index 1a206e8d2..efb97bd2f 100644
--- a/drivers/acpi/include/achware.h
+++ b/drivers/acpi/include/achware.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: achware.h -- hardware specific interfaces
- * $Revision: 48 $
+ * $Revision: 53 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,6 +27,10 @@
#define __ACHWARE_H__
+/* PM Timer ticks per second (HZ) */
+#define PM_TIMER_FREQUENCY 3579545
+
+
/* Prototypes */
@@ -92,6 +96,10 @@ void
acpi_hw_clear_acpi_status (
void);
+u32
+acpi_hw_get_bit_shift (
+ u32 mask);
+
/* GPE support */
@@ -121,89 +129,21 @@ acpi_hw_obtain_sleep_type_register_data (
u8 *slp_typ_b);
-/* Cx State Prototypes */
-
-ACPI_STATUS
-acpi_hw_enter_c1(
- ACPI_IO_ADDRESS pblk_address,
- u32 *pm_timer_ticks);
-
-ACPI_STATUS
-acpi_hw_enter_c2(
- ACPI_IO_ADDRESS pblk_address,
- u32 *pm_timer_ticks);
-
-ACPI_STATUS
-acpi_hw_enter_c3(
- ACPI_IO_ADDRESS pblk_address,
- u32 *pm_timer_ticks);
-
-ACPI_STATUS
-acpi_hw_enter_cx (
- ACPI_IO_ADDRESS pblk_address,
- u32 *pm_timer_ticks);
-
-ACPI_STATUS
-acpi_hw_set_cx (
- u32 cx_state);
-
-ACPI_STATUS
-acpi_hw_get_cx_info (
- u32 cx_states[]);
+/* ACPI Timer prototypes */
ACPI_STATUS
-acpi_hw_get_cx_handler (
- u32 cx_state,
- ACPI_C_STATE_HANDLER *handler);
+acpi_get_timer_resolution (
+ u32 *resolution);
ACPI_STATUS
-acpi_hw_set_cx_handler (
- u32 cx_state,
- ACPI_C_STATE_HANDLER handler);
-
-
-/* Throttling Prototypes */
-
-void
-acpi_hw_enable_throttling (
- ACPI_IO_ADDRESS pblk_address);
-
-void
-acpi_hw_disable_throttling (
- ACPI_IO_ADDRESS pblk_address);
-
-u32
-acpi_hw_get_duty_cycle (
- u8 duty_offset,
- ACPI_IO_ADDRESS pblk_address,
- u32 num_throttle_states);
-
-void
-acpi_hw_program_duty_cycle (
- u8 duty_offset,
- u32 duty_cycle,
- ACPI_IO_ADDRESS pblk_address,
- u32 num_throttle_states);
-
-NATIVE_UINT
-acpi_hw_local_pow (
- NATIVE_UINT x,
- NATIVE_UINT y);
-
-
-/* ACPI Timer prototypes */
-
-u32
-acpi_hw_pmt_ticks (
- void);
-
-u32
-acpi_hw_pmt_resolution (
- void);
+acpi_get_timer (
+ u32 *ticks);
ACPI_STATUS
-acpi_get_timer (
- u32 *out_ticks);
+acpi_get_timer_duration (
+ u32 start_ticks,
+ u32 end_ticks,
+ u32 *time_elapsed);
#endif /* __ACHWARE_H__ */
diff --git a/drivers/acpi/include/acinterp.h b/drivers/acpi/include/acinterp.h
index c8c967492..6eb571e5f 100644
--- a/drivers/acpi/include/acinterp.h
+++ b/drivers/acpi/include/acinterp.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
- * $Revision: 86 $
+ * $Revision: 91 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -70,9 +70,34 @@ acpi_aml_execute_method (
/*
- * amfield - ACPI AML (p-code) execution - field manipulation
+ * amconvrt - object conversion
*/
+ACPI_STATUS
+acpi_aml_convert_to_integer (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state);
+
+ACPI_STATUS
+acpi_aml_convert_to_buffer (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state);
+
+ACPI_STATUS
+acpi_aml_convert_to_string (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state);
+
+ACPI_STATUS
+acpi_aml_convert_to_target_type (
+ OBJECT_TYPE_INTERNAL destination_type,
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state);
+
+
+/*
+ * amfield - ACPI AML (p-code) execution - field manipulation
+ */
ACPI_STATUS
acpi_aml_read_field (
@@ -419,17 +444,75 @@ acpi_aml_exec_store (
ACPI_WALK_STATE *walk_state);
ACPI_STATUS
-acpi_aml_store_object_to_object (
+acpi_aml_store_object_to_index (
ACPI_OPERAND_OBJECT *val_desc,
ACPI_OPERAND_OBJECT *dest_desc,
ACPI_WALK_STATE *walk_state);
ACPI_STATUS
acpi_aml_store_object_to_node (
- ACPI_OPERAND_OBJECT *val_desc,
+ ACPI_OPERAND_OBJECT *source_desc,
ACPI_NAMESPACE_NODE *node,
ACPI_WALK_STATE *walk_state);
+ACPI_STATUS
+acpi_aml_store_object_to_object (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *dest_desc,
+ ACPI_WALK_STATE *walk_state);
+
+
+/*
+ *
+ */
+
+ACPI_STATUS
+acpi_aml_resolve_object (
+ ACPI_OPERAND_OBJECT **source_desc_ptr,
+ OBJECT_TYPE_INTERNAL target_type,
+ ACPI_WALK_STATE *walk_state);
+
+ACPI_STATUS
+acpi_aml_store_object (
+ ACPI_OPERAND_OBJECT *source_desc,
+ OBJECT_TYPE_INTERNAL target_type,
+ ACPI_OPERAND_OBJECT **target_desc_ptr,
+ ACPI_WALK_STATE *walk_state);
+
+
+/*
+ * amcopy - object copy
+ */
+
+ACPI_STATUS
+acpi_aml_copy_buffer_to_buffer (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc);
+
+ACPI_STATUS
+acpi_aml_copy_string_to_string (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc);
+
+ACPI_STATUS
+acpi_aml_copy_integer_to_index_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc);
+
+ACPI_STATUS
+acpi_aml_copy_integer_to_bank_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc);
+
+ACPI_STATUS
+acpi_aml_copy_data_to_named_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_NAMESPACE_NODE *node);
+
+ACPI_STATUS
+acpi_aml_copy_integer_to_field_unit (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc);
/*
* amutils - interpreter/scanner utilities
diff --git a/drivers/acpi/include/aclinux.h b/drivers/acpi/include/aclinux.h
index 673d5f96b..0cf0e2845 100644
--- a/drivers/acpi/include/aclinux.h
+++ b/drivers/acpi/include/aclinux.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: aclinux.h - OS specific defines, etc.
- * $Revision: 6 $
+ * $Revision: 7 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/aclocal.h b/drivers/acpi/include/aclocal.h
index a647026f1..2d931387e 100644
--- a/drivers/acpi/include/aclocal.h
+++ b/drivers/acpi/include/aclocal.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
- * $Revision: 95 $
+ * $Revision: 100 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -74,7 +74,7 @@ typedef u32 ACPI_MUTEX_HANDLE;
#define NUM_MTX MAX_MTX+1
-#ifdef ACPI_DEBUG
+#if defined(ACPI_DEBUG) || defined(ENABLE_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS
/* Names for the mutexes used in the subsystem */
@@ -476,7 +476,9 @@ typedef struct acpi_opcode_info
u32 parse_args; /* Grammar/Parse time arguments */
u32 runtime_args; /* Interpret time arguments */
- DEBUG_ONLY_MEMBERS (NATIVE_CHAR *name) /* op name (debug only) */
+#ifdef _OPCODE_NAMES
+ NATIVE_CHAR *name; /* op name (debug only) */
+#endif
} ACPI_OPCODE_INFO;
@@ -591,6 +593,7 @@ typedef struct acpi_walk_state
ACPI_GENERIC_STATE *scope_info; /* Stack of nested scopes */
ACPI_PARSE_STATE *parser_state; /* Current state of parser */
u8 *aml_last_while;
+ ACPI_OPCODE_INFO *op_info; /* Info on current opcode */
ACPI_PARSE_DOWNWARDS descending_callback;
ACPI_PARSE_UPWARDS ascending_callback;
@@ -605,6 +608,7 @@ typedef struct acpi_walk_state
u32 parse_flags;
u8 walk_type;
u8 return_used;
+ u16 opcode; /* Current AML opcode */
u32 prev_arg_types;
/* Debug support */
@@ -728,14 +732,16 @@ typedef struct acpi_get_devices_info
#define PM1_STS 0x0100
#define PM1_EN 0x0200
#define PM1_CONTROL 0x0300
-#define PM2_CONTROL 0x0400
-#define PM_TIMER 0x0500
-#define PROCESSOR_BLOCK 0x0600
-#define GPE0_STS_BLOCK 0x0700
-#define GPE0_EN_BLOCK 0x0800
-#define GPE1_STS_BLOCK 0x0900
-#define GPE1_EN_BLOCK 0x0A00
-#define SMI_CMD_BLOCK 0x0B00
+#define PM1_a_CONTROL 0x0400
+#define PM1_b_CONTROL 0x0500
+#define PM2_CONTROL 0x0600
+#define PM_TIMER 0x0700
+#define PROCESSOR_BLOCK 0x0800
+#define GPE0_STS_BLOCK 0x0900
+#define GPE0_EN_BLOCK 0x0A00
+#define GPE1_STS_BLOCK 0x0B00
+#define GPE1_EN_BLOCK 0x0C00
+#define SMI_CMD_BLOCK 0x0D00
/*
* Address space bitmasks for mmio or io spaces
@@ -751,66 +757,66 @@ typedef struct acpi_get_devices_info
/*
* Control bit definitions
*/
-#define TMR_STS (PM1_STS | 0x01)
-#define BM_STS (PM1_STS | 0x02)
-#define GBL_STS (PM1_STS | 0x03)
-#define PWRBTN_STS (PM1_STS | 0x04)
-#define SLPBTN_STS (PM1_STS | 0x05)
-#define RTC_STS (PM1_STS | 0x06)
-#define WAK_STS (PM1_STS | 0x07)
-
-#define TMR_EN (PM1_EN | 0x01)
+#define TMR_STS (PM1_STS | 0x01)
+#define BM_STS (PM1_STS | 0x02)
+#define GBL_STS (PM1_STS | 0x03)
+#define PWRBTN_STS (PM1_STS | 0x04)
+#define SLPBTN_STS (PM1_STS | 0x05)
+#define RTC_STS (PM1_STS | 0x06)
+#define WAK_STS (PM1_STS | 0x07)
+
+#define TMR_EN (PM1_EN | 0x01)
/* no BM_EN */
-#define GBL_EN (PM1_EN | 0x03)
-#define PWRBTN_EN (PM1_EN | 0x04)
-#define SLPBTN_EN (PM1_EN | 0x05)
-#define RTC_EN (PM1_EN | 0x06)
-#define WAK_EN (PM1_EN | 0x07)
+#define GBL_EN (PM1_EN | 0x03)
+#define PWRBTN_EN (PM1_EN | 0x04)
+#define SLPBTN_EN (PM1_EN | 0x05)
+#define RTC_EN (PM1_EN | 0x06)
+#define WAK_EN (PM1_EN | 0x07)
-#define SCI_EN (PM1_CONTROL | 0x01)
-#define BM_RLD (PM1_CONTROL | 0x02)
-#define GBL_RLS (PM1_CONTROL | 0x03)
-#define SLP_TYPE_A (PM1_CONTROL | 0x04)
-#define SLP_TYPE_B (PM1_CONTROL | 0x05)
-#define SLP_EN (PM1_CONTROL | 0x06)
+#define SCI_EN (PM1_CONTROL | 0x01)
+#define BM_RLD (PM1_CONTROL | 0x02)
+#define GBL_RLS (PM1_CONTROL | 0x03)
+#define SLP_TYPE_A (PM1_CONTROL | 0x04)
+#define SLP_TYPE_B (PM1_CONTROL | 0x05)
+#define SLP_EN (PM1_CONTROL | 0x06)
-#define ARB_DIS (PM2_CONTROL | 0x01)
+#define ARB_DIS (PM2_CONTROL | 0x01)
-#define TMR_VAL (PM_TIMER | 0x01)
+#define TMR_VAL (PM_TIMER | 0x01)
-#define GPE0_STS (GPE0_STS_BLOCK | 0x01)
-#define GPE0_EN (GPE0_EN_BLOCK | 0x01)
+#define GPE0_STS (GPE0_STS_BLOCK | 0x01)
+#define GPE0_EN (GPE0_EN_BLOCK | 0x01)
-#define GPE1_STS (GPE1_STS_BLOCK | 0x01)
-#define GPE1_EN (GPE1_EN_BLOCK | 0x01)
+#define GPE1_STS (GPE1_STS_BLOCK | 0x01)
+#define GPE1_EN (GPE1_EN_BLOCK | 0x01)
-#define TMR_STS_MASK 0x0001
-#define BM_STS_MASK 0x0010
-#define GBL_STS_MASK 0x0020
-#define PWRBTN_STS_MASK 0x0100
-#define SLPBTN_STS_MASK 0x0200
-#define RTC_STS_MASK 0x0400
-#define WAK_STS_MASK 0x8000
+#define TMR_STS_MASK 0x0001
+#define BM_STS_MASK 0x0010
+#define GBL_STS_MASK 0x0020
+#define PWRBTN_STS_MASK 0x0100
+#define SLPBTN_STS_MASK 0x0200
+#define RTC_STS_MASK 0x0400
+#define WAK_STS_MASK 0x8000
-#define ALL_FIXED_STS_BITS (TMR_STS_MASK | BM_STS_MASK | GBL_STS_MASK \
- | PWRBTN_STS_MASK | SLPBTN_STS_MASK \
- | RTC_STS_MASK | WAK_STS_MASK)
+#define ALL_FIXED_STS_BITS (TMR_STS_MASK | BM_STS_MASK | GBL_STS_MASK \
+ | PWRBTN_STS_MASK | SLPBTN_STS_MASK \
+ | RTC_STS_MASK | WAK_STS_MASK)
-#define TMR_EN_MASK 0x0001
-#define GBL_EN_MASK 0x0020
-#define PWRBTN_EN_MASK 0x0100
-#define SLPBTN_EN_MASK 0x0200
-#define RTC_EN_MASK 0x0400
+#define TMR_EN_MASK 0x0001
+#define GBL_EN_MASK 0x0020
+#define PWRBTN_EN_MASK 0x0100
+#define SLPBTN_EN_MASK 0x0200
+#define RTC_EN_MASK 0x0400
-#define SCI_EN_MASK 0x0001
-#define BM_RLD_MASK 0x0002
-#define GBL_RLS_MASK 0x0004
-#define SLP_TYPE_X_MASK 0x1C00
-#define SLP_EN_MASK 0x2000
+#define SCI_EN_MASK 0x0001
+#define BM_RLD_MASK 0x0002
+#define GBL_RLS_MASK 0x0004
+#define SLP_TYPE_X_MASK 0x1C00
+#define SLP_EN_MASK 0x2000
-#define ARB_DIS_MASK 0x0001
-#define TMR_VAL_MASK 0xFFFFFFFF
+#define ARB_DIS_MASK 0x0001
+#define TMR_VAL_MASK 0xFFFFFFFF
#define GPE0_STS_MASK
#define GPE0_EN_MASK
@@ -819,8 +825,8 @@ typedef struct acpi_get_devices_info
#define GPE1_EN_MASK
-#define ACPI_READ 1
-#define ACPI_WRITE 2
+#define ACPI_READ 1
+#define ACPI_WRITE 2
/* Plug and play */
diff --git a/drivers/acpi/include/acmacros.h b/drivers/acpi/include/acmacros.h
index 19cfa0591..7bed83ddd 100644
--- a/drivers/acpi/include/acmacros.h
+++ b/drivers/acpi/include/acmacros.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acmacros.h - C macros for the entire subsystem.
- * $Revision: 59 $
+ * $Revision: 62 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,6 +30,14 @@
* Data manipulation macros
*/
+#ifndef LODWORD
+#define LODWORD(l) ((u32)(UINT64)(l))
+#endif
+
+#ifndef HIDWORD
+#define HIDWORD(l) ((u32)((((UINT64)(l)) >> 32) & 0xFFFFFFFF))
+#endif
+
#ifndef LOWORD
#define LOWORD(l) ((u16)(NATIVE_UINT)(l))
#endif
@@ -64,10 +72,18 @@
#ifdef _IA16
+/*
+ * For 16-bit addresses, we have to assume that the upper 32 bits
+ * are zero.
+ */
#define ACPI_GET_ADDRESS(a) ((a).lo)
#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(b);}
-#define ACPI_VALID_ADDRESS(a) ((a).hi && (a).lo)
+#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo)
+
#else
+/*
+ * Full 64-bit address on 32-bit and 64-bit platforms
+ */
#define ACPI_GET_ADDRESS(a) (a)
#define ACPI_STORE_ADDRESS(a,b) ((a)=(b))
#define ACPI_VALID_ADDRESS(a) (a)
@@ -335,7 +351,7 @@
*/
#define return_VOID {function_exit(_THIS_MODULE,__LINE__,_COMPONENT,_proc_name);return;}
#define return_ACPI_STATUS(s) {function_status_exit(_THIS_MODULE,__LINE__,_COMPONENT,_proc_name,s);return(s);}
-#define return_VALUE(s) {function_value_exit(_THIS_MODULE,__LINE__,_COMPONENT,_proc_name,(NATIVE_UINT)s);return(s);}
+#define return_VALUE(s) {function_value_exit(_THIS_MODULE,__LINE__,_COMPONENT,_proc_name,(ACPI_INTEGER)s);return(s);}
#define return_PTR(s) {function_ptr_exit(_THIS_MODULE,__LINE__,_COMPONENT,_proc_name,(u8 *)s);return(s);}
@@ -346,6 +362,8 @@
#define DEBUG_DEFINE(a) a;
#define DEBUG_ONLY_MEMBERS(a) a;
+#define _OPCODE_NAMES
+#define _VERBOSE_STRUCTURES
/* Stack and buffer dumping */
@@ -458,9 +476,8 @@
*/
#ifdef _IA16
#undef DEBUG_ONLY_MEMBERS
+#undef _VERBOSE_STRUCTURES
#define DEBUG_ONLY_MEMBERS(a)
-#undef OP_INFO_ENTRY
-#define OP_INFO_ENTRY(flags,name,Pargs,Iargs) {flags,Pargs,Iargs}
#endif
diff --git a/drivers/acpi/include/acnamesp.h b/drivers/acpi/include/acnamesp.h
index e010a8118..d6acb8444 100644
--- a/drivers/acpi/include/acnamesp.h
+++ b/drivers/acpi/include/acnamesp.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
- * $Revision: 100 $
+ * $Revision: 101 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acobject.h b/drivers/acpi/include/acobject.h
index c801ff117..9394b470a 100644
--- a/drivers/acpi/include/acobject.h
+++ b/drivers/acpi/include/acobject.h
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Name: acobject.h - Definition of ACPI_OPERAND_OBJECT (Internal object only)
- * $Revision: 75 $
+ * $Revision: 78 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -113,10 +113,10 @@ typedef struct /* NUMBER - has value */
ACPI_INTEGER value;
-} ACPI_OBJECT_NUMBER;
+} ACPI_OBJECT_INTEGER;
-typedef struct /* STRING - has length and pointer */
+typedef struct /* STRING - has length and pointer - Null terminated, ASCII characters only */
{
ACPI_OBJECT_COMMON_HEADER
@@ -126,13 +126,11 @@ typedef struct /* STRING - has length and pointer */
} ACPI_OBJECT_STRING;
-typedef struct /* BUFFER - has length, sequence, and pointer */
+typedef struct /* BUFFER - has length and pointer - not null terminated */
{
ACPI_OBJECT_COMMON_HEADER
u32 length;
- u32 sequence; /* Sequential count of buffers created */
-
u8 *pointer; /* points to the buffer in allocated space */
} ACPI_OBJECT_BUFFER;
@@ -398,7 +396,7 @@ typedef union acpi_operand_obj
{
ACPI_OBJECT_COMMON common;
ACPI_OBJECT_CACHE_LIST cache;
- ACPI_OBJECT_NUMBER number;
+ ACPI_OBJECT_INTEGER integer;
ACPI_OBJECT_STRING string;
ACPI_OBJECT_BUFFER buffer;
ACPI_OBJECT_PACKAGE package;
diff --git a/drivers/acpi/include/acoutput.h b/drivers/acpi/include/acoutput.h
index 664a5f8a8..5c20943cb 100644
--- a/drivers/acpi/include/acoutput.h
+++ b/drivers/acpi/include/acoutput.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acoutput.h -- debug output
- * $Revision: 66 $
+ * $Revision: 69 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,6 +58,7 @@
#define BATTERY 0x00040000
#define DEBUGGER 0x00100000
+#define COMPILER 0x00200000
#define ALL_COMPONENTS 0x001FFFFF
#define COMPONENT_DEFAULT (ALL_COMPONENTS)
diff --git a/drivers/acpi/include/acparser.h b/drivers/acpi/include/acparser.h
index d657749da..9b4bfabea 100644
--- a/drivers/acpi/include/acparser.h
+++ b/drivers/acpi/include/acparser.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
- * $Revision: 47 $
+ * $Revision: 49 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acpi.h b/drivers/acpi/include/acpi.h
index 4896c4a7f..344b01a77 100644
--- a/drivers/acpi/include/acpi.h
+++ b/drivers/acpi/include/acpi.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acpi.h - Master include file, Publics and external data.
- * $Revision: 48 $
+ * $Revision: 50 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/acpiosxf.h b/drivers/acpi/include/acpiosxf.h
index 2f9eb4c13..ee289f33e 100644
--- a/drivers/acpi/include/acpiosxf.h
+++ b/drivers/acpi/include/acpiosxf.h
@@ -9,7 +9,7 @@
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -121,6 +121,10 @@ acpi_os_unmap_memory (
void *logical_address,
u32 length);
+ACPI_STATUS
+acpi_os_get_physical_address (
+ void *logical_address,
+ ACPI_PHYSICAL_ADDRESS *physical_address);
/*
* Interrupt handlers
diff --git a/drivers/acpi/include/acpixf.h b/drivers/acpi/include/acpixf.h
index d70fa75a7..8d293818f 100644
--- a/drivers/acpi/include/acpixf.h
+++ b/drivers/acpi/include/acpixf.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -285,6 +285,10 @@ acpi_get_firmware_waking_vector (
ACPI_PHYSICAL_ADDRESS *physical_address);
ACPI_STATUS
+acpi_enter_sleep_state (
+ u8 sleep_state);
+
+ACPI_STATUS
acpi_get_processor_throttling_info (
ACPI_HANDLE processor_handle,
ACPI_BUFFER *user_buffer);
diff --git a/drivers/acpi/include/acresrc.h b/drivers/acpi/include/acresrc.h
index 3bb19490d..0852367d2 100644
--- a/drivers/acpi/include/acresrc.h
+++ b/drivers/acpi/include/acresrc.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: acresrc.h - Resource Manager function prototypes
- * $Revision: 20 $
+ * $Revision: 22 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/actables.h b/drivers/acpi/include/actables.h
index 4dd724517..be8b5e3b0 100644
--- a/drivers/acpi/include/actables.h
+++ b/drivers/acpi/include/actables.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: actables.h - ACPI table management
- * $Revision: 27 $
+ * $Revision: 29 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/actbl.h b/drivers/acpi/include/actbl.h
index 34631e820..7a41842d2 100644
--- a/drivers/acpi/include/actbl.h
+++ b/drivers/acpi/include/actbl.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: actbl.h - Table data structures defined in ACPI specification
- * $Revision: 43 $
+ * $Revision: 45 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/actbl1.h b/drivers/acpi/include/actbl1.h
index 019ba1680..5c68ca050 100644
--- a/drivers/acpi/include/actbl1.h
+++ b/drivers/acpi/include/actbl1.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: actbl1.h - ACPI 1.0 tables
- * $Revision: 15 $
+ * $Revision: 17 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/actbl2.h b/drivers/acpi/include/actbl2.h
index e3ccf2188..0a05efa1c 100644
--- a/drivers/acpi/include/actbl2.h
+++ b/drivers/acpi/include/actbl2.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: actbl2.h - ACPI Specification Revision 2.0 Tables
- * $Revision: 19 $
+ * $Revision: 21 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -142,8 +142,8 @@ typedef struct
u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
u16 flush_size; /* number of flush strides that need to be read */
u16 flush_stride; /* Processor's memory cache line width, in bytes */
- u8 duty_offset; /* Processor’s duty cycle index in processor's P_CNT reg*/
- u8 duty_width; /* Processor’s duty cycle value bit width in P_CNT register.*/
+ u8 duty_offset; /* Processor_’s duty cycle index in processor's P_CNT reg*/
+ u8 duty_width; /* Processor_’s duty cycle value bit width in P_CNT register.*/
u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
u8 century; /* index to century in RTC CMOS RAM */
diff --git a/drivers/acpi/include/actbl71.h b/drivers/acpi/include/actbl71.h
index 408ec402e..095806ccb 100644
--- a/drivers/acpi/include/actbl71.h
+++ b/drivers/acpi/include/actbl71.h
@@ -3,12 +3,12 @@
* Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71
* This file includes tables specific to this
* specification revision.
- * $Revision: 7 $
+ * $Revision: 9 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/include/actypes.h b/drivers/acpi/include/actypes.h
index dfa28a9d9..826fc8901 100644
--- a/drivers/acpi/include/actypes.h
+++ b/drivers/acpi/include/actypes.h
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
- * $Revision: 159 $
+ * $Revision: 163 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -97,7 +97,7 @@ typedef INT16 NATIVE_INT;
typedef UINT32 ACPI_TBLPTR;
typedef UINT32 ACPI_IO_ADDRESS;
-typedef void *ACPI_PHYSICAL_ADDRESS;
+typedef char *ACPI_PHYSICAL_ADDRESS;
#define ALIGNED_ADDRESS_BOUNDARY 0x00000002
#define _HW_ALIGNMENT_SUPPORT
@@ -249,8 +249,9 @@ typedef UINT64 ACPI_INTEGER;
#define ACPI_STATE_S2 (u8) 2
#define ACPI_STATE_S3 (u8) 3
#define ACPI_STATE_S4 (u8) 4
-#define ACPI_STATE_S4_bIOS (u8) 5
-#define ACPI_STATE_S5 (u8) 6
+#define ACPI_STATE_S5 (u8) 5
+/* let's pretend S4_bIOS didn't exist for now. ASG */
+#define ACPI_STATE_S4_bIOS (u8) 6
#define ACPI_S_STATES_MAX ACPI_STATE_S5
@@ -284,8 +285,37 @@ typedef u32 ACPI_TABLE_TYPE;
typedef u32 ACPI_OBJECT_TYPE;
typedef u8 OBJECT_TYPE_INTERNAL;
+#define ACPI_BTYPE_ANY 0x00000000
+#define ACPI_BTYPE_INTEGER 0x00000001
+#define ACPI_BTYPE_STRING 0x00000002
+#define ACPI_BTYPE_BUFFER 0x00000004
+#define ACPI_BTYPE_PACKAGE 0x00000008
+#define ACPI_BTYPE_FIELD_UNIT 0x00000010
+#define ACPI_BTYPE_DEVICE 0x00000020
+#define ACPI_BTYPE_EVENT 0x00000040
+#define ACPI_BTYPE_METHOD 0x00000080
+#define ACPI_BTYPE_MUTEX 0x00000100
+#define ACPI_BTYPE_REGION 0x00000200
+#define ACPI_BTYPE_POWER 0x00000400
+#define ACPI_BTYPE_PROCESSOR 0x00000800
+#define ACPI_BTYPE_THERMAL 0x00001000
+#define ACPI_BTYPE_BUFFER_FIELD 0x00002000
+#define ACPI_BTYPE_DDB_HANDLE 0x00004000
+#define ACPI_BTYPE_DEBUG_OBJECT 0x00008000
+#define ACPI_BTYPE_REFERENCE 0x00010000
+#define ACPI_BTYPE_RESOURCE 0x00020000
+
+#define ACPI_BTYPE_COMPUTE_DATA (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER)
+
+#define ACPI_BTYPE_DATA (ACPI_BTYPE_COMPUTE_DATA | ACPI_BTYPE_PACKAGE)
+#define ACPI_BTYPE_DATA_REFERENCE (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE)
+#define ACPI_BTYPE_DEVICE_OBJECTS (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR)
+#define ACPI_BTYPE_OBJECTS_AND_REFS 0x00017FFF /* ARG or LOCAL */
+#define ACPI_BTYPE_ALL_OBJECTS 0x00007FFF
+
+
#define ACPI_TYPE_ANY 0 /* 0x00 */
-#define ACPI_TYPE_NUMBER 1 /* 0x01 Byte/Word/Dword/Zero/One/Ones */
+#define ACPI_TYPE_INTEGER 1 /* 0x01 Byte/Word/Dword/Zero/One/Ones */
#define ACPI_TYPE_STRING 2 /* 0x02 */
#define ACPI_TYPE_BUFFER 3 /* 0x03 */
#define ACPI_TYPE_PACKAGE 4 /* 0x04 Byte_const, multiple Data_term/Constant/Super_name */
@@ -432,7 +462,7 @@ typedef union acpi_obj
{
ACPI_OBJECT_TYPE type;
ACPI_INTEGER value; /* The actual number */
- } number;
+ } integer;
struct
{
diff --git a/drivers/acpi/include/amlcode.h b/drivers/acpi/include/amlcode.h
index 9a5cb2c52..707e7535d 100644
--- a/drivers/acpi/include/amlcode.h
+++ b/drivers/acpi/include/amlcode.h
@@ -3,12 +3,12 @@
* Name: amlcode.h - Definitions for AML, as included in "definition blocks"
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
- * $Revision: 42 $
+ * $Revision: 46 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -211,27 +211,42 @@
#define ARGP_TERMARG 0x0E
#define ARGP_TERMLIST 0x0F
#define ARGP_WORDDATA 0x10
+#define ARGP_QWORDDATA 0x11
+#define ARGP_SIMPLENAME 0x12
/*
* Resolved argument types for the AML Interpreter
* Each field in the Arg_types u32 is 5 bits, allowing for a maximum of 6 arguments.
- * There can be up to 31 unique argument types
+ * There can be up to 31 unique argument types (0 is end-of-arg-list indicator)
*/
-#define ARGI_ANYTYPE 0x01
-#define ARGI_TARGETREF 0x02
-#define ARGI_REFERENCE 0x03
-#define ARGI_IF 0x04
-#define ARGI_NUMBER 0x05
-#define ARGI_STRING 0x06
-#define ARGI_BUFFER 0x07
-#define ARGI_PACKAGE 0x08
-#define ARGI_DATAOBJECT 0x09 /* Buffer, string, package or reference to a Node - Used only by Size_of operator*/
-#define ARGI_COMPLEXOBJ 0x0A /* Buffer or package */
-#define ARGI_MUTEX 0x0B
-#define ARGI_EVENT 0x0C
-#define ARGI_REGION 0x0D
-#define ARGI_DDBHANDLE 0x0E
+/* "Standard" ACPI types are 1-15 (0x0F) */
+
+#define ARGI_INTEGER ACPI_TYPE_INTEGER /* 1 */
+#define ARGI_STRING ACPI_TYPE_STRING /* 2 */
+#define ARGI_BUFFER ACPI_TYPE_BUFFER /* 3 */
+#define ARGI_PACKAGE ACPI_TYPE_PACKAGE /* 4 */
+#define ARGI_EVENT ACPI_TYPE_EVENT
+#define ARGI_MUTEX ACPI_TYPE_MUTEX
+#define ARGI_REGION ACPI_TYPE_REGION
+#define ARGI_DDBHANDLE ACPI_TYPE_DDB_HANDLE
+
+/* Custom types are 0x10 through 0x1F */
+
+#define ARGI_IF 0x10
+#define ARGI_ANYOBJECT 0x11
+#define ARGI_ANYTYPE 0x12
+#define ARGI_COMPUTEDATA 0x13 /* Buffer, String, or Integer */
+#define ARGI_DATAOBJECT 0x14 /* Buffer, string, package or reference to a Node - Used only by Size_of operator*/
+#define ARGI_COMPLEXOBJ 0x15 /* Buffer or package */
+#define ARGI_INTEGER_REF 0x16
+#define ARGI_OBJECT_REF 0x17
+#define ARGI_DEVICE_REF 0x18
+#define ARGI_REFERENCE 0x19
+#define ARGI_TARGETREF 0x1A /* Target, subject to implicit conversion */
+#define ARGI_FIXED_TARGET 0x1B /* Target, no implicit conversion */
+#define ARGI_SIMPLE_TARGET 0x1C /* Name, Local, Arg -- no implicit conversion */
+#define ARGI_BUFFERSTRING 0x1D
#define ARGI_INVALID_OPCODE 0xFFFFFFFF
diff --git a/drivers/acpi/interpreter/amconfig.c b/drivers/acpi/interpreter/amconfig.c
index 55e5b0510..497f35375 100644
--- a/drivers/acpi/interpreter/amconfig.c
+++ b/drivers/acpi/interpreter/amconfig.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: amconfig - Namespace reconfiguration (Load/Unload opcodes)
- * $Revision: 26 $
+ * $Revision: 29 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -73,7 +73,8 @@ acpi_aml_exec_load_table (
table_header.length = 0;
for (i = 0; i < sizeof (ACPI_TABLE_HEADER); i++) {
status = acpi_ev_address_space_dispatch (rgn_desc, ADDRESS_SPACE_READ,
- i, 8, (u32 *) ((u8 *) &table_header + i));
+ (ACPI_PHYSICAL_ADDRESS) i, 8,
+ (u32 *) ((u8 *) &table_header + i));
if (ACPI_FAILURE (status)) {
return (status);
}
@@ -96,7 +97,8 @@ acpi_aml_exec_load_table (
for (i = 0; i < table_header.length; i++) {
status = acpi_ev_address_space_dispatch (rgn_desc, ADDRESS_SPACE_READ,
- i, 8, (u32 *) (table_data_ptr + i));
+ (ACPI_PHYSICAL_ADDRESS)i, 8,
+ (u32 *) (table_data_ptr + i));
if (ACPI_FAILURE (status)) {
goto cleanup;
}
diff --git a/drivers/acpi/interpreter/amconvrt.c b/drivers/acpi/interpreter/amconvrt.c
new file mode 100644
index 000000000..4c895c7ed
--- /dev/null
+++ b/drivers/acpi/interpreter/amconvrt.c
@@ -0,0 +1,525 @@
+/******************************************************************************
+ *
+ * Module Name: amconvrt - Object conversion routines
+ * $Revision: 3 $
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000, 2001 R. Byron Moore
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "acpi.h"
+#include "acparser.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+#include "acevents.h"
+#include "amlcode.h"
+#include "acdispat.h"
+
+
+#define _COMPONENT INTERPRETER
+ MODULE_NAME ("amconvrt")
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_convert_to_target_type
+ *
+ * PARAMETERS: *Obj_desc - Object to be converted.
+ * Walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION:
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_convert_to_target_type (
+ OBJECT_TYPE_INTERNAL destination_type,
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_STATUS status = AE_OK;
+
+
+ /*
+ * If required by the target,
+ * perform implicit conversion on the source before we store it.
+ */
+
+ switch (GET_CURRENT_ARG_TYPE (walk_state->op_info->runtime_args))
+ {
+ case ARGI_SIMPLE_TARGET:
+ case ARGI_FIXED_TARGET:
+ case ARGI_INTEGER_REF: /* Handles Increment, Decrement cases */
+
+ switch (destination_type)
+ {
+ case INTERNAL_TYPE_DEF_FIELD:
+ /*
+ * Named field can always handle conversions
+ */
+ break;
+
+ default:
+ /* No conversion allowed for these types */
+
+ if (destination_type != (*obj_desc)->common.type) {
+ status = AE_TYPE;
+ }
+ }
+ break;
+
+
+ case ARGI_TARGETREF:
+
+ switch (destination_type)
+ {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_FIELD_UNIT:
+ case INTERNAL_TYPE_BANK_FIELD:
+ case INTERNAL_TYPE_INDEX_FIELD:
+ /*
+ * These types require an Integer operand. We can convert
+ * a Buffer or a String to an Integer if necessary.
+ */
+ status = acpi_aml_convert_to_integer (obj_desc, walk_state);
+ break;
+
+
+ case ACPI_TYPE_STRING:
+
+ /*
+ * The operand must be a String. We can convert an
+ * Integer or Buffer if necessary
+ */
+ status = acpi_aml_convert_to_string (obj_desc, walk_state);
+ break;
+
+
+ case ACPI_TYPE_BUFFER:
+
+ /*
+		 * The operand must be a Buffer. We can convert an
+		 * Integer or String if necessary
+ */
+ status = acpi_aml_convert_to_buffer (obj_desc, walk_state);
+ break;
+ }
+ break;
+
+
+ case ARGI_REFERENCE:
+ /*
+ * Create_xxxx_field cases - we are storing the field object into the name
+ */
+ break;
+
+
+ default:
+ status = AE_AML_INTERNAL;
+ }
+
+
+ /*
+ * Source-to-Target conversion semantics:
+ *
+ * If conversion to the target type cannot be performed, then simply
+ * overwrite the target with the new object and type.
+ */
+ if (status == AE_TYPE) {
+ status = AE_OK;
+ }
+
+ return (status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_convert_to_integer
+ *
+ * PARAMETERS: *Obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * Walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to an integer.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_convert_to_integer (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ u32 i;
+ ACPI_OPERAND_OBJECT *ret_desc;
+ u32 count;
+ char *pointer;
+ ACPI_INTEGER result;
+ u32 integer_size = sizeof (ACPI_INTEGER);
+
+
+ switch ((*obj_desc)->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+ return (AE_OK);
+
+ case ACPI_TYPE_STRING:
+ pointer = (*obj_desc)->string.pointer;
+ count = (*obj_desc)->string.length;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ pointer = (char *) (*obj_desc)->buffer.pointer;
+ count = (*obj_desc)->buffer.length;
+ break;
+
+ default:
+ return (AE_TYPE);
+ }
+
+ /*
+ * Create a new integer
+ */
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+
+ /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */
+
+ if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) {
+ /*
+ * We are running a method that exists in a 32-bit ACPI table.
+ * Truncate the value to 32 bits by zeroing out the upper 32-bit field
+ */
+ integer_size = sizeof (u32);
+ }
+
+
+ /*
+ * Convert the buffer/string to an integer. Note that both buffers and
+ * strings are treated as raw data - we don't convert ascii to hex for
+ * strings.
+ *
+ * There are two terminating conditions for the loop:
+ * 1) The size of an integer has been reached, or
+ * 2) The end of the buffer or string has been reached
+ */
+ result = 0;
+
+ /* Transfer no more than an integer's worth of data */
+
+ if (count > integer_size) {
+ count = integer_size;
+ }
+
+ /*
+ * String conversion is different than Buffer conversion
+ */
+ switch ((*obj_desc)->common.type)
+ {
+ case ACPI_TYPE_STRING:
+
+ /* TBD: Need to use 64-bit STRTOUL */
+
+ /*
+ * Convert string to an integer
+ * String must be hexadecimal as per the ACPI specification
+ */
+
+ result = STRTOUL (pointer, NULL, 16);
+ break;
+
+
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * Buffer conversion - we simply grab enough raw data from the
+ * buffer to fill an integer
+ */
+ for (i = 0; i < count; i++) {
+ /*
+ * Get next byte and shift it into the Result.
+ * Little endian is used, meaning that the first byte of the buffer
+ * is the LSB of the integer
+ */
+ result |= (((ACPI_INTEGER) pointer[i]) << (i * 8));
+ }
+
+ break;
+ }
+
+ /* Save the Result, delete original descriptor, store new descriptor */
+
+ ret_desc->integer.value = result;
+
+ if (walk_state->opcode != AML_STORE_OP) {
+ acpi_cm_remove_reference (*obj_desc);
+ }
+
+ *obj_desc = ret_desc;
+
+ return (AE_OK);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_convert_to_buffer
+ *
+ * PARAMETERS: *Obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * Walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a Buffer
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_convert_to_buffer (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_OPERAND_OBJECT *ret_desc;
+ u32 i;
+ u32 integer_size = sizeof (ACPI_INTEGER);
+ u8 *new_buf;
+
+
+ switch ((*obj_desc)->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+
+ /*
+ * Create a new Buffer
+ */
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_BUFFER);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */
+
+ if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) {
+ /*
+ * We are running a method that exists in a 32-bit ACPI table.
+ * Truncate the value to 32 bits by zeroing out the upper
+ * 32-bit field
+ */
+ integer_size = sizeof (u32);
+ }
+
+		/* Need enough space for one integer */
+
+ ret_desc->buffer.length = integer_size;
+ new_buf = acpi_cm_callocate (integer_size);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
+ acpi_cm_remove_reference (ret_desc);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Copy the integer to the buffer */
+
+ for (i = 0; i < integer_size; i++) {
+ new_buf[i] = (u8) ((*obj_desc)->integer.value >> (i * 8));
+ }
+ ret_desc->buffer.pointer = new_buf;
+
+ /* Return the new buffer descriptor */
+
+ if (walk_state->opcode != AML_STORE_OP) {
+ acpi_cm_remove_reference (*obj_desc);
+ }
+ *obj_desc = ret_desc;
+ break;
+
+
+ case ACPI_TYPE_STRING:
+ break;
+
+
+ case ACPI_TYPE_BUFFER:
+ break;
+
+
+ default:
+ return (AE_TYPE);
+ break;
+ }
+
+ return (AE_OK);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_convert_to_string
+ *
+ * PARAMETERS: *Obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * Walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a string
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_convert_to_string (
+ ACPI_OPERAND_OBJECT **obj_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_OPERAND_OBJECT *ret_desc;
+ u32 i;
+ u32 index;
+ u32 integer_size = sizeof (ACPI_INTEGER);
+ u8 *new_buf;
+ u8 *pointer;
+
+
+ switch ((*obj_desc)->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+
+ /*
+ * Create a new String
+ */
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_STRING);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */
+
+ if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) {
+ /*
+ * We are running a method that exists in a 32-bit ACPI table.
+ * Truncate the value to 32 bits by zeroing out the upper
+ * 32-bit field
+ */
+ integer_size = sizeof (u32);
+ }
+
+ /* Need enough space for one ASCII integer plus null terminator */
+
+ ret_desc->string.length = (integer_size * 2) + 1;
+ new_buf = acpi_cm_callocate (ret_desc->string.length);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
+ acpi_cm_remove_reference (ret_desc);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Copy the integer to the buffer */
+
+ for (i = 0; i < (integer_size * 2); i++) {
+ new_buf[i] = acpi_gbl_hex_to_ascii [((*obj_desc)->integer.value >> (i * 4)) & 0xF];
+ }
+
+ /* Null terminate */
+
+ new_buf [i] = 0;
+ ret_desc->buffer.pointer = new_buf;
+
+ /* Return the new buffer descriptor */
+
+ if (walk_state->opcode != AML_STORE_OP) {
+ acpi_cm_remove_reference (*obj_desc);
+ }
+ *obj_desc = ret_desc;
+
+ return (AE_OK);
+
+
+ case ACPI_TYPE_BUFFER:
+
+ if (((*obj_desc)->buffer.length * 3) > ACPI_MAX_STRING_CONVERSION) {
+ return (AE_AML_STRING_LIMIT);
+ }
+
+ /*
+ * Create a new String
+ */
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_STRING);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+		/* Need enough space for the string (3 chars per buffer byte) plus null terminator */
+
+ ret_desc->string.length = (*obj_desc)->buffer.length * 3;
+ new_buf = acpi_cm_callocate (ret_desc->string.length + 1);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
+ acpi_cm_remove_reference (ret_desc);
+ return (AE_NO_MEMORY);
+ }
+
+ /*
+ * Convert each byte of the buffer to two ASCII characters plus a space.
+ */
+ pointer = (*obj_desc)->buffer.pointer;
+ index = 0;
+ for (i = 0; i < (*obj_desc)->buffer.length; i++) {
+ new_buf[index + 0] = acpi_gbl_hex_to_ascii [pointer[i] & 0x0F];
+ new_buf[index + 1] = acpi_gbl_hex_to_ascii [(pointer[i] >> 4) & 0x0F];
+ new_buf[index + 2] = ' ';
+ index += 3;
+ }
+
+ /* Null terminate */
+
+ new_buf [index] = 0;
+ ret_desc->buffer.pointer = new_buf;
+
+ /* Return the new buffer descriptor */
+
+ if (walk_state->opcode != AML_STORE_OP) {
+ acpi_cm_remove_reference (*obj_desc);
+ }
+ *obj_desc = ret_desc;
+ break;
+
+
+ case ACPI_TYPE_STRING:
+ break;
+
+
+ default:
+ return (AE_TYPE);
+ break;
+ }
+
+ return (AE_OK);
+}
+
+
diff --git a/drivers/acpi/interpreter/amcreate.c b/drivers/acpi/interpreter/amcreate.c
index 02d7933e1..f19c422ce 100644
--- a/drivers/acpi/interpreter/amcreate.c
+++ b/drivers/acpi/interpreter/amcreate.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: amcreate - Named object creation
- * $Revision: 51 $
+ * $Revision: 53 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -353,7 +353,7 @@ acpi_aml_exec_create_mutex (
goto cleanup;
}
- obj_desc->mutex.sync_level = (u8) sync_desc->number.value;
+ obj_desc->mutex.sync_level = (u8) sync_desc->integer.value;
/* Obj_desc was on the stack top, and the name is below it */
diff --git a/drivers/acpi/interpreter/amdyadic.c b/drivers/acpi/interpreter/amdyadic.c
index ba67b062a..a02173f29 100644
--- a/drivers/acpi/interpreter/amdyadic.c
+++ b/drivers/acpi/interpreter/amdyadic.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: amdyadic - ACPI AML (p-code) execution for dyadic operators
- * $Revision: 68 $
+ * $Revision: 71 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,7 +37,174 @@
MODULE_NAME ("amdyadic")
-/*****************************************************************************
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_do_concatenate
+ *
+ * PARAMETERS: *Obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_do_concatenate (
+ ACPI_OPERAND_OBJECT *obj_desc,
+ ACPI_OPERAND_OBJECT *obj_desc2,
+ ACPI_OPERAND_OBJECT **actual_ret_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_STATUS status;
+ u32 i;
+ ACPI_INTEGER this_integer;
+ ACPI_OPERAND_OBJECT *ret_desc;
+ NATIVE_CHAR *new_buf;
+ u32 integer_size = sizeof (ACPI_INTEGER);
+
+
+ /*
+ * There are three cases to handle:
+ * 1) Two Integers concatenated to produce a buffer
+ * 2) Two Strings concatenated to produce a string
+ * 3) Two Buffers concatenated to produce a buffer
+ */
+ switch (obj_desc->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+
+ /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */
+
+ if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) {
+ /*
+ * We are running a method that exists in a 32-bit ACPI table.
+ * Truncate the value to 32 bits by zeroing out the upper
+ * 32-bit field
+ */
+ integer_size = sizeof (u32);
+ }
+
+ /* Result of two integers is a buffer */
+
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_BUFFER);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Need enough space for two integers */
+
+ ret_desc->buffer.length = integer_size * 2;
+ new_buf = acpi_cm_callocate (ret_desc->buffer.length);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ ret_desc->buffer.pointer = (u8 *) new_buf;
+
+ /* Convert the first integer */
+
+ this_integer = obj_desc->integer.value;
+ for (i = 0; i < integer_size; i++) {
+ new_buf[i] = (u8) this_integer;
+ this_integer >>= 8;
+ }
+
+ /* Convert the second integer */
+
+ this_integer = obj_desc2->integer.value;
+ for (; i < (integer_size * 2); i++) {
+ new_buf[i] = (u8) this_integer;
+ this_integer >>= 8;
+ }
+
+ break;
+
+
+ case ACPI_TYPE_STRING:
+
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_STRING);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Operand1 is string */
+
+ new_buf = acpi_cm_allocate (obj_desc->string.length +
+ obj_desc2->string.length + 1);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: String allocation failure\n"));
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ STRCPY (new_buf, obj_desc->string.pointer);
+ STRCPY (new_buf + obj_desc->string.length,
+ obj_desc2->string.pointer);
+
+ /* Point the return object to the new string */
+
+ ret_desc->string.pointer = new_buf;
+ ret_desc->string.length = obj_desc->string.length +=
+ obj_desc2->string.length;
+ break;
+
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Operand1 is a buffer */
+
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_BUFFER);
+ if (!ret_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ new_buf = acpi_cm_allocate (obj_desc->buffer.length +
+ obj_desc2->buffer.length);
+ if (!new_buf) {
+ REPORT_ERROR
+ (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ MEMCPY (new_buf, obj_desc->buffer.pointer,
+ obj_desc->buffer.length);
+ MEMCPY (new_buf + obj_desc->buffer.length, obj_desc2->buffer.pointer,
+ obj_desc2->buffer.length);
+
+ /*
+ * Point the return object to the new buffer
+ */
+
+ ret_desc->buffer.pointer = (u8 *) new_buf;
+ ret_desc->buffer.length = obj_desc->buffer.length +
+ obj_desc2->buffer.length;
+ break;
+
+ default:
+ status = AE_AML_INTERNAL;
+ ret_desc = NULL;
+ }
+
+
+ *actual_ret_desc = ret_desc;
+ return (AE_OK);
+
+
+cleanup:
+
+ acpi_cm_remove_reference (ret_desc);
+ return (status);
+}
+
+
+/*******************************************************************************
*
* FUNCTION: Acpi_aml_exec_dyadic1
*
@@ -50,7 +217,7 @@
*
* ALLOCATION: Deletes both operands
*
- ****************************************************************************/
+ ******************************************************************************/
ACPI_STATUS
acpi_aml_exec_dyadic1 (
@@ -106,7 +273,7 @@ acpi_aml_exec_dyadic1 (
/* Dispatch the notify to the appropriate handler */
- acpi_ev_notify_dispatch (node, (u32) val_desc->number.value);
+ acpi_ev_notify_dispatch (node, (u32) val_desc->integer.value);
break;
default:
@@ -135,7 +302,7 @@ cleanup:
}
-/*****************************************************************************
+/*******************************************************************************
*
* FUNCTION: Acpi_aml_exec_dyadic2_r
*
@@ -148,7 +315,7 @@ cleanup:
*
* ALLOCATION: Deletes one operand descriptor -- other remains on stack
*
- ****************************************************************************/
+ ******************************************************************************/
ACPI_STATUS
acpi_aml_exec_dyadic2_r (
@@ -164,7 +331,6 @@ acpi_aml_exec_dyadic2_r (
ACPI_OPERAND_OBJECT *ret_desc2 = NULL;
ACPI_STATUS status = AE_OK;
u32 num_operands = 3;
- NATIVE_CHAR *new_buf;
/* Resolve all operands */
@@ -201,7 +367,7 @@ acpi_aml_exec_dyadic2_r (
case AML_SHIFT_RIGHT_OP:
case AML_SUBTRACT_OP:
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -222,8 +388,8 @@ acpi_aml_exec_dyadic2_r (
case AML_ADD_OP:
- ret_desc->number.value = obj_desc->number.value +
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value +
+ obj_desc2->integer.value;
break;
@@ -231,8 +397,8 @@ acpi_aml_exec_dyadic2_r (
case AML_BIT_AND_OP:
- ret_desc->number.value = obj_desc->number.value &
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value &
+ obj_desc2->integer.value;
break;
@@ -240,8 +406,8 @@ acpi_aml_exec_dyadic2_r (
case AML_BIT_NAND_OP:
- ret_desc->number.value = ~(obj_desc->number.value &
- obj_desc2->number.value);
+ ret_desc->integer.value = ~(obj_desc->integer.value &
+ obj_desc2->integer.value);
break;
@@ -249,8 +415,8 @@ acpi_aml_exec_dyadic2_r (
case AML_BIT_OR_OP:
- ret_desc->number.value = obj_desc->number.value |
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value |
+ obj_desc2->integer.value;
break;
@@ -258,8 +424,8 @@ acpi_aml_exec_dyadic2_r (
case AML_BIT_NOR_OP:
- ret_desc->number.value = ~(obj_desc->number.value |
- obj_desc2->number.value);
+ ret_desc->integer.value = ~(obj_desc->integer.value |
+ obj_desc2->integer.value);
break;
@@ -267,16 +433,16 @@ acpi_aml_exec_dyadic2_r (
case AML_BIT_XOR_OP:
- ret_desc->number.value = obj_desc->number.value ^
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value ^
+ obj_desc2->integer.value;
break;
- /* Def_divide := Divide_op Dividend Divisor Remainder Quotient */
+ /* Def_divide := Divide_op Dividend Divisor Remainder Quotient */
case AML_DIVIDE_OP:
- if (!obj_desc2->number.value) {
+ if (!obj_desc2->integer.value) {
REPORT_ERROR
(("Aml_exec_dyadic2_r/Divide_op: Divide by zero\n"));
@@ -284,7 +450,7 @@ acpi_aml_exec_dyadic2_r (
goto cleanup;
}
- ret_desc2 = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc2 = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc2) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -292,13 +458,13 @@ acpi_aml_exec_dyadic2_r (
/* Remainder (modulo) */
- ret_desc->number.value = ACPI_MODULO (obj_desc->number.value,
- obj_desc2->number.value);
+ ret_desc->integer.value = ACPI_MODULO (obj_desc->integer.value,
+ obj_desc2->integer.value);
/* Result (what we used to call the quotient) */
- ret_desc2->number.value = ACPI_DIVIDE (obj_desc->number.value,
- obj_desc2->number.value);
+ ret_desc2->integer.value = ACPI_DIVIDE (obj_desc->integer.value,
+ obj_desc2->integer.value);
break;
@@ -306,8 +472,8 @@ acpi_aml_exec_dyadic2_r (
case AML_MULTIPLY_OP:
- ret_desc->number.value = obj_desc->number.value *
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value *
+ obj_desc2->integer.value;
break;
@@ -315,8 +481,8 @@ acpi_aml_exec_dyadic2_r (
case AML_SHIFT_LEFT_OP:
- ret_desc->number.value = obj_desc->number.value <<
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value <<
+ obj_desc2->integer.value;
break;
@@ -324,8 +490,8 @@ acpi_aml_exec_dyadic2_r (
case AML_SHIFT_RIGHT_OP:
- ret_desc->number.value = obj_desc->number.value >>
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value >>
+ obj_desc2->integer.value;
break;
@@ -333,8 +499,8 @@ acpi_aml_exec_dyadic2_r (
case AML_SUBTRACT_OP:
- ret_desc->number.value = obj_desc->number.value -
- obj_desc2->number.value;
+ ret_desc->integer.value = obj_desc->integer.value -
+ obj_desc2->integer.value;
break;
@@ -342,79 +508,54 @@ acpi_aml_exec_dyadic2_r (
case AML_CONCAT_OP:
- if (obj_desc2->common.type != obj_desc->common.type) {
- status = AE_AML_OPERAND_TYPE;
- goto cleanup;
- }
- /* Both operands are now known to be the same */
-
- if (ACPI_TYPE_STRING == obj_desc->common.type) {
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_STRING);
- if (!ret_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
+ /*
+ * Convert the second operand if necessary. The first operand
+ * determines the type of the second operand, (See the Data Types
+ * section of the ACPI specification.) Both object types are
+ * guaranteed to be either Integer/String/Buffer by the operand
+ * resolution mechanism above.
+ */
- /* Operand1 is string */
+ switch (obj_desc->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+ status = acpi_aml_convert_to_integer (&obj_desc2, walk_state);
+ break;
- new_buf = acpi_cm_allocate (obj_desc->string.length +
- obj_desc2->string.length + 1);
- if (!new_buf) {
- REPORT_ERROR
- (("Aml_exec_dyadic2_r/Concat_op: String allocation failure\n"));
- status = AE_NO_MEMORY;
- goto cleanup;
- }
+ case ACPI_TYPE_STRING:
+ status = acpi_aml_convert_to_string (&obj_desc2, walk_state);
+ break;
- STRCPY (new_buf, obj_desc->string.pointer);
- STRCPY (new_buf + obj_desc->string.length,
- obj_desc2->string.pointer);
+ case ACPI_TYPE_BUFFER:
+ status = acpi_aml_convert_to_buffer (&obj_desc2, walk_state);
+ break;
- /* Point the return object to the new string */
-
- ret_desc->string.pointer = new_buf;
- ret_desc->string.length = obj_desc->string.length +=
- obj_desc2->string.length;
+ default:
+ status = AE_AML_INTERNAL;
}
- else {
- /* Operand1 is not a string ==> must be a buffer */
-
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_BUFFER);
- if (!ret_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = acpi_cm_allocate (obj_desc->buffer.length +
- obj_desc2->buffer.length);
- if (!new_buf) {
- REPORT_ERROR
- (("Aml_exec_dyadic2_r/Concat_op: Buffer allocation failure\n"));
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- MEMCPY (new_buf, obj_desc->buffer.pointer,
- obj_desc->buffer.length);
- MEMCPY (new_buf + obj_desc->buffer.length, obj_desc2->buffer.pointer,
- obj_desc2->buffer.length);
+ if (ACPI_FAILURE (status)) {
+ goto cleanup;
+ }
- /*
- * Point the return object to the new buffer
- */
- ret_desc->buffer.pointer = (u8 *) new_buf;
- ret_desc->buffer.length = obj_desc->buffer.length +
- obj_desc2->buffer.length;
+ /*
+ * Both operands are now known to be the same object type
+ * (Both are Integer, String, or Buffer), and we can now perform the
+ * concatenation.
+ */
+ status = acpi_aml_do_concatenate (obj_desc, obj_desc2, &ret_desc, walk_state);
+ if (ACPI_FAILURE (status)) {
+ goto cleanup;
}
break;
default:
- REPORT_ERROR (("Acpi_aml_exec_dyadic2_r: Unknown dyadic opcode %X\n", opcode));
+ REPORT_ERROR (("Acpi_aml_exec_dyadic2_r: Unknown dyadic opcode %X\n",
+ opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
@@ -474,7 +615,7 @@ cleanup:
}
-/*****************************************************************************
+/*******************************************************************************
*
* FUNCTION: Acpi_aml_exec_dyadic2_s
*
@@ -486,7 +627,7 @@ cleanup:
*
* ALLOCATION: Deletes one operand descriptor -- other remains on stack
*
- ****************************************************************************/
+ ******************************************************************************/
ACPI_STATUS
acpi_aml_exec_dyadic2_s (
@@ -516,7 +657,7 @@ acpi_aml_exec_dyadic2_s (
/* Create the internal return object */
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -524,7 +665,7 @@ acpi_aml_exec_dyadic2_s (
/* Default return value is FALSE, operation did not time out */
- ret_desc->number.value = 0;
+ ret_desc->integer.value = 0;
/* Examine the opcode */
@@ -562,7 +703,7 @@ acpi_aml_exec_dyadic2_s (
*/
if (status == AE_TIME) {
- ret_desc->number.value = ACPI_INTEGER_MAX; /* TRUE, op timed out */
+ ret_desc->integer.value = ACPI_INTEGER_MAX; /* TRUE, op timed out */
status = AE_OK;
}
@@ -591,7 +732,7 @@ cleanup:
}
-/*****************************************************************************
+/*******************************************************************************
*
* FUNCTION: Acpi_aml_exec_dyadic2
*
@@ -605,7 +746,7 @@ cleanup:
* ALLOCATION: Deletes one operand descriptor -- other remains on stack
* containing result value
*
- ****************************************************************************/
+ ******************************************************************************/
ACPI_STATUS
acpi_aml_exec_dyadic2 (
@@ -636,7 +777,7 @@ acpi_aml_exec_dyadic2 (
/* Create the internal return object */
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -654,8 +795,8 @@ acpi_aml_exec_dyadic2 (
case AML_LAND_OP:
- lboolean = (u8) (obj_desc->number.value &&
- obj_desc2->number.value);
+ lboolean = (u8) (obj_desc->integer.value &&
+ obj_desc2->integer.value);
break;
@@ -663,8 +804,8 @@ acpi_aml_exec_dyadic2 (
case AML_LEQUAL_OP:
- lboolean = (u8) (obj_desc->number.value ==
- obj_desc2->number.value);
+ lboolean = (u8) (obj_desc->integer.value ==
+ obj_desc2->integer.value);
break;
@@ -672,8 +813,8 @@ acpi_aml_exec_dyadic2 (
case AML_LGREATER_OP:
- lboolean = (u8) (obj_desc->number.value >
- obj_desc2->number.value);
+ lboolean = (u8) (obj_desc->integer.value >
+ obj_desc2->integer.value);
break;
@@ -681,8 +822,8 @@ acpi_aml_exec_dyadic2 (
case AML_LLESS_OP:
- lboolean = (u8) (obj_desc->number.value <
- obj_desc2->number.value);
+ lboolean = (u8) (obj_desc->integer.value <
+ obj_desc2->integer.value);
break;
@@ -690,8 +831,8 @@ acpi_aml_exec_dyadic2 (
case AML_LOR_OP:
- lboolean = (u8) (obj_desc->number.value ||
- obj_desc2->number.value);
+ lboolean = (u8) (obj_desc->integer.value ||
+ obj_desc2->integer.value);
break;
@@ -707,10 +848,10 @@ acpi_aml_exec_dyadic2 (
/* Set return value to logical TRUE (all ones) or FALSE (zero) */
if (lboolean) {
- ret_desc->number.value = ACPI_INTEGER_MAX;
+ ret_desc->integer.value = ACPI_INTEGER_MAX;
}
else {
- ret_desc->number.value = 0;
+ ret_desc->integer.value = 0;
}
diff --git a/drivers/acpi/interpreter/amfield.c b/drivers/acpi/interpreter/amfield.c
index 356be14c2..3ed442c95 100644
--- a/drivers/acpi/interpreter/amfield.c
+++ b/drivers/acpi/interpreter/amfield.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: amfield - ACPI AML (p-code) execution - field manipulation
- * $Revision: 74 $
+ * $Revision: 77 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -174,7 +174,8 @@ acpi_aml_access_named_field (
u32 byte_field_length;
- /* Basic data checking */
+ /* Parameter validation */
+
if ((!named_field) || (ACPI_READ == mode && !buffer)) {
return (AE_AML_INTERNAL);
}
@@ -227,7 +228,7 @@ acpi_aml_access_named_field (
/* TBD: should these round down to a power of 2? */
- if (DIV_8(bit_granularity) > byte_field_length) {
+ if (DIV_8 (bit_granularity) > byte_field_length) {
bit_granularity = MUL_8(byte_field_length);
}
diff --git a/drivers/acpi/interpreter/amfldio.c b/drivers/acpi/interpreter/amfldio.c
index ce877c982..d16cd3d9b 100644
--- a/drivers/acpi/interpreter/amfldio.c
+++ b/drivers/acpi/interpreter/amfldio.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: amfldio - Aml Field I/O
- * $Revision: 32 $
+ * $Revision: 37 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -132,7 +132,7 @@ acpi_aml_read_field (
u32 this_field_byte_offset;
u32 this_field_datum_offset;
u32 previous_raw_datum;
- u32 this_raw_datum;
+ u32 this_raw_datum = 0;
u32 valid_field_bits;
u32 mask;
u32 merged_datum = 0;
@@ -203,32 +203,46 @@ acpi_aml_read_field (
while (this_field_datum_offset < datum_length) {
/*
- * Get the next raw datum, it contains bits of the current
- * field datum
+ * If the field is aligned on a byte boundary, we don't want
+ * to perform a final read, since this would potentially read
+ * past the end of the region.
+ *
+ * TBD: [Investigate] It may make more sense to just split the aligned
+ * and non-aligned cases since the aligned case is so very simple,
*/
-
- status = acpi_aml_read_field_data (obj_desc,
- this_field_byte_offset + byte_granularity,
- bit_granularity, &this_raw_datum);
- if (ACPI_FAILURE (status)) {
- goto cleanup;
- }
-
- /* Before merging the data, make sure the unused bits are clear */
-
- switch (byte_granularity)
+ if ((obj_desc->field.bit_offset != 0) ||
+ ((obj_desc->field.bit_offset == 0) &&
+ (this_field_datum_offset < (datum_length -1))))
{
- case 1:
- this_raw_datum &= 0x000000FF;
- previous_raw_datum &= 0x000000FF;
- break;
-
- case 2:
- this_raw_datum &= 0x0000FFFF;
- previous_raw_datum &= 0x0000FFFF;
- break;
+ /*
+ * Get the next raw datum, it contains some or all bits
+ * of the current field datum
+ */
+
+ status = acpi_aml_read_field_data (obj_desc,
+ this_field_byte_offset + byte_granularity,
+ bit_granularity, &this_raw_datum);
+ if (ACPI_FAILURE (status)) {
+ goto cleanup;
+ }
+
+ /* Before merging the data, make sure the unused bits are clear */
+
+ switch (byte_granularity)
+ {
+ case 1:
+ this_raw_datum &= 0x000000FF;
+ previous_raw_datum &= 0x000000FF;
+ break;
+
+ case 2:
+ this_raw_datum &= 0x0000FFFF;
+ previous_raw_datum &= 0x0000FFFF;
+ break;
+ }
}
+
/*
* Put together bits of the two raw data to make a complete
* field datum
@@ -391,8 +405,6 @@ acpi_aml_write_field_data_with_update_rule (
merged_value = field_value;
- /* Check if update rule needs to be applied (not if mask is all ones) */
-
/* Decode the update rule */
@@ -401,13 +413,17 @@ acpi_aml_write_field_data_with_update_rule (
case UPDATE_PRESERVE:
- /*
- * Read the current contents of the byte/word/dword containing
- * the field, and merge with the new field value.
- */
- status = acpi_aml_read_field_data (obj_desc, this_field_byte_offset,
- bit_granularity, &current_value);
- merged_value |= (current_value & ~mask);
+ /* Check if update rule needs to be applied (not if mask is all ones) */
+
+ if (((1 << bit_granularity) -1) & ~mask) {
+ /*
+ * Read the current contents of the byte/word/dword containing
+ * the field, and merge with the new field value.
+ */
+ status = acpi_aml_read_field_data (obj_desc, this_field_byte_offset,
+ bit_granularity, &current_value);
+ merged_value |= (current_value & ~mask);
+ }
break;
diff --git a/drivers/acpi/interpreter/ammisc.c b/drivers/acpi/interpreter/ammisc.c
index 907169dfc..3c58daeab 100644
--- a/drivers/acpi/interpreter/ammisc.c
+++ b/drivers/acpi/interpreter/ammisc.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: ammisc - ACPI AML (p-code) execution - specific opcodes
- * $Revision: 71 $
+ * $Revision: 73 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -172,7 +172,7 @@ acpi_aml_exec_index (
if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
/* Object to be indexed is a Package */
- if (idx_desc->number.value >= obj_desc->package.count) {
+ if (idx_desc->integer.value >= obj_desc->package.count) {
status = AE_AML_PACKAGE_LIMIT;
goto cleanup;
}
@@ -195,7 +195,7 @@ acpi_aml_exec_index (
* we are after.
*/
- tmp_desc = obj_desc->package.elements[idx_desc->number.value];
+ tmp_desc = obj_desc->package.elements[idx_desc->integer.value];
ret_desc->reference.op_code = AML_INDEX_OP;
ret_desc->reference.target_type = tmp_desc->common.type;
ret_desc->reference.object = tmp_desc;
@@ -210,13 +210,13 @@ acpi_aml_exec_index (
*/
ret_desc->reference.op_code = AML_INDEX_OP;
ret_desc->reference.target_type = ACPI_TYPE_PACKAGE;
- ret_desc->reference.where = &obj_desc->package.elements[idx_desc->number.value];
+ ret_desc->reference.where = &obj_desc->package.elements[idx_desc->integer.value];
}
else {
/* Object to be indexed is a Buffer */
- if (idx_desc->number.value >= obj_desc->buffer.length) {
+ if (idx_desc->integer.value >= obj_desc->buffer.length) {
status = AE_AML_BUFFER_LIMIT;
goto cleanup;
}
@@ -224,7 +224,7 @@ acpi_aml_exec_index (
ret_desc->reference.op_code = AML_INDEX_OP;
ret_desc->reference.target_type = ACPI_TYPE_BUFFER_FIELD;
ret_desc->reference.object = obj_desc;
- ret_desc->reference.offset = (u32) idx_desc->number.value;
+ ret_desc->reference.offset = (u32) idx_desc->integer.value;
status = acpi_aml_exec_store (ret_desc, res_desc, walk_state);
}
@@ -314,20 +314,20 @@ acpi_aml_exec_match (
/* Validate match comparison sub-opcodes */
- if ((op1_desc->number.value > MAX_MATCH_OPERATOR) ||
- (op2_desc->number.value > MAX_MATCH_OPERATOR))
+ if ((op1_desc->integer.value > MAX_MATCH_OPERATOR) ||
+ (op2_desc->integer.value > MAX_MATCH_OPERATOR))
{
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
- index = (u32) start_desc->number.value;
+ index = (u32) start_desc->integer.value;
if (index >= (u32) pkg_desc->package.count) {
status = AE_AML_PACKAGE_LIMIT;
goto cleanup;
}
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -351,7 +351,7 @@ acpi_aml_exec_match (
* should we examine its value?
*/
if (!pkg_desc->package.elements[index] ||
- ACPI_TYPE_NUMBER != pkg_desc->package.elements[index]->common.type)
+ ACPI_TYPE_INTEGER != pkg_desc->package.elements[index]->common.type)
{
continue;
}
@@ -362,7 +362,7 @@ acpi_aml_exec_match (
* "continue" (proceed to next iteration of enclosing
* "for" loop) signifies a non-match.
*/
- switch (op1_desc->number.value)
+ switch (op1_desc->integer.value)
{
case MATCH_MTR: /* always true */
@@ -372,8 +372,8 @@ acpi_aml_exec_match (
case MATCH_MEQ: /* true if equal */
- if (pkg_desc->package.elements[index]->number.value
- != V1_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ != V1_desc->integer.value)
{
continue;
}
@@ -382,8 +382,8 @@ acpi_aml_exec_match (
case MATCH_MLE: /* true if less than or equal */
- if (pkg_desc->package.elements[index]->number.value
- > V1_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ > V1_desc->integer.value)
{
continue;
}
@@ -392,8 +392,8 @@ acpi_aml_exec_match (
case MATCH_MLT: /* true if less than */
- if (pkg_desc->package.elements[index]->number.value
- >= V1_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ >= V1_desc->integer.value)
{
continue;
}
@@ -402,8 +402,8 @@ acpi_aml_exec_match (
case MATCH_MGE: /* true if greater than or equal */
- if (pkg_desc->package.elements[index]->number.value
- < V1_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ < V1_desc->integer.value)
{
continue;
}
@@ -412,8 +412,8 @@ acpi_aml_exec_match (
case MATCH_MGT: /* true if greater than */
- if (pkg_desc->package.elements[index]->number.value
- <= V1_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ <= V1_desc->integer.value)
{
continue;
}
@@ -426,7 +426,7 @@ acpi_aml_exec_match (
}
- switch(op2_desc->number.value)
+ switch(op2_desc->integer.value)
{
case MATCH_MTR:
@@ -436,8 +436,8 @@ acpi_aml_exec_match (
case MATCH_MEQ:
- if (pkg_desc->package.elements[index]->number.value
- != V2_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ != V2_desc->integer.value)
{
continue;
}
@@ -446,8 +446,8 @@ acpi_aml_exec_match (
case MATCH_MLE:
- if (pkg_desc->package.elements[index]->number.value
- > V2_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ > V2_desc->integer.value)
{
continue;
}
@@ -456,8 +456,8 @@ acpi_aml_exec_match (
case MATCH_MLT:
- if (pkg_desc->package.elements[index]->number.value
- >= V2_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ >= V2_desc->integer.value)
{
continue;
}
@@ -466,8 +466,8 @@ acpi_aml_exec_match (
case MATCH_MGE:
- if (pkg_desc->package.elements[index]->number.value
- < V2_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ < V2_desc->integer.value)
{
continue;
}
@@ -476,8 +476,8 @@ acpi_aml_exec_match (
case MATCH_MGT:
- if (pkg_desc->package.elements[index]->number.value
- <= V2_desc->number.value)
+ if (pkg_desc->package.elements[index]->integer.value
+ <= V2_desc->integer.value)
{
continue;
}
@@ -497,7 +497,7 @@ acpi_aml_exec_match (
/* Match_value is the return value */
- ret_desc->number.value = match_value;
+ ret_desc->integer.value = match_value;
cleanup:
diff --git a/drivers/acpi/interpreter/ammonad.c b/drivers/acpi/interpreter/ammonad.c
index df9671c06..ea8834a5c 100644
--- a/drivers/acpi/interpreter/ammonad.c
+++ b/drivers/acpi/interpreter/ammonad.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: ammonad - ACPI AML (p-code) execution for monadic operators
- * $Revision: 88 $
+ * $Revision: 89 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -180,7 +180,7 @@ acpi_aml_exec_monadic1 (
case AML_SLEEP_OP:
- acpi_aml_system_do_suspend ((u32) obj_desc->number.value);
+ acpi_aml_system_do_suspend ((u32) obj_desc->integer.value);
break;
@@ -188,7 +188,7 @@ acpi_aml_exec_monadic1 (
case AML_STALL_OP:
- acpi_aml_system_do_stall ((u32) obj_desc->number.value);
+ acpi_aml_system_do_stall ((u32) obj_desc->integer.value);
break;
@@ -267,7 +267,7 @@ acpi_aml_exec_monadic2_r (
case AML_TO_BCD_OP:
case AML_COND_REF_OF_OP:
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -283,7 +283,7 @@ acpi_aml_exec_monadic2_r (
case AML_BIT_NOT_OP:
- ret_desc->number.value = ~obj_desc->number.value;
+ ret_desc->integer.value = ~obj_desc->integer.value;
break;
@@ -291,17 +291,17 @@ acpi_aml_exec_monadic2_r (
case AML_FIND_SET_LEFT_BIT_OP:
- ret_desc->number.value = obj_desc->number.value;
+ ret_desc->integer.value = obj_desc->integer.value;
/*
* Acpi specification describes Integer type as a little
* endian unsigned value, so this boundry condition is valid.
*/
- for (res_val = 0; ret_desc->number.value && res_val < ACPI_INTEGER_BIT_SIZE; ++res_val) {
- ret_desc->number.value >>= 1;
+ for (res_val = 0; ret_desc->integer.value && res_val < ACPI_INTEGER_BIT_SIZE; ++res_val) {
+ ret_desc->integer.value >>= 1;
}
- ret_desc->number.value = res_val;
+ ret_desc->integer.value = res_val;
break;
@@ -309,19 +309,19 @@ acpi_aml_exec_monadic2_r (
case AML_FIND_SET_RIGHT_BIT_OP:
- ret_desc->number.value = obj_desc->number.value;
+ ret_desc->integer.value = obj_desc->integer.value;
/*
* Acpi specification describes Integer type as a little
* endian unsigned value, so this boundry condition is valid.
*/
- for (res_val = 0; ret_desc->number.value && res_val < ACPI_INTEGER_BIT_SIZE; ++res_val) {
- ret_desc->number.value <<= 1;
+ for (res_val = 0; ret_desc->integer.value && res_val < ACPI_INTEGER_BIT_SIZE; ++res_val) {
+ ret_desc->integer.value <<= 1;
}
/* Since returns must be 1-based, subtract from 33 (65) */
- ret_desc->number.value = res_val == 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - res_val;
+ ret_desc->integer.value = res_val == 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - res_val;
break;
@@ -332,11 +332,11 @@ acpi_aml_exec_monadic2_r (
/*
* The 64-bit ACPI integer can hold 16 4-bit BCD integers
*/
- ret_desc->number.value = 0;
+ ret_desc->integer.value = 0;
for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) {
/* Get one BCD digit */
- digit = (ACPI_INTEGER) ((obj_desc->number.value >> (i * 4)) & 0xF);
+ digit = (ACPI_INTEGER) ((obj_desc->integer.value >> (i * 4)) & 0xF);
/* Check the range of the digit */
@@ -352,7 +352,7 @@ acpi_aml_exec_monadic2_r (
digit *= 10;
}
- ret_desc->number.value += digit;
+ ret_desc->integer.value += digit;
}
}
break;
@@ -363,16 +363,16 @@ acpi_aml_exec_monadic2_r (
case AML_TO_BCD_OP:
- if (obj_desc->number.value > ACPI_MAX_BCD_VALUE) {
+ if (obj_desc->integer.value > ACPI_MAX_BCD_VALUE) {
status = AE_AML_NUMERIC_OVERFLOW;
goto cleanup;
}
- ret_desc->number.value = 0;
+ ret_desc->integer.value = 0;
for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) {
/* Divide by nth factor of 10 */
- digit = obj_desc->number.value;
+ digit = obj_desc->integer.value;
for (j = 0; j < i; j++) {
digit /= 10;
}
@@ -380,7 +380,7 @@ acpi_aml_exec_monadic2_r (
/* Create the BCD digit */
if (digit > 0) {
- ret_desc->number.value += (ACPI_MODULO (digit, 10) << (i * 4));
+ ret_desc->integer.value += (ACPI_MODULO (digit, 10) << (i * 4));
}
}
break;
@@ -402,7 +402,7 @@ acpi_aml_exec_monadic2_r (
* return FALSE
*/
- ret_desc->number.value = 0;
+ ret_desc->integer.value = 0;
/*
* Must delete the result descriptor since there is no reference
@@ -424,7 +424,7 @@ acpi_aml_exec_monadic2_r (
/* The object exists in the namespace, return TRUE */
- ret_desc->number.value = ACPI_INTEGER_MAX;
+ ret_desc->integer.value = ACPI_INTEGER_MAX;
goto cleanup;
break;
@@ -579,13 +579,13 @@ acpi_aml_exec_monadic2 (
case AML_LNOT_OP:
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
- ret_desc->number.value = !obj_desc->number.value;
+ ret_desc->integer.value = !obj_desc->integer.value;
break;
@@ -638,10 +638,10 @@ acpi_aml_exec_monadic2 (
/* Do the actual increment or decrement */
if (AML_INCREMENT_OP == opcode) {
- ret_desc->number.value++;
+ ret_desc->integer.value++;
}
else {
- ret_desc->number.value--;
+ ret_desc->integer.value--;
}
/* Store the result back in the original descriptor */
@@ -672,7 +672,7 @@ acpi_aml_exec_monadic2 (
/* Constants are of type Number */
- type = ACPI_TYPE_NUMBER;
+ type = ACPI_TYPE_INTEGER;
break;
@@ -733,13 +733,13 @@ acpi_aml_exec_monadic2 (
/* Allocate a descriptor to hold the type. */
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
- ret_desc->number.value = type;
+ ret_desc->integer.value = type;
break;
@@ -793,13 +793,13 @@ acpi_aml_exec_monadic2 (
* object to hold the value
*/
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
- ret_desc->number.value = value;
+ ret_desc->integer.value = value;
break;
@@ -910,14 +910,14 @@ acpi_aml_exec_monadic2 (
* sub-buffer of the main buffer, it is only a pointer to a
* single element (byte) of the buffer!
*/
- ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ ret_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!ret_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
tmp_desc = obj_desc->reference.object;
- ret_desc->number.value =
+ ret_desc->integer.value =
tmp_desc->buffer.pointer[obj_desc->reference.offset];
/* TBD: [Investigate] (see below) Don't add an additional
diff --git a/drivers/acpi/interpreter/amnames.c b/drivers/acpi/interpreter/amnames.c
index ea4c26e08..07be1a37c 100644
--- a/drivers/acpi/interpreter/amnames.c
+++ b/drivers/acpi/interpreter/amnames.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amnames - interpreter/scanner name load/execute
- * $Revision: 71 $
+ * $Revision: 73 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/interpreter/amprep.c b/drivers/acpi/interpreter/amprep.c
index 266cb0105..2a56d4826 100644
--- a/drivers/acpi/interpreter/amprep.c
+++ b/drivers/acpi/interpreter/amprep.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amprep - ACPI AML (p-code) execution - field prep utilities
- * $Revision: 72 $
+ * $Revision: 73 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/interpreter/amregion.c b/drivers/acpi/interpreter/amregion.c
index dfe4fab85..1a2e7351d 100644
--- a/drivers/acpi/interpreter/amregion.c
+++ b/drivers/acpi/interpreter/amregion.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amregion - ACPI default Op_region (address space) handlers
- * $Revision: 41 $
+ * $Revision: 44 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -99,8 +99,8 @@ acpi_aml_system_memory_space_handler (
*/
if ((address < mem_info->mapped_physical_address) ||
- ((address + length) >
- (mem_info->mapped_physical_address + mem_info->mapped_length)))
+ (((ACPI_INTEGER) address + length) >
+ ((ACPI_INTEGER) mem_info->mapped_physical_address + mem_info->mapped_length)))
{
/*
* The request cannot be resolved by the current memory mapping;
@@ -139,7 +139,7 @@ acpi_aml_system_memory_space_handler (
/* TBD: should these pointers go to 64-bit in all cases ? */
logical_addr_ptr = mem_info->mapped_logical_address +
- (address - mem_info->mapped_physical_address);
+ ((ACPI_INTEGER) address - (ACPI_INTEGER) mem_info->mapped_physical_address);
/* Perform the memory read or write */
diff --git a/drivers/acpi/interpreter/amresnte.c b/drivers/acpi/interpreter/amresnte.c
index 20c6a0b11..0dc67b613 100644
--- a/drivers/acpi/interpreter/amresnte.c
+++ b/drivers/acpi/interpreter/amresnte.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amresnte - AML Interpreter object resolution
- * $Revision: 25 $
+ * $Revision: 27 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -55,7 +55,7 @@
* can be either a pointer to an actual internal object or a pointer into the
* AML stream itself. These types are currently:
*
- * ACPI_TYPE_NUMBER
+ * ACPI_TYPE_INTEGER
* ACPI_TYPE_STRING
* ACPI_TYPE_BUFFER
* ACPI_TYPE_MUTEX
@@ -208,14 +208,14 @@ acpi_aml_resolve_node_to_value (
break;
- case ACPI_TYPE_NUMBER:
+ case ACPI_TYPE_INTEGER:
/*
* The Node has an attached internal object, make sure that it's a
* number
*/
- if (ACPI_TYPE_NUMBER != val_desc->common.type) {
+ if (ACPI_TYPE_INTEGER != val_desc->common.type) {
return (AE_AML_OPERAND_TYPE);
}
@@ -244,7 +244,7 @@ acpi_aml_resolve_node_to_value (
object_type = ACPI_TYPE_BUFFER;
}
else {
- object_type = ACPI_TYPE_NUMBER;
+ object_type = ACPI_TYPE_INTEGER;
}
/*
@@ -282,7 +282,7 @@ acpi_aml_resolve_node_to_value (
return (status);
}
- obj_desc->number.value = temp_val;
+ obj_desc->integer.value = temp_val;
}
@@ -330,12 +330,12 @@ acpi_aml_resolve_node_to_value (
/* Create an object for the result */
- obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!obj_desc) {
return (AE_NO_MEMORY);
}
- obj_desc->number.value = temp_val;
+ obj_desc->integer.value = temp_val;
break;
@@ -378,12 +378,12 @@ acpi_aml_resolve_node_to_value (
/* Create an object for the result */
- obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!obj_desc) {
return (AE_NO_MEMORY);
}
- obj_desc->number.value = temp_val;
+ obj_desc->integer.value = temp_val;
break;
@@ -477,12 +477,12 @@ acpi_aml_resolve_node_to_value (
/* Create object for result */
- obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_NUMBER);
+ obj_desc = acpi_cm_create_internal_object (ACPI_TYPE_INTEGER);
if (!obj_desc) {
return (AE_NO_MEMORY);
}
- obj_desc->number.value = temp_val;
+ obj_desc->integer.value = temp_val;
/* Truncate value if we are executing from a 32-bit ACPI table */
diff --git a/drivers/acpi/interpreter/amresolv.c b/drivers/acpi/interpreter/amresolv.c
index 2fa59e9bb..c08a4731c 100644
--- a/drivers/acpi/interpreter/amresolv.c
+++ b/drivers/acpi/interpreter/amresolv.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amresolv - AML Interpreter object resolution
- * $Revision: 78 $
+ * $Revision: 81 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -118,19 +118,19 @@ acpi_aml_get_field_unit_value (
mask = ACPI_UINT32_MAX;
}
- result_desc->number.type = (u8) ACPI_TYPE_NUMBER;
+ result_desc->integer.type = (u8) ACPI_TYPE_INTEGER;
/* Get the 32 bit value at the location */
- MOVE_UNALIGNED32_TO_32 (&result_desc->number.value, location);
+ MOVE_UNALIGNED32_TO_32 (&result_desc->integer.value, location);
/*
* Shift the 32-bit word containing the field, and mask off the
* resulting value
*/
- result_desc->number.value =
- (result_desc->number.value >> field_desc->field_unit.bit_offset) & mask;
+ result_desc->integer.value =
+ (result_desc->integer.value >> field_desc->field_unit.bit_offset) & mask;
/* Release global lock if we acquired it earlier */
@@ -150,7 +150,7 @@ acpi_aml_get_field_unit_value (
*
* RETURN: Status
*
- * DESCRIPTION: Convert Reference entries on Obj_stack to Rvalues
+ * DESCRIPTION: Convert Reference objects to values
*
******************************************************************************/
@@ -279,7 +279,7 @@ acpi_aml_resolve_object_to_value (
acpi_cm_remove_reference (stack_desc);
*stack_ptr = obj_desc;
- if (ACPI_TYPE_NUMBER == obj_desc->common.type) {
+ if (ACPI_TYPE_INTEGER == obj_desc->common.type) {
/* Value is a Number */
}
@@ -311,7 +311,7 @@ acpi_aml_resolve_object_to_value (
acpi_cm_remove_reference (stack_desc);
*stack_ptr = obj_desc;
- if (ACPI_TYPE_NUMBER == obj_desc->common.type) {
+ if (ACPI_TYPE_INTEGER == obj_desc->common.type) {
/* Value is a Number */
}
@@ -326,22 +326,22 @@ acpi_aml_resolve_object_to_value (
case AML_ZERO_OP:
- stack_desc->common.type = (u8) ACPI_TYPE_NUMBER;
- stack_desc->number.value = 0;
+ stack_desc->common.type = (u8) ACPI_TYPE_INTEGER;
+ stack_desc->integer.value = 0;
break;
case AML_ONE_OP:
- stack_desc->common.type = (u8) ACPI_TYPE_NUMBER;
- stack_desc->number.value = 1;
+ stack_desc->common.type = (u8) ACPI_TYPE_INTEGER;
+ stack_desc->integer.value = 1;
break;
case AML_ONES_OP:
- stack_desc->common.type = (u8) ACPI_TYPE_NUMBER;
- stack_desc->number.value = ACPI_INTEGER_MAX;
+ stack_desc->common.type = (u8) ACPI_TYPE_INTEGER;
+ stack_desc->integer.value = ACPI_INTEGER_MAX;
/* Truncate value if we are executing from a 32-bit ACPI table */
diff --git a/drivers/acpi/interpreter/amresop.c b/drivers/acpi/interpreter/amresop.c
index 83fda4455..a837fd66b 100644
--- a/drivers/acpi/interpreter/amresop.c
+++ b/drivers/acpi/interpreter/amresop.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amresop - AML Interpreter operand/object resolution
- * $Revision: 18 $
+ * $Revision: 22 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -214,8 +214,13 @@ acpi_aml_resolve_operands (
switch (this_arg_type)
{
- case ARGI_REFERENCE: /* Reference */
- case ARGI_TARGETREF:
+ case ARGI_REFERENCE: /* References */
+ case ARGI_INTEGER_REF:
+ case ARGI_OBJECT_REF:
+ case ARGI_DEVICE_REF:
+ case ARGI_TARGETREF: /* TBD: must implement implicit conversion rules before store */
+ case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
+ case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */
/* Need an operand of type INTERNAL_TYPE_REFERENCE */
@@ -283,20 +288,6 @@ acpi_aml_resolve_operands (
* For the simple cases, only one type of resolved object
* is allowed
*/
- case ARGI_NUMBER: /* Number */
-
- /* Need an operand of type ACPI_TYPE_NUMBER */
-
- type_needed = ACPI_TYPE_NUMBER;
- break;
-
- case ARGI_BUFFER:
-
- /* Need an operand of type ACPI_TYPE_BUFFER */
-
- type_needed = ACPI_TYPE_BUFFER;
- break;
-
case ARGI_MUTEX:
/* Need an operand of type ACPI_TYPE_MUTEX */
@@ -344,11 +335,69 @@ acpi_aml_resolve_operands (
* The more complex cases allow multiple resolved object types
*/
+ case ARGI_INTEGER: /* Number */
+
+ /*
+ * Need an operand of type ACPI_TYPE_INTEGER,
+ * But we can implicitly convert from a STRING or BUFFER
+ */
+ status = acpi_aml_convert_to_integer (stack_ptr, walk_state);
+ if (ACPI_FAILURE (status)) {
+ if (status == AE_TYPE) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ return (status);
+ }
+
+ goto next_operand;
+ break;
+
+
+ case ARGI_BUFFER:
+
+ /*
+ * Need an operand of type ACPI_TYPE_BUFFER,
+ * But we can implicitly convert from a STRING or INTEGER
+ */
+ status = acpi_aml_convert_to_buffer (stack_ptr, walk_state);
+ if (ACPI_FAILURE (status)) {
+ if (status == AE_TYPE) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ return (status);
+ }
+
+ goto next_operand;
+ break;
+
+
case ARGI_STRING:
- /* Need an operand of type ACPI_TYPE_STRING or ACPI_TYPE_BUFFER */
+ /*
+ * Need an operand of type ACPI_TYPE_STRING,
+ * But we can implicitly convert from a BUFFER or INTEGER
+ */
+ status = acpi_aml_convert_to_string (stack_ptr, walk_state);
+ if (ACPI_FAILURE (status)) {
+ if (status == AE_TYPE) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ return (status);
+ }
+
+ goto next_operand;
+ break;
+
+
+ case ARGI_COMPUTEDATA:
+
+ /* Need an operand of type INTEGER, STRING or BUFFER */
- if ((ACPI_TYPE_STRING != (*stack_ptr)->common.type) &&
+ if ((ACPI_TYPE_INTEGER != (*stack_ptr)->common.type) &&
+ (ACPI_TYPE_STRING != (*stack_ptr)->common.type) &&
(ACPI_TYPE_BUFFER != (*stack_ptr)->common.type))
{
return (AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/interpreter/amstore.c b/drivers/acpi/interpreter/amstore.c
index 8887e0997..9f350bb29 100644
--- a/drivers/acpi/interpreter/amstore.c
+++ b/drivers/acpi/interpreter/amstore.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amstore - AML Interpreter object store support
- * $Revision: 117 $
+ * $Revision: 121 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -64,12 +64,7 @@ acpi_aml_exec_store (
ACPI_WALK_STATE *walk_state)
{
ACPI_STATUS status = AE_OK;
- ACPI_OPERAND_OBJECT *delete_dest_desc = NULL;
- ACPI_OPERAND_OBJECT *tmp_desc;
- ACPI_NAMESPACE_NODE *node = NULL;
- u8 value = 0;
- u32 length;
- u32 i;
+ ACPI_OPERAND_OBJECT *ref_desc = dest_desc;
/* Validate parameters */
@@ -78,27 +73,23 @@ acpi_aml_exec_store (
return (AE_AML_NO_OPERAND);
}
- /* Examine the datatype of the Dest_desc */
+ /* Dest_desc can be either a namespace node or an ACPI object */
if (VALID_DESCRIPTOR_TYPE (dest_desc, ACPI_DESC_TYPE_NAMED)) {
- /* Dest is an ACPI_HANDLE, create a new object */
-
- node = (ACPI_NAMESPACE_NODE *) dest_desc;
- dest_desc = acpi_cm_create_internal_object (INTERNAL_TYPE_REFERENCE);
- if (!dest_desc) {
- /* Allocation failure */
-
- return (AE_NO_MEMORY);
- }
+ /*
+ * Dest is a namespace node,
+ * Storing an object into a Name "container"
+ */
+ status = acpi_aml_store_object_to_node (val_desc,
+ (ACPI_NAMESPACE_NODE *) dest_desc, walk_state);
- /* Build a new Reference wrapper around the handle */
+ /* All done, that's it */
- dest_desc->reference.op_code = AML_NAME_OP;
- dest_desc->reference.object = node;
+ return (status);
}
- /* Destination object must be of type Reference */
+ /* Destination object must be an object of type Reference */
if (dest_desc->common.type != INTERNAL_TYPE_REFERENCE) {
/* Destination is not an Reference */
@@ -106,36 +97,130 @@ acpi_aml_exec_store (
return (AE_AML_OPERAND_TYPE);
}
- /* Examine the Reference opcode */
- switch (dest_desc->reference.op_code)
+ /*
+ * Examine the Reference opcode. These cases are handled:
+ *
+ * 1) Store to Name (Change the object associated with a name)
+ * 2) Store to an indexed area of a Buffer or Package
+ * 3) Store to a Method Local or Arg
+ * 4) Store to the debug object
+ * 5) Store to a constant -- a noop
+ */
+
+ switch (ref_desc->reference.op_code)
{
case AML_NAME_OP:
- /*
- * Storing into a Name
- */
- delete_dest_desc = dest_desc;
- status = acpi_aml_store_object_to_node (val_desc, dest_desc->reference.object,
- walk_state);
+ /* Storing an object into a Name "container" */
- break; /* Case Name_op */
+ status = acpi_aml_store_object_to_node (val_desc, ref_desc->reference.object,
+ walk_state);
+ break;
case AML_INDEX_OP:
- delete_dest_desc = dest_desc;
+ /* Storing to an Index (pointer into a packager or buffer) */
+
+ status = acpi_aml_store_object_to_index (val_desc, ref_desc, walk_state);
+ break;
+
+
+ case AML_LOCAL_OP:
+
+ status = acpi_ds_method_data_set_value (MTH_TYPE_LOCAL,
+ (ref_desc->reference.offset), val_desc, walk_state);
+ break;
+
+
+ case AML_ARG_OP:
+
+ status = acpi_ds_method_data_set_value (MTH_TYPE_ARG,
+ (ref_desc->reference.offset), val_desc, walk_state);
+ break;
+
+
+ case AML_DEBUG_OP:
/*
- * Valid source value and destination reference pointer.
+ * Storing to the Debug object causes the value stored to be
+ * displayed and otherwise has no effect -- see ACPI Specification
*
- * ACPI Specification 1.0B section 15.2.3.4.2.13:
- * Destination should point to either a buffer or a package
+ * TBD: print known object types "prettier".
*/
+ break;
+
+
+ case AML_ZERO_OP:
+ case AML_ONE_OP:
+ case AML_ONES_OP:
+
/*
- * Actually, storing to a package is not so simple. The source must be
+ * Storing to a constant is a no-op -- see ACPI Specification
+ * Delete the reference descriptor, however
+ */
+ break;
+
+
+ default:
+
+ /* TBD: [Restructure] use object dump routine !! */
+
+ status = AE_AML_INTERNAL;
+ break;
+
+ } /* switch (Ref_desc->Reference.Op_code) */
+
+
+ /* Always delete the reference descriptor object */
+
+ if (ref_desc) {
+ acpi_cm_remove_reference (ref_desc);
+ }
+
+ return (status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_store_object_to_index
+ *
+ * PARAMETERS: *Val_desc - Value to be stored
+ * *Node - Named object to recieve the value
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the object to the named object.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_store_object_to_index (
+ ACPI_OPERAND_OBJECT *val_desc,
+ ACPI_OPERAND_OBJECT *dest_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_STATUS status = AE_OK;
+ ACPI_OPERAND_OBJECT *obj_desc;
+ u32 length;
+ u32 i;
+ u8 value = 0;
+
+
+ /*
+ * Destination must be a reference pointer, and
+ * must point to either a buffer or a package
+ */
+
+ switch (dest_desc->reference.target_type)
+ {
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * Storing to a package element is not simple. The source must be
* evaluated and converted to the type of the destination and then the
* source is copied into the destination - we can't just point to the
* source object.
@@ -145,8 +230,8 @@ acpi_aml_exec_store (
* The object at *(Dest_desc->Reference.Where) is the
* element within the package that is to be modified.
*/
- tmp_desc = *(dest_desc->reference.where);
- if (tmp_desc) {
+ obj_desc = *(dest_desc->reference.where);
+ if (obj_desc) {
/*
* If the Destination element is a package, we will delete
* that object and construct a new one.
@@ -155,41 +240,39 @@ acpi_aml_exec_store (
* to be packages?
* && (Val_desc->Common.Type == ACPI_TYPE_PACKAGE)
*/
- if (tmp_desc->common.type == ACPI_TYPE_PACKAGE) {
+ if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
/*
* Take away the reference for being part of a package and
* delete
*/
- acpi_cm_remove_reference (tmp_desc);
- acpi_cm_remove_reference (tmp_desc);
+ acpi_cm_remove_reference (obj_desc);
+ acpi_cm_remove_reference (obj_desc);
- tmp_desc = NULL;
+ obj_desc = NULL;
}
}
- if (!tmp_desc) {
+ if (!obj_desc) {
/*
- * If the Tmp_desc is NULL, that means an uninitialized package
- * has been used as a destination, therefore, we must create
- * the destination element to match the type of the source
- * element NOTE: Val_desc can be of any type.
+ * If the Obj_desc is NULL, it means that an uninitialized package
+ * element has been used as a destination (this is OK), therefore,
+ * we must create the destination element to match the type of the
+ * source element NOTE: Val_desc can be of any type.
*/
- tmp_desc = acpi_cm_create_internal_object (val_desc->common.type);
- if (!tmp_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
+ obj_desc = acpi_cm_create_internal_object (val_desc->common.type);
+ if (!obj_desc) {
+ return (AE_NO_MEMORY);
}
/*
* If the source is a package, copy the source to the new dest
*/
- if (ACPI_TYPE_PACKAGE == tmp_desc->common.type) {
+ if (ACPI_TYPE_PACKAGE == obj_desc->common.type) {
status = acpi_aml_build_copy_internal_package_object (
- val_desc, tmp_desc, walk_state);
+ val_desc, obj_desc, walk_state);
if (ACPI_FAILURE (status)) {
- acpi_cm_remove_reference (tmp_desc);
- tmp_desc = NULL;
- goto cleanup;
+ acpi_cm_remove_reference (obj_desc);
+ return (status);
}
}
@@ -199,38 +282,31 @@ acpi_aml_exec_store (
* part of the parent package
*/
- *(dest_desc->reference.where) = tmp_desc;
- acpi_cm_add_reference (tmp_desc);
+ *(dest_desc->reference.where) = obj_desc;
+ acpi_cm_add_reference (obj_desc);
}
- if (ACPI_TYPE_PACKAGE != tmp_desc->common.type) {
+ if (ACPI_TYPE_PACKAGE != obj_desc->common.type) {
/*
* The destination element is not a package, so we need to
* convert the contents of the source (Val_desc) and copy into
- * the destination (Tmp_desc)
+ * the destination (Obj_desc)
*/
- status = acpi_aml_store_object_to_object (val_desc, tmp_desc,
+ status = acpi_aml_store_object_to_object (val_desc, obj_desc,
walk_state);
if (ACPI_FAILURE (status)) {
/*
* An error occurrered when copying the internal object
* so delete the reference.
*/
- status = AE_AML_OPERAND_TYPE;
+ return (AE_AML_OPERAND_TYPE);
}
}
-
- break;
}
+ break;
- /*
- * Check that the destination is a Buffer Field type
- */
- if (dest_desc->reference.target_type != ACPI_TYPE_BUFFER_FIELD) {
- status = AE_AML_OPERAND_TYPE;
- break;
- }
+ case ACPI_TYPE_BUFFER_FIELD:
/*
* Storing into a buffer at a location defined by an Index.
*
@@ -239,13 +315,11 @@ acpi_aml_exec_store (
*/
/*
- * Set the Tmp_desc to the destination object and type check.
+ * Set the Obj_desc to the destination object and type check.
*/
- tmp_desc = dest_desc->reference.object;
-
- if (tmp_desc->common.type != ACPI_TYPE_BUFFER) {
- status = AE_AML_OPERAND_TYPE;
- break;
+ obj_desc = dest_desc->reference.object;
+ if (obj_desc->common.type != ACPI_TYPE_BUFFER) {
+ return (AE_AML_OPERAND_TYPE);
}
/*
@@ -256,15 +330,15 @@ acpi_aml_exec_store (
switch (val_desc->common.type)
{
/*
- * If the type is Integer, the Length is 4.
+ * If the type is Integer, assign bytewise
* This loop to assign each of the elements is somewhat
- * backward because of the Big Endian-ness of IA-64
+ * backward because of the Big Endian-ness of IA-64
*/
- case ACPI_TYPE_NUMBER:
- length = 4;
+ case ACPI_TYPE_INTEGER:
+ length = sizeof (ACPI_INTEGER);
for (i = length; i != 0; i--) {
- value = (u8)(val_desc->number.value >> (MUL_8 (i - 1)));
- tmp_desc->buffer.pointer[dest_desc->reference.offset] = value;
+ value = (u8)(val_desc->integer.value >> (MUL_8 (i - 1)));
+ obj_desc->buffer.pointer[dest_desc->reference.offset] = value;
}
break;
@@ -276,7 +350,7 @@ acpi_aml_exec_store (
length = val_desc->buffer.length;
for (i = 0; i < length; i++) {
value = *(val_desc->buffer.pointer + i);
- tmp_desc->buffer.pointer[dest_desc->reference.offset] = value;
+ obj_desc->buffer.pointer[dest_desc->reference.offset] = value;
}
break;
@@ -288,7 +362,7 @@ acpi_aml_exec_store (
length = val_desc->string.length;
for (i = 0; i < length; i++) {
value = *(val_desc->string.pointer + i);
- tmp_desc->buffer.pointer[dest_desc->reference.offset] = value;
+ obj_desc->buffer.pointer[dest_desc->reference.offset] = value;
}
break;
@@ -299,80 +373,207 @@ acpi_aml_exec_store (
status = AE_AML_OPERAND_TYPE;
break;
}
+ break;
- /*
- * If we had an error, break out of this case statement.
- */
- if (ACPI_FAILURE (status)) {
- break;
- }
-
- /*
- * Set the return pointer
- */
- dest_desc = tmp_desc;
+ default:
+ status = AE_AML_OPERAND_TYPE;
break;
+ }
- case AML_ZERO_OP:
- case AML_ONE_OP:
- case AML_ONES_OP:
- /*
- * Storing to a constant is a no-op -- see ACPI Specification
- * Delete the result descriptor.
- */
+ return (status);
+}
- delete_dest_desc = dest_desc;
- break;
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_store_object_to_node
+ *
+ * PARAMETERS: *Source_desc - Value to be stored
+ * *Node - Named object to recieve the value
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the object to the named object.
+ *
+ * The Assignment of an object to a named object is handled here
+ * The val passed in will replace the current value (if any)
+ * with the input value.
+ *
+ * When storing into an object the data is converted to the
+ * target object type then stored in the object. This means
+ * that the target object type (for an initialized target) will
+ * not be changed by a store operation.
+ *
+ * NOTE: the global lock is acquired early. This will result
+ * in the global lock being held a bit longer. Also, if the
+ * function fails during set up we may get the lock when we
+ * don't really need it. I don't think we care.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_store_object_to_node (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_NAMESPACE_NODE *node,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_STATUS status = AE_OK;
+ ACPI_OPERAND_OBJECT *target_desc;
+ OBJECT_TYPE_INTERNAL target_type = ACPI_TYPE_ANY;
- case AML_LOCAL_OP:
+ /*
+ * Assuming the parameters were already validated
+ */
+ ACPI_ASSERT((node) && (source_desc));
- status = acpi_ds_method_data_set_value (MTH_TYPE_LOCAL,
- (dest_desc->reference.offset), val_desc, walk_state);
- delete_dest_desc = dest_desc;
- break;
+ /*
+ * Get current type of the node, and object attached to Node
+ */
+ target_type = acpi_ns_get_type (node);
+ target_desc = acpi_ns_get_attached_object (node);
- case AML_ARG_OP:
- status = acpi_ds_method_data_set_value (MTH_TYPE_ARG,
- (dest_desc->reference.offset), val_desc, walk_state);
- delete_dest_desc = dest_desc;
+ /*
+ * Resolve the source object to an actual value
+ * (If it is a reference object)
+ */
+ status = acpi_aml_resolve_object (&source_desc, target_type, walk_state);
+ if (ACPI_FAILURE (status)) {
+ return (status);
+ }
+
+
+ /*
+ * Do the actual store operation
+ */
+ switch (target_type)
+ {
+ case INTERNAL_TYPE_DEF_FIELD:
+
+ /* Raw data copy for target types Integer/String/Buffer */
+
+ status = acpi_aml_copy_data_to_named_field (source_desc, node);
break;
- case AML_DEBUG_OP:
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ case INTERNAL_TYPE_BANK_FIELD:
+ case INTERNAL_TYPE_INDEX_FIELD:
+ case ACPI_TYPE_FIELD_UNIT:
/*
- * Storing to the Debug object causes the value stored to be
- * displayed and otherwise has no effect -- see ACPI Specification
+ * These target types are all of type Integer/String/Buffer, and
+ * therefore support implicit conversion before the store.
+ *
+ * Copy and/or convert the source object to a new target object
*/
+ status = acpi_aml_store_object (source_desc, target_type, &target_desc, walk_state);
+ if (ACPI_FAILURE (status)) {
+ return (status);
+ }
- delete_dest_desc = dest_desc;
+ /*
+ * Store the new Target_desc as the new value of the Name, and set
+ * the Name's type to that of the value being stored in it.
+ * Source_desc reference count is incremented by Attach_object.
+ */
+ status = acpi_ns_attach_object (node, target_desc, target_type);
break;
default:
- /* TBD: [Restructure] use object dump routine !! */
+ /* No conversions for all other types. Just attach the source object */
- delete_dest_desc = dest_desc;
- status = AE_AML_INTERNAL;
+ status = acpi_ns_attach_object (node, source_desc, source_desc->common.type);
- } /* switch(Dest_desc->Reference.Op_code) */
+ break;
+ }
-cleanup:
+ return (status);
+}
- /* Cleanup and exit*/
- if (delete_dest_desc) {
- acpi_cm_remove_reference (delete_dest_desc);
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_store_object_to_object
+ *
+ * PARAMETERS: *Source_desc - Value to be stored
+ * *Dest_desc - Object to receive the value
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store an object to another object.
+ *
+ * The Assignment of an object to another (not named) object
+ * is handled here.
+ * The val passed in will replace the current value (if any)
+ * with the input value.
+ *
+ * When storing into an object the data is converted to the
+ * target object type then stored in the object. This means
+ * that the target object type (for an initialized target) will
+ * not be changed by a store operation.
+ *
+ * This module allows destination types of Number, String,
+ * and Buffer.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_store_object_to_object (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *dest_desc,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_STATUS status = AE_OK;
+ OBJECT_TYPE_INTERNAL destination_type = dest_desc->common.type;
+
+
+ /*
+ * Assuming the parameters are valid!
+ */
+ ACPI_ASSERT((dest_desc) && (source_desc));
+
+
+ /*
+ * From this interface, we only support Integers/Strings/Buffers
+ */
+ switch (destination_type)
+ {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ break;
+
+ default:
+ return (AE_NOT_IMPLEMENTED);
}
+
+ /*
+ * Resolve the source object to an actual value
+ * (If it is a reference object)
+ */
+ status = acpi_aml_resolve_object (&source_desc, destination_type, walk_state);
+ if (ACPI_FAILURE (status)) {
+ return (status);
+ }
+
+
+ /*
+ * Copy and/or convert the source object to the destination object
+ */
+ status = acpi_aml_store_object (source_desc, destination_type, &dest_desc, walk_state);
+
+
return (status);
}
-
diff --git a/drivers/acpi/interpreter/amstoren.c b/drivers/acpi/interpreter/amstoren.c
index 91b2095c5..eb62ed2f1 100644
--- a/drivers/acpi/interpreter/amstoren.c
+++ b/drivers/acpi/interpreter/amstoren.c
@@ -2,13 +2,13 @@
/******************************************************************************
*
* Module Name: amstoren - AML Interpreter object store support,
- * Store to Node (namespace object)
- * $Revision: 24 $
+ * Store to Node (namespace object)
+ * $Revision: 28 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,127 +41,72 @@
/*******************************************************************************
*
- * FUNCTION: Acpi_aml_store_object_to_node
+ * FUNCTION: Acpi_aml_resolve_object
*
- * PARAMETERS: *Val_desc - Value to be stored
- * *Node - Named object to recieve the value
+ * PARAMETERS: Source_desc_ptr - Pointer to the source object
+ * Target_type - Current type of the target
+ * Walk_state - Current walk state
*
- * RETURN: Status
- *
- * DESCRIPTION: Store the object to the named object.
- *
- * The Assignment of an object to a named object is handled here
- * The val passed in will replace the current value (if any)
- * with the input value.
+ * RETURN: Status, resolved object in Source_desc_ptr.
*
- * When storing into an object the data is converted to the
- * target object type then stored in the object. This means
- * that the target object type (for an initialized target) will
- * not be changed by a store operation.
- *
- * NOTE: the global lock is acquired early. This will result
- * in the global lock being held a bit longer. Also, if the
- * function fails during set up we may get the lock when we
- * don't really need it. I don't think we care.
+ * DESCRIPTION: Resolve an object. If the object is a reference, dereference
+ * it and return the actual object in the Source_desc_ptr.
*
******************************************************************************/
ACPI_STATUS
-acpi_aml_store_object_to_node (
- ACPI_OPERAND_OBJECT *val_desc,
- ACPI_NAMESPACE_NODE *node,
+acpi_aml_resolve_object (
+ ACPI_OPERAND_OBJECT **source_desc_ptr,
+ OBJECT_TYPE_INTERNAL target_type,
ACPI_WALK_STATE *walk_state)
{
+ ACPI_OPERAND_OBJECT *source_desc = *source_desc_ptr;
ACPI_STATUS status = AE_OK;
- u8 *buffer = NULL;
- u32 length = 0;
- u32 mask;
- u32 new_value;
- u8 locked = FALSE;
- u8 *location=NULL;
- ACPI_OPERAND_OBJECT *dest_desc;
- OBJECT_TYPE_INTERNAL destination_type = ACPI_TYPE_ANY;
/*
- * Assuming the parameters are valid!!!
+ * Ensure we have a Source that can be stored in the target
*/
- ACPI_ASSERT((node) && (val_desc));
+ switch (target_type)
+ {
- destination_type = acpi_ns_get_type (node);
+ /* This case handles the "interchangeable" types Integer, String, and Buffer. */
/*
- * First ensure we have a value that can be stored in the target
+ * These cases all require only Integers or values that
+ * can be converted to Integers (Strings or Buffers)
*/
- switch (destination_type)
- {
- /* Type of Name's existing value */
-
- case INTERNAL_TYPE_ALIAS:
-
- /*
- * Aliases are resolved by Acpi_aml_prep_operands
- */
-
- status = AE_AML_INTERNAL;
- break;
-
-
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_FIELD_UNIT:
case INTERNAL_TYPE_BANK_FIELD:
case INTERNAL_TYPE_INDEX_FIELD:
- case ACPI_TYPE_FIELD_UNIT:
- case ACPI_TYPE_NUMBER:
-
- /*
- * These cases all require only number values or values that
- * can be converted to numbers.
- *
- * If value is not a Number, try to resolve it to one.
- */
-
- if (val_desc->common.type != ACPI_TYPE_NUMBER) {
- /*
- * Initially not a number, convert
- */
- status = acpi_aml_resolve_to_value (&val_desc, walk_state);
- if (ACPI_SUCCESS (status) &&
- (val_desc->common.type != ACPI_TYPE_NUMBER))
- {
- /*
- * Conversion successful but still not a number
- */
- status = AE_AML_OPERAND_TYPE;
- }
- }
-
- break;
+ /*
+ * Stores into a Field/Region or into a Buffer/String
+ * are all essentially the same.
+ */
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
case INTERNAL_TYPE_DEF_FIELD:
/*
- * Storing into a Field in a region or into a buffer or into
- * a string all is essentially the same.
- *
- * If value is not a valid type, try to resolve it to one.
+ * If Source_desc is not a valid type, try to resolve it to one.
*/
-
- if ((val_desc->common.type != ACPI_TYPE_NUMBER) &&
- (val_desc->common.type != ACPI_TYPE_BUFFER) &&
- (val_desc->common.type != ACPI_TYPE_STRING))
+ if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
+ (source_desc->common.type != ACPI_TYPE_BUFFER) &&
+ (source_desc->common.type != ACPI_TYPE_STRING))
{
/*
- * Initially not a valid type, convert
+ * Initially not a valid type, convert
*/
- status = acpi_aml_resolve_to_value (&val_desc, walk_state);
+ status = acpi_aml_resolve_to_value (source_desc_ptr, walk_state);
if (ACPI_SUCCESS (status) &&
- (val_desc->common.type != ACPI_TYPE_NUMBER) &&
- (val_desc->common.type != ACPI_TYPE_BUFFER) &&
- (val_desc->common.type != ACPI_TYPE_STRING))
+ (source_desc->common.type != ACPI_TYPE_INTEGER) &&
+ (source_desc->common.type != ACPI_TYPE_BUFFER) &&
+ (source_desc->common.type != ACPI_TYPE_STRING))
{
/*
- * Conversion successful but still not a valid type
+ * Conversion successful but still not a valid type
*/
status = AE_AML_OPERAND_TYPE;
}
@@ -169,347 +114,133 @@ acpi_aml_store_object_to_node (
break;
- case ACPI_TYPE_PACKAGE:
+ case INTERNAL_TYPE_ALIAS:
/*
- * TBD: [Unhandled] Not real sure what to do here
+ * Aliases are resolved by Acpi_aml_prep_operands
*/
- status = AE_NOT_IMPLEMENTED;
+ status = AE_AML_INTERNAL;
break;
+ case ACPI_TYPE_PACKAGE:
default:
/*
- * All other types than Alias and the various Fields come here.
- * Store Val_desc as the new value of the Name, and set
- * the Name's type to that of the value being stored in it.
- * Val_desc reference count is incremented by Attach_object.
+ * All other types than Alias and the various Fields come here,
+ * including the untyped case - ACPI_TYPE_ANY.
*/
-
- status = acpi_ns_attach_object (node, val_desc, val_desc->common.type);
-
- goto clean_up_and_bail_out;
break;
}
- /* Exit now if failure above */
+ return (status);
+}
+
- if (ACPI_FAILURE (status)) {
- goto clean_up_and_bail_out;
- }
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_store_object
+ *
+ * PARAMETERS: Source_desc - Object to store
+ * Target_type - Current type of the target
+ * Target_desc_ptr - Pointer to the target
+ * Walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: "Store" an object to another object. This may include
+ * converting the source type to the target type (implicit
+ * conversion), and a copy of the value of the source to
+ * the target.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_store_object (
+ ACPI_OPERAND_OBJECT *source_desc,
+ OBJECT_TYPE_INTERNAL target_type,
+ ACPI_OPERAND_OBJECT **target_desc_ptr,
+ ACPI_WALK_STATE *walk_state)
+{
+ ACPI_OPERAND_OBJECT *target_desc = *target_desc_ptr;
+ ACPI_STATUS status;
- /*
- * Get descriptor for object attached to Node
- */
- dest_desc = acpi_ns_get_attached_object (node);
- if (!dest_desc) {
- /*
- * There is no existing object attached to this Node
- */
- status = AE_AML_INTERNAL;
- goto clean_up_and_bail_out;
- }
/*
- * Make sure the destination Object is the same as the Node
+ * Perform the "implicit conversion" of the source to the current type
+ * of the target - As per the ACPI specification.
+ *
+ * If no conversion performed, Source_desc is left alone, otherwise it
+ * is updated with a new object.
*/
- if (dest_desc->common.type != (u8) destination_type) {
- status = AE_AML_INTERNAL;
- goto clean_up_and_bail_out;
+ status = acpi_aml_convert_to_target_type (target_type, &source_desc, walk_state);
+ if (ACPI_FAILURE (status)) {
+ return (status);
}
+
/*
- * Acpi_everything is ready to execute now, We have
- * a value we can handle, just perform the update
+ * We now have two objects of identical types, and we can perform a
+ * copy of the *value* of the source object.
*/
-
- switch (destination_type)
+ switch (target_type)
{
- /* Type of Name's existing value */
-
- case INTERNAL_TYPE_BANK_FIELD:
+ case ACPI_TYPE_ANY:
+ case INTERNAL_TYPE_DEF_ANY:
/*
- * Get the global lock if needed
+ * The target namespace node is uninitialized (has no target object),
+ * and will take on the type of the source object
*/
- locked = acpi_aml_acquire_global_lock (dest_desc->bank_field.lock_rule);
-
- /*
- * Set Bank value to select proper Bank
- * Perform the update (Set Bank Select)
- */
-
- status = acpi_aml_access_named_field (ACPI_WRITE,
- dest_desc->bank_field.bank_select,
- &dest_desc->bank_field.value,
- sizeof (dest_desc->bank_field.value));
- if (ACPI_SUCCESS (status)) {
- /* Set bank select successful, set data value */
-
- status = acpi_aml_access_named_field (ACPI_WRITE,
- dest_desc->bank_field.bank_select,
- &val_desc->bank_field.value,
- sizeof (val_desc->bank_field.value));
- }
+ *target_desc_ptr = source_desc;
break;
- case INTERNAL_TYPE_DEF_FIELD:
-
- /*
- * Get the global lock if needed
- */
- locked = acpi_aml_acquire_global_lock (val_desc->field.lock_rule);
-
- /*
- * Perform the update
- */
+ case ACPI_TYPE_INTEGER:
- switch (val_desc->common.type)
- {
- case ACPI_TYPE_NUMBER:
- buffer = (u8 *) &val_desc->number.value;
- length = sizeof (val_desc->number.value);
- break;
-
- case ACPI_TYPE_BUFFER:
- buffer = (u8 *) val_desc->buffer.pointer;
- length = val_desc->buffer.length;
- break;
-
- case ACPI_TYPE_STRING:
- buffer = (u8 *) val_desc->string.pointer;
- length = val_desc->string.length;
- break;
- }
-
- status = acpi_aml_access_named_field (ACPI_WRITE,
- node, buffer, length);
-
- break; /* Global Lock released below */
+ target_desc->integer.value = source_desc->integer.value;
+ /* Truncate value if we are executing from a 32-bit ACPI table */
- case ACPI_TYPE_STRING:
-
- /*
- * Perform the update
- */
-
- switch (val_desc->common.type)
- {
- case ACPI_TYPE_NUMBER:
- buffer = (u8 *) &val_desc->number.value;
- length = sizeof (val_desc->number.value);
- break;
-
- case ACPI_TYPE_BUFFER:
- buffer = (u8 *) val_desc->buffer.pointer;
- length = val_desc->buffer.length;
- break;
-
- case ACPI_TYPE_STRING:
- buffer = (u8 *) val_desc->string.pointer;
- length = val_desc->string.length;
- break;
- }
-
- /*
- * Setting a string value replaces the old string
- */
-
- if (length < dest_desc->string.length) {
- /*
- * Zero fill, not willing to do pointer arithmetic for
- * archetecture independance. Just clear the whole thing
- */
- MEMSET(dest_desc->string.pointer, 0, dest_desc->string.length);
- MEMCPY(dest_desc->string.pointer, buffer, length);
- }
- else {
- /*
- * Free the current buffer, then allocate a buffer
- * large enough to hold the value
- */
- if ( dest_desc->string.pointer &&
- !acpi_tb_system_table_pointer (dest_desc->string.pointer))
- {
- /*
- * Only free if not a pointer into the DSDT
- */
-
- acpi_cm_free(dest_desc->string.pointer);
- }
+ acpi_aml_truncate_for32bit_table (target_desc, walk_state);
+ break;
- dest_desc->string.pointer = acpi_cm_allocate (length + 1);
- dest_desc->string.length = length;
- if (!dest_desc->string.pointer) {
- status = AE_NO_MEMORY;
- goto clean_up_and_bail_out;
- }
+ case ACPI_TYPE_FIELD_UNIT:
- MEMCPY(dest_desc->string.pointer, buffer, length);
- }
+ status = acpi_aml_copy_integer_to_field_unit (source_desc, target_desc);
break;
- case ACPI_TYPE_BUFFER:
-
- /*
- * Perform the update to the buffer
- */
-
- switch (val_desc->common.type)
- {
- case ACPI_TYPE_NUMBER:
- buffer = (u8 *) &val_desc->number.value;
- length = sizeof (val_desc->number.value);
- break;
-
- case ACPI_TYPE_BUFFER:
- buffer = (u8 *) val_desc->buffer.pointer;
- length = val_desc->buffer.length;
- break;
-
- case ACPI_TYPE_STRING:
- buffer = (u8 *) val_desc->string.pointer;
- length = val_desc->string.length;
- break;
- }
+ case INTERNAL_TYPE_BANK_FIELD:
- /*
- * Buffer is a static allocation,
- * only place what will fit in the buffer.
- */
- if (length <= dest_desc->buffer.length) {
- /*
- * Zero fill first, not willing to do pointer arithmetic for
- * archetecture independence. Just clear the whole thing
- */
- MEMSET(dest_desc->buffer.pointer, 0, dest_desc->buffer.length);
- MEMCPY(dest_desc->buffer.pointer, buffer, length);
- }
- else {
- /*
- * truncate, copy only what will fit
- */
- MEMCPY(dest_desc->buffer.pointer, buffer, dest_desc->buffer.length);
- }
+ status = acpi_aml_copy_integer_to_bank_field (source_desc, target_desc);
break;
case INTERNAL_TYPE_INDEX_FIELD:
- /*
- * Get the global lock if needed
- */
- locked = acpi_aml_acquire_global_lock (dest_desc->index_field.lock_rule);
-
- /*
- * Set Index value to select proper Data register
- * perform the update (Set index)
- */
-
- status = acpi_aml_access_named_field (ACPI_WRITE,
- dest_desc->index_field.index,
- &dest_desc->index_field.value,
- sizeof (dest_desc->index_field.value));
-
- if (ACPI_SUCCESS (status)) {
- /* set index successful, next set Data value */
-
- status = acpi_aml_access_named_field (ACPI_WRITE,
- dest_desc->index_field.data,
- &val_desc->number.value,
- sizeof (val_desc->number.value));
- }
+ status = acpi_aml_copy_integer_to_index_field (source_desc, target_desc);
break;
- case ACPI_TYPE_FIELD_UNIT:
-
-
- /*
- * If the Field Buffer and Index have not been previously evaluated,
- * evaluate them and save the results.
- */
- if (!(dest_desc->common.flags & AOPOBJ_DATA_VALID)) {
- status = acpi_ds_get_field_unit_arguments (dest_desc);
- if (ACPI_FAILURE (status)) {
- return (status);
- }
- }
-
- if ((!dest_desc->field_unit.container ||
- ACPI_TYPE_BUFFER != dest_desc->field_unit.container->common.type))
- {
- status = AE_AML_INTERNAL;
- goto clean_up_and_bail_out;
- }
-
- /*
- * Get the global lock if needed
- */
- locked = acpi_aml_acquire_global_lock (dest_desc->field_unit.lock_rule);
-
- /*
- * TBD: [Unhandled] REMOVE this limitation
- * Make sure the operation is within the limits of our implementation
- * this is not a Spec limitation!!
- */
- if (dest_desc->field_unit.length + dest_desc->field_unit.bit_offset > 32) {
- status = AE_NOT_IMPLEMENTED;
- goto clean_up_and_bail_out;
- }
-
- /* Field location is (base of buffer) + (byte offset) */
-
- location = dest_desc->field_unit.container->buffer.pointer
- + dest_desc->field_unit.offset;
-
- /*
- * Construct Mask with 1 bits where the field is,
- * 0 bits elsewhere
- */
- mask = ((u32) 1 << dest_desc->field_unit.length) - ((u32)1
- << dest_desc->field_unit.bit_offset);
-
- /* Zero out the field in the buffer */
-
- MOVE_UNALIGNED32_TO_32 (&new_value, location);
- new_value &= ~mask;
-
- /*
- * Shift and mask the new value into position,
- * and or it into the buffer.
- */
- new_value |= (val_desc->number.value << dest_desc->field_unit.bit_offset) &
- mask;
-
- /* Store back the value */
-
- MOVE_UNALIGNED32_TO_32 (location, &new_value);
+ case ACPI_TYPE_STRING:
+ status = acpi_aml_copy_string_to_string (source_desc, target_desc);
break;
- case ACPI_TYPE_NUMBER:
-
-
- dest_desc->number.value = val_desc->number.value;
-
- /* Truncate value if we are executing from a 32-bit ACPI table */
+ case ACPI_TYPE_BUFFER:
- acpi_aml_truncate_for32bit_table (dest_desc, walk_state);
+ status = acpi_aml_copy_buffer_to_buffer (source_desc, target_desc);
break;
case ACPI_TYPE_PACKAGE:
/*
- * TBD: [Unhandled] Not real sure what to do here
+ * TBD: [Unhandled] Not real sure what to do here
*/
status = AE_NOT_IMPLEMENTED;
break;
@@ -518,24 +249,13 @@ acpi_aml_store_object_to_node (
default:
/*
- * All other types than Alias and the various Fields come here.
- * Store Val_desc as the new value of the Name, and set
- * the Name's type to that of the value being stored in it.
- * Val_desc reference count is incremented by Attach_object.
+ * All other types come here.
*/
-
status = AE_NOT_IMPLEMENTED;
break;
}
-clean_up_and_bail_out:
-
- /*
- * Release global lock if we acquired it earlier
- */
- acpi_aml_release_global_lock (locked);
-
return (status);
}
diff --git a/drivers/acpi/interpreter/amstorob.c b/drivers/acpi/interpreter/amstorob.c
index f3a098bd2..8118f08bd 100644
--- a/drivers/acpi/interpreter/amstorob.c
+++ b/drivers/acpi/interpreter/amstorob.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amstorob - AML Interpreter object store support, store to object
- * $Revision: 18 $
+ * $Revision: 22 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,281 +40,383 @@
/*******************************************************************************
*
- * FUNCTION: Acpi_aml_store_object_to_object
+ * FUNCTION: Acpi_aml_copy_buffer_to_buffer
*
- * PARAMETERS: *Val_desc - Value to be stored
- * *Dest_desc - Object to receive the value
+ * PARAMETERS: Source_desc - Source object to copy
+ * Target_desc - Destination object of the copy
*
* RETURN: Status
*
- * DESCRIPTION: Store an object to another object.
+ * DESCRIPTION: Copy a buffer object to another buffer object.
*
- * The Assignment of an object to another (not named) object
- * is handled here.
- * The val passed in will replace the current value (if any)
- * with the input value.
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_copy_buffer_to_buffer (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc)
+{
+ u32 length;
+ u8 *buffer;
+
+ /*
+ * We know that Source_desc is a buffer by now
+ */
+ buffer = (u8 *) source_desc->buffer.pointer;
+ length = source_desc->buffer.length;
+
+ /*
+ * Buffer is a static allocation,
+ * only place what will fit in the buffer.
+ */
+ if (length <= target_desc->buffer.length) {
+ /* Clear existing buffer and copy in the new one */
+
+ MEMSET(target_desc->buffer.pointer, 0, target_desc->buffer.length);
+ MEMCPY(target_desc->buffer.pointer, buffer, length);
+ }
+
+ else {
+ /*
+ * Truncate the source, copy only what will fit
+ */
+ MEMCPY(target_desc->buffer.pointer, buffer, target_desc->buffer.length);
+
+ }
+
+ return (AE_OK);
+}
+
+
+/*******************************************************************************
*
- * When storing into an object the data is converted to the
- * target object type then stored in the object. This means
- * that the target object type (for an initialized target) will
- * not be changed by a store operation.
+ * FUNCTION: Acpi_aml_copy_string_to_string
+ *
+ * PARAMETERS: Source_desc - Source object to copy
+ * Target_desc - Destination object of the copy
+ *
+ * RETURN: Status
*
- * This module allows destination types of Number, String,
- * and Buffer.
+ * DESCRIPTION: Copy a String object to another String object
*
******************************************************************************/
ACPI_STATUS
-acpi_aml_store_object_to_object (
- ACPI_OPERAND_OBJECT *val_desc,
- ACPI_OPERAND_OBJECT *dest_desc,
- ACPI_WALK_STATE *walk_state)
+acpi_aml_copy_string_to_string (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc)
{
- ACPI_STATUS status = AE_OK;
- u8 *buffer = NULL;
- u32 length = 0;
- OBJECT_TYPE_INTERNAL destination_type = dest_desc->common.type;
+ u32 length;
+ u8 *buffer;
/*
- * Assuming the parameters are valid!!!
+ * We know that Source_desc is a string by now.
*/
- ACPI_ASSERT((dest_desc) && (val_desc));
+ buffer = (u8 *) source_desc->string.pointer;
+ length = source_desc->string.length;
/*
- * First ensure we have a value that can be stored in the target
+ * Setting a string value replaces the old string
*/
- switch (destination_type)
- {
- /* Type of Name's existing value */
+ if (length < target_desc->string.length) {
+ /* Clear old string and copy in the new one */
- case ACPI_TYPE_NUMBER:
+ MEMSET(target_desc->string.pointer, 0, target_desc->string.length);
+ MEMCPY(target_desc->string.pointer, buffer, length);
+ }
+ else {
/*
- * These cases all require only number values or values that
- * can be converted to numbers.
- *
- * If value is not a Number, try to resolve it to one.
+ * Free the current buffer, then allocate a buffer
+ * large enough to hold the value
*/
-
- if (val_desc->common.type != ACPI_TYPE_NUMBER) {
+ if (target_desc->string.pointer &&
+ !acpi_tb_system_table_pointer (target_desc->string.pointer))
+ {
/*
- * Initially not a number, convert
+ * Only free if not a pointer into the DSDT
*/
- status = acpi_aml_resolve_to_value (&val_desc, walk_state);
- if (ACPI_SUCCESS (status) &&
- (val_desc->common.type != ACPI_TYPE_NUMBER))
- {
- /*
- * Conversion successful but still not a number
- */
- status = AE_AML_OPERAND_TYPE;
- }
+ acpi_cm_free(target_desc->string.pointer);
}
- break;
+ target_desc->string.pointer = acpi_cm_allocate (length + 1);
+ target_desc->string.length = length;
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
+ if (!target_desc->string.pointer) {
+ return (AE_NO_MEMORY);
+ }
- /*
- * Storing into a Field in a region or into a buffer or into
- * a string all is essentially the same.
- *
- * If value is not a valid type, try to resolve it to one.
- */
+ MEMCPY(target_desc->string.pointer, buffer, length);
+ }
- if ((val_desc->common.type != ACPI_TYPE_NUMBER) &&
- (val_desc->common.type != ACPI_TYPE_BUFFER) &&
- (val_desc->common.type != ACPI_TYPE_STRING))
- {
- /*
- * Initially not a valid type, convert
- */
- status = acpi_aml_resolve_to_value (&val_desc, walk_state);
- if (ACPI_SUCCESS (status) &&
- (val_desc->common.type != ACPI_TYPE_NUMBER) &&
- (val_desc->common.type != ACPI_TYPE_BUFFER) &&
- (val_desc->common.type != ACPI_TYPE_STRING))
- {
- /*
- * Conversion successful but still not a valid type
- */
- status = AE_AML_OPERAND_TYPE;
- }
- }
- break;
+ return (AE_OK);
+}
- default:
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_copy_integer_to_index_field
+ *
+ * PARAMETERS: Source_desc - Source object to copy
+ * Target_desc - Destination object of the copy
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write an Integer to an Index Field
+ *
+ ******************************************************************************/
- /*
- * TBD: [Unhandled] What other combinations must be implemented?
- */
- status = AE_NOT_IMPLEMENTED;
- break;
- }
+ACPI_STATUS
+acpi_aml_copy_integer_to_index_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc)
+{
+ ACPI_STATUS status;
+ u8 locked;
+
+
+ /*
+ * Get the global lock if needed
+ */
+ locked = acpi_aml_acquire_global_lock (target_desc->index_field.lock_rule);
- /* Exit now if failure above */
+ /*
+ * Set Index value to select proper Data register
+ * perform the update (Set index)
+ */
+ status = acpi_aml_access_named_field (ACPI_WRITE,
+ target_desc->index_field.index,
+ &target_desc->index_field.value,
+ sizeof (target_desc->index_field.value));
+ if (ACPI_SUCCESS (status)) {
+ /* Set_index was successful, next set Data value */
+
+ status = acpi_aml_access_named_field (ACPI_WRITE,
+ target_desc->index_field.data,
+ &source_desc->integer.value,
+ sizeof (source_desc->integer.value));
- if (ACPI_FAILURE (status)) {
- goto clean_up_and_bail_out;
}
+
+
/*
- * Acpi_everything is ready to execute now, We have
- * a value we can handle, just perform the update
+ * Release global lock if we acquired it earlier
*/
+ acpi_aml_release_global_lock (locked);
- switch (destination_type)
- {
+ return (status);
+}
- case ACPI_TYPE_STRING:
- /*
- * Perform the update
- */
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_copy_integer_to_bank_field
+ *
+ * PARAMETERS: Source_desc - Source object to copy
+ * Target_desc - Destination object of the copy
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write an Integer to a Bank Field
+ *
+ ******************************************************************************/
- switch (val_desc->common.type)
- {
- case ACPI_TYPE_NUMBER:
- buffer = (u8 *) &val_desc->number.value;
- length = sizeof (val_desc->number.value);
- break;
-
- case ACPI_TYPE_BUFFER:
- buffer = (u8 *) val_desc->buffer.pointer;
- length = val_desc->buffer.length;
- break;
-
- case ACPI_TYPE_STRING:
- buffer = (u8 *) val_desc->string.pointer;
- length = val_desc->string.length;
- break;
- }
+ACPI_STATUS
+acpi_aml_copy_integer_to_bank_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc)
+{
+ ACPI_STATUS status;
+ u8 locked;
- /*
- * Setting a string value replaces the old string
- */
- if (length < dest_desc->string.length) {
- /*
- * Zero fill, not willing to do pointer arithmetic for
- * architecture independence. Just clear the whole thing
- */
- MEMSET(dest_desc->string.pointer, 0, dest_desc->string.length);
- MEMCPY(dest_desc->string.pointer, buffer, length);
- }
- else {
- /*
- * Free the current buffer, then allocate a buffer
- * large enough to hold the value
- */
- if ( dest_desc->string.pointer &&
- !acpi_tb_system_table_pointer (dest_desc->string.pointer))
- {
- /*
- * Only free if not a pointer into the DSDT
- */
+ /*
+ * Get the global lock if needed
+ */
+ locked = acpi_aml_acquire_global_lock (target_desc->index_field.lock_rule);
- acpi_cm_free(dest_desc->string.pointer);
- }
- dest_desc->string.pointer = acpi_cm_allocate (length + 1);
- dest_desc->string.length = length;
+ /*
+ * Set Bank value to select proper Bank
+ * Perform the update (Set Bank Select)
+ */
- if (!dest_desc->string.pointer) {
- status = AE_NO_MEMORY;
- goto clean_up_and_bail_out;
- }
+ status = acpi_aml_access_named_field (ACPI_WRITE,
+ target_desc->bank_field.bank_select,
+ &target_desc->bank_field.value,
+ sizeof (target_desc->bank_field.value));
+ if (ACPI_SUCCESS (status)) {
+ /* Set bank select successful, set data value */
+
+ status = acpi_aml_access_named_field (ACPI_WRITE,
+ target_desc->bank_field.bank_select,
+ &source_desc->bank_field.value,
+ sizeof (source_desc->bank_field.value));
+ }
- MEMCPY(dest_desc->string.pointer, buffer, length);
- }
- break;
- case ACPI_TYPE_BUFFER:
+ /*
+ * Release global lock if we acquired it earlier
+ */
+ acpi_aml_release_global_lock (locked);
- /*
- * Perform the update to the buffer
- */
+ return (status);
+}
- switch (val_desc->common.type)
- {
- case ACPI_TYPE_NUMBER:
- buffer = (u8 *) &val_desc->number.value;
- length = sizeof (val_desc->number.value);
- break;
-
- case ACPI_TYPE_BUFFER:
- buffer = (u8 *) val_desc->buffer.pointer;
- length = val_desc->buffer.length;
- break;
-
- case ACPI_TYPE_STRING:
- buffer = (u8 *) val_desc->string.pointer;
- length = val_desc->string.length;
- break;
- }
- /*
- * If the buffer is uninitialized,
- * memory needs to be allocated for the copy.
- */
- if(0 == dest_desc->buffer.length) {
- dest_desc->buffer.pointer = acpi_cm_callocate(length);
- dest_desc->buffer.length = length;
-
- if (!dest_desc->buffer.pointer) {
- status = AE_NO_MEMORY;
- goto clean_up_and_bail_out;
- }
- }
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_copy_data_to_named_field
+ *
+ * PARAMETERS: Source_desc - Source object to copy
+ * Node - Destination Namespace node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy raw data to a Named Field. No implicit conversion
+ * is performed on the source object
+ *
+ ******************************************************************************/
- /*
- * Buffer is a static allocation,
- * only place what will fit in the buffer.
- */
- if (length <= dest_desc->buffer.length) {
- /*
- * Zero fill first, not willing to do pointer arithmetic for
- * architecture independence. Just clear the whole thing
- */
- MEMSET(dest_desc->buffer.pointer, 0, dest_desc->buffer.length);
- MEMCPY(dest_desc->buffer.pointer, buffer, length);
- }
- else {
- /*
- * truncate, copy only what will fit
- */
- MEMCPY(dest_desc->buffer.pointer, buffer, dest_desc->buffer.length);
- }
- break;
+ACPI_STATUS
+acpi_aml_copy_data_to_named_field (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_NAMESPACE_NODE *node)
+{
+ ACPI_STATUS status;
+ u8 locked;
+ u32 length;
+ u8 *buffer;
- case ACPI_TYPE_NUMBER:
- dest_desc->number.value = val_desc->number.value;
+ /*
+ * Named fields (Create_xxx_field) - We don't perform any conversions on the
+ * source operand, just use the raw data
+ */
+ switch (source_desc->common.type)
+ {
+ case ACPI_TYPE_INTEGER:
+ buffer = (u8 *) &source_desc->integer.value;
+ length = sizeof (source_desc->integer.value);
+ break;
- /* Truncate value if we are executing from a 32-bit ACPI table */
+ case ACPI_TYPE_BUFFER:
+ buffer = (u8 *) source_desc->buffer.pointer;
+ length = source_desc->buffer.length;
+ break;
- acpi_aml_truncate_for32bit_table (dest_desc, walk_state);
+ case ACPI_TYPE_STRING:
+ buffer = (u8 *) source_desc->string.pointer;
+ length = source_desc->string.length;
break;
default:
+ return (AE_TYPE);
+ }
- /*
- * All other types than Alias and the various Fields come here.
- * Store Val_desc as the new value of the Name, and set
- * the Name's type to that of the value being stored in it.
- * Val_desc reference count is incremented by Attach_object.
- */
+ /*
+ * Get the global lock if needed before the update
+ * TBD: not needed!
+ */
+ locked = acpi_aml_acquire_global_lock (source_desc->field.lock_rule);
- status = AE_NOT_IMPLEMENTED;
- break;
- }
+ status = acpi_aml_access_named_field (ACPI_WRITE,
+ node, buffer, length);
-clean_up_and_bail_out:
+ acpi_aml_release_global_lock (locked);
return (status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: Acpi_aml_copy_integer_to_field_unit
+ *
+ * PARAMETERS: Source_desc - Source object to copy
+ * Target_desc - Destination object of the copy
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write an Integer to a Field Unit.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+acpi_aml_copy_integer_to_field_unit (
+ ACPI_OPERAND_OBJECT *source_desc,
+ ACPI_OPERAND_OBJECT *target_desc)
+{
+ ACPI_STATUS status = AE_OK;
+ u8 *location = NULL;
+ u32 mask;
+ u32 new_value;
+ u8 locked = FALSE;
+
+
+ /*
+ * If the Field Buffer and Index have not been previously evaluated,
+ * evaluate them and save the results.
+ */
+ if (!(target_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_field_unit_arguments (target_desc);
+ if (ACPI_FAILURE (status)) {
+ return (status);
+ }
+ }
+
+ if ((!target_desc->field_unit.container ||
+ ACPI_TYPE_BUFFER != target_desc->field_unit.container->common.type))
+ {
+ return (AE_AML_INTERNAL);
+ }
+
+ /*
+ * Get the global lock if needed
+ */
+ locked = acpi_aml_acquire_global_lock (target_desc->field_unit.lock_rule);
+
+ /*
+ * TBD: [Unhandled] REMOVE this limitation
+ * Make sure the operation is within the limits of our implementation
+ * this is not a Spec limitation!!
+ */
+ if (target_desc->field_unit.length + target_desc->field_unit.bit_offset > 32) {
+ return (AE_NOT_IMPLEMENTED);
+ }
+
+ /* Field location is (base of buffer) + (byte offset) */
+
+ location = target_desc->field_unit.container->buffer.pointer
+ + target_desc->field_unit.offset;
+
+ /*
+ * Construct Mask with 1 bits where the field is,
+ * 0 bits elsewhere
+ */
+ mask = ((u32) 1 << target_desc->field_unit.length) - ((u32)1
+ << target_desc->field_unit.bit_offset);
+
+ /* Zero out the field in the buffer */
+
+ MOVE_UNALIGNED32_TO_32 (&new_value, location);
+ new_value &= ~mask;
+
+ /*
+ * Shift and mask the new value into position,
+ * and or it into the buffer.
+ */
+ new_value |= (source_desc->integer.value << target_desc->field_unit.bit_offset) &
+ mask;
+
+ /* Store back the value */
+
+ MOVE_UNALIGNED32_TO_32 (location, &new_value);
+
+ return (AE_OK);
+}
+
+
diff --git a/drivers/acpi/interpreter/amsystem.c b/drivers/acpi/interpreter/amsystem.c
index 9ad72c161..5e60538af 100644
--- a/drivers/acpi/interpreter/amsystem.c
+++ b/drivers/acpi/interpreter/amsystem.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amsystem - Interface to OS services
- * $Revision: 52 $
+ * $Revision: 54 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -202,7 +202,7 @@ acpi_aml_system_acquire_mutex (
}
status = acpi_aml_system_wait_semaphore (obj_desc->mutex.semaphore,
- (u32) time_desc->number.value);
+ (u32) time_desc->integer.value);
return (status);
}
@@ -299,7 +299,7 @@ acpi_aml_system_wait_event (
if (obj_desc) {
status = acpi_aml_system_wait_semaphore (obj_desc->event.semaphore,
- (u32) time_desc->number.value);
+ (u32) time_desc->integer.value);
}
diff --git a/drivers/acpi/interpreter/amutils.c b/drivers/acpi/interpreter/amutils.c
index 4e1359888..e3456099c 100644
--- a/drivers/acpi/interpreter/amutils.c
+++ b/drivers/acpi/interpreter/amutils.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amutils - interpreter/scanner utilities
- * $Revision: 66 $
+ * $Revision: 68 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,14 +46,11 @@ typedef struct internal_search_st
/* Used to traverse nested packages when copying*/
+/* TBD: This must be removed! */
INTERNAL_PKG_SEARCH_INFO copy_level[MAX_PACKAGE_DEPTH];
-static NATIVE_CHAR hex[] =
- {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
-
-
/*******************************************************************************
*
* FUNCTION: Acpi_aml_enter_interpreter
@@ -156,7 +153,7 @@ acpi_aml_truncate_for32bit_table (
*/
if ((!obj_desc) ||
- (obj_desc->common.type != ACPI_TYPE_NUMBER) ||
+ (obj_desc->common.type != ACPI_TYPE_INTEGER) ||
(!walk_state->method_node))
{
return;
@@ -167,7 +164,7 @@ acpi_aml_truncate_for32bit_table (
* We are running a method that exists in a 32-bit ACPI table.
* Truncate the value to 32 bits by zeroing out the upper 32-bit field
*/
- obj_desc->number.value &= (ACPI_INTEGER) ACPI_UINT32_MAX;
+ obj_desc->integer.value &= (ACPI_INTEGER) ACPI_UINT32_MAX;
}
}
@@ -343,10 +340,10 @@ acpi_aml_eisa_id_to_string (
out_string[0] = (char) ('@' + ((id >> 26) & 0x1f));
out_string[1] = (char) ('@' + ((id >> 21) & 0x1f));
out_string[2] = (char) ('@' + ((id >> 16) & 0x1f));
- out_string[3] = hex[(id >> 12) & 0xf];
- out_string[4] = hex[(id >> 8) & 0xf];
- out_string[5] = hex[(id >> 4) & 0xf];
- out_string[6] = hex[id & 0xf];
+ out_string[3] = acpi_gbl_hex_to_ascii[(id >> 12) & 0xf];
+ out_string[4] = acpi_gbl_hex_to_ascii[(id >> 8) & 0xf];
+ out_string[5] = acpi_gbl_hex_to_ascii[(id >> 4) & 0xf];
+ out_string[6] = acpi_gbl_hex_to_ascii[id & 0xf];
out_string[7] = 0;
return (AE_OK);
diff --git a/drivers/acpi/interpreter/amxface.c b/drivers/acpi/interpreter/amxface.c
index fd589d39b..20cf9820e 100644
--- a/drivers/acpi/interpreter/amxface.c
+++ b/drivers/acpi/interpreter/amxface.c
@@ -2,12 +2,12 @@
/******************************************************************************
*
* Module Name: amxface - External interpreter interfaces
- * $Revision: 22 $
+ * $Revision: 24 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index 27c02e22a..c2e6a71e0 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: nsaccess - Top-level functions for accessing ACPI namespace
- * $Revision: 117 $
+ * $Revision: 119 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -117,9 +117,9 @@ acpi_ns_root_initialize (void)
switch (init_val->type)
{
- case ACPI_TYPE_NUMBER:
+ case ACPI_TYPE_INTEGER:
- obj_desc->number.value =
+ obj_desc->integer.value =
(ACPI_INTEGER) STRTOUL (init_val->val, NULL, 10);
break;
@@ -239,7 +239,7 @@ acpi_ns_lookup (
ACPI_NAMESPACE_NODE **return_node)
{
ACPI_STATUS status;
- ACPI_NAMESPACE_NODE *prefix_node;
+ ACPI_NAMESPACE_NODE *prefix_node;
ACPI_NAMESPACE_NODE *current_node = NULL;
ACPI_NAMESPACE_NODE *scope_to_push = NULL;
ACPI_NAMESPACE_NODE *this_node = NULL;
@@ -248,8 +248,7 @@ acpi_ns_lookup (
u8 null_name_path = FALSE;
OBJECT_TYPE_INTERNAL type_to_check_for;
OBJECT_TYPE_INTERNAL this_search_type;
-
- DEBUG_ONLY_MEMBERS (u32 i)
+ u32 local_flags = flags & ~NS_ERROR_IF_FOUND;
if (!return_node) {
@@ -437,6 +436,7 @@ acpi_ns_lookup (
this_search_type = ACPI_TYPE_ANY;
if (!num_segments) {
this_search_type = type;
+ local_flags = flags;
}
/* Pluck one ACPI name from the front of the pathname */
@@ -447,7 +447,7 @@ acpi_ns_lookup (
status = acpi_ns_search_and_enter (simple_name, walk_state,
current_node, interpreter_mode,
- this_search_type, flags,
+ this_search_type, local_flags,
&this_node);
if (ACPI_FAILURE (status)) {
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 9f1d5377e..5cac5d111 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: nsalloc - Namespace allocation and deletion utilities
- * $Revision: 43 $
+ * $Revision: 45 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index fbba7840c..6c040d22e 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -2,12 +2,12 @@
*
* Module Name: nseval - Object evaluation interfaces -- includes control
* method lookup and execution.
- * $Revision: 79 $
+ * $Revision: 81 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index ddacd7b8c..76b535205 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: nsinit - namespace initialization
- * $Revision: 9 $
+ * $Revision: 12 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -239,7 +239,9 @@ acpi_ns_init_one_device (
status = acpi_cm_execute_STA (node, &flags);
if (ACPI_FAILURE (status)) {
- return (status);
+ /* Ignore error and move on to next device */
+
+ return (AE_OK);
}
info->num_STA++;
@@ -260,12 +262,15 @@ acpi_ns_init_one_device (
}
else if (ACPI_FAILURE (status)) {
- return (status);
+ /* Ignore error and move on to next device */
+
}
else {
+ /* Count of successful INIs */
+
info->num_INI++;
}
- return (status);
+ return (AE_OK);
}
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index 28a26376d..f0eaf273b 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
- * $Revision: 33 $
+ * $Revision: 35 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index faf8fe56a..c2fb49163 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: nsnames - Name manipulation and search
- * $Revision: 51 $
+ * $Revision: 53 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index 6d41b91ce..493cb1317 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -2,12 +2,12 @@
*
* Module Name: nsobject - Utilities for objects attached to namespace
* table entries
- * $Revision: 47 $
+ * $Revision: 49 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -205,7 +205,7 @@ acpi_ns_attach_object (
case AML_ZERO_OP: case AML_ONES_OP: case AML_ONE_OP:
case AML_BYTE_OP: case AML_WORD_OP: case AML_DWORD_OP:
- obj_type = ACPI_TYPE_NUMBER;
+ obj_type = ACPI_TYPE_INTEGER;
break;
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 001f57d9d..7a29e7542 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: nssearch - Namespace search
- * $Revision: 62 $
+ * $Revision: 64 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -70,8 +70,6 @@ acpi_ns_search_node (
ACPI_NAMESPACE_NODE *next_node;
-
-
/*
* Search for name in this table, which is to say that we must search
* for the name among the children of this object
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index 593064dbb..11457b33f 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -2,12 +2,12 @@
*
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
- * $Revision: 74 $
+ * $Revision: 77 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index 3682266b3..cf84be326 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: nswalk - Functions for walking the APCI namespace
- * $Revision: 17 $
+ * $Revision: 19 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 2947f7f06..957481a6d 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -2,12 +2,12 @@
*
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
- * $Revision: 73 $
+ * $Revision: 75 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index d4432ee8a..742c5da66 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -2,12 +2,12 @@
*
* Module Name: nsxfobj - Public interfaces to the ACPI subsystem
* ACPI Object oriented interfaces
- * $Revision: 75 $
+ * $Revision: 78 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -695,4 +695,3 @@ acpi_get_devices (
return (status);
}
-
diff --git a/drivers/acpi/os.c b/drivers/acpi/os.c
index 7bf86171b..172b6659b 100644
--- a/drivers/acpi/os.c
+++ b/drivers/acpi/os.c
@@ -24,8 +24,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/delay.h>
#include "acpi.h"
#include "driver.h"
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index 35d623668..da3ef9b30 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psargs - Parse AML opcode arguments
- * $Revision: 42 $
+ * $Revision: 43 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index a7f061363..90a73f45b 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psopcode - Parser opcode information table
- * $Revision: 24 $
+ * $Revision: 27 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -44,7 +44,7 @@
#define _PFX 0x6D
#define _UNKNOWN_OPCODE 0x02 /* An example unknown opcode */
-#define MAX_EXTENDED_OPCODE 0x87
+#define MAX_EXTENDED_OPCODE 0x88
#define NUM_EXTENDED_OPCODE MAX_EXTENDED_OPCODE + 1
#define MAX_INTERNAL_OPCODE
#define NUM_INTERNAL_OPCODE MAX_INTERNAL_OPCODE + 1
@@ -83,9 +83,11 @@
#define ARGP_WORD_OP ARGP_LIST1 (ARGP_WORDDATA)
#define ARGP_DWORD_OP ARGP_LIST1 (ARGP_DWORDDATA)
#define ARGP_STRING_OP ARGP_LIST1 (ARGP_CHARLIST)
+#define ARGP_QWORD_OP ARGP_LIST1 (ARGP_QWORDDATA)
#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
#define ARGP_BUFFER_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_BYTELIST)
#define ARGP_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST)
+#define ARGP_VAR_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST)
#define ARGP_METHOD_OP ARGP_LIST4 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_TERMLIST)
#define ARGP_LOCAL0 ARG_NONE
#define ARGP_LOCAL1 ARG_NONE
@@ -122,6 +124,8 @@
#define ARGP_FIND_SET_LEFT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
#define ARGP_FIND_SET_RIGHT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
#define ARGP_DEREF_OF_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_MOD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_NOTIFY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG)
#define ARGP_SIZE_OF_OP ARGP_LIST1 (ARGP_SUPERNAME)
#define ARGP_INDEX_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
@@ -131,12 +135,21 @@
#define ARGP_BYTE_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_TYPE_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_QWORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_LAND_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LOR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LNOT_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_LEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LGREATER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LLESS_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_TO_BUFFER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_DEC_STR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_HEX_STR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_INTEGER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_STRING_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_COPY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SIMPLENAME)
+#define ARGP_MID_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_IF_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_TERMLIST)
#define ARGP_ELSE_OP ARGP_LIST2 (ARGP_PKGLENGTH, ARGP_TERMLIST)
#define ARGP_WHILE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_TERMLIST)
@@ -149,6 +162,7 @@
#define ARGP_EVENT_OP ARGP_LIST1 (ARGP_NAME)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
#define ARGP_CREATE_FIELD_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_LOAD_TABLE_OP ARGP_LIST6 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LOAD_OP ARGP_LIST2 (ARGP_NAMESTRING, ARGP_SUPERNAME)
#define ARGP_STALL_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_SLEEP_OP ARGP_LIST1 (ARGP_TERMARG)
@@ -171,6 +185,7 @@
#define ARGP_THERMAL_ZONE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_OBJLIST)
#define ARGP_INDEX_FIELD_OP ARGP_LIST5 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_NAMESTRING,ARGP_BYTEDATA, ARGP_FIELDLIST)
#define ARGP_BANK_FIELD_OP ARGP_LIST6 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_NAMESTRING,ARGP_TERMARG, ARGP_BYTEDATA, ARGP_FIELDLIST)
+#define ARGP_DATA_REGION_OP ARGP_LIST4 (ARGP_NAMESTRING, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LNOTEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LLESSEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_LGREATEREQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
@@ -187,7 +202,7 @@
* All AML opcodes and the runtime arguments for each. Used by the AML interpreter Each list is compressed
* into a 32-bit number and stored in the master opcode table at the end of this file.
*
- * (Used by Acpi_aml_prep_operands procedure)
+ * (Used by Acpi_aml_prep_operands procedure and the ASL Compiler)
*/
#define ARGI_ZERO_OP ARG_NONE
@@ -198,9 +213,11 @@
#define ARGI_WORD_OP ARGI_INVALID_OPCODE
#define ARGI_DWORD_OP ARGI_INVALID_OPCODE
#define ARGI_STRING_OP ARGI_INVALID_OPCODE
+#define ARGI_QWORD_OP ARGI_INVALID_OPCODE
#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
#define ARGI_BUFFER_OP ARGI_INVALID_OPCODE
#define ARGI_PACKAGE_OP ARGI_INVALID_OPCODE
+#define ARGI_VAR_PACKAGE_OP ARGI_INVALID_OPCODE
#define ARGI_METHOD_OP ARGI_INVALID_OPCODE
#define ARGI_LOCAL0 ARG_NONE
#define ARGI_LOCAL1 ARG_NONE
@@ -218,40 +235,51 @@
#define ARGI_ARG5 ARG_NONE
#define ARGI_ARG6 ARG_NONE
#define ARGI_STORE_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_TARGETREF)
-#define ARGI_REF_OF_OP ARGI_LIST1 (ARGI_REFERENCE)
-#define ARGI_ADD_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_TARGETREF)
-#define ARGI_SUBTRACT_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_INCREMENT_OP ARGI_LIST1 (ARGI_REFERENCE)
-#define ARGI_DECREMENT_OP ARGI_LIST1 (ARGI_REFERENCE)
-#define ARGI_MULTIPLY_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_DIVIDE_OP ARGI_LIST4 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF, ARGI_TARGETREF)
-#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_AND_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_NAND_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_OR_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_NOR_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_XOR_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_BIT_NOT_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_FIND_SET_RIGHT_BIT_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_TARGETREF)
+#define ARGI_REF_OF_OP ARGI_LIST1 (ARGI_OBJECT_REF)
+#define ARGI_ADD_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
+#define ARGI_SUBTRACT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_INCREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_DECREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_MULTIPLY_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_DIVIDE_OP ARGI_LIST4 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF, ARGI_TARGETREF)
+#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_AND_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NAND_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_OR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NOR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_XOR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NOT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_FIND_SET_RIGHT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_DEREF_OF_OP ARGI_LIST1 (ARGI_REFERENCE)
-#define ARGI_NOTIFY_OP ARGI_LIST2 (ARGI_REFERENCE, ARGI_NUMBER)
+#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
+#define ARGI_MOD_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_NOTIFY_OP ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER)
#define ARGI_SIZE_OF_OP ARGI_LIST1 (ARGI_DATAOBJECT)
-#define ARGI_INDEX_OP ARGI_LIST3 (ARGI_COMPLEXOBJ, ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_MATCH_OP ARGI_LIST6 (ARGI_PACKAGE, ARGI_NUMBER, ARGI_NUMBER, ARGI_NUMBER, ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_DWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_NUMBER, ARGI_REFERENCE)
-#define ARGI_WORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_NUMBER, ARGI_REFERENCE)
-#define ARGI_BYTE_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_NUMBER, ARGI_REFERENCE)
-#define ARGI_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_NUMBER, ARGI_REFERENCE)
+#define ARGI_INDEX_OP ARGI_LIST3 (ARGI_COMPLEXOBJ, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_MATCH_OP ARGI_LIST6 (ARGI_PACKAGE, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_DWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_WORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_BYTE_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
#define ARGI_TYPE_OP ARGI_LIST1 (ARGI_ANYTYPE)
-#define ARGI_LAND_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_LOR_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_NUMBER)
-#define ARGI_LEQUAL_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_LGREATER_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_LLESS_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
+#define ARGI_QWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_LAND_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_LOR_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_LEQUAL_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_LGREATER_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_LLESS_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_TO_BUFFER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_DEC_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_HEX_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_INTEGER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
+#define ARGI_MID_OP ARGI_LIST4 (ARGI_BUFFERSTRING,ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
#define ARGI_IF_OP ARGI_INVALID_OPCODE
#define ARGI_ELSE_OP ARGI_INVALID_OPCODE
#define ARGI_WHILE_OP ARGI_INVALID_OPCODE
@@ -262,23 +290,24 @@
#define ARGI_ONES_OP ARG_NONE
#define ARGI_MUTEX_OP ARGI_INVALID_OPCODE
#define ARGI_EVENT_OP ARGI_INVALID_OPCODE
-#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_REFERENCE, ARGI_TARGETREF)
-#define ARGI_CREATE_FIELD_OP ARGI_LIST4 (ARGI_BUFFER, ARGI_NUMBER, ARGI_NUMBER, ARGI_REFERENCE)
+#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CREATE_FIELD_OP ARGI_LIST4 (ARGI_BUFFER, ARGI_INTEGER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_TARGETREF)
#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION, ARGI_TARGETREF)
-#define ARGI_STALL_OP ARGI_LIST1 (ARGI_NUMBER)
-#define ARGI_SLEEP_OP ARGI_LIST1 (ARGI_NUMBER)
-#define ARGI_ACQUIRE_OP ARGI_LIST2 (ARGI_MUTEX, ARGI_NUMBER)
+#define ARGI_STALL_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_SLEEP_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_ACQUIRE_OP ARGI_LIST2 (ARGI_MUTEX, ARGI_INTEGER)
#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
-#define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_NUMBER)
+#define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_INTEGER)
#define ARGI_RESET_OP ARGI_LIST1 (ARGI_EVENT)
#define ARGI_RELEASE_OP ARGI_LIST1 (ARGI_MUTEX)
-#define ARGI_FROM_BCD_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_TARGETREF)
-#define ARGI_TO_BCD_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_TARGETREF)
+#define ARGI_FROM_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_TO_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_FIXED_TARGET)
#define ARGI_UNLOAD_OP ARGI_LIST1 (ARGI_DDBHANDLE)
#define ARGI_REVISION_OP ARG_NONE
#define ARGI_DEBUG_OP ARG_NONE
-#define ARGI_FATAL_OP ARGI_LIST3 (ARGI_NUMBER, ARGI_NUMBER, ARGI_NUMBER)
-#define ARGI_REGION_OP ARGI_LIST2 (ARGI_NUMBER, ARGI_NUMBER)
+#define ARGI_FATAL_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_REGION_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_DEF_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_DEVICE_OP ARGI_INVALID_OPCODE
#define ARGI_PROCESSOR_OP ARGI_INVALID_OPCODE
@@ -286,6 +315,7 @@
#define ARGI_THERMAL_ZONE_OP ARGI_INVALID_OPCODE
#define ARGI_INDEX_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_BANK_FIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_DATA_REGION_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_STRING)
#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE
#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE
#define ARGI_LGREATEREQUAL_OP ARGI_INVALID_OPCODE
@@ -307,8 +337,8 @@ static ACPI_OPCODE_INFO aml_op_info[] =
{
/* Index Opcode Type Class Has Arguments? Name Parser Args Interpreter Args */
-/* 00 */ /* AML_ZERO_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "Zero_op", ARGP_ZERO_OP, ARGI_ZERO_OP),
-/* 01 */ /* AML_ONE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "One_op", ARGP_ONE_OP, ARGI_ONE_OP),
+/* 00 */ /* AML_ZERO_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "Zero", ARGP_ZERO_OP, ARGI_ZERO_OP),
+/* 01 */ /* AML_ONE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "One", ARGP_ONE_OP, ARGI_ONE_OP),
/* 02 */ /* AML_ALIAS_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP),
/* 03 */ /* AML_NAME_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Name", ARGP_NAME_OP, ARGI_NAME_OP),
/* 04 */ /* AML_BYTE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_LITERAL| AML_NO_ARGS, "Byte_const", ARGP_BYTE_OP, ARGI_BYTE_OP),
@@ -337,7 +367,7 @@ static ACPI_OPCODE_INFO aml_op_info[] =
/* 1_b */ /* AML_STORE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "Store", ARGP_STORE_OP, ARGI_STORE_OP),
/* 1_c */ /* AML_REF_OF_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2| AML_HAS_ARGS, "Ref_of", ARGP_REF_OF_OP, ARGI_REF_OF_OP),
/* 1_d */ /* AML_ADD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Add", ARGP_ADD_OP, ARGI_ADD_OP),
-/* 1_e */ /* AML_CONCAT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Concat", ARGP_CONCAT_OP, ARGI_CONCAT_OP),
+/* 1_e */ /* AML_CONCAT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP),
/* 1_f */ /* AML_SUBTRACT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP),
/* 20 */ /* AML_INCREMENT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2| AML_HAS_ARGS, "Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP),
/* 21 */ /* AML_DECREMENT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2| AML_HAS_ARGS, "Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP),
@@ -376,7 +406,7 @@ static ACPI_OPCODE_INFO aml_op_info[] =
/* 42 */ /* AML_RETURN_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONTROL| AML_HAS_ARGS, "Return", ARGP_RETURN_OP, ARGI_RETURN_OP),
/* 43 */ /* AML_BREAK_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONTROL| AML_NO_ARGS, "Break", ARGP_BREAK_OP, ARGI_BREAK_OP),
/* 44 */ /* AML_BREAK_POINT_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONTROL| AML_NO_ARGS, "Break_point", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP),
-/* 45 */ /* AML_ONES_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "Ones_op", ARGP_ONES_OP, ARGI_ONES_OP),
+/* 45 */ /* AML_ONES_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONSTANT| AML_NO_ARGS, "Ones", ARGP_ONES_OP, ARGI_ONES_OP),
/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
@@ -402,7 +432,7 @@ static ACPI_OPCODE_INFO aml_op_info[] =
/* 59 */ /* AML_DEF_FIELD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Field", ARGP_DEF_FIELD_OP, ARGI_DEF_FIELD_OP),
/* 5_a */ /* AML_DEVICE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP),
/* 5_b */ /* AML_PROCESSOR_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP),
-/* 5_c */ /* AML_POWER_RES_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Power_res", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP),
+/* 5_c */ /* AML_POWER_RES_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Power_resource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP),
/* 5_d */ /* AML_THERMAL_ZONE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Thermal_zone", ARGP_THERMAL_ZONE_OP, ARGI_THERMAL_ZONE_OP),
/* 5_e */ /* AML_INDEX_FIELD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Index_field", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP),
/* 5_f */ /* AML_BANK_FIELD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_NAMED_OBJECT| AML_HAS_ARGS, "Bank_field", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP),
@@ -423,6 +453,26 @@ static ACPI_OPCODE_INFO aml_op_info[] =
/* 6_b */ /* UNKNOWN OPCODES */ OP_INFO_ENTRY (ACPI_OP_TYPE_UNKNOWN | OPTYPE_BOGUS| AML_HAS_ARGS, "UNKNOWN_OP!", ARG_NONE, ARG_NONE),
/* 6_c */ /* ASCII CHARACTERS */ OP_INFO_ENTRY (ACPI_OP_TYPE_ASCII | OPTYPE_BOGUS| AML_HAS_ARGS, "ASCII_ONLY!", ARG_NONE, ARG_NONE),
/* 6_d */ /* PREFIX CHARACTERS */ OP_INFO_ENTRY (ACPI_OP_TYPE_PREFIX | OPTYPE_BOGUS| AML_HAS_ARGS, "PREFIX_ONLY!", ARG_NONE, ARG_NONE),
+
+
+/* ACPI 2.0 (new) opcodes */
+
+/* 6_e */ /* AML_QWORD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_LITERAL| AML_NO_ARGS, "Qword_const", ARGP_QWORD_OP, ARGI_QWORD_OP),
+/* 6_f */ /* AML_VAR_PACKAGE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DATA_TERM| AML_HAS_ARGS, "Var_package", ARGP_VAR_PACKAGE_OP, ARGI_VAR_PACKAGE_OP),
+/* 70 */ /* AML_CONCAT_RES_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Concat_res", ARGP_CONCAT_RES_OP, ARGI_CONCAT_RES_OP),
+/* 71 */ /* AML_MOD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_DYADIC2_r| AML_HAS_ARGS, "Mod", ARGP_MOD_OP, ARGI_MOD_OP),
+/* 72 */ /* AML_QWORD_FIELD_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CREATE_FIELD| AML_HAS_ARGS, "Create_qWord_field", ARGP_QWORD_FIELD_OP, ARGI_QWORD_FIELD_OP),
+/* 73 */ /* AML_TO_BUFFER_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "To_buffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP),
+/* 74 */ /* AML_TO_DEC_STR_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "To_dec_string", ARGP_TO_DEC_STR_OP, ARGI_TO_DEC_STR_OP),
+/* 75 */ /* AML_TO_HEX_STR_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "To_hex_string", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP),
+/* 76 */ /* AML_TO_INTEGER_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "To_integer", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP),
+/* 77 */ /* AML_TO_STRING_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "To_string", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP),
+/* 78 */ /* AML_COPY_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "Copy", ARGP_COPY_OP, ARGI_COPY_OP),
+/* 79 */ /* AML_MID_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "Mid", ARGP_MID_OP, ARGI_MID_OP),
+/* 7_a */ /* AML_CONTINUE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_CONTROL| AML_NO_ARGS, "Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP),
+/* 7_b */ /* AML_LOAD_TABLE_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "Load_table", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP),
+/* 7_c */ /* AML_DATA_REGION_OP */ OP_INFO_ENTRY (ACPI_OP_TYPE_OPCODE | OPTYPE_MONADIC2_r| AML_HAS_ARGS, "Data_op_region", ARGP_DATA_REGION_OP, ARGI_DATA_REGION_OP),
+
};
/*
@@ -433,9 +483,10 @@ static ACPI_OPCODE_INFO aml_op_info[] =
static u8 aml_short_op_info_index[256] =
{
/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
-/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, _UNK, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, _UNK, 0x0b, _UNK, _UNK, _UNK,
+/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
@@ -449,10 +500,10 @@ static u8 aml_short_op_info_index[256] =
/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
-/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, _UNK, _UNK, 0x2f, 0x30,
-/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, _UNK,
-/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, _UNK, _UNK,
-/* 0x98 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
+/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
+/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
+/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
@@ -471,10 +522,11 @@ static u8 aml_short_op_info_index[256] =
static u8 aml_long_op_info_index[NUM_EXTENDED_OPCODE] =
{
/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x30 */ 0x55, 0x56, 0x57, _UNK, _UNK, _UNK, _UNK, _UNK,
@@ -488,13 +540,10 @@ static u8 aml_long_op_info_index[NUM_EXTENDED_OPCODE] =
/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+/* 0x88 */ 0x7C,
};
-/* 0 1 2 3 4 5 6 7 */
-/* 0x00 */
-
-
/*******************************************************************************
*
* FUNCTION: Acpi_ps_get_opcode_info
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index 7471efdcb..7373013da 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psparse - Parser top level AML parse routines
- * $Revision: 71 $
+ * $Revision: 74 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -537,6 +537,12 @@ acpi_ps_parse_loop (
*/
status = acpi_ds_get_predicate_value (walk_state, NULL, TRUE);
+ if (ACPI_FAILURE (status) &&
+ ((status & AE_CODE_MASK) != AE_CODE_CONTROL))
+ {
+ return (status);
+ }
+
status = acpi_ps_next_parse_state (walk_state, op, status);
}
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index 2e8926ad1..37b5d2265 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psscope - Parser scope stack management routines
- * $Revision: 22 $
+ * $Revision: 24 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index a22bb646d..dc2e6d4d7 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
- * $Revision: 25 $
+ * $Revision: 27 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index 3bac4a647..94518a920 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
- * $Revision: 30 $
+ * $Revision: 32 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index 04a75917d..48c11eff3 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
- * $Revision: 50 $
+ * $Revision: 52 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index a33ace051..328c755d9 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: psxface - Parser external interfaces
- * $Revision: 37 $
+ * $Revision: 40 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -121,6 +121,12 @@ acpi_psx_execute (
return (AE_NO_MEMORY);
}
+
+ /* Init new op with the method name and pointer back to the NS node */
+
+ acpi_ps_set_name (op, method_node->name);
+ op->node = method_node;
+
/*
* The walk of the parse tree is where we actually execute the method
*/
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0422bf94d..d24392895 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -81,12 +81,12 @@ proc_read_ac_adapter_status(char *page, char **start, off_t off,
buf.length = sizeof(obj);
buf.pointer = &obj;
if (!ACPI_SUCCESS(acpi_evaluate_object(ac_handle, "_PSR", NULL, &buf))
- || obj.type != ACPI_TYPE_NUMBER) {
+ || obj.type != ACPI_TYPE_INTEGER) {
p += sprintf(p, "Could not read AC status\n");
goto end;
}
- if (obj.number.value)
+ if (obj.integer.value)
p += sprintf(p, "on-line\n");
else
p += sprintf(p, "off-line\n");
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
index e48666113..af91bc59f 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/resources/rsaddr.c
@@ -4,12 +4,12 @@
* Acpi_rs_address16_stream
* Acpi_rs_address32_resource
* Acpi_rs_address32_stream
- * $Revision: 12 $
+ * $Revision: 14 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 2874aa3d0..86ff91be8 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -2,12 +2,12 @@
*
* Module Name: rscalc - Acpi_rs_calculate_byte_stream_length
* Acpi_rs_calculate_list_length
- * $Revision: 16 $
+ * $Revision: 18 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index bc95686a7..f221a41dc 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -3,12 +3,12 @@
* Module Name: rscreate - Acpi_rs_create_resource_list
* Acpi_rs_create_pci_routing_table
* Acpi_rs_create_byte_stream
- * $Revision: 22 $
+ * $Revision: 24 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -223,9 +223,9 @@ acpi_rs_create_pci_routing_table (
/*
* Dereference the Address
*/
- if (ACPI_TYPE_NUMBER == (*sub_object_list)->common.type) {
+ if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) {
user_prt->data.address =
- (*sub_object_list)->number.value;
+ (*sub_object_list)->integer.value;
}
else {
@@ -237,9 +237,9 @@ acpi_rs_create_pci_routing_table (
*/
sub_object_list++;
- if (ACPI_TYPE_NUMBER == (*sub_object_list)->common.type) {
+ if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) {
user_prt->data.pin =
- (u32) (*sub_object_list)->number.value;
+ (u32) (*sub_object_list)->integer.value;
}
else {
@@ -267,7 +267,7 @@ acpi_rs_create_pci_routing_table (
* is NULL, since the entire buffer was zeroed
* out, we can leave this alone.
*/
- if (ACPI_TYPE_NUMBER == (*sub_object_list)->common.type) {
+ if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) {
/*
* Add to the Length field the length of
* the u32 NULL
@@ -289,9 +289,9 @@ acpi_rs_create_pci_routing_table (
*/
sub_object_list++;
- if (ACPI_TYPE_NUMBER == (*sub_object_list)->common.type) {
+ if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) {
user_prt->data.source_index =
- (u32) (*sub_object_list)->number.value;
+ (u32) (*sub_object_list)->integer.value;
}
else {
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index 073f4ddd4..03d2da6f2 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: rsdump - Functions do dump out the resource structures.
- * $Revision: 13 $
+ * $Revision: 15 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
index 0d6c507c0..58fcf8cd8 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/resources/rsio.c
@@ -6,12 +6,12 @@
* Acpi_rs_fixed_io_stream
* Acpi_rs_dma_resource
* Acpi_rs_dma_stream
- * $Revision: 10 $
+ * $Revision: 12 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
index 28a3a5045..eb47669ad 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/resources/rsirq.c
@@ -4,12 +4,12 @@
* Acpi_rs_irq_stream
* Acpi_rs_extended_irq_resource
* Acpi_rs_extended_irq_stream
- * $Revision: 11 $
+ * $Revision: 13 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 8e39ddded..de47563d0 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -2,12 +2,12 @@
*
* Module Name: rslist - Acpi_rs_byte_stream_to_list
* Acpi_list_to_byte_stream
- * $Revision: 8 $
+ * $Revision: 10 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
index 8aa4914f4..f9437e60c 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/resources/rsmemory.c
@@ -6,12 +6,12 @@
* Acpi_rs_fixed_memory32_resource
* Acpi_rs_memory32_range_stream
* Acpi_rs_fixed_memory32_stream
- * $Revision: 10 $
+ * $Revision: 12 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index 01bbb377e..be0a1a6a0 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -8,12 +8,12 @@
* Acpi_rs_end_dependent_functions_resource
* Acpi_rs_start_dependent_functions_stream
* Acpi_rs_end_dependent_functions_stream
- * $Revision: 10 $
+ * $Revision: 12 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 0a3f76668..2994bf0d3 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: rsutils - Utilities for the resource manager
- * $Revision: 12 $
+ * $Revision: 14 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 8691f8200..5c126aaa5 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -1,12 +1,12 @@
/*******************************************************************************
*
* Module Name: rsxface - Public interfaces to the ACPI subsystem
- * $Revision: 8 $
+ * $Revision: 10 $
*
******************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/sys.c b/drivers/acpi/sys.c
index 13648c255..2011174a1 100644
--- a/drivers/acpi/sys.c
+++ b/drivers/acpi/sys.c
@@ -27,18 +27,13 @@
#define _COMPONENT OS_DEPENDENT
MODULE_NAME ("sys")
-#define ACPI_SLP_TYP(typa, typb) (((int)(typa) << 8) | (int)(typb))
-#define ACPI_SLP_TYPA(value) ((value) >> 8)
-#define ACPI_SLP_TYPB(value) ((value) & 0xff)
-
struct acpi_enter_sx_ctx
{
wait_queue_head_t wait;
unsigned int state;
};
-volatile acpi_sstate_t acpi_sleep_state = ACPI_S0;
-static unsigned long acpi_slptyp[ACPI_S5 + 1] = {ACPI_INVALID,};
+volatile acpi_sstate_t acpi_sleep_state = ACPI_STATE_S0;
/*
* Enter system sleep state
@@ -50,56 +45,32 @@ acpi_enter_sx_async(void *context)
ACPI_OBJECT_LIST arg_list;
ACPI_OBJECT arg;
- /*
- * _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
- */
-
- // run the _PTS method
- memset(&arg_list, 0, sizeof(arg_list));
- arg_list.count = 1;
- arg_list.pointer = &arg;
-
- memset(&arg, 0, sizeof(arg));
- arg.type = ACPI_TYPE_NUMBER;
- arg.number.value = ctx->state;
+ acpi_enter_sleep_state(ctx->state);
- acpi_evaluate_object(NULL, "\\_PTS", &arg_list, NULL);
-
- // clear wake status by writing a 1
- acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_LOCK, WAK_STS, 1);
-
- acpi_sleep_state = ctx->state;
-
- // set ACPI_SLP_TYPA/b and ACPI_SLP_EN
- acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_LOCK, SLP_TYPE_A,
- ACPI_SLP_TYPA(acpi_slptyp[ctx->state]));
- acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_LOCK, SLP_TYPE_B,
- ACPI_SLP_TYPB(acpi_slptyp[ctx->state]));
- acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_LOCK, SLP_EN, 1);
-
- if (ctx->state != ACPI_S1) {
- /* we should have just shut off - what are we doing here? */
- printk(KERN_ERR "ACPI: S%d failed\n", ctx->state);
+ /* either we are in S1, or the transition failed, as the other states resume
+ from the waking vector */
+ if (ctx->state != ACPI_STATE_S1) {
+ printk(KERN_ERR "Could not enter S%d\n", ctx->state);
goto out;
}
- // wait until S1 is entered
+ /* wait until S1 is entered */
while (!(acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_LOCK, WAK_STS)))
safe_halt();
- // run the _WAK method
+ /* run the _WAK method */
memset(&arg_list, 0, sizeof(arg_list));
arg_list.count = 1;
arg_list.pointer = &arg;
memset(&arg, 0, sizeof(arg));
- arg.type = ACPI_TYPE_NUMBER;
- arg.number.value = ctx->state;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = ctx->state;
acpi_evaluate_object(NULL, "\\_WAK", &arg_list, NULL);
out:
- acpi_sleep_state = ACPI_S0;
+ acpi_sleep_state = ACPI_STATE_S0;
if (waitqueue_active(&ctx->wait))
wake_up_interruptible(&ctx->wait);
@@ -112,13 +83,9 @@ static void
acpi_power_off(void)
{
struct acpi_enter_sx_ctx ctx;
-
- if ((STRNCMP(acpi_fadt.header.signature, ACPI_FADT_SIGNATURE, ACPI_SIG_LEN) != 0)
- || acpi_slptyp[ACPI_S5] == ACPI_INVALID)
- return;
init_waitqueue_head(&ctx.wait);
- ctx.state = ACPI_S5;
+ ctx.state = ACPI_STATE_S5;
acpi_enter_sx_async(&ctx);
}
@@ -132,10 +99,6 @@ acpi_enter_sx(acpi_sstate_t state)
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
- if ((STRNCMP(acpi_fadt.header.signature, ACPI_FADT_SIGNATURE, ACPI_SIG_LEN) != 0)
- || acpi_slptyp[state] == ACPI_INVALID)
- return -EINVAL;
-
init_waitqueue_head(&ctx.wait);
ctx.state = state;
@@ -166,19 +129,14 @@ acpi_sys_init(void)
printk(KERN_INFO "ACPI: System firmware supports:");
- for (sx = ACPI_S0; sx <= ACPI_S5; sx++) {
- int ca_sx = (sx <= ACPI_S4) ? sx : (sx + 1);
+ for (sx = ACPI_STATE_S0; sx <= ACPI_STATE_S5; sx++) {
if (ACPI_SUCCESS(
- acpi_hw_obtain_sleep_type_register_data(ca_sx,
+ acpi_hw_obtain_sleep_type_register_data(sx,
&type_a,
&type_b))) {
- acpi_slptyp[sx] = ACPI_SLP_TYP(type_a, type_b);
printk(" S%d", sx);
}
- else {
- acpi_slptyp[sx] = ACPI_INVALID;
- }
}
printk("\n");
diff --git a/drivers/acpi/table.c b/drivers/acpi/table.c
index 8379b4204..de0f99a27 100644
--- a/drivers/acpi/table.c
+++ b/drivers/acpi/table.c
@@ -47,21 +47,6 @@ acpi_fetch_fadt(void)
return -ENODEV;
}
- if (acpi_fadt.plvl2_lat
- && acpi_fadt.plvl2_lat <= ACPI_MAX_P_LVL2_LAT) {
- acpi_c2_exit_latency
- = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl2_lat);
- acpi_c2_enter_latency
- = ACPI_MICROSEC_TO_TMR_TICKS(ACPI_TMR_HZ / 1000);
- }
- if (acpi_fadt.plvl3_lat
- && acpi_fadt.plvl3_lat <= ACPI_MAX_P_LVL3_LAT) {
- acpi_c3_exit_latency
- = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl3_lat);
- acpi_c3_enter_latency
- = ACPI_MICROSEC_TO_TMR_TICKS(acpi_fadt.plvl3_lat * 5);
- }
-
return 0;
}
@@ -71,11 +56,7 @@ acpi_fetch_fadt(void)
int
acpi_find_and_load_tables(u64 rsdp)
{
- if (ACPI_SUCCESS(acpi_load_tables(rsdp)))
- {
- printk(KERN_INFO "ACPI: System description tables loaded\n");
- }
- else {
+ if (!ACPI_SUCCESS(acpi_load_tables(rsdp))) {
printk(KERN_INFO "ACPI: System description table load failed\n");
acpi_terminate();
return -1;
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
index 624926d27..76d7fff15 100644
--- a/drivers/acpi/tables/tbconvrt.c
+++ b/drivers/acpi/tables/tbconvrt.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: tbconvrt - ACPI Table conversion utilities
- * $Revision: 15 $
+ * $Revision: 18 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,7 +47,7 @@
a.register_bit_width = (u8) MUL_8 (b);\
a.register_bit_offset = 0;\
a.reserved = 0;\
- a.address = (UINT64) c;}
+ ACPI_STORE_ADDRESS (a.address,c);}
/* ACPI V1.0 entries -- address space is always I/O */
@@ -126,8 +126,8 @@ acpi_tb_convert_to_xsdt (
new_table->table_offset_entry[i] =
((RSDT_DESCRIPTOR_REV071 *) table_info->pointer)->table_offset_entry[i];
#else
- new_table->table_offset_entry[i] =
- ((RSDT_DESCRIPTOR_REV1 *) table_info->pointer)->table_offset_entry[i];
+ ACPI_STORE_ADDRESS (new_table->table_offset_entry[i],
+ ((RSDT_DESCRIPTOR_REV1 *) table_info->pointer)->table_offset_entry[i]);
#endif
}
else {
@@ -312,11 +312,11 @@ acpi_tb_convert_table_fadt (void)
/* No 0.71 equivalence. Leave pre-zeroed. */
/* FADT2->Flush_stride = 0; */
- /* Processor’s duty cycle index in processor's P_CNT reg*/
+ /* Processor's duty cycle index in processor's P_CNT reg*/
/* No 0.71 equivalence. Leave pre-zeroed. */
/* FADT2->Duty_offset = 0; */
- /* Processor’s duty cycle value bit width in P_CNT register.*/
+ /* Processor's duty cycle value bit width in P_CNT register.*/
/* No 0.71 equivalence. Leave pre-zeroed. */
/* FADT2->Duty_width = 0; */
@@ -384,8 +384,8 @@ acpi_tb_convert_table_fadt (void)
/* Convert table pointers to 64-bit fields */
- FADT2->Xfirmware_ctrl = (UINT64) FADT1->firmware_ctrl;
- FADT2->Xdsdt = (UINT64) FADT1->dsdt;
+ ACPI_STORE_ADDRESS (FADT2->Xfirmware_ctrl, FADT1->firmware_ctrl);
+ ACPI_STORE_ADDRESS (FADT2->Xdsdt, FADT1->dsdt);
/* System Interrupt Model isn't used in ACPI 2.0*/
/* FADT2->Reserved1 = 0; */
@@ -448,6 +448,7 @@ acpi_tb_convert_table_fadt (void)
* Global FADT pointer will point to the common V2.0 FADT
*/
acpi_gbl_FADT = FADT2;
+ acpi_gbl_FADT->header.length = sizeof (FADT_DESCRIPTOR);
/* Free the original table */
@@ -464,8 +465,6 @@ acpi_tb_convert_table_fadt (void)
table_desc->length = sizeof (FADT_DESCRIPTOR_REV2);
- /* Dump the FADT Header */
-
/* Dump the entire FADT */
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
index 2cf8eede7..8e05e2c7d 100644
--- a/drivers/acpi/tables/tbget.c
+++ b/drivers/acpi/tables/tbget.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: tbget - ACPI Table get* routines
- * $Revision: 40 $
+ * $Revision: 43 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -243,7 +243,7 @@ acpi_tb_get_all_tables (
/* Get the table via the XSDT */
status = acpi_tb_get_table ((ACPI_PHYSICAL_ADDRESS)
- acpi_gbl_XSDT->table_offset_entry[index],
+ ACPI_GET_ADDRESS (acpi_gbl_XSDT->table_offset_entry[index]),
table_ptr, &table_info);
/* Ignore a table that failed verification */
@@ -326,7 +326,8 @@ acpi_tb_get_all_tables (
* Get the DSDT (We know that the FADT is valid now)
*/
- status = acpi_tb_get_table (acpi_gbl_FADT->Xdsdt, table_ptr, &table_info);
+ status = acpi_tb_get_table ((ACPI_PHYSICAL_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xdsdt),
+ table_ptr, &table_info);
if (ACPI_FAILURE (status)) {
return (status);
}
@@ -472,13 +473,14 @@ acpi_tb_get_table_rsdt (
/* 0.71 RSDP has 64bit Rsdt address field */
physical_address = ((RSDP_DESCRIPTOR_REV071 *)acpi_gbl_RSDP)->rsdt_physical_address;
#else
- physical_address = acpi_gbl_RSDP->rsdt_physical_address;
+ physical_address = (ACPI_PHYSICAL_ADDRESS) acpi_gbl_RSDP->rsdt_physical_address;
#endif
table_signature = RSDT_SIG;
signature_length = sizeof (RSDT_SIG) -1;
}
else {
- physical_address = (ACPI_PHYSICAL_ADDRESS) acpi_gbl_RSDP->xsdt_physical_address;
+ physical_address = (ACPI_PHYSICAL_ADDRESS)
+ ACPI_GET_ADDRESS (acpi_gbl_RSDP->xsdt_physical_address);
table_signature = XSDT_SIG;
signature_length = sizeof (XSDT_SIG) -1;
}
@@ -586,7 +588,7 @@ acpi_tb_get_table_facs (
else {
/* Just map the physical memory to our address space */
- status = acpi_tb_map_acpi_table (acpi_gbl_FADT->Xfirmware_ctrl,
+ status = acpi_tb_map_acpi_table ((ACPI_PHYSICAL_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xfirmware_ctrl),
&size, &table_ptr);
if (ACPI_FAILURE(status)) {
return (status);
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index b3926a0e3..e753917b7 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: tbinstal - ACPI table installation and removal
- * $Revision: 34 $
+ * $Revision: 36 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index baae6fb4c..d89b1508c 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: tbutils - Table manipulation utilities
- * $Revision: 31 $
+ * $Revision: 33 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index ee9eba62a..98c99873c 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -2,12 +2,12 @@
*
* Module Name: tbxface - Public interfaces to the ACPI subsystem
* ACPI table oriented interfaces
- * $Revision: 32 $
+ * $Revision: 34 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index 4c31f1c37..606e8ad10 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -1,12 +1,12 @@
/******************************************************************************
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
- * $Revision: 33 $
+ * $Revision: 35 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 R. Byron Moore
+ * Copyright (C) 2000, 2001 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index 2157f1570..5b602f9ba 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -46,7 +46,7 @@ ifeq ($(CONFIG_ATM_FORE200E_SBA),y)
endif
endif
-obj-$(CONFIG_ATM_FORE200E) += fore200e.o $(FORE200E_FW_OBJS)
+obj-$(CONFIG_ATM_FORE200E) += fore_200e.o
EXTRA_CFLAGS=-g
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 5e8dc19f9..d0aac4c7d 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1820,7 +1820,6 @@ static int DAC960_BackMergeFunction(RequestQueue_T *RequestQueue,
Request->nr_segments < Controller->DriverScatterGatherLimit)
{
Request->nr_segments++;
- RequestQueue->elevator.nr_segments++;
return true;
}
return false;
@@ -1844,7 +1843,6 @@ static int DAC960_FrontMergeFunction(RequestQueue_T *RequestQueue,
Request->nr_segments < Controller->DriverScatterGatherLimit)
{
Request->nr_segments++;
- RequestQueue->elevator.nr_segments++;
return true;
}
return false;
@@ -1864,17 +1862,12 @@ static int DAC960_MergeRequestsFunction(RequestQueue_T *RequestQueue,
DAC960_Controller_T *Controller =
(DAC960_Controller_T *) RequestQueue->queuedata;
int TotalSegments = Request->nr_segments + NextRequest->nr_segments;
- int SameSegment = 0;
if (Request->bhtail->b_data + Request->bhtail->b_size
== NextRequest->bh->b_data)
- {
TotalSegments--;
- SameSegment = 1;
- }
if (TotalSegments > MaxSegments ||
TotalSegments > Controller->DriverScatterGatherLimit)
return false;
- RequestQueue->elevator.nr_segments -= SameSegment;
Request->nr_segments = TotalSegments;
return true;
}
@@ -2834,6 +2827,7 @@ static void DAC960_RequestFunction(RequestQueue_T *RequestQueue)
static inline void DAC960_ProcessCompletedBuffer(BufferHeader_T *BufferHeader,
boolean SuccessfulIO)
{
+ blk_finished_io(BufferHeader->b_size >> 9);
BufferHeader->b_end_io(BufferHeader, SuccessfulIO);
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 35afe2a7f..47937edcc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1086,6 +1086,7 @@ static inline void complete_buffers( struct buffer_head *bh, int status)
{
xbh = bh->b_reqnext;
bh->b_reqnext = NULL;
+ blk_finished_io(bh->b_size >> 9);
bh->b_end_io(bh, status);
bh = xbh;
}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index afbf60ddc..7016cfff4 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -140,23 +140,7 @@ static int ida_release(struct inode *inode, struct file *filep);
static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io);
-static void do_ida_request(int i);
-/*
- * This is a hack. This driver eats a major number for each controller, and
- * sets blkdev[xxx].request_fn to each one of these so the real request
- * function knows what controller its working with.
- */
-#define DO_IDA_REQUEST(x) { do_ida_request(x); }
-
-static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0);
-static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1);
-static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2);
-static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3);
-static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4);
-static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5);
-static void do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6);
-static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7);
-
+static void do_ida_request(request_queue_t *q);
static void start_io(ctlr_info_t *h);
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -362,6 +346,47 @@ void cleanup_module(void)
}
#endif /* MODULE */
+static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
+ int max_segments)
+{
+ if (rq->nr_segments < SG_MAX) {
+ rq->nr_segments++;
+ return 1;
+ }
+ return 0;
+}
+
+static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
+ struct buffer_head *bh, int max_segments)
+{
+ if (rq->bhtail->b_data + rq->bhtail->b_size == bh->b_data)
+ return 1;
+ return cpq_new_segment(q, rq, max_segments);
+}
+
+static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
+ struct buffer_head *bh, int max_segments)
+{
+ if (bh->b_data + bh->b_size == rq->bh->b_data)
+ return 1;
+ return cpq_new_segment(q, rq, max_segments);
+}
+
+static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
+ struct request *nxt, int max_segments)
+{
+ int total_segments = rq->nr_segments + nxt->nr_segments;
+
+ if (rq->bhtail->b_data + rq->bhtail->b_size == nxt->bh->b_data)
+ total_segments--;
+
+ if (total_segments > SG_MAX)
+ return 0;
+
+ rq->nr_segments = total_segments;
+ return 1;
+}
+
/*
* This is it. Find all the controllers and register them. I really hate
* stealing all these major device numbers.
@@ -369,12 +394,7 @@ void cleanup_module(void)
*/
int __init cpqarray_init(void)
{
- void (*request_fns[MAX_CTLR])(request_queue_t *) = {
- do_ida_request0, do_ida_request1,
- do_ida_request2, do_ida_request3,
- do_ida_request4, do_ida_request5,
- do_ida_request6, do_ida_request7,
- };
+ request_queue_t *q;
int i,j;
int num_cntlrs_reg = 0;
@@ -495,16 +515,20 @@ int __init cpqarray_init(void)
hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
-
ida_procinit(i);
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i),
- request_fns[i]);
- blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0);
+ q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
+ q->queuedata = hba[i];
+ blk_init_queue(q, do_ida_request);
+ blk_queue_headactive(q, 0);
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
+ q->back_merge_fn = cpq_back_merge_fn;
+ q->front_merge_fn = cpq_front_merge_fn;
+ q->merge_requests_fn = cpq_merge_requests_fn;
+
ida_gendisk[i].major = MAJOR_NR + i;
ida_gendisk[i].major_name = "ida";
ida_gendisk[i].minor_shift = NWD_SHIFT;
@@ -872,37 +896,30 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
* are in here (either via the dummy do_ida_request functions or by being
* called from the interrupt handler
*/
-static void do_ida_request(int ctlr)
+static void do_ida_request(request_queue_t *q)
{
- ctlr_info_t *h = hba[ctlr];
+ ctlr_info_t *h = q->queuedata;
cmdlist_t *c;
int seg, sect;
char *lastdataend;
- struct list_head * queue_head;
+ struct list_head * queue_head = &q->queue_head;
struct buffer_head *bh;
struct request *creq;
- queue_head = &blk_dev[MAJOR_NR+ctlr].request_queue.queue_head;
-
- if (list_empty(queue_head))
- {
+ if (q->plugged || list_empty(queue_head)) {
start_io(h);
return;
}
creq = blkdev_entry_next_request(queue_head);
- if (creq->rq_status == RQ_INACTIVE)
- {
- start_io(h);
- return;
- }
-
+ if (creq->nr_segments > SG_MAX)
+ BUG();
- if (ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ||
- ctlr > nr_ctlr || h == NULL)
+ if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr)
{
printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
- ctlr, creq->rq_dev, creq);
+ h->ctlr, creq->rq_dev, creq);
+ blkdev_dequeue_request(creq);
complete_buffers(creq->bh, 0);
start_io(h);
return;
@@ -916,12 +933,12 @@ static void do_ida_request(int ctlr)
bh = creq->bh;
- c->ctlr = ctlr;
+ c->ctlr = h->ctlr;
c->hdr.unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
- c->req.hdr.blk = ida[(ctlr<<CTLR_SHIFT) + MINOR(creq->rq_dev)].start_sect + creq->sector;
+ c->req.hdr.blk = ida[(h->ctlr<<CTLR_SHIFT) + MINOR(creq->rq_dev)].start_sect + creq->sector;
c->bh = bh;
DBGPX(
if (bh == NULL)
@@ -933,21 +950,16 @@ DBGPX(
sect = 0;
while(bh) {
sect += bh->b_size/512;
-DBGPX(
- if (bh->b_size % 512) {
- printk("Oh damn. %d+%d, size = %d\n", creq->sector, sect, bh->b_size);
- panic("b_size %% 512 != 0");
- }
-);
if (bh->b_data == lastdataend) {
c->req.sg[seg-1].size += bh->b_size;
lastdataend += bh->b_size;
} else {
+ if (seg == SG_MAX)
+ BUG();
c->req.sg[seg].size = bh->b_size;
c->req.sg[seg].addr = (__u32)virt_to_bus(bh->b_data);
lastdataend = bh->b_data + bh->b_size;
- if (++seg == SG_MAX)
- break;
+ seg++;
}
bh = bh->b_reqnext;
}
@@ -955,30 +967,24 @@ DBGPX( printk("Submitting %d sectors in %d segments\n", sect, seg); );
c->req.hdr.sg_cnt = seg;
c->req.hdr.blk_cnt = sect;
- creq->sector += sect;
- creq->nr_sectors -= sect;
-
- /* Ready the next request:
- * Fix up creq if we still have more buffers in the buffer chain, or
- * mark this request as done and ready the next one.
+ /*
+ * Since we control our own merging, we know that this request
+ * is now fully setup and there's nothing left.
*/
- if (creq->nr_sectors) {
-DBGPX(
- if (bh==NULL) {
- printk("sector=%d, nr_sectors=%d, sect=%d, seg=%d\n",
- creq->sector, creq->nr_sectors, sect, seg);
- panic("mother...");
- }
-);
- creq->bh = bh->b_reqnext;
- bh->b_reqnext = NULL;
-DBGPX( printk("More to do on same request %p\n", creq); );
- } else {
-DBGPX( printk("Done with %p\n", creq); );
- blkdev_dequeue_request(creq);
- end_that_request_last(creq);
+ if (creq->nr_sectors != sect) {
+ printk("ida: %ld != %d sectors\n", creq->nr_sectors, sect);
+ BUG();
}
+ blkdev_dequeue_request(creq);
+
+ /*
+ * ehh, we can't really end the request here since it's not
+ * even started yet. for now it shouldn't hurt though
+ */
+DBGPX( printk("Done with %p\n", creq); );
+ end_that_request_last(creq);
+
c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@@ -1025,6 +1031,7 @@ static inline void complete_buffers(struct buffer_head *bh, int ok)
xbh = bh->b_reqnext;
bh->b_reqnext = NULL;
+ blk_finished_io(bh->b_size >> 9);
bh->b_end_io(bh, ok);
bh = xbh;
@@ -1072,7 +1079,6 @@ static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
unsigned long flags;
__u32 a,a1;
-
istat = h->access.intr_pending(h);
/* Is this interrupt for us? */
if (istat == 0)
@@ -1116,7 +1122,7 @@ static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
/*
* See if we can queue up some more IO
*/
- do_ida_request(h->ctlr);
+ do_ida_request(BLK_DEFAULT_QUEUE(MAJOR_NR + h->ctlr));
spin_unlock_irqrestore(&io_request_lock, flags);
}
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 1200773c2..9917ad055 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -24,125 +24,115 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
-/*
- * Order ascending, but only allow a request to be skipped a certain
- * number of times
- */
-void elevator_linus(struct request *req, elevator_t *elevator,
- struct list_head *real_head,
- struct list_head *head, int orig_latency)
-{
- struct list_head *entry = real_head;
- struct request *tmp;
-
- req->elevator_sequence = orig_latency;
-
- while ((entry = entry->prev) != head) {
- tmp = blkdev_entry_to_request(entry);
- if (IN_ORDER(tmp, req))
- break;
- if (!tmp->elevator_sequence)
- break;
- tmp->elevator_sequence--;
- }
- list_add(&req->queue, entry);
-}
-
int elevator_linus_merge(request_queue_t *q, struct request **req,
+ struct list_head * head,
struct buffer_head *bh, int rw,
- int *max_sectors, int *max_segments)
+ int max_sectors, int max_segments)
{
- struct list_head *entry, *head = &q->queue_head;
+ struct list_head *entry = &q->queue_head;
unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
- entry = head;
- if (q->head_active && !q->plugged)
- head = head->next;
-
while ((entry = entry->prev) != head) {
- struct request *__rq = *req = blkdev_entry_to_request(entry);
+ struct request *__rq = blkdev_entry_to_request(entry);
+
+ /*
+ * simply "aging" of requests in queue
+ */
+ if (__rq->elevator_sequence-- <= 0) {
+ *req = __rq;
+ break;
+ }
+
if (__rq->sem)
continue;
if (__rq->cmd != rw)
continue;
- if (__rq->nr_sectors + count > *max_sectors)
- continue;
if (__rq->rq_dev != bh->b_rdev)
continue;
+ if (__rq->nr_sectors + count > max_sectors)
+ continue;
+ if (__rq->elevator_sequence < count)
+ break;
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
ret = ELEVATOR_BACK_MERGE;
+ *req = __rq;
break;
- }
- if (!__rq->elevator_sequence)
- break;
- if (__rq->sector - count == bh->b_rsector) {
- __rq->elevator_sequence--;
+ } else if (__rq->sector - count == bh->b_rsector) {
ret = ELEVATOR_FRONT_MERGE;
+ __rq->elevator_sequence -= count;
+ *req = __rq;
break;
- }
+ } else if (!*req && BHRQ_IN_ORDER(bh, __rq))
+ *req = __rq;
}
+ return ret;
+}
+
+void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
+{
+ struct list_head *entry = &req->queue, *head = &q->queue_head;
+
/*
* second pass scan of requests that got passed over, if any
*/
- if (ret != ELEVATOR_NO_MERGE && *req) {
- while ((entry = entry->next) != &q->queue_head) {
- struct request *tmp = blkdev_entry_to_request(entry);
- tmp->elevator_sequence--;
- }
+ while ((entry = entry->next) != head) {
+ struct request *tmp = blkdev_entry_to_request(entry);
+ tmp->elevator_sequence -= count;
}
-
- return ret;
}
-/*
- * No request sorting, just add it to the back of the list
- */
-void elevator_noop(struct request *req, elevator_t *elevator,
- struct list_head *real_head, struct list_head *head,
- int orig_latency)
+void elevator_linus_merge_req(struct request *req, struct request *next)
{
- list_add_tail(&req->queue, real_head);
+ if (next->elevator_sequence < req->elevator_sequence)
+ req->elevator_sequence = next->elevator_sequence;
}
/*
- * See if we can find a request that is buffer can be coalesced with.
+ * See if we can find a request that this buffer can be coalesced with.
*/
int elevator_noop_merge(request_queue_t *q, struct request **req,
+ struct list_head * head,
struct buffer_head *bh, int rw,
- int *max_sectors, int *max_segments)
+ int max_sectors, int max_segments)
{
- struct list_head *entry, *head = &q->queue_head;
+ struct list_head *entry;
unsigned int count = bh->b_size >> 9;
- if (q->head_active && !q->plugged)
- head = head->next;
+ if (list_empty(&q->queue_head))
+ return ELEVATOR_NO_MERGE;
- entry = head;
+ entry = &q->queue_head;
while ((entry = entry->prev) != head) {
- struct request *__rq = *req = blkdev_entry_to_request(entry);
- if (__rq->sem)
- continue;
+ struct request *__rq = blkdev_entry_to_request(entry);
+
if (__rq->cmd != rw)
continue;
- if (__rq->nr_sectors + count > *max_sectors)
- continue;
if (__rq->rq_dev != bh->b_rdev)
continue;
- if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
+ if (__rq->nr_sectors + count > max_sectors)
+ continue;
+ if (__rq->sem)
+ continue;
+ if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
+ *req = __rq;
return ELEVATOR_BACK_MERGE;
- if (__rq->sector - count == bh->b_rsector)
+ } else if (__rq->sector - count == bh->b_rsector) {
+ *req = __rq;
return ELEVATOR_FRONT_MERGE;
+ }
}
+
+ *req = blkdev_entry_to_request(q->queue_head.prev);
return ELEVATOR_NO_MERGE;
}
-/*
- * The noop "elevator" does not do any accounting
- */
-void elevator_noop_dequeue(struct request *req) {}
+void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
+
+void elevator_noop_merge_req(struct request *req, struct request *next) {}
int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
{
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index ef71dddc7..836a89f1e 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -19,6 +19,7 @@
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
@@ -38,8 +39,6 @@
extern int mac_floppy_init(void);
#endif
-extern int lvm_init(void);
-
/*
* For the allocated request tables
*/
@@ -118,6 +117,19 @@ int * max_readahead[MAX_BLKDEV];
*/
int * max_sectors[MAX_BLKDEV];
+/*
+ * queued sectors for all devices, used to make sure we don't fill all
+ * of memory with locked buffers
+ */
+atomic_t queued_sectors;
+
+/*
+ * high and low watermark for above
+ */
+static int high_queued_sectors, low_queued_sectors;
+static int batch_requests, queue_nr_requests;
+static DECLARE_WAIT_QUEUE_HEAD(blk_buffers_wait);
+
static inline int get_max_sectors(kdev_t dev)
{
if (!max_sectors[MAJOR(dev)])
@@ -125,7 +137,7 @@ static inline int get_max_sectors(kdev_t dev)
return max_sectors[MAJOR(dev)][MINOR(dev)];
}
-static inline request_queue_t *__blk_get_queue(kdev_t dev)
+inline request_queue_t *__blk_get_queue(kdev_t dev)
{
struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);
@@ -153,17 +165,14 @@ request_queue_t *blk_get_queue(kdev_t dev)
static int __blk_cleanup_queue(struct list_head *head)
{
- struct list_head *entry;
struct request *rq;
int i = 0;
if (list_empty(head))
return 0;
- entry = head->next;
do {
- rq = list_entry(entry, struct request, table);
- entry = entry->next;
+ rq = list_entry(head->next, struct request, table);
list_del(&rq->table);
kmem_cache_free(request_cachep, rq);
i++;
@@ -188,10 +197,12 @@ static int __blk_cleanup_queue(struct list_head *head)
**/
void blk_cleanup_queue(request_queue_t * q)
{
- int count = QUEUE_NR_REQUESTS;
+ int count = queue_nr_requests;
count -= __blk_cleanup_queue(&q->request_freelist[READ]);
count -= __blk_cleanup_queue(&q->request_freelist[WRITE]);
+ count -= __blk_cleanup_queue(&q->pending_freelist[READ]);
+ count -= __blk_cleanup_queue(&q->pending_freelist[WRITE]);
if (count)
printk("blk_cleanup_queue: leaked requests (%d)\n", count);
@@ -290,7 +301,6 @@ static inline int ll_new_segment(request_queue_t *q, struct request *req, int ma
{
if (req->nr_segments < max_segments) {
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
return 0;
@@ -316,18 +326,13 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next, int max_segments)
{
int total_segments = req->nr_segments + next->nr_segments;
- int same_segment;
- same_segment = 0;
- if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
+ if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
total_segments--;
- same_segment = 1;
- }
if (total_segments > max_segments)
return 0;
- q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
@@ -364,7 +369,7 @@ static inline void __generic_unplug_device(request_queue_t *q)
}
}
-static void generic_unplug_device(void *data)
+void generic_unplug_device(void *data)
{
request_queue_t *q = (request_queue_t *) data;
unsigned long flags;
@@ -379,19 +384,24 @@ static void blk_init_free_list(request_queue_t *q)
struct request *rq;
int i;
+ INIT_LIST_HEAD(&q->request_freelist[READ]);
+ INIT_LIST_HEAD(&q->request_freelist[WRITE]);
+ INIT_LIST_HEAD(&q->pending_freelist[READ]);
+ INIT_LIST_HEAD(&q->pending_freelist[WRITE]);
+ q->pending_free[READ] = q->pending_free[WRITE] = 0;
+
/*
- * Divide requests in half between read and write. This used to
- * be a 2/3 advantage for reads, but now reads can steal from
- * the write free list.
+ * Divide requests in half between read and write
*/
- for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
+ for (i = 0; i < queue_nr_requests; i++) {
rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
+ memset(rq, 0, sizeof(struct request));
rq->rq_status = RQ_INACTIVE;
list_add(&rq->table, &q->request_freelist[i & 1]);
}
init_waitqueue_head(&q->wait_for_request);
- spin_lock_init(&q->request_lock);
+ spin_lock_init(&q->queue_lock);
}
static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
@@ -426,14 +436,12 @@ static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
* blk_queue_headactive().
*
* Note:
- * blk_init_queue() must be paired with a blk_cleanup-queue() call
+ * blk_init_queue() must be paired with a blk_cleanup_queue() call
* when the block device is deactivated (such as at module unload).
**/
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
INIT_LIST_HEAD(&q->queue_head);
- INIT_LIST_HEAD(&q->request_freelist[READ]);
- INIT_LIST_HEAD(&q->request_freelist[WRITE]);
elevator_init(&q->elevator, ELEVATOR_LINUS);
blk_init_free_list(q);
q->request_fn = rfn;
@@ -455,7 +463,6 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
q->head_active = 1;
}
-
#define blkdev_free_rq(list) list_entry((list)->next, struct request, table);
/*
* Get a free request. io_request_lock must be held and interrupts
@@ -463,37 +470,16 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
*/
static inline struct request *get_request(request_queue_t *q, int rw)
{
- struct list_head *list = &q->request_freelist[rw];
- struct request *rq;
-
- /*
- * Reads get preferential treatment and are allowed to steal
- * from the write free list if necessary.
- */
- if (!list_empty(list)) {
- rq = blkdev_free_rq(list);
- goto got_rq;
- }
+ struct request *rq = NULL;
- /*
- * if the WRITE list is non-empty, we know that rw is READ
- * and that the READ list is empty. allow reads to 'steal'
- * from the WRITE list.
- */
- if (!list_empty(&q->request_freelist[WRITE])) {
- list = &q->request_freelist[WRITE];
- rq = blkdev_free_rq(list);
- goto got_rq;
+ if (!list_empty(&q->request_freelist[rw])) {
+ rq = blkdev_free_rq(&q->request_freelist[rw]);
+ list_del(&rq->table);
+ rq->rq_status = RQ_ACTIVE;
+ rq->special = NULL;
+ rq->q = q;
}
- return NULL;
-
-got_rq:
- list_del(&rq->table);
- rq->free_list = list;
- rq->rq_status = RQ_ACTIVE;
- rq->special = NULL;
- rq->q = q;
return rq;
}
@@ -581,40 +567,42 @@ inline void drive_stat_acct (kdev_t dev, int rw,
/*
* add-request adds a request to the linked list.
- * It disables interrupts (acquires the request spinlock) so that it can muck
- * with the request-lists in peace. Thus it should be called with no spinlocks
- * held.
+ * io_request_lock is held and interrupts disabled, as we muck with the
+ * request queue list.
*
* By this point, req->cmd is always either READ/WRITE, never READA,
* which is important for drive_stat_acct() above.
*/
-
static inline void add_request(request_queue_t * q, struct request * req,
- struct list_head *head, int lat)
+ struct list_head *insert_here)
{
int major;
drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
+ if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
+ spin_unlock_irq(&io_request_lock);
+ BUG();
+ }
+
/*
- * let selected elevator insert the request
+ * elevator indicated where it wants this request to be
+ * inserted at elevator_merge time
*/
- q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);
-
- /*
- * FIXME(eric) I don't understand why there is a need for this
- * special case code. It clearly doesn't fit any more with
- * the new queueing architecture, and it got added in 2.3.10.
- * I am leaving this in here until I hear back from the COMPAQ
- * people.
- */
+ list_add(&req->queue, insert_here);
+
major = MAJOR(req->rq_dev);
- if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
- (q->request_fn)(q);
- if (major >= COMPAQ_CISS_MAJOR+0 && major <= COMPAQ_CISS_MAJOR+7)
- (q->request_fn)(q);
if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
- (q->request_fn)(q);
+ q->request_fn(q);
+}
+
+void inline blk_refill_freelist(request_queue_t *q, int rw)
+{
+ if (q->pending_free[rw]) {
+ list_splice(&q->pending_freelist[rw], &q->request_freelist[rw]);
+ INIT_LIST_HEAD(&q->pending_freelist[rw]);
+ q->pending_free[rw] = 0;
+ }
}
/*
@@ -622,15 +610,34 @@ static inline void add_request(request_queue_t * q, struct request * req,
*/
void inline blkdev_release_request(struct request *req)
{
+ request_queue_t *q = req->q;
+ int rw = req->cmd;
+
req->rq_status = RQ_INACTIVE;
+ req->q = NULL;
/*
- * Request may not have originated from ll_rw_blk
+ * Request may not have originated from ll_rw_blk. if not,
+ * asumme it has free buffers and check waiters
*/
- if (req->free_list) {
- list_add(&req->table, req->free_list);
- req->free_list = NULL;
- wake_up(&req->q->wait_for_request);
+ if (q) {
+ /*
+ * we've released enough buffers to start I/O again
+ */
+ if (waitqueue_active(&blk_buffers_wait)
+ && atomic_read(&queued_sectors) < low_queued_sectors)
+ wake_up(&blk_buffers_wait);
+
+ /*
+ * Add to pending free list and batch wakeups
+ */
+ list_add(&req->table, &q->pending_freelist[rw]);
+
+ if (++q->pending_free[rw] >= batch_requests) {
+ int wake_up = q->pending_free[rw];
+ blk_refill_freelist(q, rw);
+ wake_up_nr(&q->wait_for_request, wake_up);
+ }
}
}
@@ -658,9 +665,10 @@ static void attempt_merge(request_queue_t * q,
* will have been updated to the appropriate number,
* and we shouldn't do it here too.
*/
- if(!(q->merge_requests_fn)(q, req, next, max_segments))
+ if (!q->merge_requests_fn(q, req, next, max_segments))
return;
+ q->elevator.elevator_merge_req_fn(req, next);
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
@@ -699,7 +707,7 @@ static int __make_request(request_queue_t * q, int rw,
int max_segments = MAX_SEGMENTS;
struct request * req = NULL, *freereq = NULL;
int rw_ahead, max_sectors, el_ret;
- struct list_head *head;
+ struct list_head *head, *insert_here;
int latency;
elevator_t *elevator = &q->elevator;
@@ -713,6 +721,7 @@ static int __make_request(request_queue_t * q, int rw,
rw = READ; /* drop into READ */
case READ:
case WRITE:
+ latency = elevator_request_latency(elevator, rw);
break;
default:
BUG();
@@ -741,38 +750,33 @@ static int __make_request(request_queue_t * q, int rw,
*/
max_sectors = get_max_sectors(bh->b_rdev);
- latency = elevator_request_latency(elevator, rw);
-
+again:
+ head = &q->queue_head;
/*
* Now we acquire the request spinlock, we have to be mega careful
* not to schedule or do something nonatomic
*/
-again:
spin_lock_irq(&io_request_lock);
- /*
- * skip first entry, for devices with active queue head
- */
- head = &q->queue_head;
- if (q->head_active && !q->plugged)
- head = head->next;
-
+ insert_here = head->prev;
if (list_empty(head)) {
q->plug_device_fn(q, bh->b_rdev); /* is atomic */
goto get_rq;
- }
+ } else if (q->head_active && !q->plugged)
+ head = head->next;
- el_ret = elevator->elevator_merge_fn(q, &req, bh, rw,
- &max_sectors, &max_segments);
+ el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw,
+ max_sectors, max_segments);
switch (el_ret) {
case ELEVATOR_BACK_MERGE:
if (!q->back_merge_fn(q, req, bh, max_segments))
break;
+ elevator->elevator_merge_cleanup_fn(q, req, count);
req->bhtail->b_reqnext = bh;
req->bhtail = bh;
req->nr_sectors = req->hard_nr_sectors += count;
- req->e = elevator;
+ blk_started_io(count);
drive_stat_acct(req->rq_dev, req->cmd, count, 0);
attempt_back_merge(q, req, max_sectors, max_segments);
goto out;
@@ -780,20 +784,29 @@ again:
case ELEVATOR_FRONT_MERGE:
if (!q->front_merge_fn(q, req, bh, max_segments))
break;
+ elevator->elevator_merge_cleanup_fn(q, req, count);
bh->b_reqnext = req->bh;
req->bh = bh;
req->buffer = bh->b_data;
req->current_nr_sectors = count;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += count;
- req->e = elevator;
+ blk_started_io(count);
drive_stat_acct(req->rq_dev, req->cmd, count, 0);
attempt_front_merge(q, head, req, max_sectors, max_segments);
goto out;
+
/*
* elevator says don't/can't merge. get new request
*/
case ELEVATOR_NO_MERGE:
+ /*
+ * use elevator hints as to where to insert the
+ * request. if no hints, just add it to the back
+ * of the queue
+ */
+ if (req)
+ insert_here = &req->queue;
break;
default:
@@ -802,10 +815,9 @@ again:
}
/*
- * Grab a free request from the freelist. Read first try their
- * own queue - if that is empty, we steal from the write list.
- * Writes must block if the write list is empty, and read aheads
- * are not crucial.
+ * Grab a free request from the freelist - if that is empty, check
+ * if we are doing read ahead and abort instead of blocking for
+ * a free slot.
*/
get_rq:
if (freereq) {
@@ -821,6 +833,7 @@ get_rq:
}
/* fill up the request-info, and add it to the queue */
+ req->elevator_sequence = latency;
req->cmd = rw;
req->errors = 0;
req->hard_sector = req->sector = sector;
@@ -833,13 +846,13 @@ get_rq:
req->bh = bh;
req->bhtail = bh;
req->rq_dev = bh->b_rdev;
- req->e = elevator;
- add_request(q, req, head, latency);
+ blk_started_io(count);
+ add_request(q, req, insert_here);
out:
- if (!q->plugged)
- (q->request_fn)(q);
if (freereq)
blkdev_release_request(freereq);
+ if (!q->plugged)
+ q->request_fn(q);
spin_unlock_irq(&io_request_lock);
return 0;
end_io:
@@ -886,13 +899,13 @@ void generic_make_request (int rw, struct buffer_head * bh)
int major = MAJOR(bh->b_rdev);
request_queue_t *q;
- if (!bh->b_end_io) BUG();
+ if (!bh->b_end_io)
+ BUG();
+
if (blk_size[major]) {
unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;
- unsigned int sector, count;
-
- count = bh->b_size >> 9;
- sector = bh->b_rsector;
+ unsigned long sector = bh->b_rsector;
+ unsigned int count = bh->b_size >> 9;
if (maxsector < count || maxsector - count < sector) {
bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
@@ -903,7 +916,7 @@ void generic_make_request (int rw, struct buffer_head * bh)
when mounting a device. */
printk(KERN_INFO
"attempt to access beyond end of device\n");
- printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
+ printk(KERN_INFO "%s: rw=%d, want=%ld, limit=%d\n",
kdevname(bh->b_rdev), rw,
(sector + count)>>1,
blk_size[major][MINOR(bh->b_rdev)]);
@@ -930,15 +943,13 @@ void generic_make_request (int rw, struct buffer_head * bh)
buffer_IO_error(bh);
break;
}
-
- }
- while (q->make_request_fn(q, rw, bh));
+ } while (q->make_request_fn(q, rw, bh));
}
/**
* submit_bh: submit a buffer_head to the block device later for I/O
- * @rw: whether to %READ or %WRITE, or mayve to %READA (read ahead)
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bh: The &struct buffer_head which describes the I/O
*
* submit_bh() is very similar in purpose to generic_make_request(), and
@@ -961,7 +972,7 @@ void submit_bh(int rw, struct buffer_head * bh)
* further remap this.
*/
bh->b_rdev = bh->b_dev;
- bh->b_rsector = bh->b_blocknr * (bh->b_size>>9);
+ bh->b_rsector = bh->b_blocknr * (bh->b_size >> 9);
generic_make_request(rw, bh);
@@ -1021,6 +1032,9 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
int correct_size;
int i;
+ if (!nr)
+ return;
+
major = MAJOR(bhs[0]->b_dev);
/* Determine correct block size for this device. */
@@ -1033,9 +1047,8 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
/* Verify requested block sizes. */
for (i = 0; i < nr; i++) {
- struct buffer_head *bh;
- bh = bhs[i];
- if (bh->b_size != correct_size) {
+ struct buffer_head *bh = bhs[i];
+ if (bh->b_size % correct_size) {
printk(KERN_NOTICE "ll_rw_block: device %s: "
"only %d-char blocks implemented (%u)\n",
kdevname(bhs[0]->b_dev),
@@ -1051,8 +1064,17 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
}
for (i = 0; i < nr; i++) {
- struct buffer_head *bh;
- bh = bhs[i];
+ struct buffer_head *bh = bhs[i];
+
+ /*
+ * don't lock any more buffers if we are above the high
+ * water mark. instead start I/O on the queued stuff.
+ */
+ if (atomic_read(&queued_sectors) >= high_queued_sectors) {
+ run_task_queue(&tq_disk);
+ wait_event(blk_buffers_wait,
+ atomic_read(&queued_sectors) < low_queued_sectors);
+ }
/* Only one thread can actually submit the I/O. */
if (test_and_set_bit(BH_Lock, &bh->b_state))
@@ -1096,12 +1118,25 @@ sorry:
extern int stram_device_init (void);
#endif
-/*
- * First step of what used to be end_request
+
+/**
+ * end_that_request_first - end I/O on one buffer.
+ * @req: the request being processed
+ * @uptodate: 0 for I/O error
+ * @name: the name printed for an I/O error
*
- * 0 means continue with end_that_request_last,
- * 1 means we are done
- */
+ * Description:
+ * Ends I/O on the first buffer attached to @req, and sets it up
+ * for the next buffer_head (if any) in the cluster.
+ *
+ * Return:
+ * 0 - we are done with this request, call end_that_request_last()
+ * 1 - still buffers pending for this request
+ *
+ * Caveat:
+ * Drivers implementing their own end_request handling must call
+ * blk_finished_io() appropriately.
+ **/
int end_that_request_first (struct request *req, int uptodate, char *name)
{
@@ -1115,6 +1150,7 @@ int end_that_request_first (struct request *req, int uptodate, char *name)
if ((bh = req->bh) != NULL) {
nsect = bh->b_size >> 9;
+ blk_finished_io(nsect);
req->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
bh->b_end_io(bh, uptodate);
@@ -1138,19 +1174,18 @@ int end_that_request_first (struct request *req, int uptodate, char *name)
void end_that_request_last(struct request *req)
{
- if (req->e) {
- printk("end_that_request_last called with non-dequeued req\n");
- BUG();
- }
if (req->sem != NULL)
up(req->sem);
blkdev_release_request(req);
}
+#define MB(kb) ((kb) << 10)
+
int __init blk_dev_init(void)
{
struct blk_dev_struct *dev;
+ int total_ram;
request_cachep = kmem_cache_create("blkdev_requests",
sizeof(struct request),
@@ -1165,6 +1200,44 @@ int __init blk_dev_init(void)
memset(ro_bits,0,sizeof(ro_bits));
memset(max_readahead, 0, sizeof(max_readahead));
memset(max_sectors, 0, sizeof(max_sectors));
+
+ atomic_set(&queued_sectors, 0);
+ total_ram = nr_free_pages() << (PAGE_SHIFT - 10);
+
+ /*
+ * Try to keep 128MB max hysteris. If not possible,
+ * use half of RAM
+ */
+ high_queued_sectors = (total_ram * 2) / 3;
+ low_queued_sectors = high_queued_sectors / 3;
+ if (high_queued_sectors - low_queued_sectors > MB(128))
+ low_queued_sectors = high_queued_sectors - MB(128);
+
+
+ /*
+ * make it sectors (512b)
+ */
+ high_queued_sectors <<= 1;
+ low_queued_sectors <<= 1;
+
+ /*
+ * Scale free request slots per queue too
+ */
+ total_ram = (total_ram + MB(32) - 1) & ~(MB(32) - 1);
+ if ((queue_nr_requests = total_ram >> 9) > QUEUE_NR_REQUESTS)
+ queue_nr_requests = QUEUE_NR_REQUESTS;
+
+ /*
+ * adjust batch frees according to queue length, with upper limit
+ */
+ if ((batch_requests = queue_nr_requests >> 3) > 32)
+ batch_requests = 32;
+
+ printk("block: queued sectors max/low %dkB/%dkB, %d slots per queue\n",
+ high_queued_sectors / 2,
+ low_queued_sectors / 2,
+ queue_nr_requests);
+
#ifdef CONFIG_AMIGA_Z2RAM
z2_init();
#endif
@@ -1268,9 +1341,6 @@ int __init blk_dev_init(void)
#ifdef CONFIG_SUN_JSFLASH
jsfd_init();
#endif
-#ifdef CONFIG_BLK_DEV_LVM
- lvm_init();
-#endif
return 0;
};
@@ -1279,9 +1349,12 @@ EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(__blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_pluggable);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(blkdev_release_request);
+EXPORT_SYMBOL(generic_unplug_device);
+EXPORT_SYMBOL(queued_sectors);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9b1ff3a32..172acadbe 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -392,7 +392,6 @@ static inline int pd_new_segment(request_queue_t *q, struct request *req, int ma
if (req->nr_segments < max_segments) {
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
return 0;
@@ -432,7 +431,6 @@ static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
if (total_segments > max_segments)
return 0;
- q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 53f6ca3df..b71703f9b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -346,7 +346,6 @@ static inline int pf_new_segment(request_queue_t *q, struct request *req, int ma
if (req->nr_segments < max_segments) {
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
return 0;
@@ -386,7 +385,6 @@ static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
if (total_segments > max_segments)
return 0;
- q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 1e555da55..cafe72afd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -295,11 +295,11 @@ MODULE_PARM(check_media_type, "i");
/* These are used to simplify getting data in from and back to user land */
#define IOCTL_IN(arg, type, in) \
- if (copy_from_user(&in, (type *) arg, sizeof in)) \
+ if (copy_from_user(&(in), (type *) (arg), sizeof (in))) \
return -EFAULT;
#define IOCTL_OUT(arg, type, out) \
- if (copy_to_user((type *) arg, &out, sizeof out)) \
+ if (copy_to_user((type *) (arg), &(out), sizeof (out))) \
return -EFAULT;
/* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in
diff --git a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c
index 942142832..ccd53dacc 100644
--- a/drivers/char/agp/agpgart_be.c
+++ b/drivers/char/agp/agpgart_be.c
@@ -1384,9 +1384,11 @@ static void via_cleanup(void)
aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge.previous_size);
- pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
previous_size->size_value);
+ /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+ * during reinitialization.
+ */
}
static void via_tlbflush(agp_memory * mem)
@@ -2373,9 +2375,10 @@ static int __init agp_find_supported_device(void)
if (i810_dev == NULL) {
printk(KERN_ERR PFX "agpgart: Detected an "
"Intel i815, but could not find the"
- " secondary device.\n");
- agp_bridge.type = NOT_SUPPORTED;
- return -ENODEV;
+ " secondary device.\n"
+ "Assuming user has added an external AGP"
+ " card\n");
+ break;
}
printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
"Chipset.\n");
diff --git a/drivers/char/drm/Config.in b/drivers/char/drm/Config.in
index c69d37db5..6d7649eb0 100644
--- a/drivers/char/drm/Config.in
+++ b/drivers/char/drm/Config.in
@@ -9,7 +9,8 @@ bool 'Direct Rendering Manager (XFree86 DRI support)' CONFIG_DRM
if [ "$CONFIG_DRM" != "n" ]; then
tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA
- tristate ' ATI Rage 128' CONFIG_DRM_R128
+ dep_tristate ' ATI Rage 128' CONFIG_DRM_R128 $CONFIG_AGP
+ dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP
dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
fi
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index d25e33d47..41017340b 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -42,16 +42,18 @@ else
endif
endif
-gamma-objs := gamma_drv.o gamma_dma.o
-tdfx-objs := tdfx_drv.o tdfx_context.o
-r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o r128_state.o
-ffb-objs := ffb_drv.o ffb_context.o
-mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o mga_state.o
-i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
+gamma-objs := gamma_drv.o gamma_dma.o
+tdfx-objs := tdfx_drv.o tdfx_context.o
+r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o r128_state.o
+ffb-objs := ffb_drv.o ffb_context.o
+mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o mga_state.o
+i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o radeon_state.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
+obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
@@ -96,5 +98,8 @@ i810.o: $(i810-objs) $(lib)
r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib)
+radeon.o: $(radeon-objs) $(lib)
+ $(LD) -r -o $@ $(radeon-objs) $(lib)
+
ffb.o: $(ffb-objs) $(lib)
$(LD) -r -o $@ $(ffb-objs) $(lib)
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index aeecca77b..101556540 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -83,6 +83,7 @@ typedef struct drm_clip_rect {
#include "mga_drm.h"
#include "i810_drm.h"
#include "r128_drm.h"
+#include "radeon_drm.h"
#ifdef CONFIG_DRM_SIS
#include "sis_drm.h"
#endif
@@ -298,100 +299,116 @@ typedef struct drm_agp_info {
unsigned short id_device;
} drm_agp_info_t;
-#define DRM_IOCTL_BASE 'd'
-#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
-
-
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
+
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
-#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
-#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
-#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
-#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
-#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x47, drm_mga_blit_t)
+#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
+#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x47, drm_mga_blit_t)
/* I810 specific ioctls */
-#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
-#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
-#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
-#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
-#define DRM_IOCTL_I810_DOCOPY DRM_IO ( 0x48)
+#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43)
+#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
+#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
+#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
/* Rage 128 specific ioctls */
-#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
-#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
-#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
-#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
-#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
-#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
-#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
-#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
-#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
-#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
-#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
-#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
-#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
-#define DRM_IOCTL_R128_PACKET DRM_IOWR(0x4e, drm_r128_packet_t)
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
+#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
+#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
+#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
+#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
+#define DRM_IOCTL_R128_PACKET DRM_IOWR(0x4e, drm_r128_packet_t)
+
+/* Radeon specific ioctls */
+#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41)
+#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43)
+#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44)
+#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47)
+#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_BLIT DRM_IOW( 0x4b, drm_radeon_blit_t)
+#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */
-#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t)
-#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_INIT DRM_IOWR( 0x53, drm_sis_agp_t)
-#define SIS_IOCTL_AGP_ALLOC DRM_IOWR( 0x54, drm_sis_mem_t)
-#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
-#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
-#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
-#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
+#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
+#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t)
+#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
+#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
+#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
+#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
#endif
#endif
diff --git a/drivers/char/drm/radeon_bufs.c b/drivers/char/drm/radeon_bufs.c
new file mode 100644
index 000000000..9a3093eb1
--- /dev/null
+++ b/drivers/char/drm/radeon_bufs.c
@@ -0,0 +1,298 @@
+/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include "drmP.h"
+#include "radeon_drv.h"
+#include "linux/un.h"
+
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+int radeon_addbufs_agp(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request.agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ offset = 0;
+
+ for (offset = 0;
+ entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + offset);
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
+ buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t),
+ DRM_MEM_BUFS);
+ memset(buf->dev_private, 0, buf->dev_priv_size);
+
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+
+ byte_count += PAGE_SIZE << page_order;
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
+ return -EFAULT;
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+#endif
+
+int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_buf_desc_t request;
+
+ if (!dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ if (request.flags & _DRM_AGP_BUFFER)
+ return radeon_addbufs_agp(inode, filp, cmd, arg);
+ else
+#endif
+ return -EINVAL;
+}
+
+int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+ if (request.count >= dma->buf_count) {
+ if (dma->flags & _DRM_DMA_USE_AGP) {
+ drm_map_t *map;
+
+ map = dev_priv->buffers;
+ if (!map) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, map->size,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ (unsigned long)map->offset);
+ up(&current->mm->mmap_sem);
+ } else {
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ up(&current->mm->mmap_sem);
+ }
+ if (virtual > -1024UL) {
+ /* Real error */
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
+ return -EFAULT;
+
+ return retcode;
+}
diff --git a/drivers/char/drm/radeon_context.c b/drivers/char/drm/radeon_context.c
new file mode 100644
index 000000000..e428dc22c
--- /dev/null
+++ b/drivers/char/drm/radeon_context.c
@@ -0,0 +1,215 @@
+/* radeon_context.c -- IOCTLs for Radeon contexts -*- linux-c -*-
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "radeon_drv.h"
+
+extern drm_ctx_t radeon_res_ctx;
+
+static int radeon_alloc_queue(drm_device_t *dev)
+{
+ return drm_ctxbitmap_next(dev);
+}
+
+int radeon_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ radeon_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int radeon_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up(&dev->context_wait);
+
+ return 0;
+}
+
+
+int radeon_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
+ return -EFAULT;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
+ return -EFAULT;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
+ return -EFAULT;
+ return 0;
+}
+
+
+int radeon_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ if ((ctx.handle = radeon_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = radeon_alloc_queue(dev);
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ if (ctx.handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ return -EFAULT;
+ if (ctx.flags==_DRM_CONTEXT_PRESERVED)
+ radeon_res_ctx.handle=ctx.handle;
+ return 0;
+}
+
+int radeon_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ return -EFAULT;
+	/* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return radeon_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int radeon_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ radeon_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int radeon_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ drm_ctxbitmap_free(dev, ctx.handle);
+
+ return 0;
+}
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
new file mode 100644
index 000000000..5d662bc08
--- /dev/null
+++ b/drivers/char/drm/radeon_cp.c
@@ -0,0 +1,1314 @@
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "radeon_drv.h"
+
+#include <linux/interrupt.h> /* For task queue support */
+#include <linux/delay.h>
+
+#define RADEON_FIFO_DEBUG 0
+
+
+/* CP microcode (from ATI) */
+static u32 radeon_cp_microcode[][2] = {
+ { 0x21007000, 0000000000 },
+ { 0x20007000, 0000000000 },
+ { 0x000000b4, 0x00000004 },
+ { 0x000000b8, 0x00000004 },
+ { 0x6f5b4d4c, 0000000000 },
+ { 0x4c4c427f, 0000000000 },
+ { 0x5b568a92, 0000000000 },
+ { 0x4ca09c6d, 0000000000 },
+ { 0xad4c4c4c, 0000000000 },
+ { 0x4ce1af3d, 0000000000 },
+ { 0xd8afafaf, 0000000000 },
+ { 0xd64c4cdc, 0000000000 },
+ { 0x4cd10d10, 0000000000 },
+ { 0x000f0000, 0x00000016 },
+ { 0x362f242d, 0000000000 },
+ { 0x00000012, 0x00000004 },
+ { 0x000f0000, 0x00000016 },
+ { 0x362f282d, 0000000000 },
+ { 0x000380e7, 0x00000002 },
+ { 0x04002c97, 0x00000002 },
+ { 0x000f0001, 0x00000016 },
+ { 0x333a3730, 0000000000 },
+ { 0x000077ef, 0x00000002 },
+ { 0x00061000, 0x00000002 },
+ { 0x00000021, 0x0000001a },
+ { 0x00004000, 0x0000001e },
+ { 0x00061000, 0x00000002 },
+ { 0x00000021, 0x0000001a },
+ { 0x00004000, 0x0000001e },
+ { 0x00061000, 0x00000002 },
+ { 0x00000021, 0x0000001a },
+ { 0x00004000, 0x0000001e },
+ { 0x00000017, 0x00000004 },
+ { 0x0003802b, 0x00000002 },
+ { 0x040067e0, 0x00000002 },
+ { 0x00000017, 0x00000004 },
+ { 0x000077e0, 0x00000002 },
+ { 0x00065000, 0x00000002 },
+ { 0x000037e1, 0x00000002 },
+ { 0x040067e1, 0x00000006 },
+ { 0x000077e0, 0x00000002 },
+ { 0x000077e1, 0x00000002 },
+ { 0x000077e1, 0x00000006 },
+ { 0xffffffff, 0000000000 },
+ { 0x10000000, 0000000000 },
+ { 0x0003802b, 0x00000002 },
+ { 0x040067e0, 0x00000006 },
+ { 0x00007675, 0x00000002 },
+ { 0x00007676, 0x00000002 },
+ { 0x00007677, 0x00000002 },
+ { 0x00007678, 0x00000006 },
+ { 0x0003802c, 0x00000002 },
+ { 0x04002676, 0x00000002 },
+ { 0x00007677, 0x00000002 },
+ { 0x00007678, 0x00000006 },
+ { 0x0000002f, 0x00000018 },
+ { 0x0000002f, 0x00000018 },
+ { 0000000000, 0x00000006 },
+ { 0x00000030, 0x00000018 },
+ { 0x00000030, 0x00000018 },
+ { 0000000000, 0x00000006 },
+ { 0x01605000, 0x00000002 },
+ { 0x00065000, 0x00000002 },
+ { 0x00098000, 0x00000002 },
+ { 0x00061000, 0x00000002 },
+ { 0x64c0603e, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00080000, 0x00000016 },
+ { 0000000000, 0000000000 },
+ { 0x0400251d, 0x00000002 },
+ { 0x00007580, 0x00000002 },
+ { 0x00067581, 0x00000002 },
+ { 0x04002580, 0x00000002 },
+ { 0x00067581, 0x00000002 },
+ { 0x00000049, 0x00000004 },
+ { 0x00005000, 0000000000 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00061000, 0x00000002 },
+ { 0x0000750e, 0x00000002 },
+ { 0x00019000, 0x00000002 },
+ { 0x00011055, 0x00000014 },
+ { 0x00000055, 0x00000012 },
+ { 0x0400250f, 0x00000002 },
+ { 0x0000504f, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00007565, 0x00000002 },
+ { 0x00007566, 0x00000002 },
+ { 0x00000058, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x01e655b4, 0x00000002 },
+ { 0x4401b0e4, 0x00000002 },
+ { 0x01c110e4, 0x00000002 },
+ { 0x26667066, 0x00000018 },
+ { 0x040c2565, 0x00000002 },
+ { 0x00000066, 0x00000018 },
+ { 0x04002564, 0x00000002 },
+ { 0x00007566, 0x00000002 },
+ { 0x0000005d, 0x00000004 },
+ { 0x00401069, 0x00000008 },
+ { 0x00101000, 0x00000002 },
+ { 0x000d80ff, 0x00000002 },
+ { 0x0080006c, 0x00000008 },
+ { 0x000f9000, 0x00000002 },
+ { 0x000e00ff, 0x00000002 },
+ { 0000000000, 0x00000006 },
+ { 0x0000008f, 0x00000018 },
+ { 0x0000005b, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00007576, 0x00000002 },
+ { 0x00065000, 0x00000002 },
+ { 0x00009000, 0x00000002 },
+ { 0x00041000, 0x00000002 },
+ { 0x0c00350e, 0x00000002 },
+ { 0x00049000, 0x00000002 },
+ { 0x00051000, 0x00000002 },
+ { 0x01e785f8, 0x00000002 },
+ { 0x00200000, 0x00000002 },
+ { 0x0060007e, 0x0000000c },
+ { 0x00007563, 0x00000002 },
+ { 0x006075f0, 0x00000021 },
+ { 0x20007073, 0x00000004 },
+ { 0x00005073, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00007576, 0x00000002 },
+ { 0x00007577, 0x00000002 },
+ { 0x0000750e, 0x00000002 },
+ { 0x0000750f, 0x00000002 },
+ { 0x00a05000, 0x00000002 },
+ { 0x00600083, 0x0000000c },
+ { 0x006075f0, 0x00000021 },
+ { 0x000075f8, 0x00000002 },
+ { 0x00000083, 0x00000004 },
+ { 0x000a750e, 0x00000002 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x0020750f, 0x00000002 },
+ { 0x00600086, 0x00000004 },
+ { 0x00007570, 0x00000002 },
+ { 0x00007571, 0x00000002 },
+ { 0x00007572, 0x00000006 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00005000, 0x00000002 },
+ { 0x00a05000, 0x00000002 },
+ { 0x00007568, 0x00000002 },
+ { 0x00061000, 0x00000002 },
+ { 0x00000095, 0x0000000c },
+ { 0x00058000, 0x00000002 },
+ { 0x0c607562, 0x00000002 },
+ { 0x00000097, 0x00000004 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x00600096, 0x00000004 },
+ { 0x400070e5, 0000000000 },
+ { 0x000380e6, 0x00000002 },
+ { 0x040025c5, 0x00000002 },
+ { 0x000380e5, 0x00000002 },
+ { 0x000000a8, 0x0000001c },
+ { 0x000650aa, 0x00000018 },
+ { 0x040025bb, 0x00000002 },
+ { 0x000610ab, 0x00000018 },
+ { 0x040075bc, 0000000000 },
+ { 0x000075bb, 0x00000002 },
+ { 0x000075bc, 0000000000 },
+ { 0x00090000, 0x00000006 },
+ { 0x00090000, 0x00000002 },
+ { 0x000d8002, 0x00000006 },
+ { 0x00007832, 0x00000002 },
+ { 0x00005000, 0x00000002 },
+ { 0x000380e7, 0x00000002 },
+ { 0x04002c97, 0x00000002 },
+ { 0x00007820, 0x00000002 },
+ { 0x00007821, 0x00000002 },
+ { 0x00007800, 0000000000 },
+ { 0x01200000, 0x00000002 },
+ { 0x20077000, 0x00000002 },
+ { 0x01200000, 0x00000002 },
+ { 0x20007000, 0x00000002 },
+ { 0x00061000, 0x00000002 },
+ { 0x0120751b, 0x00000002 },
+ { 0x8040750a, 0x00000002 },
+ { 0x8040750b, 0x00000002 },
+ { 0x00110000, 0x00000002 },
+ { 0x000380e5, 0x00000002 },
+ { 0x000000c6, 0x0000001c },
+ { 0x000610ab, 0x00000018 },
+ { 0x844075bd, 0x00000002 },
+ { 0x000610aa, 0x00000018 },
+ { 0x840075bb, 0x00000002 },
+ { 0x000610ab, 0x00000018 },
+ { 0x844075bc, 0x00000002 },
+ { 0x000000c9, 0x00000004 },
+ { 0x804075bd, 0x00000002 },
+ { 0x800075bb, 0x00000002 },
+ { 0x804075bc, 0x00000002 },
+ { 0x00108000, 0x00000002 },
+ { 0x01400000, 0x00000002 },
+ { 0x006000cd, 0x0000000c },
+ { 0x20c07000, 0x00000020 },
+ { 0x000000cf, 0x00000012 },
+ { 0x00800000, 0x00000006 },
+ { 0x0080751d, 0x00000006 },
+ { 0000000000, 0000000000 },
+ { 0x0000775c, 0x00000002 },
+ { 0x00a05000, 0x00000002 },
+ { 0x00661000, 0x00000002 },
+ { 0x0460275d, 0x00000020 },
+ { 0x00004000, 0000000000 },
+ { 0x01e00830, 0x00000002 },
+ { 0x21007000, 0000000000 },
+ { 0x6464614d, 0000000000 },
+ { 0x69687420, 0000000000 },
+ { 0x00000073, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0x00005000, 0x00000002 },
+ { 0x000380d0, 0x00000002 },
+ { 0x040025e0, 0x00000002 },
+ { 0x000075e1, 0000000000 },
+ { 0x00000001, 0000000000 },
+ { 0x000380e0, 0x00000002 },
+ { 0x04002394, 0x00000002 },
+ { 0x00005000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0x00000008, 0000000000 },
+ { 0x00000004, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+};
+
+
+#define DO_IOREMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
+
+#define DO_IOREMAPFREE(_m) \
+ do { \
+ if ((_m)->handle && (_m)->size) \
+ drm_ioremapfree((_m)->handle, (_m)->size); \
+ } while (0)
+
+#define DO_FIND_MAP(_m, _o) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < dev->map_count; _i++) { \
+ if (dev->maplist[_i]->offset == _o) { \
+ _m = dev->maplist[_i]; \
+ break; \
+ } \
+ } \
+ } while (0)
+
+
+int RADEON_READ_PLL(drm_device_t *dev, int addr)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+ return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status( drm_radeon_private_t *dev_priv )
+{
+ printk( "%s:\n", __FUNCTION__ );
+ printk( "RBBM_STATUS = 0x%08x\n",
+ (unsigned int)RADEON_READ( RADEON_RBBM_STATUS ) );
+ printk( "CP_RB_RTPR = 0x%08x\n",
+ (unsigned int)RADEON_READ( RADEON_CP_RB_RPTR ) );
+ printk( "CP_RB_WTPR = 0x%08x\n",
+ (unsigned int)RADEON_READ( RADEON_CP_RB_WPTR ) );
+}
+#endif
+
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int radeon_do_pixcache_flush( drm_radeon_private_t *dev_priv )
+{
+ u32 tmp;
+ int i;
+
+ tmp = RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT );
+ tmp |= RADEON_RB2D_DC_FLUSH_ALL;
+ RADEON_WRITE( RADEON_RB2D_DSTCACHE_CTLSTAT, tmp );
+
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ if ( !(RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT )
+ & RADEON_RB2D_DC_BUSY) ) {
+ return 0;
+ }
+ udelay( 1 );
+ }
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR( "failed!\n" );
+ radeon_status( dev_priv );
+#endif
+ return -EBUSY;
+}
+
+static int radeon_do_wait_for_fifo( drm_radeon_private_t *dev_priv,
+ int entries )
+{
+ int i;
+
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ int slots = ( RADEON_READ( RADEON_RBBM_STATUS )
+ & RADEON_RBBM_FIFOCNT_MASK );
+ if ( slots >= entries ) return 0;
+ udelay( 1 );
+ }
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR( "failed!\n" );
+ radeon_status( dev_priv );
+#endif
+ return -EBUSY;
+}
+
+static int radeon_do_wait_for_idle( drm_radeon_private_t *dev_priv )
+{
+ int i, ret;
+
+ ret = radeon_do_wait_for_fifo( dev_priv, 64 );
+ if ( ret < 0 ) return ret;
+
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ if ( !(RADEON_READ( RADEON_RBBM_STATUS )
+ & RADEON_RBBM_ACTIVE) ) {
+ radeon_do_pixcache_flush( dev_priv );
+ return 0;
+ }
+ udelay( 1 );
+ }
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR( "failed!\n" );
+ radeon_status( dev_priv );
+#endif
+ return -EBUSY;
+}
+
+
+/* ================================================================
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv )
+{
+ int i;
+
+ radeon_do_wait_for_idle( dev_priv );
+
+ RADEON_WRITE( RADEON_CP_ME_RAM_ADDR, 0 );
+ for ( i = 0 ; i < 256 ; i++ ) {
+ RADEON_WRITE( RADEON_CP_ME_RAM_DATAH,
+ radeon_cp_microcode[i][1] );
+ RADEON_WRITE( RADEON_CP_ME_RAM_DATAL,
+ radeon_cp_microcode[i][0] );
+ }
+}
+
+/* Flush any pending commands to the CP. This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv )
+{
+#if 0
+ u32 tmp;
+
+ tmp = RADEON_READ( RADEON_CP_RB_WPTR ) | (1 << 31);
+ RADEON_WRITE( RADEON_CP_RB_WPTR, tmp );
+#endif
+}
+
+/* Wait for the CP to go idle.
+ */
+int radeon_do_cp_idle( drm_radeon_private_t *dev_priv )
+{
+ RING_LOCALS;
+
+ BEGIN_RING( 6 );
+
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+
+ return radeon_do_wait_for_idle( dev_priv );
+}
+
+/* Start the Command Processor.
+ */
+static void radeon_do_cp_start( drm_radeon_private_t *dev_priv )
+{
+ RING_LOCALS;
+
+ radeon_do_wait_for_idle( dev_priv );
+
+ RADEON_WRITE( RADEON_CP_CSQ_CNTL, dev_priv->cp_mode );
+
+ dev_priv->cp_running = 1;
+
+ BEGIN_RING( 6 );
+
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+}
+
+/* Reset the Command Processor. This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv )
+{
+ u32 cur_read_ptr;
+
+ cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
+ RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
+ *dev_priv->ring.head = cur_read_ptr;
+ dev_priv->ring.tail = cur_read_ptr;
+}
+
+/* Stop the Command Processor. This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv )
+{
+ RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );
+
+ dev_priv->cp_running = 0;
+}
+
+/* Reset the engine. This will stop the CP if it is running.
+ */
+static int radeon_do_engine_reset( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ radeon_do_pixcache_flush( dev_priv );
+
+ clock_cntl_index = RADEON_READ( RADEON_CLOCK_CNTL_INDEX );
+ mclk_cntl = RADEON_READ_PLL( dev, RADEON_MCLK_CNTL );
+
+ /* FIXME: remove magic number here and in radeon ddx driver!!! */
+ RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl | 0x003f00000 );
+
+ rbbm_soft_reset = RADEON_READ( RADEON_RBBM_SOFT_RESET );
+
+ RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset |
+ RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB |
+ RADEON_SOFT_RESET_HDP ) );
+ RADEON_READ( RADEON_RBBM_SOFT_RESET );
+ RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset &
+ ~( RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB |
+ RADEON_SOFT_RESET_HDP ) ) );
+ RADEON_READ( RADEON_RBBM_SOFT_RESET );
+
+
+ RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl );
+ RADEON_WRITE( RADEON_CLOCK_CNTL_INDEX, clock_cntl_index );
+ RADEON_WRITE( RADEON_RBBM_SOFT_RESET, rbbm_soft_reset );
+
+ /* Reset the CP ring */
+ radeon_do_cp_reset( dev_priv );
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ /* Reset any pending vertex, indirect buffers */
+ radeon_freelist_reset( dev );
+
+ return 0;
+}
+
+static void radeon_cp_init_ring_buffer( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 ring_start, cur_read_ptr;
+ u32 tmp;
+
+ /* Initialize the memory controller */
+ RADEON_WRITE( RADEON_MC_FB_LOCATION,
+ (dev_priv->agp_vm_start - 1) & 0xffff0000 );
+ RADEON_WRITE( RADEON_MC_AGP_LOCATION,
+ (((dev_priv->agp_vm_start - 1 +
+ dev_priv->agp_size) & 0xffff0000) |
+ (dev_priv->agp_vm_start >> 16)) );
+
+ ring_start = (dev_priv->cp_ring->offset
+ - dev->agp->base
+ + dev_priv->agp_vm_start);
+
+ RADEON_WRITE( RADEON_CP_RB_BASE, ring_start );
+
+ /* Set the write pointer delay */
+ RADEON_WRITE( RADEON_CP_RB_WPTR_DELAY, 0 );
+
+ /* Initialize the ring buffer's read and write pointers */
+ cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
+ RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
+ *dev_priv->ring.head = cur_read_ptr;
+ dev_priv->ring.tail = cur_read_ptr;
+
+ RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset );
+
+ /* Set ring buffer size */
+ RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw );
+
+ radeon_do_wait_for_idle( dev_priv );
+
+ /* Turn off PCI GART */
+ tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN;
+ RADEON_WRITE( RADEON_AIC_CNTL, tmp );
+
+ /* Turn on bus mastering */
+ tmp = RADEON_READ( RADEON_BUS_CNTL ) & ~RADEON_BUS_MASTER_DIS;
+ RADEON_WRITE( RADEON_BUS_CNTL, tmp );
+
+ /* Sync everything up */
+ RADEON_WRITE( RADEON_ISYNC_CNTL,
+ (RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI) );
+}
+
+static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
+{
+ drm_radeon_private_t *dev_priv;
+ int i;
+
+ dev_priv = drm_alloc( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
+ if ( dev_priv == NULL )
+ return -ENOMEM;
+ dev->dev_private = (void *)dev_priv;
+
+ memset( dev_priv, 0, sizeof(drm_radeon_private_t) );
+
+ dev_priv->is_pci = init->is_pci;
+
+ /* We don't support PCI cards until PCI GART is implemented.
+ * Fail here so we can remove all checks for PCI cards around
+ * the CP ring code.
+ */
+ if ( dev_priv->is_pci ) {
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ dev->dev_private = NULL;
+ return -EINVAL;
+ }
+
+ dev_priv->usec_timeout = init->usec_timeout;
+ if ( dev_priv->usec_timeout < 1 ||
+ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) {
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ dev->dev_private = NULL;
+ return -EINVAL;
+ }
+
+ dev_priv->cp_mode = init->cp_mode;
+
+ /* Simple idle check.
+ */
+ atomic_set( &dev_priv->idle_count, 0 );
+
+ /* We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+ * read pointer.
+ */
+ if ( ( init->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) &&
+ ( init->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) {
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ dev->dev_private = NULL;
+ return -EINVAL;
+ }
+
+ switch ( init->fb_bpp ) {
+ case 16:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+ break;
+ case 32:
+ default:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+ break;
+ }
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->front_pitch = init->front_pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->back_pitch = init->back_pitch;
+
+ switch ( init->depth_bpp ) {
+ case 16:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+ break;
+ case 32:
+ default:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+ break;
+ }
+ dev_priv->depth_offset = init->depth_offset;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ dev_priv->front_pitch_offset = (((dev_priv->front_pitch/64) << 22) |
+ (dev_priv->front_offset >> 10));
+ dev_priv->back_pitch_offset = (((dev_priv->back_pitch/64) << 22) |
+ (dev_priv->back_offset >> 10));
+ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch/64) << 22) |
+ (dev_priv->depth_offset >> 10));
+
+ /* Hardware state for depth clears. Remove this if/when we no
+ * longer clear the depth buffer with a 3D rectangle. Hard-code
+ * all values to prevent unwanted 3D state from slipping through
+ * and screwing with the clear operation.
+ */
+ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+ RADEON_Z_ENABLE |
+ (dev_priv->color_fmt << 10) |
+ RADEON_ZBLOCK16);
+
+ dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt |
+ RADEON_Z_TEST_ALWAYS |
+ RADEON_STENCIL_TEST_ALWAYS |
+ RADEON_STENCIL_S_FAIL_KEEP |
+ RADEON_STENCIL_ZPASS_KEEP |
+ RADEON_STENCIL_ZFAIL_KEEP |
+ RADEON_Z_WRITE_ENABLE);
+
+ dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+ RADEON_BFACE_SOLID |
+ RADEON_FFACE_SOLID |
+ RADEON_FLAT_SHADE_VTX_LAST |
+
+ RADEON_DIFFUSE_SHADE_FLAT |
+ RADEON_ALPHA_SHADE_FLAT |
+ RADEON_SPECULAR_SHADE_FLAT |
+ RADEON_FOG_SHADE_FLAT |
+
+ RADEON_VTX_PIX_CENTER_OGL |
+ RADEON_ROUND_MODE_TRUNC |
+ RADEON_ROUND_PREC_8TH_PIX);
+
+ /* FIXME: We want multiple shared areas, including one shared
+ * only by the X Server and kernel module.
+ */
+ for ( i = 0 ; i < dev->map_count ; i++ ) {
+ if ( dev->maplist[i]->type == _DRM_SHM ) {
+ dev_priv->sarea = dev->maplist[i];
+ break;
+ }
+ }
+
+ DO_FIND_MAP( dev_priv->fb, init->fb_offset );
+ DO_FIND_MAP( dev_priv->mmio, init->mmio_offset );
+ DO_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
+ DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
+ DO_FIND_MAP( dev_priv->buffers, init->buffers_offset );
+
+ if ( !dev_priv->is_pci ) {
+ DO_FIND_MAP( dev_priv->agp_textures,
+ init->agp_textures_offset );
+ }
+
+ dev_priv->sarea_priv =
+ (drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ DO_IOREMAP( dev_priv->cp_ring );
+ DO_IOREMAP( dev_priv->ring_rptr );
+ DO_IOREMAP( dev_priv->buffers );
+#if 0
+ if ( !dev_priv->is_pci ) {
+ DO_IOREMAP( dev_priv->agp_textures );
+ }
+#endif
+
+ dev_priv->agp_size = init->agp_size;
+ dev_priv->agp_vm_start = RADEON_READ( RADEON_CONFIG_APER_SIZE );
+ dev_priv->agp_buffers_offset = (dev_priv->buffers->offset
+ - dev->agp->base
+ + dev_priv->agp_vm_start);
+
+ dev_priv->ring.head = ((__volatile__ u32 *)
+ dev_priv->ring_rptr->handle);
+
+ dev_priv->ring.start = (u32 *)dev_priv->cp_ring->handle;
+ dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
+ + init->ring_size / sizeof(u32));
+ dev_priv->ring.size = init->ring_size;
+ dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );
+
+ dev_priv->ring.tail_mask =
+ (dev_priv->ring.size / sizeof(u32)) - 1;
+
+#if 0
+ /* Initialize the scratch register pointer. This will cause
+ * the scratch register values to be written out to memory
+ * whenever they are updated.
+ * FIXME: This doesn't quite work yet, so we're disabling it
+ * for the release.
+ */
+ RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset +
+ RADEON_SCRATCH_REG_OFFSET) );
+ RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 );
+#endif
+
+ dev_priv->scratch = ((__volatile__ u32 *)
+ dev_priv->ring_rptr->handle +
+ (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
+
+ dev_priv->sarea_priv->last_frame = 0;
+ RADEON_WRITE( RADEON_LAST_FRAME_REG,
+ dev_priv->sarea_priv->last_frame );
+
+ dev_priv->sarea_priv->last_dispatch = 0;
+ RADEON_WRITE( RADEON_LAST_DISPATCH_REG,
+ dev_priv->sarea_priv->last_dispatch );
+
+ dev_priv->sarea_priv->last_clear = 0;
+ RADEON_WRITE( RADEON_LAST_CLEAR_REG,
+ dev_priv->sarea_priv->last_clear );
+
+ radeon_cp_load_microcode( dev_priv );
+ radeon_cp_init_ring_buffer( dev );
+ radeon_do_engine_reset( dev );
+
+#if ROTATE_BUFS
+ dev_priv->last_buf = 0;
+#endif
+
+ return 0;
+}
+
+static int radeon_do_cleanup_cp( drm_device_t *dev )
+{
+ if ( dev->dev_private ) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ DO_IOREMAPFREE( dev_priv->cp_ring );
+ DO_IOREMAPFREE( dev_priv->ring_rptr );
+ DO_IOREMAPFREE( dev_priv->buffers );
+#if 0
+ if ( !dev_priv->is_pci ) {
+ DO_IOREMAPFREE( dev_priv->agp_textures );
+ }
+#endif
+
+ drm_free( dev->dev_private, sizeof(drm_radeon_private_t),
+ DRM_MEM_DRIVER );
+ dev->dev_private = NULL;
+ }
+
+ return 0;
+}
+
+int radeon_cp_init( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_init_t init;
+
+ if ( copy_from_user( &init, (drm_radeon_init_t *)arg, sizeof(init) ) )
+ return -EFAULT;
+
+ switch ( init.func ) {
+ case RADEON_INIT_CP:
+ return radeon_do_init_cp( dev, &init );
+ case RADEON_CLEANUP_CP:
+ return radeon_do_cleanup_cp( dev );
+ }
+
+ return -EINVAL;
+}
+
+int radeon_cp_start( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( dev_priv->cp_running ) {
+ DRM_DEBUG( "%s while CP running\n", __FUNCTION__ );
+ return 0;
+ }
+ if ( dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS ) {
+ DRM_DEBUG( "%s called with bogus CP mode (%d)\n",
+ __FUNCTION__, dev_priv->cp_mode );
+ return 0;
+ }
+
+ radeon_do_cp_start( dev_priv );
+
+ return 0;
+}
+
+/* Stop the CP. The engine must have been idled before calling this
+ * routine.
+ */
+int radeon_cp_stop( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_cp_stop_t stop;
+ int ret;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &stop, (drm_radeon_cp_stop_t *)arg, sizeof(stop) ) )
+ return -EFAULT;
+
+ /* Flush any pending CP commands. This ensures any outstanding
+ * commands are executed by the engine before we turn it off.
+ */
+ if ( stop.flush ) {
+ radeon_do_cp_flush( dev_priv );
+ }
+
+ /* If we fail to make the engine go idle, we return an error
+ * code so that the DRM ioctl wrapper can try again.
+ */
+ if ( stop.idle ) {
+ ret = radeon_do_cp_idle( dev_priv );
+ if ( ret < 0 ) return ret;
+ }
+
+ /* Finally, we can turn off the CP. If the engine isn't idle,
+ * we will get some dropped triangles as they won't be fully
+ * rendered before the CP is shut down.
+ */
+ radeon_do_cp_stop( dev_priv );
+
+ /* Reset the engine */
+ radeon_do_engine_reset( dev );
+
+ return 0;
+}
+
+/* Just reset the CP ring. Called as part of an X Server engine reset.
+ */
+int radeon_cp_reset( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( !dev_priv ) {
+ DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ radeon_do_cp_reset( dev_priv );
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ return 0;
+}
+
+int radeon_cp_idle( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ return radeon_do_cp_idle( dev_priv );
+}
+
+int radeon_engine_reset( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ return radeon_do_engine_reset( dev );
+}
+
+
+/* ================================================================
+ * Fullscreen mode
+ */
+
+static int radeon_do_init_pageflip( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET );
+ dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL );
+
+ RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset );
+ RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL,
+ dev_priv->crtc_offset_cntl |
+ RADEON_CRTC_OFFSET_FLIP_CNTL );
+
+ dev_priv->page_flipping = 1;
+ dev_priv->current_page = 0;
+
+ return 0;
+}
+
+int radeon_do_cleanup_pageflip( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset );
+ RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl );
+
+ dev_priv->page_flipping = 0;
+ dev_priv->current_page = 0;
+
+ return 0;
+}
+
+int radeon_fullscreen( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_fullscreen_t fs;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg,
+ sizeof(fs) ) )
+ return -EFAULT;
+
+ switch ( fs.func ) {
+ case RADEON_INIT_FULLSCREEN:
+ return radeon_do_init_pageflip( dev );
+ case RADEON_CLEANUP_FULLSCREEN:
+ return radeon_do_cleanup_pageflip( dev );
+ }
+
+ return -EINVAL;
+}
+
+
+/* ================================================================
+ * Freelist management
+ */
+#define RADEON_BUFFER_USED 0xffffffff
+#define RADEON_BUFFER_FREE 0
+
+#if 0
+static int radeon_freelist_init( drm_device_t *dev )
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_radeon_freelist_t *entry;
+ int i;
+
+ dev_priv->head = drm_alloc( sizeof(drm_radeon_freelist_t),
+ DRM_MEM_DRIVER );
+ if ( dev_priv->head == NULL )
+ return -ENOMEM;
+
+ memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) );
+ dev_priv->head->age = RADEON_BUFFER_USED;
+
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+
+ entry = drm_alloc( sizeof(drm_radeon_freelist_t),
+ DRM_MEM_DRIVER );
+ if ( !entry ) return -ENOMEM;
+
+ entry->age = RADEON_BUFFER_FREE;
+ entry->buf = buf;
+ entry->prev = dev_priv->head;
+ entry->next = dev_priv->head->next;
+ if ( !entry->next )
+ dev_priv->tail = entry;
+
+ buf_priv->discard = 0;
+ buf_priv->dispatched = 0;
+ buf_priv->list_entry = entry;
+
+ dev_priv->head->next = entry;
+
+ if ( dev_priv->head->next )
+ dev_priv->head->next->prev = entry;
+ }
+
+ return 0;
+
+}
+#endif
+
+drm_buf_t *radeon_freelist_get( drm_device_t *dev )
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_buf_t *buf;
+ int i, t;
+#if ROTATE_BUFS
+ int start;
+#endif
+
+ /* FIXME: Optimize -- use freelist code */
+
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if ( buf->pid == 0 ) {
+ DRM_DEBUG( " ret buf=%d last=%d pid=0\n",
+ buf->idx, dev_priv->last_buf );
+ return buf;
+ }
+ DRM_DEBUG( " skipping buf=%d pid=%d\n",
+ buf->idx, buf->pid );
+ }
+
+#if ROTATE_BUFS
+ if ( ++dev_priv->last_buf >= dma->buf_count )
+ dev_priv->last_buf = 0;
+ start = dev_priv->last_buf;
+#endif
+ for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) {
+#if 0
+ /* FIXME: Disable this for now */
+ u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH];
+#else
+ u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG );
+#endif
+#if ROTATE_BUFS
+ for ( i = start ; i < dma->buf_count ; i++ ) {
+#else
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+#endif
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if ( buf->pending && buf_priv->age <= done_age ) {
+ /* The buffer has been processed, so it
+ * can now be used.
+ */
+ buf->pending = 0;
+ DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age );
+ return buf;
+ }
+ DRM_DEBUG( " skipping buf=%d age=%d done=%d\n",
+ buf->idx, buf_priv->age,
+ done_age );
+#if ROTATE_BUFS
+ start = 0;
+#endif
+ }
+ udelay( 1 );
+ }
+
+ DRM_ERROR( "returning NULL!\n" );
+ return NULL;
+}
+
+void radeon_freelist_reset( drm_device_t *dev )
+{
+ drm_device_dma_t *dma = dev->dma;
+#if ROTATE_BUFS
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+#endif
+ int i;
+
+#if ROTATE_BUFS
+ dev_priv->last_buf = 0;
+#endif
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ drm_buf_t *buf = dma->buflist[i];
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ buf_priv->age = 0;
+ }
+}
+
+
+/* ================================================================
+ * CP command submission
+ */
+
+int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
+{
+ drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+ int i;
+
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ ring->space = *ring->head - ring->tail;
+ if ( ring->space <= 0 )
+ ring->space += ring->size;
+
+ if ( ring->space >= n )
+ return 0;
+
+ udelay( 1 );
+ }
+
+ /* FIXME: This return value is ignored in the BEGIN_RING macro! */
+ DRM_ERROR( "failed!\n" );
+ return -EBUSY;
+}
+
+void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+
+ ring->space = *ring->head - ring->tail;
+ if ( ring->space == 0 )
+ atomic_inc( &dev_priv->idle_count );
+ if ( ring->space <= 0 )
+ ring->space += ring->size;
+}
+
+static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d )
+{
+ int i;
+ drm_buf_t *buf;
+
+ for ( i = d->granted_count ; i < d->request_count ; i++ ) {
+ buf = radeon_freelist_get( dev );
+ if ( !buf ) return -EAGAIN;
+
+ buf->pid = current->pid;
+
+ if ( copy_to_user( &d->request_indices[i], &buf->idx,
+ sizeof(buf->idx) ) )
+ return -EFAULT;
+ if ( copy_to_user( &d->request_sizes[i], &buf->total,
+ sizeof(buf->total) ) )
+ return -EFAULT;
+
+ d->granted_count++;
+ }
+ return 0;
+}
+
+int radeon_cp_buffers( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ int ret = 0;
+ drm_dma_t d;
+
+ if ( copy_from_user( &d, (drm_dma_t *) arg, sizeof(d) ) )
+ return -EFAULT;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ /* Please don't send us buffers.
+ */
+ if ( d.send_count != 0 ) {
+ DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
+ current->pid, d.send_count );
+ return -EINVAL;
+ }
+
+ /* We'll send you buffers.
+ */
+ if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
+ DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
+ current->pid, d.request_count, dma->buf_count );
+ return -EINVAL;
+ }
+
+ d.granted_count = 0;
+
+ if ( d.request_count ) {
+ ret = radeon_cp_get_buffers( dev, &d );
+ }
+
+ if ( copy_to_user( (drm_dma_t *) arg, &d, sizeof(d) ) )
+ return -EFAULT;
+
+ return ret;
+}
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
new file mode 100644
index 000000000..c5f9f66d1
--- /dev/null
+++ b/drivers/char/drm/radeon_drm.h
@@ -0,0 +1,325 @@
+/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define RADEON_UPLOAD_CONTEXT 0x00000001
+#define RADEON_UPLOAD_VERTFMT 0x00000002
+#define RADEON_UPLOAD_LINE 0x00000004
+#define RADEON_UPLOAD_BUMPMAP 0x00000008
+#define RADEON_UPLOAD_MASKS 0x00000010
+#define RADEON_UPLOAD_VIEWPORT 0x00000020
+#define RADEON_UPLOAD_SETUP 0x00000040
+#define RADEON_UPLOAD_TCL 0x00000080
+#define RADEON_UPLOAD_MISC 0x00000100
+#define RADEON_UPLOAD_TEX0 0x00000200
+#define RADEON_UPLOAD_TEX1 0x00000400
+#define RADEON_UPLOAD_TEX2 0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
+#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE 0x00010000
+#define RADEON_UPLOAD_ALL 0x0001ffff
+
+#define RADEON_FRONT 0x1
+#define RADEON_BACK 0x2
+#define RADEON_DEPTH 0x4
+
+/* Primitive types
+ */
+#define RADEON_POINTS 0x1
+#define RADEON_LINES 0x2
+#define RADEON_LINE_STRIP 0x3
+#define RADEON_TRIANGLES 0x4
+#define RADEON_TRIANGLE_FAN 0x5
+#define RADEON_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define RADEON_BUFFER_SIZE 16384
+
+/* Byte offsets for indirect buffer data
+ */
+#define RADEON_INDEX_PRIM_OFFSET 20
+#define RADEON_HOSTDATA_BLIT_OFFSET 32
+
+#define RADEON_SCRATCH_REG_OFFSET 32
+
+/* Keep these small for testing
+ */
+#define RADEON_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/AGP). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP 0
+#define RADEON_AGP_TEX_HEAP 1
+#define RADEON_NR_TEX_HEAPS 2
+#define RADEON_NR_TEX_REGIONS 64
+#define RADEON_LOG_TEX_GRANULARITY 16
+
+#define RADEON_MAX_TEXTURE_LEVELS 11
+#define RADEON_MAX_TEXTURE_UNITS 3
+
+#endif /* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int red;
+ unsigned int green;
+ unsigned int blue;
+ unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+ /* Context state */
+ unsigned int pp_misc; /* 0x1c14 */
+ unsigned int pp_fog_color;
+ unsigned int re_solid_color;
+ unsigned int rb3d_blendcntl;
+ unsigned int rb3d_depthoffset;
+ unsigned int rb3d_depthpitch;
+ unsigned int rb3d_zstencilcntl;
+
+ unsigned int pp_cntl; /* 0x1c38 */
+ unsigned int rb3d_cntl;
+ unsigned int rb3d_coloroffset;
+ unsigned int re_width_height;
+ unsigned int rb3d_colorpitch;
+ unsigned int se_cntl;
+
+ /* Vertex format state */
+ unsigned int se_coord_fmt; /* 0x1c50 */
+
+ /* Line state */
+ unsigned int re_line_pattern; /* 0x1cd0 */
+ unsigned int re_line_state;
+
+ unsigned int se_line_width; /* 0x1db8 */
+
+ /* Bumpmap state */
+ unsigned int pp_lum_matrix; /* 0x1d00 */
+
+ unsigned int pp_rot_matrix_0; /* 0x1d58 */
+ unsigned int pp_rot_matrix_1;
+
+ /* Mask state */
+ unsigned int rb3d_stencilrefmask; /* 0x1d7c */
+ unsigned int rb3d_ropcntl;
+ unsigned int rb3d_planemask;
+
+ /* Viewport state */
+ unsigned int se_vport_xscale; /* 0x1d98 */
+ unsigned int se_vport_xoffset;
+ unsigned int se_vport_yscale;
+ unsigned int se_vport_yoffset;
+ unsigned int se_vport_zscale;
+ unsigned int se_vport_zoffset;
+
+ /* Setup state */
+ unsigned int se_cntl_status; /* 0x2140 */
+
+#ifdef TCL_ENABLE
+ /* TCL state */
+ radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */
+ radeon_color_regs_t se_tcl_material_ambient;
+ radeon_color_regs_t se_tcl_material_diffuse;
+ radeon_color_regs_t se_tcl_material_specular;
+ unsigned int se_tcl_shininess;
+ unsigned int se_tcl_output_vtx_fmt;
+ unsigned int se_tcl_output_vtx_sel;
+ unsigned int se_tcl_matrix_select_0;
+ unsigned int se_tcl_matrix_select_1;
+ unsigned int se_tcl_ucp_vert_blend_ctl;
+ unsigned int se_tcl_texture_proc_ctl;
+ unsigned int se_tcl_light_model_ctl;
+ unsigned int se_tcl_per_light_ctl[4];
+#endif
+
+ /* Misc state */
+ unsigned int re_top_left; /* 0x26c0 */
+ unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int pp_txfilter;
+ unsigned int pp_txformat;
+ unsigned int pp_txoffset;
+ unsigned int pp_txcblend;
+ unsigned int pp_txablend;
+ unsigned int pp_tfactor;
+
+ unsigned int pp_border_color;
+
+#ifdef CUBIC_ENABLE
+ unsigned int pp_cubic_faces;
+ unsigned int pp_cubic_offset[5];
+#endif
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+ unsigned char next, prev;
+ unsigned char in_use;
+ int age;
+} drm_radeon_tex_region_t;
+
+typedef struct {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex buffer.
+ */
+ drm_radeon_context_regs_t context_state;
+ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+ unsigned int last_clear;
+
+ drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1];
+ int tex_age[RADEON_NR_TEX_HEAPS];
+ int ctx_owner;
+} drm_radeon_sarea_t;
+
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ */
+typedef struct drm_radeon_init {
+ enum {
+ RADEON_INIT_CP = 0x01,
+ RADEON_CLEANUP_CP = 0x02
+ } func;
+ int sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int agp_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int fb_offset;
+ unsigned int mmio_offset;
+ unsigned int ring_offset;
+ unsigned int ring_rptr_offset;
+ unsigned int buffers_offset;
+ unsigned int agp_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+ int flush;
+ int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+ enum {
+ RADEON_INIT_FULLSCREEN = 0x01,
+ RADEON_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1 0
+#define CLEAR_Y1 1
+#define CLEAR_X2 2
+#define CLEAR_Y2 3
+#define CLEAR_DEPTH 4
+
+typedef struct drm_radeon_clear {
+ unsigned int flags;
+ int x, y, w, h;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ union {
+ float f[5];
+ unsigned int ui[5];
+ } rect;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+typedef struct drm_radeon_blit {
+ int idx;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_radeon_blit_t;
+
+typedef struct drm_radeon_stipple {
+ unsigned int *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_radeon_indirect_t;
+
+#endif
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
new file mode 100644
index 000000000..0113ed97c
--- /dev/null
+++ b/drivers/char/drm/radeon_drv.c
@@ -0,0 +1,702 @@
+/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ */
+
+#include <linux/config.h>
+#include "drmP.h"
+#include "radeon_drv.h"
+
+#define RADEON_NAME "radeon"
+#define RADEON_DESC "ATI Radeon"
+#define RADEON_DATE "20010105"
+#define RADEON_MAJOR 1
+#define RADEON_MINOR 0
+#define RADEON_PATCHLEVEL 0
+
+static drm_device_t radeon_device;
+drm_ctx_t radeon_res_ctx;
+
+static struct file_operations radeon_fops = {
+#if LINUX_VERSION_CODE >= 0x020400
+ /* This started being used during 2.4.0-test */
+ owner: THIS_MODULE,
+#endif
+ open: radeon_open,
+ flush: drm_flush,
+ release: radeon_release,
+ ioctl: radeon_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice radeon_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: RADEON_NAME,
+ fops: &radeon_fops,
+};
+
+static drm_ioctl_desc_t radeon_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)] = { radeon_cp_blit, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 },
+};
+#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)
+
+#ifdef MODULE
+static char *radeon = NULL;
+#endif
+
+MODULE_AUTHOR("VA Linux Systems, Inc.");
+MODULE_DESCRIPTION("radeon");
+MODULE_PARM(radeon, "s");
+
+#ifndef MODULE
+/* radeon_options is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_drm.
+ */
+
+static int __init radeon_options(char *str)
+{
+ drm_parse_options(str);
+ return 1;
+}
+
+__setup("radeon=", radeon_options);
+#endif
+
+static int radeon_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ radeon_res_ctx.handle = -1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int radeon_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until radeon_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired) _drm_agp_release();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ handled in the AGP/GART driver. */
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* radeon_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+static int __init radeon_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(radeon);
+#endif
+
+ if ((retcode = misc_register(&radeon_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
+ dev->name = RADEON_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ dev->agp = drm_agp_init();
+ if (dev->agp == NULL) {
+ DRM_ERROR("Cannot initialize agpgart module.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_MTRR
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+#endif
+
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ RADEON_NAME,
+ RADEON_MAJOR,
+ RADEON_MINOR,
+ RADEON_PATCHLEVEL,
+ RADEON_DATE,
+ radeon_misc.minor);
+
+ return 0;
+}
+
+/* radeon_cleanup is called via cleanup_module at module unload time. */
+
+static void __exit radeon_cleanup(void)
+{
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&radeon_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ radeon_takedown(dev);
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ if (dev->agp) {
+ drm_agp_uninit();
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+#endif
+}
+
+module_init(radeon_init);
+module_exit(radeon_cleanup);
+
+
+int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ if (copy_from_user(&version,
+ (drm_version_t *)arg,
+ sizeof(version)))
+ return -EFAULT;
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (copy_to_user(name, value, len)) \
+ return -EFAULT; \
+ }
+
+ version.version_major = RADEON_MAJOR;
+ version.version_minor = RADEON_MINOR;
+ version.version_patchlevel = RADEON_PATCHLEVEL;
+
+ DRM_COPY(version.name, RADEON_NAME);
+ DRM_COPY(version.date, RADEON_DATE);
+ DRM_COPY(version.desc, RADEON_DESC);
+
+ if (copy_to_user((drm_version_t *)arg,
+ &version,
+ sizeof(version)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &radeon_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return radeon_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ return retcode;
+}
+
+int radeon_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev;
+ int retcode = 0;
+
+ lock_kernel();
+ dev = priv->dev;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ /* Force the cleanup of page flipping when required */
+ if ( dev->dev_private ) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ if ( dev_priv->page_flipping ) {
+ radeon_do_cleanup_pageflip( dev );
+ }
+ }
+
+ if (!(retcode = drm_release(inode, filp))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return radeon_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ unlock_kernel();
+ return retcode;
+}
+
+/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= RADEON_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &radeon_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context < 0 /* || lock.context >= dev->queue_count */)
+ return -EINVAL;
+
+ if (!ret) {
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+ if (!ret) {
+ sigemptyset(&dev->sigmask);
+ sigaddset(&dev->sigmask, SIGSTOP);
+ sigaddset(&dev->sigmask, SIGTSTP);
+ sigaddset(&dev->sigmask, SIGTTIN);
+ sigaddset(&dev->sigmask, SIGTTOU);
+ dev->sigdata.context = lock.context;
+ dev->sigdata.lock = dev->lock.hw_lock;
+ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+ DRM_DEBUG("not quiescent!\n");
+#if 0
+ radeon_quiescent(dev);
+#endif
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#endif
+ unblock_all_signals();
+ return 0;
+}
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
new file mode 100644
index 000000000..06b541991
--- /dev/null
+++ b/drivers/char/drm/radeon_drv.h
@@ -0,0 +1,709 @@
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+typedef struct drm_radeon_freelist {
+ unsigned int age;
+ drm_buf_t *buf;
+ struct drm_radeon_freelist *next;
+ struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+ u32 *start;
+ u32 *end;
+ int size;
+ int size_l2qw;
+
+ volatile u32 *head;
+ u32 tail;
+ u32 tail_mask;
+ int space;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+ u32 rb3d_cntl;
+ u32 rb3d_zstencilcntl;
+ u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+typedef struct drm_radeon_private {
+ drm_radeon_ring_buffer_t ring;
+ drm_radeon_sarea_t *sarea_priv;
+
+ int agp_size;
+ u32 agp_vm_start;
+ u32 agp_buffers_offset;
+
+ int cp_mode;
+ int cp_running;
+
+ drm_radeon_freelist_t *head;
+ drm_radeon_freelist_t *tail;
+/* FIXME: ROTATE_BUFS is a hask to cycle through bufs until freelist
+ code is used. Note this hides a problem with the scratch register
+ (used to keep track of last buffer completed) being written to before
+ the last buffer has actually completed rendering. */
+#define ROTATE_BUFS 1
+#if ROTATE_BUFS
+ int last_buf;
+#endif
+ volatile u32 *scratch;
+
+ int usec_timeout;
+ int is_pci;
+
+ atomic_t idle_count;
+
+ int page_flipping;
+ int current_page;
+ u32 crtc_offset;
+ u32 crtc_offset_cntl;
+
+ unsigned int color_fmt;
+ unsigned int front_offset;
+ unsigned int front_pitch;
+ unsigned int back_offset;
+ unsigned int back_pitch;
+
+ unsigned int depth_fmt;
+ unsigned int depth_offset;
+ unsigned int depth_pitch;
+
+ u32 front_pitch_offset;
+ u32 back_pitch_offset;
+ u32 depth_pitch_offset;
+
+ drm_radeon_depth_clear_t depth_clear;
+
+ drm_map_t *sarea;
+ drm_map_t *fb;
+ drm_map_t *mmio;
+ drm_map_t *cp_ring;
+ drm_map_t *ring_rptr;
+ drm_map_t *buffers;
+ drm_map_t *agp_textures;
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+ u32 age;
+ int prim;
+ int discard;
+ int dispatched;
+ drm_radeon_freelist_t *list_entry;
+} drm_radeon_buf_priv_t;
+
+ /* radeon_drv.c */
+extern int radeon_version( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_open( struct inode *inode, struct file *filp );
+extern int radeon_release( struct inode *inode, struct file *filp );
+extern int radeon_ioctl( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_lock( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_unlock( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+
+ /* radeon_cp.c */
+extern int radeon_cp_init( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_start( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_stop( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_reset( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_idle( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_engine_reset( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_fullscreen( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_buffers( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+
+extern void radeon_freelist_reset( drm_device_t *dev );
+extern drm_buf_t *radeon_freelist_get( drm_device_t *dev );
+
+extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
+extern void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv );
+
+extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
+extern int radeon_do_cleanup_pageflip( drm_device_t *dev );
+
+ /* radeon_state.c */
+extern int radeon_cp_clear( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_swap( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_vertex( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_indices( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_blit( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_stipple( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_cp_indirect( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+
+ /* radeon_bufs.c */
+extern int radeon_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* radeon_context.c */
+extern int radeon_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int radeon_context_switch(drm_device_t *dev, int old, int new);
+extern int radeon_context_switch_complete(drm_device_t *dev, int new);
+
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+
+#define RADEON_AUX_SCISSOR_CNTL 0x26f0
+# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
+# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
+# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26)
+# define RADEON_SCISSOR_0_ENABLE (1 << 28)
+# define RADEON_SCISSOR_1_ENABLE (1 << 29)
+# define RADEON_SCISSOR_2_ENABLE (1 << 30)
+
+#define RADEON_BUS_CNTL 0x0030
+# define RADEON_BUS_MASTER_DIS (1 << 6)
+
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+# define RADEON_PLL_WR_EN (1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CRTC_OFFSET 0x0224
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+# define RADEON_CRTC_TILE_EN (1 << 15)
+# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
+
+#define RADEON_RB3D_COLORPITCH 0x1c48
+#define RADEON_RB3D_DEPTHCLEARVALUE 0x1c30
+#define RADEON_RB3D_DEPTHXY_OFFSET 0x1c60
+
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+# define RADEON_GMC_BRUSH_NONE (15 << 4)
+# define RADEON_GMC_DST_16BPP (4 << 8)
+# define RADEON_GMC_DST_24BPP (5 << 8)
+# define RADEON_GMC_DST_32BPP (6 << 8)
+# define RADEON_GMC_DST_DATATYPE_SHIFT 8
+# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+# define RADEON_GMC_WR_MSK_DIS (1 << 30)
+# define RADEON_ROP3_S 0x00cc0000
+# define RADEON_ROP3_P 0x00f00000
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+# define RADEON_DST_TILE_LINEAR (0 << 30)
+# define RADEON_DST_TILE_MACRO (1 << 30)
+# define RADEON_DST_TILE_MICRO (2 << 30)
+# define RADEON_DST_TILE_BOTH (3 << 30)
+
+#define RADEON_SCRATCH_REG0 0x15e0
+#define RADEON_SCRATCH_REG1 0x15e4
+#define RADEON_SCRATCH_REG2 0x15e8
+#define RADEON_SCRATCH_REG3 0x15ec
+#define RADEON_SCRATCH_REG4 0x15f0
+#define RADEON_SCRATCH_REG5 0x15f4
+#define RADEON_SCRATCH_UMSK 0x0770
+#define RADEON_SCRATCH_ADDR 0x0774
+
+#define RADEON_HOST_PATH_CNTL 0x0130
+# define RADEON_HDP_SOFT_RESET (1 << 26)
+# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28)
+# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28)
+
+#define RADEON_ISYNC_CNTL 0x1724
+# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
+# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
+# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
+# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
+# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
+# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_MCLK_CNTL 0x0012
+
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+# define RADEON_SCISSOR_ENABLE (1 << 1)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+
+#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
+# define RADEON_RB2D_DC_FLUSH (3 << 0)
+# define RADEON_RB2D_DC_FREE (3 << 2)
+# define RADEON_RB2D_DC_FLUSH_ALL 0xf
+# define RADEON_RB2D_DC_BUSY (1 << 31)
+#define RADEON_RB3D_CNTL 0x1c3c
+# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+# define RADEON_PLANE_MASK_ENABLE (1 << 1)
+# define RADEON_DITHER_ENABLE (1 << 2)
+# define RADEON_ROUND_ENABLE (1 << 3)
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+# define RADEON_DITHER_INIT (1 << 5)
+# define RADEON_ROP_ENABLE (1 << 6)
+# define RADEON_STENCIL_ENABLE (1 << 7)
+# define RADEON_Z_ENABLE (1 << 8)
+# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
+# define RADEON_ZBLOCK8 (0 << 15)
+# define RADEON_ZBLOCK16 (1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE 0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
+# define RADEON_RB3D_ZC_FLUSH (1 << 0)
+# define RADEON_RB3D_ZC_FREE (1 << 2)
+# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
+# define RADEON_RB3D_ZC_BUSY (1 << 31)
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_Z_TEST_MASK (7 << 4)
+# define RADEON_Z_TEST_ALWAYS (7 << 4)
+# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16)
+# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
+# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20)
+# define RADEON_Z_WRITE_ENABLE (1 << 30)
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+# define RADEON_SOFT_RESET_CP (1 << 0)
+# define RADEON_SOFT_RESET_HI (1 << 1)
+# define RADEON_SOFT_RESET_SE (1 << 2)
+# define RADEON_SOFT_RESET_RE (1 << 3)
+# define RADEON_SOFT_RESET_PP (1 << 4)
+# define RADEON_SOFT_RESET_E2 (1 << 5)
+# define RADEON_SOFT_RESET_RB (1 << 6)
+# define RADEON_SOFT_RESET_HDP (1 << 7)
+#define RADEON_RBBM_STATUS 0x0e40
+# define RADEON_RBBM_FIFOCNT_MASK 0x007f
+# define RADEON_RBBM_ACTIVE (1 << 31)
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+#define RADEON_RE_MISC 0x26c4
+#define RADEON_RE_TOP_LEFT 0x26c0
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+#define RADEON_RE_STIPPLE_ADDR 0x1cc8
+#define RADEON_RE_STIPPLE_DATA 0x1ccc
+
+#define RADEON_SCISSOR_TL_0 0x1cd8
+#define RADEON_SCISSOR_BR_0 0x1cdc
+#define RADEON_SCISSOR_TL_1 0x1ce0
+#define RADEON_SCISSOR_BR_1 0x1ce4
+#define RADEON_SCISSOR_TL_2 0x1ce8
+#define RADEON_SCISSOR_BR_2 0x1cec
+#define RADEON_SE_COORD_FMT 0x1c50
+#define RADEON_SE_CNTL 0x1c4c
+# define RADEON_FFACE_CULL_CW (0 << 0)
+# define RADEON_BFACE_SOLID (3 << 1)
+# define RADEON_FFACE_SOLID (3 << 3)
+# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+# define RADEON_FOG_SHADE_FLAT (1 << 14)
+# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+# define RADEON_ROUND_MODE_TRUNC (0 << 28)
+# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+#define RADEON_SE_CNTL_STATUS 0x2140
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR 0x0bfc
+#define RADEON_SURFACE_CNTL 0x0b00
+# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20)
+# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20)
+# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20)
+# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20)
+# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22)
+# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22)
+# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22)
+# define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22)
+#define RADEON_SURFACE0_INFO 0x0b0c
+# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0)
+# define RADEON_SURF_TILE_MODE_MASK (3 << 16)
+# define RADEON_SURF_TILE_MODE_MACRO (0 << 16)
+# define RADEON_SURF_TILE_MODE_MICRO (1 << 16)
+# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16)
+# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
+
+#define RADEON_RB3D_ZMASKOFFSET 0x1c34
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+# define RADEON_PRE_WRITE_TIMER_SHIFT 0
+# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+
+#define RADEON_CP_IB_BASE 0x0738
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define RADEON_AIC_CNTL 0x01d0
+# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+
+/* CP command packets */
+#define RADEON_CP_PACKET0 0x00000000
+# define RADEON_ONE_REG_WR (1 << 15)
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
+# define RADEON_WAIT_FOR_IDLE 0x00002600
+# define RADEON_3D_DRAW_IMMD 0x00002900
+# define RADEON_3D_CLEAR_ZMASK 0x00003200
+# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
+# define RADEON_CNTL_PAINT_MULTI 0x00009A00
+# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
+
+#define RADEON_CP_PACKET_MASK 0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
+
+#define RADEON_VTX_Z_PRESENT (1 << 31)
+
+#define RADEON_PRIM_TYPE_NONE (0 << 0)
+#define RADEON_PRIM_TYPE_POINT (1 << 0)
+#define RADEON_PRIM_TYPE_LINE (2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+#define RADEON_PRIM_WALK_IND (1 << 4)
+#define RADEON_PRIM_WALK_LIST (2 << 4)
+#define RADEON_PRIM_WALK_RING (3 << 4)
+#define RADEON_COLOR_ORDER_BGRA (0 << 6)
+#define RADEON_COLOR_ORDER_RGBA (1 << 6)
+#define RADEON_MAOS_ENABLE (1 << 7)
+#define RADEON_VTX_FMT_R128_MODE (0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE (1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT 16
+
+#define RADEON_COLOR_FORMAT_CI8 2
+#define RADEON_COLOR_FORMAT_ARGB1555 3
+#define RADEON_COLOR_FORMAT_RGB565 4
+#define RADEON_COLOR_FORMAT_ARGB8888 6
+#define RADEON_COLOR_FORMAT_RGB332 7
+#define RADEON_COLOR_FORMAT_RGB8 9
+#define RADEON_COLOR_FORMAT_ARGB4444 15
+
+#define RADEON_TXF_8BPP_I 0
+#define RADEON_TXF_16BPP_AI88 1
+#define RADEON_TXF_8BPP_RGB332 2
+#define RADEON_TXF_16BPP_ARGB1555 3
+#define RADEON_TXF_16BPP_RGB565 4
+#define RADEON_TXF_16BPP_ARGB4444 5
+#define RADEON_TXF_32BPP_ARGB8888 6
+#define RADEON_TXF_32BPP_RGBA8888 7
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+
+#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
+#define RADEON_LAST_DISPATCH 1
+
+#define RADEON_MAX_VB_AGE 0x7fffffff
+#define RADEON_MAX_VB_VERTS (0xffff)
+
+
+#define RADEON_BASE(reg) ((u32)(dev_priv->mmio->handle))
+#define RADEON_ADDR(reg) (RADEON_BASE(reg) + reg)
+
+#define RADEON_DEREF(reg) *(__volatile__ u32 *)RADEON_ADDR(reg)
+#define RADEON_READ(reg) RADEON_DEREF(reg)
+#define RADEON_WRITE(reg,val) do { RADEON_DEREF(reg) = val; } while (0)
+
+#define RADEON_DEREF8(reg) *(__volatile__ u8 *)RADEON_ADDR(reg)
+#define RADEON_READ8(reg) RADEON_DEREF8(reg)
+#define RADEON_WRITE8(reg,val) do { RADEON_DEREF8(reg) = val; } while (0)
+
+#define RADEON_WRITE_PLL(addr,val) \
+do { \
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
+ ((addr) & 0x1f) | RADEON_PLL_WR_EN); \
+ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
+} while (0)
+
+extern int RADEON_READ_PLL(drm_device_t *dev, int addr);
+
+
+
+#define CP_PACKET0( reg, n ) \
+ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE( reg, n ) \
+ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1( reg0, reg1 ) \
+ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2() \
+ (RADEON_CP_PACKET2)
+#define CP_PACKET3( pkt, n ) \
+ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
+
+
+/* ================================================================
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_IDLE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \
+} while (0)
+
+#define RADEON_FLUSH_CACHE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \
+ OUT_RING( RADEON_RB2D_DC_FLUSH ); \
+} while (0)
+
+#define RADEON_PURGE_CACHE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \
+ OUT_RING( RADEON_RB2D_DC_FLUSH_ALL ); \
+} while (0)
+
+#define RADEON_FLUSH_ZCACHE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
+ OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
+} while (0)
+
+#define RADEON_PURGE_ZCACHE() \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
+ OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
+} while (0)
+
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+#define VB_AGE_CHECK_WITH_RET( dev_priv ) \
+do { \
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \
+ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
+ int __ret = radeon_do_cp_idle( dev_priv ); \
+ if ( __ret < 0 ) return __ret; \
+ sarea_priv->last_dispatch = 0; \
+ radeon_freelist_reset( dev ); \
+ } \
+} while (0)
+
+#define RADEON_DISPATCH_AGE( age ) \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+#define RADEON_FRAME_AGE( age ) \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+#define RADEON_CLEAR_AGE( age ) \
+do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+
+/* ================================================================
+ * Ring control
+ */
+
+#define radeon_flush_write_combine() mb()
+
+
+#define RADEON_VERBOSE 0
+
+#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring;
+
+#define BEGIN_RING( n ) do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
+ n, __FUNCTION__ ); \
+ } \
+ if ( dev_priv->ring.space < (n) * sizeof(u32) ) { \
+ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \
+ } \
+ dev_priv->ring.space -= (n) * sizeof(u32); \
+ ring = dev_priv->ring.start; \
+ write = dev_priv->ring.tail; \
+ mask = dev_priv->ring.tail_mask; \
+} while (0)
+
+#define ADVANCE_RING() do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n", \
+ write, dev_priv->ring.tail ); \
+ } \
+ radeon_flush_write_combine(); \
+ dev_priv->ring.tail = write; \
+ RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \
+} while (0)
+
+#define OUT_RING( x ) do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
+ (unsigned int)(x), write ); \
+ } \
+ ring[write++] = (x); \
+ write &= mask; \
+} while (0)
+
+#define RADEON_PERFORMANCE_BOXES 0
+
+#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
new file mode 100644
index 000000000..1a4da8005
--- /dev/null
+++ b/drivers/char/drm/radeon_state.c
@@ -0,0 +1,1447 @@
+/* radeon_state.c -- State support for Radeon -*- linux-c -*-
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "radeon_drv.h"
+#include "drm.h"
+#include <linux/delay.h>
+
+
+/* ================================================================
+ * CP hardware state programming functions
+ */
+
+static inline void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
+ drm_clip_rect_t *box )
+{
+ RING_LOCALS;
+
+ DRM_DEBUG( " box: x1=%d y1=%d x2=%d y2=%d\n",
+ box->x1, box->y1, box->x2, box->y2 );
+
+ BEGIN_RING( 4 );
+
+ OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
+ OUT_RING( (box->y1 << 16) | box->x1 );
+
+ OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
+ OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_context( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 14 );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
+ OUT_RING( ctx->pp_misc );
+ OUT_RING( ctx->pp_fog_color );
+ OUT_RING( ctx->re_solid_color );
+ OUT_RING( ctx->rb3d_blendcntl );
+ OUT_RING( ctx->rb3d_depthoffset );
+ OUT_RING( ctx->rb3d_depthpitch );
+ OUT_RING( ctx->rb3d_zstencilcntl );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
+ OUT_RING( ctx->pp_cntl );
+ OUT_RING( ctx->rb3d_cntl );
+ OUT_RING( ctx->rb3d_coloroffset );
+
+ OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
+ OUT_RING( ctx->rb3d_colorpitch );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 2 );
+
+ OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
+ OUT_RING( ctx->se_coord_fmt );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_line( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 5 );
+
+ OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
+ OUT_RING( ctx->re_line_pattern );
+ OUT_RING( ctx->re_line_state );
+
+ OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
+ OUT_RING( ctx->se_line_width );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 5 );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
+ OUT_RING( ctx->pp_lum_matrix );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
+ OUT_RING( ctx->pp_rot_matrix_0 );
+ OUT_RING( ctx->pp_rot_matrix_1 );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 4 );
+
+ OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
+ OUT_RING( ctx->rb3d_stencilrefmask );
+ OUT_RING( ctx->rb3d_ropcntl );
+ OUT_RING( ctx->rb3d_planemask );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 7 );
+
+ OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
+ OUT_RING( ctx->se_vport_xscale );
+ OUT_RING( ctx->se_vport_xoffset );
+ OUT_RING( ctx->se_vport_yscale );
+ OUT_RING( ctx->se_vport_yoffset );
+ OUT_RING( ctx->se_vport_zscale );
+ OUT_RING( ctx->se_vport_zoffset );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 4 );
+
+ OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
+ OUT_RING( ctx->se_cntl );
+ OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
+ OUT_RING( ctx->se_cntl_status );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv )
+{
+#ifdef TCL_ENABLE
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 29 );
+
+ OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) );
+ OUT_RING( ctx->se_tcl_material_emmissive.red );
+ OUT_RING( ctx->se_tcl_material_emmissive.green );
+ OUT_RING( ctx->se_tcl_material_emmissive.blue );
+ OUT_RING( ctx->se_tcl_material_emmissive.alpha );
+ OUT_RING( ctx->se_tcl_material_ambient.red );
+ OUT_RING( ctx->se_tcl_material_ambient.green );
+ OUT_RING( ctx->se_tcl_material_ambient.blue );
+ OUT_RING( ctx->se_tcl_material_ambient.alpha );
+ OUT_RING( ctx->se_tcl_material_diffuse.red );
+ OUT_RING( ctx->se_tcl_material_diffuse.green );
+ OUT_RING( ctx->se_tcl_material_diffuse.blue );
+ OUT_RING( ctx->se_tcl_material_diffuse.alpha );
+ OUT_RING( ctx->se_tcl_material_specular.red );
+ OUT_RING( ctx->se_tcl_material_specular.green );
+ OUT_RING( ctx->se_tcl_material_specular.blue );
+ OUT_RING( ctx->se_tcl_material_specular.alpha );
+ OUT_RING( ctx->se_tcl_shininess );
+ OUT_RING( ctx->se_tcl_output_vtx_fmt );
+ OUT_RING( ctx->se_tcl_output_vtx_sel );
+ OUT_RING( ctx->se_tcl_matrix_select_0 );
+ OUT_RING( ctx->se_tcl_matrix_select_1 );
+ OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl );
+ OUT_RING( ctx->se_tcl_texture_proc_ctl );
+ OUT_RING( ctx->se_tcl_light_model_ctl );
+ for ( i = 0 ; i < 4 ; i++ ) {
+ OUT_RING( ctx->se_tcl_per_light_ctl[i] );
+ }
+
+ ADVANCE_RING();
+#else
+ DRM_ERROR( "TCL not enabled!\n" );
+#endif
+}
+
+static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_context_regs_t *ctx = &sarea_priv->context_state;
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 2 );
+
+ OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
+ OUT_RING( ctx->re_misc );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0];
+ RING_LOCALS;
+ DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset );
+
+ BEGIN_RING( 9 );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
+ OUT_RING( tex->pp_txfilter );
+ OUT_RING( tex->pp_txformat );
+ OUT_RING( tex->pp_txoffset );
+ OUT_RING( tex->pp_txcblend );
+ OUT_RING( tex->pp_txablend );
+ OUT_RING( tex->pp_tfactor );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
+ OUT_RING( tex->pp_border_color );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1];
+ RING_LOCALS;
+ DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset );
+
+ BEGIN_RING( 9 );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
+ OUT_RING( tex->pp_txfilter );
+ OUT_RING( tex->pp_txformat );
+ OUT_RING( tex->pp_txoffset );
+ OUT_RING( tex->pp_txcblend );
+ OUT_RING( tex->pp_txablend );
+ OUT_RING( tex->pp_tfactor );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
+ OUT_RING( tex->pp_border_color );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2];
+ RING_LOCALS;
+ DRM_DEBUG( " %s\n", __FUNCTION__ );
+
+ BEGIN_RING( 9 );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
+ OUT_RING( tex->pp_txfilter );
+ OUT_RING( tex->pp_txformat );
+ OUT_RING( tex->pp_txoffset );
+ OUT_RING( tex->pp_txcblend );
+ OUT_RING( tex->pp_txablend );
+ OUT_RING( tex->pp_tfactor );
+
+ OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
+ OUT_RING( tex->pp_border_color );
+
+ ADVANCE_RING();
+}
+
+static inline void radeon_emit_state( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
+
+ DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty );
+
+ if ( dirty & RADEON_UPLOAD_CONTEXT ) {
+ radeon_emit_context( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT;
+ }
+
+ if ( dirty & RADEON_UPLOAD_VERTFMT ) {
+ radeon_emit_vertfmt( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT;
+ }
+
+ if ( dirty & RADEON_UPLOAD_LINE ) {
+ radeon_emit_line( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_LINE;
+ }
+
+ if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
+ radeon_emit_bumpmap( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP;
+ }
+
+ if ( dirty & RADEON_UPLOAD_MASKS ) {
+ radeon_emit_masks( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS;
+ }
+
+ if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
+ radeon_emit_viewport( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT;
+ }
+
+ if ( dirty & RADEON_UPLOAD_SETUP ) {
+ radeon_emit_setup( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP;
+ }
+
+ if ( dirty & RADEON_UPLOAD_TCL ) {
+#ifdef TCL_ENABLE
+ radeon_emit_tcl( dev_priv );
+#endif
+ sarea_priv->dirty &= ~RADEON_UPLOAD_TCL;
+ }
+
+ if ( dirty & RADEON_UPLOAD_MISC ) {
+ radeon_emit_misc( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_MISC;
+ }
+
+ if ( dirty & RADEON_UPLOAD_TEX0 ) {
+ radeon_emit_tex0( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0;
+ }
+
+ if ( dirty & RADEON_UPLOAD_TEX1 ) {
+ radeon_emit_tex1( dev_priv );
+ sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1;
+ }
+
+ if ( dirty & RADEON_UPLOAD_TEX2 ) {
+#if 0
+ radeon_emit_tex2( dev_priv );
+#endif
+ sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2;
+ }
+
+ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+ RADEON_UPLOAD_TEX1IMAGES |
+ RADEON_UPLOAD_TEX2IMAGES |
+ RADEON_REQUIRE_QUIESCENCE);
+}
+
+
+#if RADEON_PERFORMANCE_BOXES
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box( drm_radeon_private_t *dev_priv,
+ int x, int y, int w, int h,
+ int r, int g, int b )
+{
+ u32 pitch, offset;
+ u32 color;
+ RING_LOCALS;
+
+ switch ( dev_priv->color_fmt ) {
+ case RADEON_COLOR_FORMAT_RGB565:
+ color = (((r & 0xf8) << 8) |
+ ((g & 0xfc) << 3) |
+ ((b & 0xf8) >> 3));
+ break;
+ case RADEON_COLOR_FORMAT_ARGB8888:
+ default:
+ color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+ break;
+ }
+
+ offset = dev_priv->back_offset;
+ pitch = dev_priv->back_pitch >> 3;
+
+ BEGIN_RING( 6 );
+
+ OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
+ OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS );
+
+ OUT_RING( (pitch << 22) | (offset >> 5) );
+ OUT_RING( color );
+
+ OUT_RING( (x << 16) | y );
+ OUT_RING( (w << 16) | h );
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
+{
+ if ( atomic_read( &dev_priv->idle_count ) == 0 ) {
+ radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
+ } else {
+ atomic_set( &dev_priv->idle_count, 0 );
+ }
+}
+
+#endif
+
+
+/* ================================================================
+ * CP command dispatch functions
+ */
+
+static void radeon_print_dirty( const char *msg, unsigned int flags )
+{
+ DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ msg,
+ flags,
+ (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "",
+ (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "",
+ (flags & RADEON_UPLOAD_LINE) ? "line, " : "",
+ (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "",
+ (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "",
+ (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "",
+ (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "",
+ (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "",
+ (flags & RADEON_UPLOAD_MISC) ? "misc, " : "",
+ (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "",
+ (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "",
+ (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "",
+ (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
+ (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
+}
+
+static void radeon_cp_dispatch_clear( drm_device_t *dev,
+ drm_radeon_clear_t *clear )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ unsigned int flags = clear->flags;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
+ unsigned int tmp = flags;
+
+ flags &= ~(RADEON_FRONT | RADEON_BACK);
+ if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
+ if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT;
+ }
+
+ for ( i = 0 ; i < nbox ; i++ ) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
+ x, y, w, h, flags );
+
+ if ( flags & (RADEON_FRONT | RADEON_BACK) ) {
+ BEGIN_RING( 4 );
+
+ /* Ensure the 3D stream is idle before doing a
+ * 2D fill to clear the front or back buffer.
+ */
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
+ OUT_RING( sarea_priv->context_state.rb3d_planemask );
+
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
+ RADEON_UPLOAD_MASKS);
+ }
+
+ if ( flags & RADEON_FRONT ) {
+ BEGIN_RING( 6 );
+
+ OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
+ OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS );
+
+ OUT_RING( dev_priv->front_pitch_offset );
+ OUT_RING( clear->clear_color );
+
+ OUT_RING( (x << 16) | y );
+ OUT_RING( (w << 16) | h );
+
+ ADVANCE_RING();
+ }
+
+ if ( flags & RADEON_BACK ) {
+ BEGIN_RING( 6 );
+
+ OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
+ OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS );
+
+ OUT_RING( dev_priv->back_pitch_offset );
+ OUT_RING( clear->clear_color );
+
+ OUT_RING( (x << 16) | y );
+ OUT_RING( (w << 16) | h );
+
+ ADVANCE_RING();
+
+ }
+
+ if ( flags & RADEON_DEPTH ) {
+ drm_radeon_depth_clear_t *depth_clear =
+ &dev_priv->depth_clear;
+
+ if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+ radeon_emit_state( dev_priv );
+ }
+
+ /* FIXME: Render a rectangle to clear the depth
+ * buffer. So much for those "fast Z clears"...
+ */
+ BEGIN_RING( 23 );
+
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
+ OUT_RING( 0x00000000 );
+ OUT_RING( depth_clear->rb3d_cntl );
+ OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) );
+ OUT_RING( depth_clear->rb3d_zstencilcntl );
+ OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) );
+ OUT_RING( 0x00000000 );
+ OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
+ OUT_RING( depth_clear->se_cntl );
+
+ OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) );
+ OUT_RING( RADEON_VTX_Z_PRESENT );
+ OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
+ RADEON_PRIM_WALK_RING |
+ RADEON_MAOS_ENABLE |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (3 << RADEON_NUM_VERTICES_SHIFT)) );
+
+ OUT_RING( clear->rect.ui[CLEAR_X1] );
+ OUT_RING( clear->rect.ui[CLEAR_Y1] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
+
+ OUT_RING( clear->rect.ui[CLEAR_X1] );
+ OUT_RING( clear->rect.ui[CLEAR_Y2] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
+
+ OUT_RING( clear->rect.ui[CLEAR_X2] );
+ OUT_RING( clear->rect.ui[CLEAR_Y2] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
+
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
+ RADEON_UPLOAD_SETUP |
+ RADEON_UPLOAD_MASKS);
+ }
+ }
+
+ /* Increment the clear counter. The client-side 3D driver must
+ * wait on this value before performing the clear ioctl. We
+ * need this because the card's so damned fast...
+ */
+ dev_priv->sarea_priv->last_clear++;
+
+ BEGIN_RING( 4 );
+
+ RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_swap( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+#if RADEON_PERFORMANCE_BOXES
+ /* Do some trivial performance monitoring...
+ */
+ radeon_cp_performance_boxes( dev_priv );
+#endif
+
+ /* Wait for the 3D stream to idle before dispatching the bitblt.
+ * This will prevent data corruption between the two streams.
+ */
+ BEGIN_RING( 2 );
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ ADVANCE_RING();
+
+ for ( i = 0 ; i < nbox ; i++ ) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
+ x, y, w, h );
+
+ BEGIN_RING( 7 );
+
+ OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
+ OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS );
+
+ OUT_RING( dev_priv->back_pitch_offset );
+ OUT_RING( dev_priv->front_pitch_offset );
+
+ OUT_RING( (x << 16) | y );
+ OUT_RING( (x << 16) | y );
+ OUT_RING( (w << 16) | h );
+
+ ADVANCE_RING();
+ }
+
+ /* Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+ dev_priv->sarea_priv->last_frame++;
+
+ BEGIN_RING( 4 );
+
+ RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_flip( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+ DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+#if RADEON_PERFORMANCE_BOXES
+ /* Do some trivial performance monitoring...
+ */
+ radeon_cp_performance_boxes( dev_priv );
+#endif
+
+ BEGIN_RING( 6 );
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ RADEON_WAIT_UNTIL_PAGE_FLIPPED();
+
+ OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET, 0 ) );
+
+ if ( dev_priv->current_page == 0 ) {
+ OUT_RING( dev_priv->back_offset );
+ dev_priv->current_page = 1;
+ } else {
+ OUT_RING( dev_priv->front_offset );
+ dev_priv->current_page = 0;
+ }
+
+ ADVANCE_RING();
+
+ /* Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+ dev_priv->sarea_priv->last_frame++;
+
+ BEGIN_RING( 2 );
+
+ RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_vertex( drm_device_t *dev,
+ drm_buf_t *buf )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int format = sarea_priv->vc_format;
+ int offset = dev_priv->agp_buffers_offset + buf->offset;
+ int size = buf->used;
+ int prim = buf_priv->prim;
+ int i = 0;
+ RING_LOCALS;
+ DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ if ( 0 )
+ radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty );
+
+ if ( buf->used ) {
+ buf_priv->dispatched = 1;
+
+ if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+ radeon_emit_state( dev_priv );
+ }
+
+ do {
+ /* Emit the next set of up to three cliprects */
+ if ( i < sarea_priv->nbox ) {
+ radeon_emit_clip_rect( dev_priv,
+ &sarea_priv->boxes[i] );
+ }
+
+ /* Emit the vertex buffer rendering commands */
+ BEGIN_RING( 5 );
+
+ OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
+ OUT_RING( offset );
+ OUT_RING( size );
+ OUT_RING( format );
+ OUT_RING( prim | RADEON_PRIM_WALK_LIST |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (size << RADEON_NUM_VERTICES_SHIFT) );
+
+ ADVANCE_RING();
+
+ i++;
+ } while ( i < sarea_priv->nbox );
+ }
+
+ if ( buf_priv->discard ) {
+ buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+ /* Emit the vertex buffer age */
+ BEGIN_RING( 2 );
+ RADEON_DISPATCH_AGE( buf_priv->age );
+ ADVANCE_RING();
+
+ buf->pending = 1;
+ buf->used = 0;
+ /* FIXME: Check dispatched field */
+ buf_priv->dispatched = 0;
+ }
+
+ dev_priv->sarea_priv->last_dispatch++;
+
+ sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS;
+ sarea_priv->nbox = 0;
+}
+
+
+static void radeon_cp_dispatch_indirect( drm_device_t *dev,
+ drm_buf_t *buf,
+ int start, int end )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ RING_LOCALS;
+ DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
+ buf->idx, start, end );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ if ( start != end ) {
+ int offset = (dev_priv->agp_buffers_offset
+ + buf->offset + start);
+ int dwords = (end - start + 3) / sizeof(u32);
+
+ /* Indirect buffer data must be an even number of
+ * dwords, so if we've been given an odd number we must
+ * pad the data with a Type-2 CP packet.
+ */
+ if ( dwords & 1 ) {
+ u32 *data = (u32 *)
+ ((char *)dev_priv->buffers->handle
+ + buf->offset + start);
+ data[dwords++] = RADEON_CP_PACKET2;
+ }
+
+ buf_priv->dispatched = 1;
+
+ /* Fire off the indirect buffer */
+ BEGIN_RING( 3 );
+
+ OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
+ OUT_RING( offset );
+ OUT_RING( dwords );
+
+ ADVANCE_RING();
+ }
+
+ if ( buf_priv->discard ) {
+ buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+ /* Emit the indirect buffer age */
+ BEGIN_RING( 2 );
+ RADEON_DISPATCH_AGE( buf_priv->age );
+ ADVANCE_RING();
+
+ buf->pending = 1;
+ buf->used = 0;
+ /* FIXME: Check dispatched field */
+ buf_priv->dispatched = 0;
+ }
+
+ dev_priv->sarea_priv->last_dispatch++;
+}
+
+static void radeon_cp_dispatch_indices( drm_device_t *dev,
+ drm_buf_t *buf,
+ int start, int end,
+ int count )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int format = sarea_priv->vc_format;
+ int offset = dev_priv->agp_buffers_offset;
+ int prim = buf_priv->prim;
+ u32 *data;
+ int dwords;
+ int i = 0;
+ RING_LOCALS;
+ DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ if ( 0 )
+ radeon_print_dirty( "dispatch_indices", sarea_priv->dirty );
+
+ if ( start != end ) {
+ buf_priv->dispatched = 1;
+
+ if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
+ radeon_emit_state( dev_priv );
+ }
+
+ dwords = (end - start + 3) / sizeof(u32);
+
+ data = (u32 *)((char *)dev_priv->buffers->handle
+ + buf->offset + start);
+
+ data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
+
+ data[1] = offset;
+ data[2] = RADEON_MAX_VB_VERTS;
+ data[3] = format;
+ data[4] = (prim | RADEON_PRIM_WALK_IND |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (count << RADEON_NUM_VERTICES_SHIFT) );
+
+ if ( count & 0x1 ) {
+ data[dwords-1] &= 0x0000ffff;
+ }
+
+ do {
+ /* Emit the next set of up to three cliprects */
+ if ( i < sarea_priv->nbox ) {
+ radeon_emit_clip_rect( dev_priv,
+ &sarea_priv->boxes[i] );
+ }
+
+ radeon_cp_dispatch_indirect( dev, buf, start, end );
+
+ i++;
+ } while ( i < sarea_priv->nbox );
+ }
+
+ if ( buf_priv->discard ) {
+ buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+ /* Emit the vertex buffer age */
+ BEGIN_RING( 2 );
+ RADEON_DISPATCH_AGE( buf_priv->age );
+ ADVANCE_RING();
+
+ buf->pending = 1;
+ /* FIXME: Check dispatched field */
+ buf_priv->dispatched = 0;
+ }
+
+ dev_priv->sarea_priv->last_dispatch++;
+
+ sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS;
+ sarea_priv->nbox = 0;
+}
+
+static int radeon_cp_dispatch_blit( drm_device_t *dev,
+ drm_radeon_blit_t *blit )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ u32 format;
+ u32 *data;
+ int dword_shift, dwords;
+ RING_LOCALS;
+ DRM_DEBUG( "blit: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ blit->offset >> 10, blit->pitch, blit->format,
+ blit->x, blit->y, blit->width, blit->height );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ /* The compiler won't optimize away a division by a variable,
+ * even if the only legal values are powers of two. Thus, we'll
+ * use a shift instead.
+ */
+ switch ( blit->format ) {
+ case RADEON_TXF_32BPP_ARGB8888:
+ case RADEON_TXF_32BPP_RGBA8888:
+ format = RADEON_COLOR_FORMAT_ARGB8888;
+ dword_shift = 0;
+ break;
+ case RADEON_TXF_16BPP_AI88:
+ case RADEON_TXF_16BPP_ARGB1555:
+ case RADEON_TXF_16BPP_RGB565:
+ case RADEON_TXF_16BPP_ARGB4444:
+ format = RADEON_COLOR_FORMAT_RGB565;
+ dword_shift = 1;
+ break;
+ case RADEON_TXF_8BPP_I:
+ case RADEON_TXF_8BPP_RGB332:
+ format = RADEON_COLOR_FORMAT_CI8;
+ dword_shift = 2;
+ break;
+ default:
+ DRM_ERROR( "invalid blit format %d\n", blit->format );
+ return -EINVAL;
+ }
+
+ /* Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
+ */
+ BEGIN_RING( 4 );
+
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+
+ /* Dispatch the indirect buffer.
+ */
+ buf = dma->buflist[blit->idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", blit->idx );
+ return -EINVAL;
+ }
+
+ buf_priv->discard = 1;
+
+ dwords = (blit->width * blit->height) >> dword_shift;
+ if ( !dwords ) dwords = 1;
+
+ data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
+
+ data[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+ data[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_HOST_DATA |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+
+ data[2] = (blit->pitch << 22) | (blit->offset >> 10);
+ data[3] = 0xffffffff;
+ data[4] = 0xffffffff;
+ data[5] = (blit->y << 16) | blit->x;
+ data[6] = (blit->height << 16) | blit->width;
+ data[7] = dwords;
+
+ buf->used = (dwords + 8) * sizeof(u32);
+
+ radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+
+ /* Flush the pixel cache after the blit completes. This ensures
+ * the texture data is written out to memory before rendering
+ * continues.
+ */
+ BEGIN_RING( 4 );
+
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ radeon_update_ring_snapshot( dev_priv );
+
+ BEGIN_RING( 35 );
+
+ OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
+ OUT_RING( 0x00000000 );
+
+ OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
+ for ( i = 0 ; i < 32 ; i++ ) {
+ OUT_RING( stipple[i] );
+ }
+
+ ADVANCE_RING();
+}
+
+
+/* ================================================================
+ * IOCTL functions
+ */
+
+int radeon_cp_clear( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_clear_t clear;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &clear, (drm_radeon_clear_t *) arg,
+ sizeof(clear) ) )
+ return -EFAULT;
+
+ if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ radeon_cp_dispatch_clear( dev, &clear );
+
+ return 0;
+}
+
+int radeon_cp_swap( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ if ( !dev_priv->page_flipping ) {
+ radeon_cp_dispatch_swap( dev );
+ dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
+ RADEON_UPLOAD_MASKS);
+ } else {
+ radeon_cp_dispatch_flip( dev );
+ }
+
+ return 0;
+}
+
+int radeon_cp_vertex( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_radeon_vertex_t vertex;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( !dev_priv || dev_priv->is_pci ) {
+ DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg,
+ sizeof(vertex) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n",
+ __FUNCTION__, current->pid,
+ vertex.idx, vertex.count, vertex.discard );
+
+ if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
+ DRM_ERROR( "buffer index %d (of %d max)\n",
+ vertex.idx, dma->buf_count - 1 );
+ return -EINVAL;
+ }
+ if ( vertex.prim < 0 ||
+ vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
+ DRM_ERROR( "buffer prim %d\n", vertex.prim );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ buf = dma->buflist[vertex.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
+ return -EINVAL;
+ }
+
+ buf->used = vertex.count;
+ buf_priv->prim = vertex.prim;
+ buf_priv->discard = vertex.discard;
+
+ radeon_cp_dispatch_vertex( dev, buf );
+
+ return 0;
+}
+
+int radeon_cp_indices( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_radeon_indices_t elts;
+ int count;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( !dev_priv || dev_priv->is_pci ) {
+ DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &elts, (drm_radeon_indices_t *)arg,
+ sizeof(elts) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "%s: pid=%d index=%d start=%d end=%d discard=%d\n",
+ __FUNCTION__, current->pid,
+ elts.idx, elts.start, elts.end, elts.discard );
+
+ if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
+ DRM_ERROR( "buffer index %d (of %d max)\n",
+ elts.idx, dma->buf_count - 1 );
+ return -EINVAL;
+ }
+ if ( elts.prim < 0 ||
+ elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
+ DRM_ERROR( "buffer prim %d\n", elts.prim );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ buf = dma->buflist[elts.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", elts.idx );
+ return -EINVAL;
+ }
+
+ count = (elts.end - elts.start) / sizeof(u16);
+ elts.start -= RADEON_INDEX_PRIM_OFFSET;
+
+ if ( elts.start & 0x7 ) {
+ DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
+ return -EINVAL;
+ }
+ if ( elts.start < buf->used ) {
+ DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
+ return -EINVAL;
+ }
+
+ buf->used = elts.end;
+ buf_priv->prim = elts.prim;
+ buf_priv->discard = elts.discard;
+
+ radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count );
+
+ return 0;
+}
+
+int radeon_cp_blit( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_blit_t blit;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &blit, (drm_radeon_blit_t *)arg,
+ sizeof(blit) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "%s: pid=%d index=%d\n",
+ __FUNCTION__, current->pid, blit.idx );
+
+ if ( blit.idx < 0 || blit.idx > dma->buf_count ) {
+ DRM_ERROR( "sending %d buffers (of %d max)\n",
+ blit.idx, dma->buf_count );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ return radeon_cp_dispatch_blit( dev, &blit );
+}
+
+int radeon_cp_stipple( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_stipple_t stipple;
+ u32 mask[32];
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg,
+ sizeof(stipple) ) )
+ return -EFAULT;
+
+ if ( copy_from_user( &mask, stipple.mask,
+ 32 * sizeof(u32) ) )
+ return -EFAULT;
+
+ radeon_cp_dispatch_stipple( dev, mask );
+
+ return 0;
+}
+
+int radeon_cp_indirect( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_radeon_indirect_t indirect;
+ RING_LOCALS;
+
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+ if ( !dev_priv || dev_priv->is_pci ) {
+ DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &indirect, (drm_radeon_indirect_t *)arg,
+ sizeof(indirect) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
+ indirect.idx, indirect.start,
+ indirect.end, indirect.discard );
+
+ if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
+ DRM_ERROR( "buffer index %d (of %d max)\n",
+ indirect.idx, dma->buf_count - 1 );
+ return -EINVAL;
+ }
+
+ buf = dma->buflist[indirect.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
+ return -EINVAL;
+ }
+
+ if ( indirect.start < buf->used ) {
+ DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
+ indirect.start, buf->used );
+ return -EINVAL;
+ }
+
+ VB_AGE_CHECK_WITH_RET( dev_priv );
+
+ buf->used = indirect.end;
+ buf_priv->discard = indirect.discard;
+
+ /* Wait for the 3D stream to idle before the indirect buffer
+ * containing 2D acceleration commands is processed.
+ */
+ BEGIN_RING( 2 );
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ ADVANCE_RING();
+
+ /* Dispatch the indirect buffer full of commands from the
+ * X server. This is insecure and is thus only available to
+ * privileged clients.
+ */
+ radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
+
+ return 0;
+}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 4c141d668..e4edb9f07 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -70,7 +70,6 @@ extern int psaux_init(void);
extern void gfx_register(void);
#endif
extern void streamable_init(void);
-extern int rtc_sun_init(void); /* Combines MK48T02 and MK48T08 */
extern int rtc_DP8570A_init(void);
extern int rtc_MK48T08_init(void);
extern int ds1286_init(void);
@@ -259,9 +258,6 @@ int __init misc_init(void)
#ifdef CONFIG_BVME6000
rtc_DP8570A_init();
#endif
-#if defined(CONFIG_SUN_MOSTEK_RTC)
- rtc_sun_init();
-#endif
#ifdef CONFIG_SGI_DS1286
ds1286_init();
#endif
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 6f4ebe44c..c2ae319af 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -86,6 +86,15 @@ static inline void free_buf(unsigned char *buf)
free_page((unsigned long) buf);
}
+static inline void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
+{
+ if (tty->read_cnt < N_TTY_BUF_SIZE) {
+ tty->read_buf[tty->read_head] = c;
+ tty->read_head = (tty->read_head + 1) & (N_TTY_BUF_SIZE-1);
+ tty->read_cnt++;
+ }
+}
+
static inline void put_tty_queue(unsigned char c, struct tty_struct *tty)
{
unsigned long flags;
@@ -94,11 +103,7 @@ static inline void put_tty_queue(unsigned char c, struct tty_struct *tty)
* Why didn't anyone see this one comming? --AJK
*/
spin_lock_irqsave(&tty->read_lock, flags);
- if (tty->read_cnt < N_TTY_BUF_SIZE) {
- tty->read_buf[tty->read_head] = c;
- tty->read_head = (tty->read_head + 1) & (N_TTY_BUF_SIZE-1);
- tty->read_cnt++;
- }
+ put_tty_queue_nolock(c, tty);
spin_unlock_irqrestore(&tty->read_lock, flags);
}
@@ -499,6 +504,8 @@ static inline void n_tty_receive_parity_error(struct tty_struct *tty,
static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
+ unsigned long flags;
+
if (tty->raw) {
put_tty_queue(c, tty);
return;
@@ -651,10 +658,12 @@ send_signal:
put_tty_queue(c, tty);
handle_newline:
+ spin_lock_irqsave(&tty->read_lock, flags);
set_bit(tty->read_head, &tty->read_flags);
- put_tty_queue(c, tty);
+ put_tty_queue_nolock(c, tty);
tty->canon_head = tty->read_head;
tty->canon_data++;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1055,12 +1064,6 @@ do_it_again:
tty->read_tail = ((tty->read_tail+1) &
(N_TTY_BUF_SIZE-1));
tty->read_cnt--;
- spin_unlock_irqrestore(&tty->read_lock, flags);
-
- if (!eol || (c != __DISABLED_CHAR)) {
- put_user(c, b++);
- nr--;
- }
if (eol) {
/* this test should be redundant:
* we shouldn't be reading data if
@@ -1068,8 +1071,15 @@ do_it_again:
*/
if (--tty->canon_data < 0)
tty->canon_data = 0;
- break;
}
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+
+ if (!eol || (c != __DISABLED_CHAR)) {
+ put_user(c, b++);
+ nr--;
+ }
+ if (eol)
+ break;
}
} else {
int uncopied;
diff --git a/drivers/i2o/i2o_block.c b/drivers/i2o/i2o_block.c
index 8389039d6..857b32a7e 100644
--- a/drivers/i2o/i2o_block.c
+++ b/drivers/i2o/i2o_block.c
@@ -392,7 +392,6 @@ static inline int i2ob_new_segment(request_queue_t *q, struct request *req,
if (req->nr_segments < max_segments) {
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
return 0;
@@ -421,22 +420,16 @@ static int i2ob_merge_requests(request_queue_t *q,
{
int max_segments = i2ob_dev[MINOR(req->rq_dev)].max_segments;
int total_segments = req->nr_segments + next->nr_segments;
- int same_segment;
if (__max_segments < max_segments)
max_segments = __max_segments;
- same_segment = 0;
if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
- {
total_segments--;
- same_segment = 1;
- }
if (total_segments > max_segments)
return 0;
- q->elevator.nr_segments -= same_segment;
req->nr_segments = total_segments;
return 1;
}
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 62e2437e5..38cde0b40 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -55,6 +55,15 @@ const char *bad_ata100_5[] = {
};
const char *bad_ata66_4[] = {
+ "IBM-DTLA-307075",
+ "IBM-DTLA-307060",
+ "IBM-DTLA-307045",
+ "IBM-DTLA-307030",
+ "IBM-DTLA-307020",
+ "IBM-DTLA-307015",
+ "IBM-DTLA-305040",
+ "IBM-DTLA-305030",
+ "IBM-DTLA-305020",
"WDC AC310200R",
NULL
};
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 3a6a9e8d9..433951e95 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -288,6 +288,9 @@
* 4.59 Aug 11, 2000 - Fix changer problem in cdrom_read_toc, we weren't
* correctly sensing a disc change.
* - Rearranged some code
+ * - Use extended sense on drives that support it for
+ * correctly reporting tray status -- from
+ * Michael D Johnson <johnsom@orst.edu>
*
*************************************************************************/
@@ -759,16 +762,13 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
* changed 5 parameters to 3 for dvd-ram
* struct packet_command *pc; now packet_command_t *pc;
*/
-#undef CLASSIC_PACKET_STRUCT
static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
struct packet_command *pc,
ide_handler_t *handler)
{
-#ifdef CLASSIC_PACKET_STRUCT
unsigned char *cmd_buf = pc->c;
int cmd_len = sizeof(pc->c);
unsigned int timeout = pc->timeout;
-#endif
ide_startstop_t startstop;
if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
@@ -786,22 +786,10 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
}
/* Arm the interrupt handler. */
-#ifdef CLASSIC_PACKET_STRUCT
- /* Arm the interrupt handler. */
ide_set_handler (drive, handler, timeout, cdrom_timer_expiry);
/* Send the command to the device. */
atapi_output_bytes (drive, cmd_buf, cmd_len);
-#else /* !CLASSIC_PACKET_STRUCT */
- /* Arm the interrupt handler. */
-// ide_set_handler (drive, handler, (unsigned int) pc->timeout, cdrom_timer_expiry);
- ide_set_handler (drive, handler, pc->timeout, cdrom_timer_expiry);
-
- /* Send the command to the device. */
-// atapi_output_bytes (drive, (void *)pc->c, (unsigned int) sizeof(pc->c));
- atapi_output_bytes (drive, pc->c, sizeof(pc->c));
-#endif /* CLASSIC_PACKET_STRUCT */
-
return ide_started;
}
@@ -1884,9 +1872,6 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
If it is, just return. */
(void) cdrom_check_status(drive, sense);
- if (CDROM_STATE_FLAGS(drive)->toc_valid)
- return 0;
-
/* First read just the header, so we know how long the TOC is. */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
sizeof(struct atapi_toc_header), sense);
@@ -2324,11 +2309,17 @@ int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr)
sense.ascq == 0x04)
return CDS_DISC_OK;
+
+ /*
+ * If not using Mt Fuji extended media tray reports,
+ * just return TRAY_OPEN since ATAPI doesn't provide
+ * any other way to detect this...
+ */
if (sense.sense_key == NOT_READY) {
- /* ATAPI doesn't have anything that can help
- us decide whether the drive is really
- emtpy or the tray is just open. irk. */
- return CDS_TRAY_OPEN;
+ if (sense.asc == 0x3a && (!sense.ascq||sense.ascq == 1))
+ return CDS_NO_DISC;
+ else
+ return CDS_TRAY_OPEN;
}
return CDS_DRIVE_NOT_READY;
@@ -2597,7 +2588,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
printk (" DVD%s%s",
(CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "",
- (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
+ (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw)
printk (" CD%s%s",
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7caafab9b..7158d3461 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -226,6 +226,9 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
unsigned char *virt_addr = bh->b_data;
unsigned int size = bh->b_size;
+ if (nents >= PRD_ENTRIES)
+ return 0;
+
while ((bh = bh->b_reqnext) != NULL) {
if ((virt_addr + size) != (unsigned char *) bh->b_data)
break;
@@ -259,6 +262,9 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq);
+ if (!i)
+ return 0;
+
sg = HWIF(drive)->sg_table;
while (i && sg_dma_len(sg)) {
u32 cur_addr;
@@ -274,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
*/
while (cur_len) {
- if (++count >= PRD_ENTRIES) {
+ if (count++ >= PRD_ENTRIES) {
printk("%s: DMA table too small\n", drive->name);
pci_unmap_sg(HWIF(drive)->pci_dev,
HWIF(drive)->sg_table,
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index decbc9f8d..64d9121c8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -134,7 +134,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
break;
}
#endif
- printk ("CDROM");
+ printk ("CD/DVD-ROM");
break;
case ide_tape:
printk ("TAPE");
@@ -761,9 +761,10 @@ static void init_gendisk (ide_hwif_t *hwif)
for (unit = 0; unit < minors; ++unit) {
*bs++ = BLOCK_SIZE;
#ifdef CONFIG_BLK_DEV_PDC4030
- *max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : MAX_SECTORS);
+ *max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : 256);
#else
- *max_sect++ = MAX_SECTORS;
+ /* IDE can do up to 128K per request. */
+ *max_sect++ = 256;
#endif
*max_ra++ = MAX_READAHEAD;
}
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 3ea1e261e..356e7a4cc 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -602,7 +602,9 @@ void __init ide_init_via82cxxx(ide_hwif_t *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->dmaproc = &via82cxxx_dmaproc;
+#ifdef CONFIG_IDEDMA_AUTO
hwif->autodma = 1;
+#endif
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
}
diff --git a/drivers/isdn/hisax/Makefile b/drivers/isdn/hisax/Makefile
index 66f6b5d4a..72d990134 100644
--- a/drivers/isdn/hisax/Makefile
+++ b/drivers/isdn/hisax/Makefile
@@ -34,8 +34,8 @@ hisax-objs-$(CONFIG_HISAX_DIEHLDIVA) += diva.o isac.o arcofi.o hscx.o
hisax-objs-$(CONFIG_HISAX_ASUSCOM) += asuscom.o isac.o arcofi.o hscx.o
hisax-objs-$(CONFIG_HISAX_TELEINT) += teleint.o isac.o arcofi.o hfc_2bs0.o
hisax-objs-$(CONFIG_HISAX_SEDLBAUER) += sedlbauer.o isac.o arcofi.o hscx.o isar.o
-hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hfc_2bs0.o
-hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hfc_2bs0.o
+hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hscx.o
+hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hscx.o
hisax-objs-$(CONFIG_HISAX_NETJET) += nj_s.o netjet.o isac.o arcofi.o
hisax-objs-$(CONFIG_HISAX_NETJET_U) += nj_u.o netjet.o icc.o
hisax-objs-$(CONFIG_HISAX_HFCS) += hfcscard.o hfc_2bds0.o
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 0b60e89b3..6d0366cf2 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1708,8 +1708,8 @@ int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *
}
static struct pci_device_id hisax_pci_tbl[] __initdata = {
-#ifdef CONFIG_HISAX_FRTIZPCI
- {PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_FRITZ, PCI_ANY_ID, PCI_ANY_ID},
+#ifdef CONFIG_HISAX_FRITZPCI
+ {PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID},
#endif
#ifdef CONFIG_HISAX_DIEHLDIVA
{PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20, PCI_ANY_ID, PCI_ANY_ID},
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index b484635f1..1c2ab1042 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -566,7 +566,7 @@ l3_msg(struct PStack *st, int pr, void *arg)
} else {
struct sk_buff *skb = arg;
- skb_queue_head(&st->l3.squeue, skb);
+ skb_queue_tail(&st->l3.squeue, skb);
FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
}
break;
diff --git a/drivers/isdn/hisax/md5sums.asc b/drivers/isdn/hisax/md5sums.asc
index 4cc1f8b83..5ffd61db0 100644
--- a/drivers/isdn/hisax/md5sums.asc
+++ b/drivers/isdn/hisax/md5sums.asc
@@ -10,7 +10,7 @@
ca7bd9bac39203f3074f3f093948cc3c isac.c
a2ad619fd404b3149099a2984de9d23c isdnl1.c
d2a78e407f3d94876deac160c6f9aae6 isdnl2.c
-a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c
+e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c
afb5f2f4ac296d6de45c856993b161e1 tei.c
00023e2a482cb86a26ea870577ade5d6 callc.c
a1834e9b2ec068440cff2e899eff4710 cert.c
@@ -25,9 +25,9 @@ a1834e9b2ec068440cff2e899eff4710 cert.c
Version: 2.6.3i
Charset: noconv
-iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
-hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
-r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
-Ak4xS1ByqsM=
-=lsIJ
+iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs
+GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe
+WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc
+i5VH88jPfUM=
+=qc+J
-----END PGP SIGNATURE-----
diff --git a/drivers/isdn/isdn_common.c b/drivers/isdn/isdn_common.c
index 7a5074c79..f4c1d429d 100644
--- a/drivers/isdn/isdn_common.c
+++ b/drivers/isdn/isdn_common.c
@@ -1512,7 +1512,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
int i;
if ((ret = verify_area(VERIFY_READ, (void *) arg,
- (ISDN_MODEM_NUMREG + ISDN_MSNLEN)
+ (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN)
* ISDN_MAX_CHANNELS)))
return ret;
@@ -1521,6 +1521,9 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
ISDN_MODEM_NUMREG))
return -EFAULT;
p += ISDN_MODEM_NUMREG;
+ if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN))
+ return -EFAULT;
+ p += ISDN_LMSNLEN;
if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN))
return -EFAULT;
p += ISDN_MSNLEN;
diff --git a/drivers/isdn/isdn_net.c b/drivers/isdn/isdn_net.c
index 757c59ca9..672dd8c5c 100644
--- a/drivers/isdn/isdn_net.c
+++ b/drivers/isdn/isdn_net.c
@@ -2325,6 +2325,7 @@ isdn_net_new(char *name, struct net_device *master)
memset(netdev, 0, sizeof(isdn_net_dev));
if (!(netdev->local = (isdn_net_local *) kmalloc(sizeof(isdn_net_local), GFP_KERNEL))) {
printk(KERN_WARNING "isdn_net: Could not allocate device locals\n");
+ kfree(netdev);
return NULL;
}
memset(netdev->local, 0, sizeof(isdn_net_local));
diff --git a/drivers/isdn/isdn_ppp.c b/drivers/isdn/isdn_ppp.c
index 3bebcdfcf..f1cbb8ab7 100644
--- a/drivers/isdn/isdn_ppp.c
+++ b/drivers/isdn/isdn_ppp.c
@@ -1131,9 +1131,9 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
proto = PPP_IPX; /* untested */
break;
default:
- dev_kfree_skb(skb);
printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n",
skb->protocol);
+ dev_kfree_skb(skb);
return 0;
}
@@ -2310,8 +2310,7 @@ static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,struct ippp_struc
rsparm.data = rsdata;
rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
- /* !!!HACK,HACK,HACK!!! 2048 is only assumed */
- skb_out = dev_alloc_skb(2048);
+ skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
len = ipc->decompress(stat, skb, skb_out, &rsparm);
kfree_skb(skb);
if (len <= 0) {
@@ -2332,14 +2331,9 @@ static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,struct ippp_struc
kfree_skb(skb_out);
return NULL;
}
-
- if (isdn_ppp_skip_ac(ri, skb) < 0) {
- kfree_skb(skb);
- return NULL;
- }
- *proto = isdn_ppp_strip_proto(skb);
+ *proto = isdn_ppp_strip_proto(skb_out);
if (*proto < 0) {
- kfree_skb(skb);
+ kfree_skb(skb_out);
return NULL;
}
return skb_out;
diff --git a/drivers/isdn/isdn_v110.c b/drivers/isdn/isdn_v110.c
index 08c54a866..5dd9f2c58 100644
--- a/drivers/isdn/isdn_v110.c
+++ b/drivers/isdn/isdn_v110.c
@@ -102,7 +102,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
int i;
isdn_v110_stream *v;
- if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_KERNEL)) == NULL)
+ if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
return NULL;
memset(v, 0, sizeof(isdn_v110_stream));
v->key = key;
@@ -134,7 +134,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
v->b = 0;
v->skbres = hdrlen;
v->maxsize = maxsize - hdrlen;
- if ((v->encodebuf = kmalloc(maxsize, GFP_KERNEL)) == NULL) {
+ if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
kfree(v);
return NULL;
}
diff --git a/drivers/md/Config.in b/drivers/md/Config.in
index 565055a68..30438c77f 100644
--- a/drivers/md/Config.in
+++ b/drivers/md/Config.in
@@ -11,12 +11,7 @@ dep_tristate ' Linear (append) mode' CONFIG_MD_LINEAR $CONFIG_BLK_DEV_MD
dep_tristate ' RAID-0 (striping) mode' CONFIG_MD_RAID0 $CONFIG_BLK_DEV_MD
dep_tristate ' RAID-1 (mirroring) mode' CONFIG_MD_RAID1 $CONFIG_BLK_DEV_MD
dep_tristate ' RAID-4/RAID-5 mode' CONFIG_MD_RAID5 $CONFIG_BLK_DEV_MD
-if [ "$CONFIG_MD_LINEAR" = "y" -o "$CONFIG_MD_RAID0" = "y" -o "$CONFIG_MD_RAID1" = "y" -o "$CONFIG_MD_RAID5" = "y" ]; then
- bool ' Boot support' CONFIG_MD_BOOT
- bool ' Auto Detect support' CONFIG_AUTODETECT_RAID
-fi
dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
-dep_mbool ' LVM information in proc filesystem' CONFIG_LVM_PROC_FS $CONFIG_BLK_DEV_LVM
endmenu
diff --git a/drivers/md/lvm-snap.c b/drivers/md/lvm-snap.c
index 980694ee3..e28ffdbe9 100644
--- a/drivers/md/lvm-snap.c
+++ b/drivers/md/lvm-snap.c
@@ -21,6 +21,14 @@
*
*/
+/*
+ * Changelog
+ *
+ * 05/07/2000 - implemented persistent snapshot support
+ * 23/11/2000 - used cpu_to_le64 rather than my own macro
+ *
+ */
+
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
@@ -30,7 +38,9 @@
#include <linux/lvm.h>
-static char *lvm_snap_version __attribute__ ((unused)) = "LVM 0.9 snapshot code (13/11/2000)\n";
+#include "lvm-snap.h"
+
+static char *lvm_snap_version __attribute__ ((unused)) = "LVM 0.9.1_beta2 snapshot code (18/01/2001)\n";
extern const char *const lvm_name;
extern int lvm_blocksizes[];
@@ -214,10 +224,10 @@ void lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
memset(lv_COW_table, 0, blksize_snap);
for ( ; is < lv_snap->lv_remap_ptr; is++, id++) {
/* store new COW_table entry */
- lv_COW_table[id].pv_org_number = LVM_TO_DISK64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[is].rdev_org));
- lv_COW_table[id].pv_org_rsector = LVM_TO_DISK64(lv_snap->lv_block_exception[is].rsector_org);
- lv_COW_table[id].pv_snap_number = LVM_TO_DISK64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[is].rdev_new));
- lv_COW_table[id].pv_snap_rsector = LVM_TO_DISK64(lv_snap->lv_block_exception[is].rsector_new);
+ lv_COW_table[id].pv_org_number = cpu_to_le64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[is].rdev_org));
+ lv_COW_table[id].pv_org_rsector = cpu_to_le64(lv_snap->lv_block_exception[is].rsector_org);
+ lv_COW_table[id].pv_snap_number = cpu_to_le64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[is].rdev_new));
+ lv_COW_table[id].pv_snap_rsector = cpu_to_le64(lv_snap->lv_block_exception[is].rsector_new);
}
}
@@ -227,8 +237,7 @@ void lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
*
*/
-int lvm_write_COW_table_block(vg_t * vg,
- lv_t * lv_snap)
+int lvm_write_COW_table_block(vg_t * vg, lv_t * lv_snap)
{
int blksize_snap;
int end_of_table;
@@ -268,10 +277,10 @@ int lvm_write_COW_table_block(vg_t * vg,
blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10);
/* store new COW_table entry */
- lv_COW_table[idx_COW_table].pv_org_number = LVM_TO_DISK64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[idx].rdev_org));
- lv_COW_table[idx_COW_table].pv_org_rsector = LVM_TO_DISK64(lv_snap->lv_block_exception[idx].rsector_org);
- lv_COW_table[idx_COW_table].pv_snap_number = LVM_TO_DISK64(lvm_pv_get_number(vg, snap_phys_dev));
- lv_COW_table[idx_COW_table].pv_snap_rsector = LVM_TO_DISK64(lv_snap->lv_block_exception[idx].rsector_new);
+ lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(lvm_pv_get_number(vg, lv_snap->lv_block_exception[idx].rdev_org));
+ lv_COW_table[idx_COW_table].pv_org_rsector = cpu_to_le64(lv_snap->lv_block_exception[idx].rsector_org);
+ lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(lvm_pv_get_number(vg, snap_phys_dev));
+ lv_COW_table[idx_COW_table].pv_snap_rsector = cpu_to_le64(lv_snap->lv_block_exception[idx].rsector_new);
length_tmp = iobuf->length;
iobuf->length = blksize_snap;
@@ -568,6 +577,7 @@ void lvm_snapshot_release(lv_t * lv)
}
if (lv->lv_iobuf)
{
+ kiobuf_wait_for_io(lv->lv_iobuf);
unmap_kiobuf(lv->lv_iobuf);
free_kiovec(1, &lv->lv_iobuf);
lv->lv_iobuf = NULL;
diff --git a/drivers/md/lvm-snap.h b/drivers/md/lvm-snap.h
new file mode 100644
index 000000000..23538a1b7
--- /dev/null
+++ b/drivers/md/lvm-snap.h
@@ -0,0 +1,47 @@
+/*
+ * kernel/lvm-snap.h
+ *
+ * Copyright (C) 2001 Sistina Software
+ *
+ *
+ * LVM driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * LVM driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+/*
+ * Changelog
+ *
+ * 05/01/2001:Joe Thornber - Factored this file out of lvm.c
+ *
+ */
+
+#ifndef LVM_SNAP_H
+#define LVM_SNAP_H
+
+/* external snapshot calls */
+extern inline int lvm_get_blksize(kdev_t);
+extern int lvm_snapshot_alloc(lv_t *);
+extern void lvm_snapshot_fill_COW_page(vg_t *, lv_t *);
+extern int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, lv_t *);
+extern int lvm_snapshot_remap_block(kdev_t *, ulong *, ulong, lv_t *);
+extern void lvm_snapshot_release(lv_t *);
+extern int lvm_write_COW_table_block(vg_t *, lv_t *);
+extern inline void lvm_hash_link(lv_block_exception_t *,
+ kdev_t, ulong, lv_t *);
+extern int lvm_snapshot_alloc_hash_table(lv_t *);
+extern void lvm_drop_snapshot(lv_t *, const char *);
+
+#endif
diff --git a/drivers/md/lvm.c b/drivers/md/lvm.c
index ea276c57c..a4ca05e90 100644
--- a/drivers/md/lvm.c
+++ b/drivers/md/lvm.c
@@ -7,22 +7,23 @@
* April-May,July-August,November 1998
* January-March,May,July,September,October 1999
* January,February,July,September-November 2000
+ * January 2001
*
*
* LVM driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
+ *
* LVM driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * Boston, MA 02111-1307, USA.
*
*/
@@ -138,13 +139,21 @@
* 01/11/2000 - added memory information on hash tables to
* lvm_proc_get_global_info()
* 02/11/2000 - implemented /proc/lvm/ hierarchy
- * 07/12/2000 - make sure lvm_make_request_fn returns correct value - 0 or 1 - NeilBrown
+ * 22/11/2000 - changed lvm_do_create_proc_entry_of_pv () to work
+ * with devfs
+ * 26/11/2000 - corrected #ifdef locations for PROC_FS
+ * 28/11/2000 - fixed lvm_do_vg_extend() NULL pointer BUG
+ * - fixed lvm_do_create_proc_entry_of_pv() buffer tampering BUG
+ * 08/01/2001 - Removed conditional compiles related to PROC_FS,
+ * procfs is always supported now. (JT)
+ * 12/01/2001 - avoided flushing logical volume in case of shrinking
+ * because of unecessary overhead in case of heavy updates
*
*/
-static char *lvm_version = "LVM version 0.9 by Heinz Mauelshagen (13/11/2000)\n";
-static char *lvm_short_version = "version 0.9 (13/11/2000)";
+static char *lvm_version = "LVM version 0.9.1_beta2 by Heinz Mauelshagen (18/01/2001)\n";
+static char *lvm_short_version = "version 0.9.1_beta2 (18/01/2001)";
#define MAJOR_NR LVM_BLK_MAJOR
#define DEVICE_OFF(device)
@@ -190,6 +199,8 @@ static char *lvm_short_version = "version 0.9 (13/11/2000)";
#include <linux/errno.h>
#include <linux/lvm.h>
+#include "lvm-snap.h"
+
#define LVM_CORRECT_READ_AHEAD( a) \
if ( a < LVM_MIN_READ_AHEAD || \
a > LVM_MAX_READ_AHEAD) a = LVM_MAX_READ_AHEAD;
@@ -198,19 +209,28 @@ static char *lvm_short_version = "version 0.9 (13/11/2000)";
# define WRITEA WRITE
#endif
-/*
- * External function prototypes
- */
-#ifdef MODULE
-int init_module(void);
-void cleanup_module(void);
+/* debug macros */
+#ifdef DEBUG_IOCTL
+#define P_IOCTL(fmt, args...) printk(KERN_DEBUG "lvm ioctl: " fmt, ## args)
#else
-extern int lvm_init(void);
+#define P_IOCTL(fmt, args...)
#endif
-static void lvm_dummy_device_request(request_queue_t *);
-#define DEVICE_REQUEST lvm_dummy_device_request
+#ifdef DEBUG_MAP
+#define P_MAP(fmt, args...) printk(KERN_DEBUG "lvm map: " fmt, ## args)
+#else
+#define P_MAP(fmt, args...)
+#endif
+
+#ifdef DEBUG_KFREE
+#define P_KFREE(fmt, args...) printk(KERN_DEBUG "lvm kfree: " fmt, ## args)
+#else
+#define P_KFREE(fmt, args...)
+#endif
+/*
+ * External function prototypes
+ */
static int lvm_make_request_fn(request_queue_t*, int, struct buffer_head*);
static int lvm_blk_ioctl(struct inode *, struct file *, uint, ulong);
@@ -224,42 +244,29 @@ static int lvm_user_bmap(struct inode *, struct lv_bmap *);
static int lvm_chr_ioctl(struct inode *, struct file *, uint, ulong);
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
int lvm_proc_read_vg_info(char *, char **, off_t, int, int *, void *);
int lvm_proc_read_lv_info(char *, char **, off_t, int, int *, void *);
int lvm_proc_read_pv_info(char *, char **, off_t, int, int *, void *);
static int lvm_proc_get_global_info(char *, char **, off_t, int, int *, void *);
+
+void lvm_do_create_devfs_entry_of_vg ( vg_t *);
+
void lvm_do_create_proc_entry_of_vg ( vg_t *);
-inline void lvm_do_remove_proc_entry_of_vg ( vg_t *);
-inline void lvm_do_create_proc_entry_of_lv ( vg_t *, lv_t *);
-inline void lvm_do_remove_proc_entry_of_lv ( vg_t *, lv_t *);
-inline void lvm_do_create_proc_entry_of_pv ( vg_t *, pv_t *);
-inline void lvm_do_remove_proc_entry_of_pv ( vg_t *, pv_t *);
-#endif
+void lvm_do_remove_proc_entry_of_vg ( vg_t *);
+void lvm_do_create_proc_entry_of_lv ( vg_t *, lv_t *);
+void lvm_do_remove_proc_entry_of_lv ( vg_t *, lv_t *);
+void lvm_do_create_proc_entry_of_pv ( vg_t *, pv_t *);
+void lvm_do_remove_proc_entry_of_pv ( vg_t *, pv_t *);
-#ifdef LVM_HD_NAME
-void lvm_hd_name(char *, int);
-#endif
/* End external function prototypes */
/*
* Internal function prototypes
*/
+static void lvm_cleanup(void);
static void lvm_init_vars(void);
-/* external snapshot calls */
-extern inline int lvm_get_blksize(kdev_t);
-extern int lvm_snapshot_alloc(lv_t *);
-extern void lvm_snapshot_fill_COW_page(vg_t *, lv_t *);
-extern int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, lv_t *);
-extern int lvm_snapshot_remap_block(kdev_t *, ulong *, ulong, lv_t *);
-extern void lvm_snapshot_release(lv_t *);
-extern int lvm_write_COW_table_block(vg_t *, lv_t *);
-extern inline void lvm_hash_link(lv_block_exception_t *, kdev_t, ulong, lv_t *);
-extern int lvm_snapshot_alloc_hash_table(lv_t *);
-extern void lvm_drop_snapshot(lv_t *, char *);
-
#ifdef LVM_HD_NAME
extern void (*lvm_hd_name_ptr) (char *, int);
#endif
@@ -288,9 +295,9 @@ static int lvm_do_vg_reduce(vg_t *, void *);
static int lvm_do_vg_rename(vg_t *, void *);
static int lvm_do_vg_remove(int);
static void lvm_geninit(struct gendisk *);
-#ifdef LVM_GET_INODE
-static struct inode *lvm_get_inode(int);
-void lvm_clear_inode(struct inode *);
+static char *lvm_show_uuid ( char *);
+#ifdef LVM_HD_NAME
+void lvm_hd_name(char *, int);
#endif
/* END Internal function prototypes */
@@ -298,12 +305,10 @@ void lvm_clear_inode(struct inode *);
/* volume group descriptor area pointers */
static vg_t *vg[ABS_MAX_VG];
-#ifdef CONFIG_DEVFS_FS
static devfs_handle_t lvm_devfs_handle;
static devfs_handle_t vg_devfs_handle[MAX_VG];
static devfs_handle_t ch_devfs_handle[MAX_VG];
static devfs_handle_t lv_devfs_handle[MAX_LV];
-#endif
static pv_t *pvp = NULL;
static lv_t *lvp = NULL;
@@ -340,18 +345,15 @@ static int loadtime = 0;
static uint vg_count = 0;
static long lvm_chr_open_count = 0;
static ushort lvm_iop_version = LVM_DRIVER_IOP_VERSION;
-static DECLARE_WAIT_QUEUE_HEAD(lvm_snapshot_wait);
static DECLARE_WAIT_QUEUE_HEAD(lvm_wait);
static DECLARE_WAIT_QUEUE_HEAD(lvm_map_wait);
static spinlock_t lvm_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t lvm_snapshot_lock = SPIN_LOCK_UNLOCKED;
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
static struct proc_dir_entry *lvm_proc_dir = NULL;
static struct proc_dir_entry *lvm_proc_vg_subdir = NULL;
struct proc_dir_entry *pde = NULL;
-#endif
static struct file_operations lvm_chr_fops =
{
@@ -360,7 +362,7 @@ static struct file_operations lvm_chr_fops =
ioctl: lvm_chr_ioctl,
};
-#define BLOCK_DEVICE_OPERATIONS
+
/* block device operations structure needed for 2.3.38? and above */
static struct block_device_operations lvm_blk_dops =
{
@@ -391,22 +393,10 @@ static struct gendisk lvm_gendisk =
NULL, /* pointer to next gendisk struct (internal) */
};
-
-#ifdef MODULE
-/*
- * Module initialization...
- */
-int init_module(void)
-#else
/*
* Driver initialization...
*/
-#ifdef __initfunc
-__initfunc(int lvm_init(void))
-#else
-int __init lvm_init(void)
-#endif
-#endif /* #ifdef MODULE */
+int lvm_init(void)
{
struct gendisk *gendisk_ptr = NULL;
@@ -414,11 +404,7 @@ int __init lvm_init(void)
printk(KERN_ERR "%s -- register_chrdev failed\n", lvm_name);
return -EIO;
}
-#ifdef BLOCK_DEVICE_OPERATIONS
if (register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_dops) < 0)
-#else
- if (register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_fops) < 0)
-#endif
{
printk("%s -- register_blkdev failed\n", lvm_name);
if (unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
@@ -426,21 +412,17 @@ int __init lvm_init(void)
return -EIO;
}
-#ifdef CONFIG_DEVFS_FS
lvm_devfs_handle = devfs_register(
0 , "lvm", 0, 0, LVM_CHAR_MAJOR,
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
&lvm_chr_fops, NULL);
-#endif
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_proc_dir = create_proc_entry (LVM_DIR, S_IFDIR, &proc_root);
if (lvm_proc_dir != NULL) {
lvm_proc_vg_subdir = create_proc_entry (LVM_VG_SUBDIR, S_IFDIR, lvm_proc_dir);
pde = create_proc_entry(LVM_GLOBAL, S_IFREG, lvm_proc_dir);
if ( pde != NULL) pde->read_proc = &lvm_proc_get_global_info;
}
-#endif
lvm_init_vars();
lvm_geninit(&lvm_gendisk);
@@ -464,9 +446,9 @@ int __init lvm_init(void)
lvm_hd_name_ptr = lvm_hd_name;
#endif
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), lvm_make_request_fn);
+
/* optional read root VGDA */
/*
if ( *rootvg != 0) vg_read_with_pv_and_lv ( rootvg, &vg);
@@ -483,20 +465,17 @@ int __init lvm_init(void)
lvm_version, lvm_name);
return 0;
-} /* init_module() / lvm_init() */
+} /* lvm_init() */
-#ifdef MODULE
/*
- * Module cleanup...
+ * cleanup...
*/
-void cleanup_module(void)
+static void lvm_cleanup(void)
{
struct gendisk *gendisk_ptr = NULL, *gendisk_ptr_prev = NULL;
-#ifdef CONFIG_DEVFS_FS
devfs_unregister (lvm_devfs_handle);
-#endif
if (unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0) {
printk(KERN_ERR "%s -- unregister_chrdev failed\n", lvm_name);
@@ -504,7 +483,7 @@ void cleanup_module(void)
if (unregister_blkdev(MAJOR_NR, lvm_name) < 0) {
printk(KERN_ERR "%s -- unregister_blkdev failed\n", lvm_name);
}
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
+
gendisk_ptr = gendisk_ptr_prev = gendisk_head;
while (gendisk_ptr != NULL) {
@@ -521,11 +500,9 @@ void cleanup_module(void)
blksize_size[MAJOR_NR] = NULL;
hardsect_size[MAJOR_NR] = NULL;
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
remove_proc_entry(LVM_GLOBAL, lvm_proc_dir);
remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir);
remove_proc_entry(LVM_DIR, &proc_root);
-#endif
#ifdef LVM_HD_NAME
/* reference from linux/drivers/block/genhd.c */
@@ -535,18 +512,13 @@ void cleanup_module(void)
printk(KERN_INFO "%s -- Module successfully deactivated\n", lvm_name);
return;
-} /* void cleanup_module() */
-#endif /* #ifdef MODULE */
+} /* lvm_cleanup() */
/*
* support function to initialize lvm variables
*/
-#ifdef __initfunc
-__initfunc(void lvm_init_vars(void))
-#else
void __init lvm_init_vars(void)
-#endif
{
int v;
@@ -626,13 +598,9 @@ static int lvm_chr_ioctl(struct inode *inode, struct file *file,
/* otherwise cc will complain about unused variables */
(void) lvm_lock;
-
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_chr_ioctl: command: 0x%X MINOR: %d "
- "VG#: %d mode: 0x%X\n",
- lvm_name, command, minor, VG_CHR(minor), file->f_mode);
-#endif
+ P_IOCTL("%s -- lvm_chr_ioctl: command: 0x%X MINOR: %d "
+ "VG#: %d mode: 0x%X\n",
+ lvm_name, command, minor, VG_CHR(minor), file->f_mode);
#ifdef LVM_TOTAL_RESET
if (lvm_reset_spindown > 0) return -EACCES;
@@ -890,14 +858,12 @@ static int lvm_blk_open(struct inode *inode, struct file *file)
if (lv_ptr->lv_status & LV_SPINDOWN) return -EPERM;
/* Check inactive LV and open for read/write */
- if (file->f_mode & O_RDWR) {
- if (!(lv_ptr->lv_status & LV_ACTIVE)) return -EPERM;
- if (!(lv_ptr->lv_access & LV_WRITE)) return -EACCES;
- }
+ if (!(lv_ptr->lv_status & LV_ACTIVE))
+ return -EPERM;
+ if (!(lv_ptr->lv_access & LV_WRITE) &&
+ (file->f_mode & FMODE_WRITE))
+ return -EACCES;
-#ifndef BLOCK_DEVICE_OPERATIONS
- file->f_op = &lvm_blk_fops;
-#endif
/* be sure to increment VG counter */
if (lv_ptr->lv_open == 0) vg_ptr->lv_open++;
@@ -930,24 +896,18 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
void *arg = (void *) a;
struct hd_geometry *hd = (struct hd_geometry *) a;
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl MINOR: %d command: 0x%X arg: %X "
- "VG#: %dl LV#: %d\n",
- lvm_name, minor, command, (ulong) arg,
- VG_BLK(minor), LV_BLK(minor));
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl MINOR: %d command: 0x%X arg: %X "
+ "VG#: %dl LV#: %d\n",
+ lvm_name, minor, command, (ulong) arg,
+ VG_BLK(minor), LV_BLK(minor));
switch (command) {
case BLKGETSIZE:
/* return device size */
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- BLKGETSIZE: %u\n",
- lvm_name, lv_ptr->lv_size);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- BLKGETSIZE: %u\n",
+ lvm_name, lv_ptr->lv_size);
if (put_user(lv_ptr->lv_size, (long *)arg))
- return -EFAULT;
+ return -EFAULT;
break;
@@ -955,10 +915,8 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
/* flush buffer cache */
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- BLKFLSBUF\n", lvm_name);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- BLKFLSBUF\n", lvm_name);
+
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
break;
@@ -968,11 +926,9 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
/* set read ahead for block device */
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- BLKRASET: %d sectors for %02X:%02X\n",
- lvm_name, (long) arg, MAJOR(inode->i_rdev), minor);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- BLKRASET: %d sectors for %02X:%02X\n",
+ lvm_name, (long) arg, MAJOR(inode->i_rdev), minor);
+
if ((long) arg < LVM_MIN_READ_AHEAD ||
(long) arg > LVM_MAX_READ_AHEAD)
return -EINVAL;
@@ -982,10 +938,7 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
case BLKRAGET:
/* get current read ahead setting */
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- BLKRAGET\n", lvm_name);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- BLKRAGET\n", lvm_name);
if (put_user(lv_ptr->lv_read_ahead, (long *)arg))
return -EFAULT;
break;
@@ -993,10 +946,7 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
case HDIO_GETGEO:
/* get disk geometry */
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", lvm_name);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", lvm_name);
if (hd == NULL)
return -EINVAL;
{
@@ -1016,11 +966,8 @@ static int lvm_blk_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
}
-#ifdef DEBUG_IOCTL
- printk(KERN_DEBUG
- "%s -- lvm_blk_ioctl -- cylinders: %d\n",
- lvm_name, lv_ptr->lv_size / heads / sectors);
-#endif
+ P_IOCTL("%s -- lvm_blk_ioctl -- cylinders: %d\n",
+ lvm_name, lv_ptr->lv_size / heads / sectors);
break;
@@ -1127,22 +1074,22 @@ static int lvm_user_bmap(struct inode *inode, struct lv_bmap *user_result)
struct buffer_head bh;
unsigned long block;
int err;
-
+
if (get_user(block, &user_result->lv_block))
- return -EFAULT;
-
+ return -EFAULT;
+
memset(&bh,0,sizeof bh);
bh.b_rsector = block;
bh.b_dev = bh.b_rdev = inode->i_dev;
bh.b_size = lvm_get_blksize(bh.b_dev);
if ((err=lvm_map(&bh, READ)) < 0) {
- printk("lvm map failed: %d\n", err);
- return -EINVAL;
+ printk("lvm map failed: %d\n", err);
+ return -EINVAL;
}
-
- return put_user( kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
+
+ return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
put_user(bh.b_rsector, &user_result->lv_block) ? -EFAULT : 0;
-}
+}
/*
@@ -1168,7 +1115,7 @@ int lvm_vg_info(vg_t *vg_ptr, char *buf) {
vg_ptr->pe_total,
vg_ptr->pe_allocated * vg_ptr->pe_size >> 1,
vg_ptr->pe_allocated,
- (vg_ptr->pe_total - vg_ptr->pe_allocated) *
+ (vg_ptr->pe_total - vg_ptr->pe_allocated) *
vg_ptr->pe_size >> 1,
vg_ptr->pe_total - vg_ptr->pe_allocated);
return sz;
@@ -1263,7 +1210,6 @@ int lvm_pv_info(pv_t *pv_ptr, char *buf) {
}
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
/*
* Support functions /proc-Filesystem
*/
@@ -1325,10 +1271,7 @@ static int lvm_proc_get_global_info(char *page, char **start, off_t pos, int cou
lv_block_exception_t_bytes *= sizeof(lv_block_exception_t);
if (buf != NULL) {
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG
- "%s -- vfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- vfree %d\n", lvm_name, __LINE__);
lock_kernel();
vfree(buf);
unlock_kernel();
@@ -1452,7 +1395,6 @@ static int lvm_proc_get_global_info(char *page, char **start, off_t pos, int cou
else
return count;
} /* lvm_proc_get_global_info() */
-#endif /* #if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS */
/*
@@ -1478,7 +1420,7 @@ int lvm_proc_read_vg_info(char *page, char **start, off_t off,
sz += sprintf ( page+sz, "PE size: %u\n", vg->pe_size / 2);
sz += sprintf ( page+sz, "PE total: %u\n", vg->pe_total);
sz += sprintf ( page+sz, "PE allocated: %u\n", vg->pe_allocated);
- sz += sprintf ( page+sz, "uuid: %s\n", vg->vg_uuid);
+ sz += sprintf ( page+sz, "uuid: %s\n", lvm_show_uuid(vg->vg_uuid));
return sz;
}
@@ -1525,7 +1467,7 @@ int lvm_proc_read_pv_info(char *page, char **start, off_t off,
sz += sprintf ( page+sz, "PE allocated: %u\n", pv->pe_allocated);
sz += sprintf ( page+sz, "device: %02u:%02u\n",
MAJOR(pv->pv_dev), MINOR(pv->pv_dev));
- sz += sprintf ( page+sz, "uuid: %s\n", pv->pv_uuid);
+ sz += sprintf ( page+sz, "uuid: %s\n", lvm_show_uuid(pv->pv_uuid));
return sz;
@@ -1565,15 +1507,13 @@ static int lvm_map(struct buffer_head *bh, int rw)
lvm_name, lv->lv_name);
return -1;
}
-#ifdef DEBUG_MAP
- printk(KERN_DEBUG
- "%s - lvm_map minor:%d *rdev: %02d:%02d *rsector: %lu "
- "size:%lu\n",
- lvm_name, minor,
- MAJOR(rdev_tmp),
- MINOR(rdev_tmp),
- rsector_tmp, size);
-#endif
+
+ P_MAP("%s - lvm_map minor:%d *rdev: %02d:%02d *rsector: %lu "
+ "size:%lu\n",
+ lvm_name, minor,
+ MAJOR(rdev_tmp),
+ MINOR(rdev_tmp),
+ rsector_tmp, size);
if (rsector_tmp + size > lv->lv_size) {
printk(KERN_ALERT
@@ -1595,15 +1535,13 @@ lvm_second_remap:
(rsector_tmp % vg_this->pe_size);
rdev_tmp = lv->lv_current_pe[index].dev;
-#ifdef DEBUG_MAP
- printk(KERN_DEBUG
- "lv_current_pe[%ld].pe: %ld rdev: %02d:%02d rsector:%ld\n",
+ P_MAP("lv_current_pe[%ld].pe: %ld rdev: %02d:%02d "
+ "rsector:%ld\n",
index,
lv->lv_current_pe[index].pe,
MAJOR(rdev_tmp),
MINOR(rdev_tmp),
rsector_tmp);
-#endif
/* striped mapping */
} else {
@@ -1624,9 +1562,7 @@ lvm_second_remap:
rdev_tmp = lv->lv_current_pe[index].dev;
}
-#ifdef DEBUG_MAP
- printk(KERN_DEBUG
- "lv_current_pe[%ld].pe: %ld rdev: %02d:%02d rsector:%ld\n"
+ P_MAP("lv_current_pe[%ld].pe: %ld rdev: %02d:%02d rsector:%ld\n"
"stripe_length: %ld stripe_index: %ld\n",
index,
lv->lv_current_pe[index].pe,
@@ -1635,7 +1571,6 @@ lvm_second_remap:
rsector_tmp,
stripe_length,
stripe_index);
-#endif
/* handle physical extents on the move */
if (pe_lock_req.lock == LOCK_PE) {
@@ -1659,6 +1594,8 @@ lvm_second_remap:
if (lv->lv_access & (LV_SNAPSHOT|LV_SNAPSHOT_ORG)) {
/* original logical volume */
if (lv->lv_access & LV_SNAPSHOT_ORG) {
+ /* Serializes the access to the lv_snapshot_next list */
+ down(&lv->lv_snapshot_sem);
if (rw == WRITE || rw == WRITEA)
{
lv_t *lv_ptr;
@@ -1669,7 +1606,8 @@ lvm_second_remap:
lv_ptr = lv_ptr->lv_snapshot_next) {
/* Check for inactive snapshot */
if (!(lv_ptr->lv_status & LV_ACTIVE)) continue;
- down(&lv->lv_snapshot_org->lv_snapshot_sem);
+ /* Serializes the COW with the accesses to the snapshot device */
+ down(&lv_ptr->lv_snapshot_sem);
/* do we still have exception storage for this snapshot free? */
if (lv_ptr->lv_block_exception != NULL) {
rdev_sav = rdev_tmp;
@@ -1690,9 +1628,10 @@ lvm_second_remap:
rdev_tmp = rdev_sav;
rsector_tmp = rsector_sav;
}
- up(&lv->lv_snapshot_org->lv_snapshot_sem);
+ up(&lv_ptr->lv_snapshot_sem);
}
}
+ up(&lv->lv_snapshot_sem);
} else {
/* remap snapshot logical volume */
down(&lv->lv_snapshot_sem);
@@ -1733,31 +1672,12 @@ void lvm_hd_name(char *buf, int minor)
/*
- * this one never should be called...
- */
-static void lvm_dummy_device_request(request_queue_t * t)
-{
- printk(KERN_EMERG
- "%s -- oops, got lvm request for %02d:%02d [sector: %lu]\n",
- lvm_name,
- MAJOR(CURRENT->rq_dev),
- MINOR(CURRENT->rq_dev),
- CURRENT->sector);
- return;
-}
-
-
-/*
* make request function
*/
static int lvm_make_request_fn(request_queue_t *q,
int rw,
- struct buffer_head *bh)
-{
- if (lvm_map(bh, rw)<0)
- return 0; /* failure, buffer_IO_error has been called, don't recurse */
- else
- return 1; /* all ok, mapping done, call lower level driver */
+ struct buffer_head *bh) {
+ return (lvm_map(bh, rw) < 0) ? 0 : 1;
}
@@ -1774,10 +1694,8 @@ static int lvm_do_lock_lvm(void)
lock_try_again:
spin_lock(&lvm_lock);
if (lock != 0 && lock != current->pid) {
-#ifdef DEBUG_IOCTL
- printk(KERN_INFO "lvm_do_lock_lvm: %s is locked by pid %d ...\n",
- lvm_name, lock);
-#endif
+ P_IOCTL("lvm_do_lock_lvm: %s is locked by pid %d ...\n",
+ lvm_name, lock);
spin_unlock(&lvm_lock);
interruptible_sleep_on(&lvm_wait);
if (current->sigpending != 0)
@@ -1966,6 +1884,8 @@ int lvm_do_vg_create(int minor, void *arg)
}
}
+ lvm_do_create_devfs_entry_of_vg ( vg_ptr);
+
/* Second path to correct snapshot logical volumes which are not
in place during first path above */
for (l = 0; l < ls; l++) {
@@ -1980,18 +1900,7 @@ int lvm_do_vg_create(int minor, void *arg)
}
}
-#ifdef CONFIG_DEVFS_FS
- vg_devfs_handle[vg_ptr->vg_number] = devfs_mk_dir(0, vg_ptr->vg_name, NULL);
- ch_devfs_handle[vg_ptr->vg_number] = devfs_register(
- vg_devfs_handle[vg_ptr->vg_number] , "group",
- DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
- &lvm_chr_fops, NULL);
-#endif
-
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_create_proc_entry_of_vg ( vg_ptr);
-#endif
vfree(snap_lv_ptr);
@@ -2021,25 +1930,15 @@ static int lvm_do_vg_extend(vg_t *vg_ptr, void *arg)
for (p = 0; p < vg_ptr->pv_max; p++) {
if ( ( pv_ptr = vg_ptr->pv[p]) == NULL) {
ret = lvm_do_pv_create(arg, vg_ptr, p);
- lvm_do_create_proc_entry_of_pv ( vg_ptr, pv_ptr);
if ( ret != 0) return ret;
-
- /* We don't need the PE list
- in kernel space like LVs pe_t list */
- pv_ptr->pe = NULL;
- vg_ptr->pv_cur++;
- vg_ptr->pv_act++;
- vg_ptr->pe_total +=
- pv_ptr->pe_total;
-#ifdef LVM_GET_INODE
- /* insert a dummy inode for fs_may_mount */
- pv_ptr->inode = lvm_get_inode(pv_ptr->pv_dev);
-#endif
+ pv_ptr = vg_ptr->pv[p];
+ vg_ptr->pe_total += pv_ptr->pe_total;
+ lvm_do_create_proc_entry_of_pv(vg_ptr, pv_ptr);
return 0;
}
}
}
-return -EPERM;
+ return -EPERM;
} /* lvm_do_vg_extend() */
@@ -2060,10 +1959,6 @@ static int lvm_do_vg_reduce(vg_t *vg_ptr, void *arg) {
strcmp(pv_ptr->pv_name,
pv_name) == 0) {
if (pv_ptr->lv_cur > 0) return -EPERM;
- vg_ptr->pe_total -=
- pv_ptr->pe_total;
- vg_ptr->pv_cur--;
- vg_ptr->pv_act--;
lvm_do_pv_remove(vg_ptr, p);
/* Make PV pointer array contiguous */
for (; p < vg_ptr->pv_max - 1; p++)
@@ -2091,9 +1986,7 @@ static int lvm_do_vg_rename(vg_t *vg_ptr, void *arg)
if (copy_from_user(vg_name, arg, sizeof(vg_name)) != 0)
return -EFAULT;
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_remove_proc_entry_of_vg ( vg_ptr);
-#endif
strncpy ( vg_ptr->vg_name, vg_name, sizeof ( vg_name)-1);
for ( l = 0; l < vg_ptr->lv_max; l++)
@@ -2115,9 +2008,7 @@ static int lvm_do_vg_rename(vg_t *vg_ptr, void *arg)
strncpy(pv_ptr->vg_name, vg_name, NAME_LEN);
}
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_create_proc_entry_of_vg ( vg_ptr);
-#endif
return 0;
} /* lvm_do_vg_rename */
@@ -2166,27 +2057,17 @@ static int lvm_do_vg_remove(int minor)
/* free PVs */
for (i = 0; i < vg_ptr->pv_max; i++) {
if ((pv_ptr = vg_ptr->pv[i]) != NULL) {
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG
- "%s -- kfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
lvm_do_pv_remove(vg_ptr, i);
}
}
-#ifdef CONFIG_DEVFS_FS
devfs_unregister (ch_devfs_handle[vg_ptr->vg_number]);
devfs_unregister (vg_devfs_handle[vg_ptr->vg_number]);
-#endif
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_remove_proc_entry_of_vg ( vg_ptr);
-#endif
-
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
kfree(vg_ptr);
vg[VG_CHR(minor)] = NULL;
@@ -2222,11 +2103,6 @@ static int lvm_do_pv_create(pv_t *pvp, vg_t *vg_ptr, ulong p) {
vg_ptr->pv_act++;
vg_ptr->pv_cur++;
-#ifdef LVM_GET_INODE
- /* insert a dummy inode for fs_may_mount */
- pv_ptr->inode = lvm_get_inode(pv_ptr->pv_dev);
-#endif
-
return 0;
} /* lvm_do_pv_create() */
@@ -2237,11 +2113,8 @@ static int lvm_do_pv_create(pv_t *pvp, vg_t *vg_ptr, ulong p) {
static int lvm_do_pv_remove(vg_t *vg_ptr, ulong p) {
pv_t *pv_ptr = vg_ptr->pv[p];
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_remove_proc_entry_of_pv ( vg_ptr, pv_ptr);
-#endif
- vg_ptr->pe_total -=
- pv_ptr->pe_total;
+ vg_ptr->pe_total -= pv_ptr->pe_total;
vg_ptr->pv_cur--;
vg_ptr->pv_act--;
#ifdef LVM_GET_INODE
@@ -2320,11 +2193,9 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
"%s -- LV_CREATE: vmalloc error LV_CURRENT_PE of %d Byte "
"at line %d\n",
lvm_name, size, __LINE__);
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
kfree(lv_ptr);
- vg[VG_CHR(minor)]->lv[l] = NULL;
+ vg_ptr->lv[l] = NULL;
return -ENOMEM;
}
if (copy_from_user(lv_ptr->lv_current_pe, pep, size)) {
@@ -2354,9 +2225,8 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
"%s -- lvm_do_lv_create: vmalloc error LV_BLOCK_EXCEPTION "
"of %d byte at line %d\n",
lvm_name, size, __LINE__);
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- kfree %d\n", lvm_name,
+ __LINE__);
kfree(lv_ptr);
vg_ptr->lv[l] = NULL;
return -ENOMEM;
@@ -2364,7 +2234,7 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
if (copy_from_user(lv_ptr->lv_block_exception, lvbe, size)) {
vfree(lv_ptr->lv_block_exception);
kfree(lv_ptr);
- vg[VG_CHR(minor)]->lv[l] = NULL;
+ vg_ptr->lv[l] = NULL;
return -EFAULT;
}
/* point to the original logical volume */
@@ -2372,33 +2242,32 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
lv_ptr->lv_snapshot_minor = 0;
lv_ptr->lv_snapshot_org = lv_ptr;
- lv_ptr->lv_snapshot_prev = NULL;
- /* walk thrugh the snapshot list */
- while (lv_ptr->lv_snapshot_next != NULL)
- lv_ptr = lv_ptr->lv_snapshot_next;
- /* now lv_ptr points to the last existing snapshot in the chain */
- vg_ptr->lv[l]->lv_snapshot_prev = lv_ptr;
/* our new one now back points to the previous last in the chain
which can be the original logical volume */
lv_ptr = vg_ptr->lv[l];
/* now lv_ptr points to our new last snapshot logical volume */
- lv_ptr->lv_snapshot_org = lv_ptr->lv_snapshot_prev->lv_snapshot_org;
- lv_ptr->lv_snapshot_next = NULL;
lv_ptr->lv_current_pe = lv_ptr->lv_snapshot_org->lv_current_pe;
+ lv_ptr->lv_allocated_snapshot_le = lv_ptr->lv_allocated_le;
lv_ptr->lv_allocated_le = lv_ptr->lv_snapshot_org->lv_allocated_le;
lv_ptr->lv_current_le = lv_ptr->lv_snapshot_org->lv_current_le;
lv_ptr->lv_size = lv_ptr->lv_snapshot_org->lv_size;
lv_ptr->lv_stripes = lv_ptr->lv_snapshot_org->lv_stripes;
lv_ptr->lv_stripesize = lv_ptr->lv_snapshot_org->lv_stripesize;
+
+ /* Update the VG PE(s) used by snapshot reserve space. */
+ vg_ptr->pe_allocated += lv_ptr->lv_allocated_snapshot_le;
+
if ((ret = lvm_snapshot_alloc(lv_ptr)) != 0)
{
vfree(lv_ptr->lv_block_exception);
kfree(lv_ptr);
- vg[VG_CHR(minor)]->lv[l] = NULL;
+ vg_ptr->lv[l] = NULL;
return ret;
}
for ( e = 0; e < lv_ptr->lv_remap_ptr; e++)
- lvm_hash_link (lv_ptr->lv_block_exception + e, lv_ptr->lv_block_exception[e].rdev_org, lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
+ lvm_hash_link (lv_ptr->lv_block_exception + e,
+ lv_ptr->lv_block_exception[e].rdev_org,
+ lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
/* need to fill the COW exception table data
into the page for disk i/o */
lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr);
@@ -2426,9 +2295,8 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
vg_ptr->lv_cur++;
lv_ptr->lv_status = lv_status_save;
-#ifdef CONFIG_DEVFS_FS
{
- char *lv_tmp, *lv_buf = NULL;
+ char *lv_tmp, *lv_buf = lv->lv_name;
strtok(lv->lv_name, "/"); /* /dev */
while((lv_tmp = strtok(NULL, "/")) != NULL)
@@ -2440,24 +2308,29 @@ static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv)
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
&lvm_blk_dops, NULL);
}
-#endif
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_create_proc_entry_of_lv ( vg_ptr, lv_ptr);
-#endif
/* optionally add our new snapshot LV */
if (lv_ptr->lv_access & LV_SNAPSHOT) {
+ lv_t *org = lv_ptr->lv_snapshot_org, *last;
+
/* sync the original logical volume */
- fsync_dev(lv_ptr->lv_snapshot_org->lv_dev);
+ fsync_dev(org->lv_dev);
#ifdef LVM_VFS_ENHANCEMENT
/* VFS function call to sync and lock the filesystem */
- fsync_dev_lockfs(lv_ptr->lv_snapshot_org->lv_dev);
+ fsync_dev_lockfs(org->lv_dev);
#endif
- lv_ptr->lv_snapshot_org->lv_access |= LV_SNAPSHOT_ORG;
- lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG;
- /* put ourselve into the chain */
- lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr;
+
+ down(&org->lv_snapshot_sem);
+ org->lv_access |= LV_SNAPSHOT_ORG;
+ lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide a userspace bug */
+
+ /* Link in the list of snapshot volumes */
+ for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next);
+ lv_ptr->lv_snapshot_prev = last;
+ last->lv_snapshot_next = lv_ptr;
+ up(&org->lv_snapshot_sem);
}
/* activate the logical volume */
@@ -2513,6 +2386,31 @@ static int lvm_do_lv_remove(int minor, char *lv_name, int l)
lv_ptr->lv_snapshot_next != NULL)
return -EPERM;
+ if (lv_ptr->lv_access & LV_SNAPSHOT) {
+ /*
+ * Atomically make the snapshot invisible
+ * to the original lv before playing with it.
+ */
+ lv_t * org = lv_ptr->lv_snapshot_org;
+ down(&org->lv_snapshot_sem);
+
+ /* remove this snapshot logical volume from the chain */
+ lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr->lv_snapshot_next;
+ if (lv_ptr->lv_snapshot_next != NULL) {
+ lv_ptr->lv_snapshot_next->lv_snapshot_prev =
+ lv_ptr->lv_snapshot_prev;
+ }
+ up(&org->lv_snapshot_sem);
+
+ /* no more snapshots? */
+ if (!org->lv_snapshot_next)
+ org->lv_access &= ~LV_SNAPSHOT_ORG;
+ lvm_snapshot_release(lv_ptr);
+
+ /* Update the VG PE(s) used by snapshot reserve space. */
+ vg_ptr->pe_allocated -= lv_ptr->lv_allocated_snapshot_le;
+ }
+
lv_ptr->lv_status |= LV_SPINDOWN;
/* sync the buffers */
@@ -2532,7 +2430,8 @@ static int lvm_do_lv_remove(int minor, char *lv_name, int l)
vg_lv_map[MINOR(lv_ptr->lv_dev)].vg_number = ABS_MAX_VG;
vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = -1;
- /* correct the PE count in PVs if this is no snapshot logical volume */
+ /* correct the PE count in PVs if this is not a snapshot
+ logical volume */
if (!(lv_ptr->lv_access & LV_SNAPSHOT)) {
/* only if this is no snapshot logical volume because
we share the lv_current_pe[] structs with the
@@ -2546,31 +2445,13 @@ static int lvm_do_lv_remove(int minor, char *lv_name, int l)
}
}
vfree(lv_ptr->lv_current_pe);
- /* LV_SNAPSHOT */
- } else {
- /* remove this snapshot logical volume from the chain */
- lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr->lv_snapshot_next;
- if (lv_ptr->lv_snapshot_next != NULL) {
- lv_ptr->lv_snapshot_next->lv_snapshot_prev =
- lv_ptr->lv_snapshot_prev;
- }
- /* no more snapshots? */
- if (lv_ptr->lv_snapshot_org->lv_snapshot_next == NULL)
- lv_ptr->lv_snapshot_org->lv_access &= ~LV_SNAPSHOT_ORG;
- lvm_snapshot_release(lv_ptr);
}
-#ifdef CONFIG_DEVFS_FS
devfs_unregister(lv_devfs_handle[lv_ptr->lv_number]);
-#endif
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_remove_proc_entry_of_lv ( vg_ptr, lv_ptr);
-#endif
-#ifdef DEBUG_KFREE
- printk(KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
-#endif
+ P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
kfree(lv_ptr);
vg_ptr->lv[l] = NULL;
vg_ptr->lv_cur--;
@@ -2638,21 +2519,24 @@ static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *lv)
}
memcpy(lvbe,
lv_ptr->lv_block_exception,
- (lv->lv_remap_end > lv_ptr->lv_remap_end ? lv_ptr->lv_remap_ptr : lv->lv_remap_end) * sizeof(lv_block_exception_t));
+ (lv->lv_remap_end > lv_ptr->lv_remap_end ?
+ lv_ptr->lv_remap_ptr : lv->lv_remap_end) * sizeof(lv_block_exception_t));
lv_ptr->lv_block_exception = lvbe;
lv_ptr->lv_remap_end = lv->lv_remap_end;
if (lvm_snapshot_alloc_hash_table(lv_ptr) != 0)
{
- lvm_drop_snapshot(lv_ptr, "hash_alloc");
+ lvm_drop_snapshot(lv_ptr, "no memory for hash table");
up(&lv_ptr->lv_snapshot_org->lv_snapshot_sem);
vfree(lvbe_old);
vfree(lvs_hash_table_old);
- return 1;
+ return -ENOMEM;
}
for (e = 0; e < lv_ptr->lv_remap_ptr; e++)
- lvm_hash_link (lv_ptr->lv_block_exception + e, lv_ptr->lv_block_exception[e].rdev_org, lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
+ lvm_hash_link (lv_ptr->lv_block_exception + e,
+ lv_ptr->lv_block_exception[e].rdev_org,
+ lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
up(&lv_ptr->lv_snapshot_org->lv_snapshot_sem);
@@ -2677,15 +2561,6 @@ static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *lv)
return -EFAULT;
}
-#ifdef DEBUG
- printk(KERN_DEBUG
- "%s -- fsync_dev and "
- "invalidate_buffers for %s [%s] in %s\n",
- lvm_name, lv_ptr->lv_name,
- kdevname(lv_ptr->lv_dev),
- vg_ptr->vg_name);
-#endif
-
/* reduce allocation counters on PV(s) */
for (le = 0; le < lv_ptr->lv_allocated_le; le++) {
vg_ptr->pe_allocated--;
@@ -2714,9 +2589,6 @@ static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *lv)
/* save # of old allocated logical extents */
old_allocated_le = lv_ptr->lv_allocated_le;
- /* in case of shrinking -> let's flush */
- if ( end > lv->lv_current_le) fsync_dev(lv_ptr->lv_dev);
-
/* copy preloaded LV */
memcpy((char *) lv_ptr, (char *) lv, sizeof(lv_t));
@@ -2914,15 +2786,11 @@ static int lvm_do_lv_rename(vg_t *vg_ptr, lv_req_t *lv_req, lv_t *lv)
if ( (lv_ptr = vg_ptr->lv[l]) == NULL) continue;
if (lv_ptr->lv_dev == lv->lv_dev)
{
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_remove_proc_entry_of_lv ( vg_ptr, lv_ptr);
-#endif
strncpy(lv_ptr->lv_name,
lv_req->lv_name,
NAME_LEN);
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
lvm_do_create_proc_entry_of_lv ( vg_ptr, lv_ptr);
-#endif
break;
}
}
@@ -3004,9 +2872,22 @@ static int lvm_do_pv_status(vg_t *vg_ptr, void *arg)
/*
+ * create a devfs entry for a volume group
+ */
+void lvm_do_create_devfs_entry_of_vg ( vg_t *vg_ptr) {
+ vg_devfs_handle[vg_ptr->vg_number] = devfs_mk_dir(0, vg_ptr->vg_name, NULL);
+ ch_devfs_handle[vg_ptr->vg_number] = devfs_register(
+ vg_devfs_handle[vg_ptr->vg_number] , "group",
+ DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
+ &lvm_chr_fops, NULL);
+}
+
+
+/*
* create a /proc entry for a logical volume
*/
-inline void lvm_do_create_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
+void lvm_do_create_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
char *basename;
if ( vg_ptr->lv_subdir_pde != NULL) {
@@ -3026,7 +2907,7 @@ inline void lvm_do_create_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
/*
* remove a /proc entry for a logical volume
*/
-inline void lvm_do_remove_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
+void lvm_do_remove_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
char *basename;
if ( vg_ptr->lv_subdir_pde != NULL) {
@@ -3041,13 +2922,17 @@ inline void lvm_do_remove_proc_entry_of_lv ( vg_t *vg_ptr, lv_t *lv_ptr) {
/*
* create a /proc entry for a physical volume
*/
-inline void lvm_do_create_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
+void lvm_do_create_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
+ int offset = 0;
char *basename;
-
- basename = strrchr(pv_ptr->pv_name, '/');
- if (basename == NULL) basename = pv_ptr->pv_name;
- else basename++;
- pde = create_proc_entry(basename, S_IFREG, vg_ptr->pv_subdir_pde);
+ char buffer[NAME_LEN];
+
+ basename = pv_ptr->pv_name;
+ if (strncmp(basename, "/dev/", 5) == 0) offset = 5;
+ strncpy(buffer, basename + offset, sizeof(buffer));
+ basename = buffer;
+ while ( ( basename = strchr ( basename, '/')) != NULL) *basename = '_';
+ pde = create_proc_entry(buffer, S_IFREG, vg_ptr->pv_subdir_pde);
if ( pde != NULL) {
pde->read_proc = lvm_proc_read_pv_info;
pde->data = pv_ptr;
@@ -3058,7 +2943,7 @@ inline void lvm_do_create_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
/*
* remove a /proc entry for a physical volume
*/
-inline void lvm_do_remove_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
+void lvm_do_remove_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
char *basename;
basename = strrchr(pv_ptr->pv_name, '/');
@@ -3074,7 +2959,6 @@ inline void lvm_do_remove_proc_entry_of_pv ( vg_t *vg_ptr, pv_t *pv_ptr) {
/*
* create a /proc entry for a volume group
*/
-#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
void lvm_do_create_proc_entry_of_vg ( vg_t *vg_ptr) {
int l, p;
pv_t *pv_ptr;
@@ -3090,24 +2974,25 @@ void lvm_do_create_proc_entry_of_vg ( vg_t *vg_ptr) {
pde->read_proc = lvm_proc_read_vg_info;
pde->data = vg_ptr;
}
- vg_ptr->lv_subdir_pde =
- create_proc_entry(LVM_LV_SUBDIR, S_IFDIR,
- vg_ptr->vg_dir_pde);
- vg_ptr->pv_subdir_pde =
- create_proc_entry(LVM_PV_SUBDIR, S_IFDIR,
- vg_ptr->vg_dir_pde);
- }
-
- if ( vg_ptr->pv_subdir_pde != NULL) {
- for ( l = 0; l < vg_ptr->lv_max; l++) {
- if ( ( lv_ptr = vg_ptr->lv[l]) == NULL) continue;
- lvm_do_create_proc_entry_of_lv ( vg_ptr, lv_ptr);
- }
- for ( p = 0; p < vg_ptr->pv_max; p++) {
- if ( ( pv_ptr = vg_ptr->pv[p]) == NULL) continue;
- lvm_do_create_proc_entry_of_pv ( vg_ptr, pv_ptr);
- }
- }
+ pde = create_proc_entry(LVM_LV_SUBDIR, S_IFDIR,
+ vg_ptr->vg_dir_pde);
+ if ( pde != NULL) {
+ vg_ptr->lv_subdir_pde = pde;
+ for ( l = 0; l < vg_ptr->lv_max; l++) {
+ if ( ( lv_ptr = vg_ptr->lv[l]) == NULL) continue;
+ lvm_do_create_proc_entry_of_lv ( vg_ptr, lv_ptr);
+ }
+ }
+ pde = create_proc_entry(LVM_PV_SUBDIR, S_IFDIR,
+ vg_ptr->vg_dir_pde);
+ if ( pde != NULL) {
+ vg_ptr->pv_subdir_pde = pde;
+ for ( p = 0; p < vg_ptr->pv_max; p++) {
+ if ( ( pv_ptr = vg_ptr->pv[p]) == NULL) continue;
+ lvm_do_create_proc_entry_of_pv ( vg_ptr, pv_ptr);
+ }
+ }
+ }
}
/*
@@ -3133,18 +3018,12 @@ void lvm_do_remove_proc_entry_of_vg ( vg_t *vg_ptr) {
remove_proc_entry(vg_ptr->vg_name, lvm_proc_vg_subdir);
}
}
-#endif
/*
* support function initialize gendisk variables
*/
-#ifdef __initfunc
-__initfunc(void lvm_geninit(struct gendisk *lvm_gdisk))
-#else
-void __init
- lvm_geninit(struct gendisk *lvm_gdisk)
-#endif
+void __init lvm_geninit(struct gendisk *lvm_gdisk)
{
int i = 0;
@@ -3166,39 +3045,30 @@ void __init
} /* lvm_gen_init() */
-#ifdef LVM_GET_INODE
/*
- * support function to get an empty inode
- *
- * Gets an empty inode to be inserted into the inode hash,
- * so that a physical volume can't be mounted.
- * This is analog to drivers/block/md.c
- *
- * Is this the real thing?
- *
+ * return a pointer to a '-' padded uuid
*/
-struct inode *lvm_get_inode(int dev)
-{
- struct inode *inode_this = NULL;
+static char *lvm_show_uuid ( char *uuidstr) {
+ int i, j;
+ static char uuid[NAME_LEN] = { 0, };
- /* Lock the device by inserting a dummy inode. */
- inode_this = get_empty_inode();
- inode_this->i_dev = dev;
- insert_inode_hash(inode_this);
- return inode_this;
-}
+ memset ( uuid, 0, NAME_LEN);
+ i = 6;
+ memcpy ( uuid, uuidstr, i);
+ uuidstr += i;
-/*
- * support function to clear an inode
- *
- */
-void lvm_clear_inode(struct inode *inode)
-{
-#ifdef I_FREEING
- inode->i_state |= I_FREEING;
-#endif
- clear_inode(inode);
- return;
+ for ( j = 0; j < 6; j++) {
+ uuid[i++] = '-';
+ memcpy ( &uuid[i], uuidstr, 4);
+ uuidstr += 4;
+ i += 4;
+ }
+
+ memcpy ( &uuid[i], uuidstr, 2 );
+
+ return uuid;
}
-#endif /* #ifdef LVM_GET_INODE */
+
+module_init(lvm_init);
+module_exit(lvm_cleanup);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8542bc2b0..5d4bab6c9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2033,68 +2033,65 @@ abort:
struct {
int set;
int noautodetect;
+} raid_setup_args md__initdata;
-} raid_setup_args md__initdata = { 0, 0 };
-
-void md_setup_drive(void) md__init;
+void md_setup_drive (void) md__init;
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
-#ifdef CONFIG_AUTODETECT_RAID
-static int detected_devices[128] md__initdata = { 0, };
-static int dev_cnt=0;
+static int detected_devices[128] md__initdata;
+static int dev_cnt;
+
void md_autodetect_dev(kdev_t dev)
{
if (dev_cnt >= 0 && dev_cnt < 127)
detected_devices[dev_cnt++] = dev;
}
-#endif
-int md__init md_run_setup(void)
+
+static void autostart_arrays (void)
{
-#ifdef CONFIG_AUTODETECT_RAID
mdk_rdev_t *rdev;
int i;
- if (raid_setup_args.noautodetect)
- printk(KERN_INFO "skipping autodetection of RAID arrays\n");
- else {
-
- printk(KERN_INFO "autodetecting RAID arrays\n");
+ printk(KERN_INFO "autodetecting RAID arrays\n");
- for (i=0; i<dev_cnt; i++) {
- kdev_t dev = detected_devices[i];
+ for (i=0; i<dev_cnt; i++) {
+ kdev_t dev = detected_devices[i];
- if (md_import_device(dev,1)) {
- printk(KERN_ALERT "could not import %s!\n",
- partition_name(dev));
- continue;
- }
- /*
- * Sanity checks:
- */
- rdev = find_rdev_all(dev);
- if (!rdev) {
- MD_BUG();
- continue;
- }
- if (rdev->faulty) {
- MD_BUG();
- continue;
- }
- md_list_add(&rdev->pending, &pending_raid_disks);
+ if (md_import_device(dev,1)) {
+ printk(KERN_ALERT "could not import %s!\n",
+ partition_name(dev));
+ continue;
}
-
- autorun_devices(-1);
+ /*
+ * Sanity checks:
+ */
+ rdev = find_rdev_all(dev);
+ if (!rdev) {
+ MD_BUG();
+ continue;
+ }
+ if (rdev->faulty) {
+ MD_BUG();
+ continue;
+ }
+ md_list_add(&rdev->pending, &pending_raid_disks);
}
+ autorun_devices(-1);
+}
+
+int md__init md_run_setup(void)
+{
+ if (raid_setup_args.noautodetect)
+ printk(KERN_INFO "skipping autodetection of RAID arrays\n");
+ else
+ autostart_arrays();
dev_cnt = -1; /* make sure further calls to md_autodetect_dev are ignored */
-#endif
-#ifdef CONFIG_MD_BOOT
md_setup_drive();
-#endif
return 0;
}
@@ -2558,6 +2555,11 @@ static int md_ioctl (struct inode *inode, struct file *file,
md_print_devices();
goto done_unlock;
+ case RAID_AUTORUN:
+ err = 0;
+ autostart_arrays();
+ goto done;
+
case BLKGETSIZE: /* Return device size */
if (!arg) {
err = -EINVAL;
@@ -3639,14 +3641,12 @@ int md__init md_init (void)
return (0);
}
-#ifdef CONFIG_MD_BOOT
-#define MAX_MD_BOOT_DEVS 8
-struct {
- unsigned long set;
- int pers[MAX_MD_BOOT_DEVS];
- int chunk[MAX_MD_BOOT_DEVS];
- kdev_t devices[MAX_MD_BOOT_DEVS][MD_SB_DISKS];
-} md_setup_args md__initdata = { 0, };
+static struct {
+ char device_set [MAX_MD_DEVS];
+ int pers[MAX_MD_DEVS];
+ int chunk[MAX_MD_DEVS];
+ kdev_t devices[MAX_MD_DEVS][MD_SB_DISKS];
+} md_setup_args md__initdata;
/*
* Parse the command-line parameters given our kernel, but do not
@@ -3676,10 +3676,10 @@ static int md__init md_setup(char *str)
printk("md: Too few arguments supplied to md=.\n");
return 0;
}
- if (minor >= MAX_MD_BOOT_DEVS) {
+ if (minor >= MAX_MD_DEVS) {
printk ("md: Minor device number too high.\n");
return 0;
- } else if (md_setup_args.set & (1 << minor)) {
+ } else if (md_setup_args.device_set[minor]) {
printk ("md: Warning - md=%d,... has been specified twice;\n"
" will discard the first definition.\n", minor);
}
@@ -3737,7 +3737,7 @@ static int md__init md_setup(char *str)
printk ("md: Will configure md%d (%s) from %s, below.\n",
minor, pername, devnames);
md_setup_args.devices[minor][i] = (kdev_t) 0;
- md_setup_args.set |= (1 << minor);
+ md_setup_args.device_set[minor] = 1;
return 1;
}
@@ -3747,10 +3747,11 @@ void md__init md_setup_drive(void)
kdev_t dev;
mddev_t*mddev;
- for (minor = 0; minor < MAX_MD_BOOT_DEVS; minor++) {
+ for (minor = 0; minor < MAX_MD_DEVS; minor++) {
mdu_disk_info_t dinfo;
- int err=0;
- if (!(md_setup_args.set & (1 << minor)))
+
+ int err = 0;
+ if (!md_setup_args.device_set[minor])
continue;
printk("md: Loading md%d.\n", minor);
if (mddev_map[minor].mddev) {
@@ -3776,7 +3777,7 @@ void md__init md_setup_drive(void)
ainfo.layout = 0;
ainfo.chunk_size = md_setup_args.chunk[minor];
err = set_array_info(mddev, &ainfo);
- for (i=0; !err && (dev = md_setup_args.devices[minor][i]); i++) {
+ for (i = 0; !err && (dev = md_setup_args.devices[minor][i]); i++) {
dinfo.number = i;
dinfo.raid_disk = i;
dinfo.state = (1<<MD_DISK_ACTIVE)|(1<<MD_DISK_SYNC);
@@ -3807,7 +3808,6 @@ void md__init md_setup_drive(void)
}
__setup("md=", md_setup);
-#endif
#ifdef MODULE
int init_module (void)
@@ -3859,9 +3859,7 @@ void cleanup_module (void)
#endif
__initcall(md_init);
-#if defined(CONFIG_AUTODETECT_RAID) || defined(CONFIG_MD_BOOT)
__initcall(md_run_setup);
-#endif
MD_EXPORT_SYMBOL(md_size);
MD_EXPORT_SYMBOL(register_md_personality);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7981fe764..3ad3940a9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -714,6 +714,11 @@ static void compute_parity(struct stripe_head *sh, int method)
break;
}
spin_unlock_irq(&conf->device_lock);
+ if (count>1) {
+ xor_block(count, bh_ptr);
+ count = 1;
+ }
+
for (i = disks; i--;)
if (chosen[i]) {
struct buffer_head *bh = sh->bh_cache[i];
diff --git a/drivers/md/xor.c b/drivers/md/xor.c
index b9b1cefe9..f0b76d466 100644
--- a/drivers/md/xor.c
+++ b/drivers/md/xor.c
@@ -57,8 +57,7 @@ xor_block(unsigned int count, struct buffer_head **bh_ptr)
/* Set of all registered templates. */
static struct xor_block_template *template_list;
-/* The -6*32 shift factor colors the cache. */
-#define BENCH_SIZE (PAGE_SIZE-6*32)
+#define BENCH_SIZE (PAGE_SIZE)
static void
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index ae4054d35..02c9204e9 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -118,6 +118,14 @@
LK1.1.11 13 Nov 2000 andrewm
- Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
+ LK1.1.12 1 Jan 2001 andrewm
+ - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
+ - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
+ - Added extended wait_for_completion for the 3c905CX.
+ - Look for an MII on PHY index 24 first (3c905CX oddity).
+ - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
+ - Don't free skbs we don't own on oom path in vortex_open().
+
- See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
- Also see Documentation/networking/vortex.txt
*/
@@ -203,7 +211,7 @@ static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
#include <linux/delay.h>
static char version[] __devinitdata =
-"3c59x.c:LK1.1.11 13 Nov 2000 Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n";
+"3c59x.c:LK1.1.12 06 Jan 2000 Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver");
@@ -424,7 +432,7 @@ static struct vortex_chip_info {
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
{"3cSOHO100-TX Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
{"3c555 Laptop Hurricane",
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT, 128, },
{"3c556 Laptop Tornado",
@@ -843,10 +851,15 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
{
int rc;
- rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
- ent->driver_data, vortex_cards_found);
- if (rc == 0)
- vortex_cards_found++;
+ /* wake up and enable device */
+ if (pci_enable_device (pdev)) {
+ rc = -EIO;
+ } else {
+ rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
+ ent->driver_data, vortex_cards_found);
+ if (rc == 0)
+ vortex_cards_found++;
+ }
return rc;
}
@@ -863,7 +876,7 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
struct vortex_private *vp;
int option;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
- int i;
+ int i, step;
struct net_device *dev;
static int printed_version;
int retval;
@@ -889,7 +902,6 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
vci->name,
ioaddr);
- /* private struct aligned and zeroed by init_etherdev */
vp = dev->priv;
dev->base_addr = ioaddr;
dev->irq = irq;
@@ -908,19 +920,29 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
if (pdev) {
/* EISA resources already marked, so only PCI needs to do this here */
/* Ignore return value, because Cardbus drivers already allocate for us */
- if (request_region(ioaddr, vci->io_size, dev->name) != NULL) {
+ if (request_region(ioaddr, vci->io_size, dev->name) != NULL)
vp->must_free_region = 1;
- }
-
- /* wake up and enable device */
- if (pci_enable_device (pdev)) {
- retval = -EIO;
- goto free_region;
- }
/* enable bus-mastering if necessary */
if (vci->flags & PCI_USES_MASTER)
pci_set_master (pdev);
+
+ if (vci->drv_flags & IS_VORTEX) {
+ u8 pci_latency;
+ u8 new_latency = 248;
+
+ /* Check the PCI latency value. On the 3c590 series the latency timer
+ must be set to the maximum value to avoid data corruption that occurs
+ when the timer expires during a transfer. This bug exists in the Vortex
+ chip only. */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < new_latency) {
+ printk(KERN_INFO "%s: Overriding PCI latency"
+ " timer (CFLT) setting of %d, new value is %d.\n",
+ dev->name, pci_latency, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ }
+ }
}
spin_lock_init(&vp->lock);
@@ -1025,6 +1047,13 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
dev->irq);
#endif
+ EL3WINDOW(4);
+ step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+ printk(KERN_INFO " product code '%c%c' rev %02x.%d date %02d-"
+ "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+ step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+
+
if (pdev && vci->drv_flags & HAS_CB_FNS) {
unsigned long fn_st_addr; /* Cardbus function status space */
unsigned short n;
@@ -1089,8 +1118,19 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
mii_preamble_required++;
mii_preamble_required++;
mdio_read(dev, 24, 1);
- for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) {
- int mii_status, phyx = phy & 0x1f;
+ for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+ int mii_status, phyx;
+
+ /*
+ * For the 3c905CX we look at index 24 first, because it bogusly
+ * reports an external PHY at all indices
+ */
+ if (phy == 0)
+ phyx = 24;
+ else if (phy <= 24)
+ phyx = phy - 1;
+ else
+ phyx = phy;
mii_status = mdio_read(dev, phyx, 1);
if (mii_status && mii_status != 0xffff) {
vp->phys[phy_idx++] = phyx;
@@ -1135,12 +1175,13 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
dev->set_multicast_list = set_rx_mode;
dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
-
+// publish_netdev(dev);
return 0;
free_region:
if (vp->must_free_region)
release_region(ioaddr, vci->io_size);
+// withdraw_netdev(dev);
unregister_netdev(dev);
kfree (dev);
printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1150,13 +1191,23 @@ out:
static void wait_for_completion(struct net_device *dev, int cmd)
{
- int i = 4000;
+ int i;
outw(cmd, dev->base_addr + EL3_CMD);
- while (--i > 0) {
+ for (i = 0; i < 2000; i++) {
if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
return;
}
+
+ /* OK, that didn't work. Do it the slow way. One second */
+ for (i = 0; i < 100000; i++) {
+ if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
+ printk(KERN_INFO "%s: command 0x%04x took %d usecs! Please tell andrewm@uow.edu.au\n",
+ dev->name, cmd, i * 10);
+ return;
+ }
+ udelay(10);
+ }
printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
}
@@ -1331,6 +1382,7 @@ vortex_up(struct net_device *dev)
set_rx_mode(dev);
outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+// wait_for_completion(dev, SetTxStart|0x07ff);
outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
/* Allow status bits to be seen. */
@@ -1384,7 +1436,8 @@ vortex_open(struct net_device *dev)
}
if (i != RX_RING_SIZE) {
int j;
- for (j = 0; j < RX_RING_SIZE; j++) {
+ printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
+ for (j = 0; j < i; j++) {
if (vp->rx_skbuff[j]) {
dev_kfree_skb(vp->rx_skbuff[j]);
vp->rx_skbuff[j] = 0;
@@ -1532,7 +1585,10 @@ static void vortex_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
dev->name, inb(ioaddr + TxStatus),
inw(ioaddr + EL3_STATUS));
-
+ EL3WINDOW(4);
+ printk(KERN_ERR " diagnostics: net %04x media %04x dma %8.8x.\n",
+ inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media),
+ inl(ioaddr + PktStatus));
/* Slight code bloat to be user friendly. */
if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
@@ -1663,6 +1719,12 @@ vortex_error(struct net_device *dev, int status)
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
if (vp->full_bus_master_tx) {
+ int bus_status = inl(ioaddr + PktStatus);
+ /* 0x80000000 PCI master abort. */
+ /* 0x40000000 PCI target abort. */
+ if (vortex_debug)
+ printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
+
/* In this case, blow the card away */
vortex_down(dev);
wait_for_completion(dev, TotalReset | 0xff);
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 290efb51c..5f9658fa5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -26,7 +26,7 @@ ifeq ($(CONFIG_ISDN_PPP),y)
obj-$(CONFIG_ISDN) += slhc.o
endif
-subdir-$(CONFIG_PCMCIA) += pcmcia
+subdir-$(CONFIG_NET_PCMCIA) += pcmcia
subdir-$(CONFIG_TULIP) += tulip
subdir-$(CONFIG_IRDA) += irda
subdir-$(CONFIG_TR) += tokenring
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 448baa800..d5d3fa19f 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1817,7 +1817,9 @@ EISA_signature(char *name, s32 eisa_id)
ManCode[5]='\0';
for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
- if (strstr(ManCode, signatures[i]) != NULL) {
+ const char * volatile lhs = ManCode;
+ const char * volatile rhs = signatures[i]; /* egcs-1.1.2 bug */
+ if (strstr(lhs, rhs) != NULL) {
strcpy(name,ManCode);
status = 1;
}
diff --git a/drivers/net/dmfe.c b/drivers/net/dmfe.c
index 481b28700..b160afd78 100644
--- a/drivers/net/dmfe.c
+++ b/drivers/net/dmfe.c
@@ -1596,10 +1596,10 @@ static int __init dmfe_init_module(void)
break;
}
- rc = pci_register_driver(&dmfe_driver);
+ rc = pci_module_init(&dmfe_driver);
if (rc < 0)
return rc;
- if (rc > 0) {
+ if (rc >= 0) {
printk (KERN_INFO "Davicom DM91xx net driver loaded, version "
DMFE_VERSION "\n");
return 0;
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 298a317e2..2970fac9f 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -29,7 +29,7 @@
static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n"
-"eepro100.c: $Revision: 1.35 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
+"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
First set is undocumented and spelled per Intel recommendations. */
@@ -698,6 +698,7 @@ static int speedo_found1(struct pci_dev *pdev,
This takes less than 10usec and will easily finish before the next
action. */
outl(PortReset, ioaddr + SCBPort);
+ inl(ioaddr + SCBPort);
udelay(10);
if (eeprom[3] & 0x0100)
@@ -785,6 +786,7 @@ static int speedo_found1(struct pci_dev *pdev,
#endif /* kernel_bloat */
outl(PortReset, ioaddr + SCBPort);
+ inl(ioaddr + SCBPort);
udelay(10);
/* Return the chip to its original power state. */
@@ -801,7 +803,7 @@ static int speedo_found1(struct pci_dev *pdev,
sp->tx_ring = tx_ring_space;
sp->tx_ring_dma = tx_ring_dma;
sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
- sp->lstats_dma = cpu_to_le32(TX_RING_ELEM_DMA(sp, TX_RING_SIZE));
+ sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
init_timer(&sp->timer); /* used in ioctl() */
sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
@@ -1002,7 +1004,9 @@ static void speedo_resume(struct net_device *dev)
/* Set the segment registers to '0'. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(0, ioaddr + SCBPointer);
- inl(ioaddr + SCBPointer); /* XXX */
+ /* impose a delay to avoid a bug */
+ inl(ioaddr + SCBPointer);
+ udelay(10);
outb(RxAddrLoad, ioaddr + SCBCmd);
wait_for_cmd_done(ioaddr + SCBCmd);
outb(CUCmdBase, ioaddr + SCBCmd);
@@ -1010,7 +1014,6 @@ static void speedo_resume(struct net_device *dev)
/* Load the statistics block and rx ring addresses. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(sp->lstats_dma, ioaddr + SCBPointer);
- inl(ioaddr + SCBPointer); /* XXX */
outb(CUStatsAddr, ioaddr + SCBCmd);
sp->lstats->done_marker = 0;
@@ -1045,7 +1048,7 @@ static void speedo_resume(struct net_device *dev)
/* Start the chip's Tx process and unmask interrupts. */
wait_for_cmd_done(ioaddr + SCBCmd);
- outl(cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE)),
+ outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
ioaddr + SCBPointer);
/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
remain masked --Dragan */
@@ -1274,7 +1277,7 @@ static void speedo_tx_timeout(struct net_device *dev)
/* Only the command unit has stopped. */
printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
dev->name);
- outl(cpu_to_le32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE])),
+ outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
ioaddr + SCBPointer);
outw(CUStart, ioaddr + SCBCmd);
reset_mii(dev);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 7dab437e6..3ec09be5a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -153,7 +153,7 @@ static int check_crc_flex(unsigned char *cp, int size)
/* Find a free channel, and link in this `tty' line. */
static inline struct ax_disp *ax_alloc(void)
{
- ax25_ctrl_t *axp;
+ ax25_ctrl_t *axp=NULL;
int i;
for (i = 0; i < ax25_maxdev; i++) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index d65fddc41..cc31e9e30 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -136,7 +136,7 @@
/* ----------------------------------------------------------------------- */
-#undef SCC_LDELAY 1 /* slow it even a bit more down */
+#undef SCC_LDELAY /* slow it even a bit more down */
#undef SCC_DONT_CHECK /* don't look if the SCCs you specified are available */
#define SCC_MAXCHIPS 4 /* number of max. supported chips */
@@ -1776,7 +1776,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (hwcfg.vector_latch) {
if (!request_region(Vector_Latch, 1, "scc vector latch"))
- printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%x\n, disabled.", hwcfg.vector_latch);
+ printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%x\n, disabled.", (unsigned int)hwcfg.vector_latch);
else
Vector_Latch = hwcfg.vector_latch;
}
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index ba9c60250..56ca24689 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -892,6 +892,9 @@ static int __init myri_ether_init(struct net_device *dev, struct sbus_dev *sdev,
DET(("myri_ether_init(%p,%p,%d):\n", dev, sdev, num));
dev = init_etherdev(0, sizeof(struct myri_eth));
+ if (!dev)
+ return -ENOMEM;
+
if (version_printed++ == 0)
printk(version);
@@ -982,7 +985,7 @@ static int __init myri_ether_init(struct net_device *dev, struct sbus_dev *sdev,
mp->reg_size, "MyriCOM Regs");
if (!mp->regs) {
printk("MyriCOM: Cannot map MyriCOM registers.\n");
- return -ENODEV;
+ goto err;
}
mp->lanai = (unsigned short *) (mp->regs + (256 * 1024));
mp->lanai3 = (unsigned int *) mp->lanai;
@@ -1059,7 +1062,7 @@ static int __init myri_ether_init(struct net_device *dev, struct sbus_dev *sdev,
if (request_irq(dev->irq, &myri_interrupt,
SA_SHIRQ, "MyriCOM Ethernet", (void *) dev)) {
printk("MyriCOM: Cannot register interrupt handler.\n");
- return -ENODEV;
+ goto err;
}
DET(("ether_setup()\n"));
@@ -1083,6 +1086,9 @@ static int __init myri_ether_init(struct net_device *dev, struct sbus_dev *sdev,
root_myri_dev = mp;
#endif
return 0;
+err: unregister_netdev(dev);
+ kfree(dev);
+ return -ENODEV;
}
static int __init myri_sbus_match(struct sbus_dev *sdev)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index ccb92e8a5..fa8b951a7 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -695,6 +695,7 @@ pcnet32_probe1(unsigned long ioaddr, unsigned char irq_line, int shared, int car
if (a == NULL) {
printk(KERN_ERR "pcnet32: No access methods\n");
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
return -ENODEV;
}
lp->a = *a;
@@ -741,6 +742,7 @@ pcnet32_probe1(unsigned long ioaddr, unsigned char irq_line, int shared, int car
printk(", probed IRQ %d.\n", dev->irq);
else {
printk(", failed to detect IRQ line.\n");
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
return -ENODEV;
}
}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index d6b2c47f3..c1234c5c8 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -33,13 +33,6 @@
#include <linux/init.h>
#include <asm/uaccess.h>
-#ifndef spin_trylock_bh
-#define spin_trylock_bh(lock) ({ int __r; local_bh_disable(); \
- __r = spin_trylock(lock); \
- if (!__r) local_bh_enable(); \
- __r; })
-#endif
-
#define PPP_VERSION "2.4.1"
#define OBUFSIZE 256
@@ -76,6 +69,7 @@ struct asyncppp {
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
+#define XMIT_BUSY 2
/* State bits */
#define SC_TOSS 0x20000000
@@ -181,18 +175,14 @@ ppp_asynctty_close(struct tty_struct *tty)
}
/*
- * Read does nothing.
+ * Read does nothing - no data is ever available this way.
+ * Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
unsigned char *buf, size_t count)
{
- /* For now, do the same as the old 2.3.x code useta */
- struct asyncppp *ap = tty->disc_data;
-
- if (ap == 0)
- return -ENXIO;
- return ppp_channel_read(&ap->chan, file, buf, count);
+ return -EAGAIN;
}
/*
@@ -203,12 +193,7 @@ static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t count)
{
- /* For now, do the same as the old 2.3.x code useta */
- struct asyncppp *ap = tty->disc_data;
-
- if (ap == 0)
- return -ENXIO;
- return ppp_channel_write(&ap->chan, buf, count);
+ return -EAGAIN;
}
static int
@@ -259,25 +244,6 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
err = 0;
break;
-/*
- * For now, do the same as the old 2.3 driver useta
- */
- case PPPIOCGFLAGS:
- case PPPIOCSFLAGS:
- case PPPIOCGASYNCMAP:
- case PPPIOCSASYNCMAP:
- case PPPIOCGRASYNCMAP:
- case PPPIOCSRASYNCMAP:
- case PPPIOCGXASYNCMAP:
- case PPPIOCSXASYNCMAP:
- case PPPIOCGMRU:
- case PPPIOCSMRU:
- err = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
- err = ppp_async_ioctl(&ap->chan, cmd, arg);
- break;
-
case PPPIOCATTACH:
case PPPIOCDETACH:
err = ppp_channel_ioctl(&ap->chan, cmd, arg);
@@ -294,18 +260,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
- unsigned int mask;
- struct asyncppp *ap = tty->disc_data;
-
- mask = POLLOUT | POLLWRNORM;
-/*
- * For now, do the same as the old 2.3 driver useta
- */
- if (ap != 0)
- mask |= ppp_channel_poll(&ap->chan, file, wait);
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags) || tty_hung_up_p(file))
- mask |= POLLHUP;
- return mask;
+ return 0;
}
static int
@@ -637,8 +592,18 @@ ppp_async_push(struct asyncppp *ap)
int tty_stuffed = 0;
set_bit(XMIT_WAKEUP, &ap->xmit_flags);
- if (!spin_trylock_bh(&ap->xmit_lock))
+ /*
+ * We can get called recursively here if the tty write
+ * function calls our wakeup function. This can happen
+ * for example on a pty with both the master and slave
+ * set to PPP line discipline.
+ * We use the XMIT_BUSY bit to detect this and get out,
+ * leaving the XMIT_WAKEUP bit set to tell the other
+ * instance that it may now be able to write more now.
+ */
+ if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
return 0;
+ spin_lock_bh(&ap->xmit_lock);
for (;;) {
if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
tty_stuffed = 0;
@@ -653,7 +618,7 @@ ppp_async_push(struct asyncppp *ap)
tty_stuffed = 1;
continue;
}
- if (ap->optr == ap->olim && ap->tpkt != 0) {
+ if (ap->optr >= ap->olim && ap->tpkt != 0) {
if (ppp_async_encode(ap)) {
/* finished processing ap->tpkt */
clear_bit(XMIT_FULL, &ap->xmit_flags);
@@ -661,17 +626,29 @@ ppp_async_push(struct asyncppp *ap)
}
continue;
}
- /* haven't made any progress */
- spin_unlock_bh(&ap->xmit_lock);
+ /*
+ * We haven't made any progress this time around.
+ * Clear XMIT_BUSY to let other callers in, but
+ * after doing so we have to check if anyone set
+ * XMIT_WAKEUP since we last checked it. If they
+ * did, we should try again to set XMIT_BUSY and go
+ * around again in case XMIT_BUSY was still set when
+ * the other caller tried.
+ */
+ clear_bit(XMIT_BUSY, &ap->xmit_flags);
+ /* any more work to do? if not, exit the loop */
if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
|| (!tty_stuffed && ap->tpkt != 0)))
break;
- if (!spin_trylock_bh(&ap->xmit_lock))
+ /* more work to do, see if we can do it now */
+ if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
break;
}
+ spin_unlock_bh(&ap->xmit_lock);
return done;
flush:
+ clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
ap->tpkt = 0;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 8744f6186..7fffdf4f7 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -5,23 +5,24 @@
* PPPoE --- PPP over Ethernet (RFC 2516)
*
*
- * Version: 0.6.4
+ * Version: 0.6.5
*
* 030700 : Fixed connect logic to allow for disconnect.
- * 270700 : Fixed potential SMP problems; we must protect against
- * simultaneous invocation of ppp_input
+ * 270700 : Fixed potential SMP problems; we must protect against
+ * simultaneous invocation of ppp_input
* and ppp_unregister_channel.
* 040800 : Respect reference count mechanisms on net-devices.
* 200800 : fix kfree(skb) in pppoe_rcv (acme)
* Module reference count is decremented in the right spot now,
- * guards against sock_put not actually freeing the sk
+ * guards against sock_put not actually freeing the sk
* in pppoe_release.
* 051000 : Initialization cleanup.
* 111100 : Fix recvmsg.
+ * 050101 : Fix PADT processing.
*
* Author: Michal Ostrowski <mostrows@styx.uwaterloo.ca>
* Contributors:
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
*
* License:
* This program is free software; you can redistribute it and/or
@@ -110,7 +111,7 @@ static int hash_item(unsigned long sid, unsigned char *addr)
hash ^= sid >> (i*PPPOE_HASH_BITS);
return hash & ( PPPOE_HASH_SIZE - 1 );
-}
+}
static struct pppox_opt *item_hash_table[PPPOE_HASH_SIZE] = { 0, };
@@ -238,7 +239,7 @@ static int pppoe_device_event(struct notifier_block *this,
struct net_device *dev = (struct net_device *) ptr;
struct pppox_opt *po = NULL;
int hash = 0;
-
+
/* Only look at sockets that are using this specific device. */
switch (event) {
case NETDEV_CHANGEMTU:
@@ -255,13 +256,13 @@ static int pppoe_device_event(struct notifier_block *this,
po = item_hash_table[hash];
++hash;
}
-
+
while (po && hash < PPPOE_HASH_SIZE){
if(po->pppoe_dev == dev){
lock_sock(po->sk);
if (po->sk->state & (PPPOX_CONNECTED|PPPOX_BOUND)){
pppox_unbind_sock(po->sk);
-
+
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
@@ -308,7 +309,7 @@ int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb){
if (sk->state & PPPOX_BOUND) {
skb_pull(skb, sizeof(struct pppoe_hdr));
-
+
ppp_input(&po->chan, skb);
} else if( sk->state & PPPOX_RELAY ){
@@ -318,7 +319,7 @@ int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb){
!( relay_po->sk->state & PPPOX_CONNECTED ) ){
goto abort;
}
-
+
skb_pull(skb, sizeof(struct pppoe_hdr));
if( !__pppoe_xmit( relay_po->sk , skb) ){
goto abort;
@@ -369,7 +370,7 @@ static int pppoe_rcv(struct sk_buff *skb,
}else{
ret = pppoe_rcv_core(sk, skb);
}
-
+
bh_unlock_sock(sk);
sock_put(sk);
return ret;
@@ -412,13 +413,12 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
po = get_item((unsigned long) ph->sid, skb->mac.ethernet->h_source);
if (!po)
- goto abort_put;
+ goto abort;
sk = po->sk;
pppox_unbind_sock(sk);
- abort_put:
sock_put(sk);
abort:
kfree_skb(skb);
@@ -466,9 +466,9 @@ static int pppoe_create(struct socket *sock)
{
int error = 0;
struct sock *sk;
-
+
MOD_INC_USE_COUNT;
-
+
sk = sk_alloc(PF_PPPOX, GFP_KERNEL, 1);
if (!sk)
return -ENOMEM;
@@ -528,7 +528,7 @@ int pppoe_release(struct socket *sock)
po = sk->protinfo.pppox;
if (po->pppoe_pa.sid)
delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
-
+
if (po->pppoe_dev)
dev_put(po->pppoe_dev);
@@ -945,7 +945,7 @@ int pppoe_proc_info(char *buffer, char **start, off_t offset, int length)
off_t begin = 0;
int size;
int i;
-
+
len += sprintf(buffer,
"Id Address Device\n");
pos = len;
@@ -1025,9 +1025,10 @@ int __init pppoe_init(void)
int err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
if (err == 0) {
- printk(KERN_INFO "Registered PPPoE v0.6.4\n");
+ printk(KERN_INFO "Registered PPPoE v0.6.5\n");
dev_add_pack(&pppoes_ptype);
+ dev_add_pack(&pppoed_ptype);
register_netdevice_notifier(&pppoe_notifier);
proc_net_create("pppoe", 0, pppoe_proc_info);
}
@@ -1038,6 +1039,7 @@ void __exit pppoe_exit(void)
{
unregister_pppox_proto(PX_PROTO_OE);
dev_remove_pack(&pppoes_ptype);
+ dev_remove_pack(&pppoed_ptype);
unregister_netdevice_notifier(&pppoe_notifier);
proc_net_remove("pppoe");
}
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 3036684f9..253548599 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1,12 +1,9 @@
-/* $Id: sunbmac.c,v 1.21 2000/10/22 16:08:38 davem Exp $
+/* $Id: sunbmac.c,v 1.23 2001/01/20 03:36:40 davem Exp $
* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
*/
-static char *version =
- "sunbmac.c:v1.9 11/Sep/99 David S. Miller (davem@redhat.com)\n";
-
#include <linux/module.h>
#include <linux/kernel.h>
@@ -41,6 +38,9 @@ static char *version =
#include "sunbmac.h"
+static char version[] __initdata =
+ "sunbmac.c:v1.9 11/Sep/99 David S. Miller (davem@redhat.com)\n";
+
#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ
@@ -1051,10 +1051,10 @@ static void bigmac_set_multicast(struct net_device *dev)
static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec_sdev)
{
- static int version_printed = 0;
- struct bigmac *bp = 0;
+ static int version_printed;
+ struct bigmac *bp;
u8 bsizes, bsizes_more;
- int i, res = ENOMEM;
+ int i;
/* Get a new device struct for this interface. */
dev = init_etherdev(0, sizeof(struct bigmac));
@@ -1062,6 +1062,9 @@ static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
+ if (!dev)
+ return -ENOMEM;
+
/* Report what we have found to the user. */
printk(KERN_INFO "%s: BigMAC 100baseT Ethernet ", dev->name);
dev->base_addr = (long) qec_sdev;
@@ -1077,9 +1080,6 @@ static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec
spin_lock_init(&bp->lock);
- /* All further failures we find return this. */
- res = ENODEV;
-
/* Verify the registers we expect, are actually there. */
if ((bp->bigmac_sdev->num_registers != 3) ||
(bp->qec_sdev->num_registers != 2)) {
@@ -1205,28 +1205,25 @@ static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec
fail_and_cleanup:
/* Something went wrong, undo whatever we did so far. */
- if (bp) {
- /* Free register mappings if any. */
- if (bp->gregs)
- sbus_iounmap(bp->gregs, GLOB_REG_SIZE);
- if (bp->creg)
- sbus_iounmap(bp->creg, CREG_REG_SIZE);
- if (bp->bregs)
- sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
- if (bp->tregs)
- sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
-
- if (bp->bmac_block)
- sbus_free_consistent(bp->bigmac_sdev,
- PAGE_SIZE,
- bp->bmac_block,
- bp->bblock_dvma);
-
- /* Free the BigMAC softc. */
- kfree(bp);
- dev->priv = 0;
- }
- return res; /* Return error code. */
+ /* Free register mappings if any. */
+ if (bp->gregs)
+ sbus_iounmap(bp->gregs, GLOB_REG_SIZE);
+ if (bp->creg)
+ sbus_iounmap(bp->creg, CREG_REG_SIZE);
+ if (bp->bregs)
+ sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
+ if (bp->tregs)
+ sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
+
+ if (bp->bmac_block)
+ sbus_free_consistent(bp->bigmac_sdev,
+ PAGE_SIZE,
+ bp->bmac_block,
+ bp->bblock_dvma);
+
+ unregister_netdev(dev);
+ kfree(dev);
+ return -ENODEV;
}
/* QEC can be the parent of either QuadEthernet or
diff --git a/drivers/net/tulip/ChangeLog b/drivers/net/tulip/ChangeLog
index dfac94349..bb5b70f01 100644
--- a/drivers/net/tulip/ChangeLog
+++ b/drivers/net/tulip/ChangeLog
@@ -1,7 +1,37 @@
-2000-12-17 Alan Cox <alan@redhat.com>
+2001-01-16 Jeff Garzik <jgarzik@mandrakesoft.com>
- * merge support for the Davicom's quirks into the main tulip. Patch
- by Tobias Ringstrom
+ * tulip_core.c: static vars no longer explicitly
+ initialized to zero.
+ * eeprom.c (tulip_read_eeprom): Make sure to delay between
+ EE_ENB and EE_ENB|EE_SHIFT_CLK. Merged from becker tulip.c.
+
+2001-01-05 Peter De Schrijver <p2@mind.be>
+
+ * eeprom.c (tulip_parse_eeprom): Interpret a bit more of 21142
+ extended format type 3 info blocks in a tulip SROM.
+
+2001-01-03 Matti Aarnio <matti.aarnio@zmailer.org>
+
+ * media.c (tulip_select_media): Support media types 5 and 6
+
+2001-??-?? ??
+
+ * tulip_core.c: Add comment about LanMedia needing
+ a different driver.
+ Enable workarounds for early PCI chipsets.
+ Add IA64 csr0 support, update HPPA csr0 support.
+
+2000-12-17 Alan Cox <alan@redhat.com>
+
+ * eeprom.c, timer.c, tulip.h, tulip_core.c: Merge support
+ for the Davicom's quirks into the main tulip.
+ Patch by Tobias Ringstrom
+
+2000-11-08 Jim Studt <jim@federated.com>
+
+ * eeprom.c (tulip_parse_eeprom): Check array bounds for
+ medianame[] and block_name[] arrays to avoid oops due
+ to bad values returned from hardware.
2000-11-02 Jeff Garzik <jgarzik@mandrakesoft.com>
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 49e8bf42e..f42baefc2 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -207,8 +207,13 @@ subsequent_board:
p += (p[0] & 0x3f) + 1;
continue;
} else if (p[1] & 1) {
+ int gpr_len, reset_len;
+
mtable->has_mii = 1;
leaf->media = 11;
+ gpr_len=p[3]*2;
+ reset_len=p[4+gpr_len]*2;
+ new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
} else {
mtable->has_nonmii = 1;
leaf->media = p[2] & 0x0f;
@@ -247,9 +252,9 @@ subsequent_board:
}
printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
"by a %s (%d) block.\n",
- dev->name, i, medianame[leaf->media], leaf->media,
- leaf->type >= ARRAY_SIZE(block_name) ? "UNKNOWN" :
- block_name[leaf->type], leaf->type);
+ dev->name, i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
}
if (new_advertise)
tp->to_advertise = new_advertise;
@@ -278,6 +283,7 @@ int __devinit tulip_read_eeprom(long ioaddr, int location, int addr_len)
retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
}
outl(EE_ENB, ee_addr);
+ eeprom_delay();
for (i = 16; i > 0; i--) {
outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index b8203f33a..bc8e7bbbc 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -148,7 +148,7 @@ void tulip_select_media(struct net_device *dev, int startup)
long ioaddr = dev->base_addr;
struct tulip_private *tp = (struct tulip_private *)dev->priv;
struct mediatable *mtable = tp->mtable;
- u32 new_csr6;
+ u32 new_csr6=0;
int i;
if (mtable) {
@@ -265,7 +265,9 @@ void tulip_select_media(struct net_device *dev, int startup)
}
case 5: case 6: {
u16 setup[5];
- u32 csr13val, csr14val, csr15dir, csr15val;
+
+ new_csr6 = 0; /* FIXME */
+
for (i = 0; i < 5; i++)
setup[i] = get_u16(&p[i*2 + 1]);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 765cd6c7c..0517b90af 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -28,7 +28,7 @@
#include <asm/unaligned.h>
static char version[] __devinitdata =
- "Linux Tulip driver version 0.9.13 (January 2, 2001)\n";
+ "Linux Tulip driver version 0.9.13a (January 20, 2001)\n";
/* A few user-configurable values. */
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index eaba0cd34..3f23aec1c 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -45,25 +45,10 @@
#include <linux/lapb.h>
#include <linux/init.h>
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static int lapbeth_rcv(struct sk_buff *, struct net_device *, struct packet_type *);
-static int lapbeth_device_event(struct notifier_block *, unsigned long, void *);
-
-static struct packet_type lapbeth_packet_type = {
- 0, /* ntohs(ETH_P_DEC),*/
- 0, /* copy */
- lapbeth_rcv,
- NULL,
- NULL,
-};
-
-static struct notifier_block lapbeth_dev_notifier = {
- lapbeth_device_event,
- 0
-};
-
+static char bcast_addr[6] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
+/* If this number is made larger, check that the temporary string buffer
+ * in lapbeth_new_device is large enough to store the probe device name.*/
#define MAXLAPBDEV 100
static struct lapbethdev {
@@ -72,29 +57,14 @@ static struct lapbethdev {
struct net_device *ethdev; /* link to ethernet device */
struct net_device axdev; /* lapbeth device (lapb#) */
struct net_device_stats stats; /* some statistics */
-} *lapbeth_devices = NULL;
-
+} *lapbeth_devices /* = NULL initially */;
/* ------------------------------------------------------------------------ */
-
-/*
- * Get the ethernet device for a LAPB device
- */
-#if 0
-static __inline__ struct net_device *lapbeth_get_ether_dev(struct net_device *dev)
-{
- struct lapbethdev *lapbeth;
-
- lapbeth = (struct lapbethdev *)dev->priv;
-
- return (lapbeth != NULL) ? lapbeth->ethdev : NULL;
-}
-#endif
/*
* Get the LAPB device for the ethernet device
*/
-static __inline__ struct net_device *lapbeth_get_x25_dev(struct net_device *dev)
+static inline struct net_device *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
@@ -105,7 +75,7 @@ static __inline__ struct net_device *lapbeth_get_x25_dev(struct net_device *dev)
return NULL;
}
-static __inline__ int dev_is_ethdev(struct net_device *dev)
+static inline int dev_is_ethdev(struct net_device *dev)
{
return (
dev->type == ARPHRD_ETHER
@@ -122,7 +92,7 @@ static int lapbeth_check_devices(struct net_device *dev)
struct lapbethdev *lapbeth, *lapbeth_prev;
int result = 0;
unsigned long flags;
-
+
save_flags(flags);
cli();
@@ -134,7 +104,7 @@ static int lapbeth_check_devices(struct net_device *dev)
lapbeth_prev->next = lapbeth->next;
else
lapbeth_devices = lapbeth->next;
-
+
if (&lapbeth->axdev == dev)
result = 1;
@@ -145,16 +115,14 @@ static int lapbeth_check_devices(struct net_device *dev)
lapbeth_prev = lapbeth;
}
-
+
restore_flags(flags);
-
+
return result;
}
-
/* ------------------------------------------------------------------------ */
-
/*
* Receive a LAPB frame via an ethernet interface.
*/
@@ -164,7 +132,7 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
struct lapbethdev *lapbeth;
skb->sk = NULL; /* Initially we don't know who it's for */
-
+
dev = lapbeth_get_x25_dev(dev);
if (dev == NULL || !netif_running(dev)) {
@@ -172,7 +140,7 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
return 0;
}
- lapbeth = (struct lapbethdev *)dev->priv;
+ lapbeth = (struct lapbethdev *) dev->priv;
lapbeth->stats.rx_packets++;
@@ -191,7 +159,7 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
static int lapbeth_data_indication(void *token, struct sk_buff *skb)
{
- struct lapbethdev *lapbeth = (struct lapbethdev *)token;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) token;
unsigned char *ptr;
ptr = skb_push(skb, 1);
@@ -206,14 +174,12 @@ static int lapbeth_data_indication(void *token, struct sk_buff *skb)
}
/*
- * Send a LAPB frame via an ethernet interface
+ * Send a LAPB frame via an ethernet interface
*/
static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct lapbethdev *lapbeth;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) dev->priv;
int err;
-
- lapbeth = (struct lapbethdev *)dev->priv;
/*
* Just to be *really* sure not to send anything if the interface
@@ -253,10 +219,10 @@ static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-
+
static void lapbeth_data_transmit(void *token, struct sk_buff *skb)
{
- struct lapbethdev *lapbeth = (struct lapbethdev *)token;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) token;
unsigned char *ptr;
struct net_device *dev;
int size;
@@ -281,7 +247,7 @@ static void lapbeth_data_transmit(void *token, struct sk_buff *skb)
static void lapbeth_connected(void *token, int reason)
{
- struct lapbethdev *lapbeth = (struct lapbethdev *)token;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) token;
struct sk_buff *skb;
unsigned char *ptr;
@@ -303,7 +269,7 @@ static void lapbeth_connected(void *token, int reason)
static void lapbeth_disconnected(void *token, int reason)
{
- struct lapbethdev *lapbeth = (struct lapbethdev *)token;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) token;
struct sk_buff *skb;
unsigned char *ptr;
@@ -328,10 +294,7 @@ static void lapbeth_disconnected(void *token, int reason)
*/
static struct net_device_stats *lapbeth_get_stats(struct net_device *dev)
{
- struct lapbethdev *lapbeth;
-
- lapbeth = (struct lapbethdev *)dev->priv;
-
+ struct lapbethdev *lapbeth = (struct lapbethdev *) dev->priv;
return &lapbeth->stats;
}
@@ -340,18 +303,11 @@ static struct net_device_stats *lapbeth_get_stats(struct net_device *dev)
*/
static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
- struct sockaddr *sa = (struct sockaddr *)addr;
-
+ struct sockaddr *sa = (struct sockaddr *) addr;
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
-
return 0;
}
-static int lapbeth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return -EINVAL;
-}
-
/*
* open/close a device
*/
@@ -363,8 +319,8 @@ static int lapbeth_open(struct net_device *dev)
if (lapbeth_check_devices(dev))
return -ENODEV; /* oops, it's gone */
-
- lapbeth = (struct lapbethdev *)dev->priv;
+
+ lapbeth = (struct lapbethdev *) dev->priv;
lapbeth_callbacks.connect_confirmation = lapbeth_connected;
lapbeth_callbacks.connect_indication = lapbeth_connected;
@@ -378,31 +334,20 @@ static int lapbeth_open(struct net_device *dev)
return -ENODEV;
}
- MOD_INC_USE_COUNT;
netif_start_queue(dev);
-
return 0;
}
static int lapbeth_close(struct net_device *dev)
{
- struct lapbethdev *lapbeth;
+ struct lapbethdev *lapbeth = (struct lapbethdev *) dev->priv;
int err;
netif_stop_queue(dev);
-
- lapbeth = (struct lapbethdev *)dev->priv;
if ((err = lapb_unregister(lapbeth)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_unregister error - %d\n", err);
- MOD_DEC_USE_COUNT;
-
- return 0;
-}
-
-static int lapbeth_dev_init(struct net_device *dev)
-{
return 0;
}
@@ -414,14 +359,14 @@ static int lapbeth_dev_init(struct net_device *dev)
static int lapbeth_new_device(struct net_device *dev)
{
int k;
- unsigned char *buf;
+ unsigned char buf[14];
struct lapbethdev *lapbeth, *lapbeth2;
-
+
if ((lapbeth = kmalloc(sizeof(struct lapbethdev), GFP_KERNEL)) == NULL)
return -ENOMEM;
-
+
memset(lapbeth, 0, sizeof(struct lapbethdev));
-
+
dev_hold(dev);
lapbeth->ethdev = dev;
@@ -429,7 +374,7 @@ static int lapbeth_new_device(struct net_device *dev)
strncpy(lapbeth->ethname, dev->name, sizeof(lapbeth->ethname)-1);
dev = &lapbeth->axdev;
- buf = kmalloc(14, GFP_KERNEL);
+ SET_MODULE_OWNER(dev);
for (k = 0; k < MAXLAPBDEV; k++) {
struct net_device *odev;
@@ -445,10 +390,9 @@ static int lapbeth_new_device(struct net_device *dev)
kfree(lapbeth);
return -ENODEV;
}
-
+
dev->priv = (void *)lapbeth; /* pointer back */
strcpy(dev->name, buf);
- dev->init = lapbeth_dev_init;
if (register_netdev(dev) != 0) {
dev_put(dev);
@@ -463,10 +407,6 @@ static int lapbeth_new_device(struct net_device *dev)
dev->stop = lapbeth_close;
dev->set_mac_address = lapbeth_set_mac_address;
dev->get_stats = lapbeth_get_stats;
- dev->do_ioctl = lapbeth_ioctl;
-
- dev->flags = 0;
-
dev->type = ARPHRD_X25;
dev->hard_header_len = 3;
dev->mtu = 1000;
@@ -480,20 +420,19 @@ static int lapbeth_new_device(struct net_device *dev)
for (lapbeth2 = lapbeth_devices; lapbeth2->next != NULL; lapbeth2 = lapbeth2->next);
lapbeth2->next = lapbeth;
}
-
+
sti();
return 0;
}
-
/*
* Handle device status changes.
*/
-static int lapbeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
+static int lapbeth_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct net_device *dev = (struct net_device *)ptr;
-
+ struct net_device *dev = (struct net_device *) ptr;
+
if (!dev_is_ethdev(dev))
return NOTIFY_DONE;
@@ -518,23 +457,28 @@ static int lapbeth_device_event(struct notifier_block *this,unsigned long event,
return NOTIFY_DONE;
}
-
/* ------------------------------------------------------------------------ */
-/*
- * Initialize driver. To be called from af_ax25 if not compiled as a
- * module
- */
-int lapbeth_init(void)
+static struct packet_type lapbeth_packet_type = {
+ type: __constant_htons(ETH_P_DEC),
+ func: lapbeth_rcv,
+};
+
+static struct notifier_block lapbeth_dev_notifier = {
+ notifier_call: lapbeth_device_event,
+};
+
+static const char banner[] __initdata = KERN_INFO "LAPB Ethernet driver version 0.01\n";
+
+static int __init lapbeth_init_driver(void)
{
struct net_device *dev;
- lapbeth_packet_type.type = htons(ETH_P_DEC);
dev_add_pack(&lapbeth_packet_type);
register_netdevice_notifier(&lapbeth_dev_notifier);
- printk(KERN_INFO "LAPB Ethernet driver version 0.01\n");
+ printk(banner);
read_lock_bh(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
@@ -548,19 +492,9 @@ int lapbeth_init(void)
return 0;
}
+module_init(lapbeth_init_driver);
-#ifdef MODULE
-EXPORT_NO_SYMBOLS;
-
-MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
-MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
-
-int init_module(void)
-{
- return lapbeth_init();
-}
-
-void cleanup_module(void)
+static void __exit lapbeth_cleanup_driver(void)
{
struct lapbethdev *lapbeth;
@@ -571,4 +505,10 @@ void cleanup_module(void)
for (lapbeth = lapbeth_devices; lapbeth != NULL; lapbeth = lapbeth->next)
unregister_netdev(&lapbeth->axdev);
}
-#endif
+module_exit(lapbeth_cleanup_driver);
+
+EXPORT_NO_SYMBOLS;
+
+MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
+MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
+
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 91b9e8f00..882e58c1b 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -29,4 +29,5 @@ extern lmc_media_t lmc_hssi_media;
static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 );
#endif
-#endif \ No newline at end of file
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_media.h b/drivers/net/wan/lmc/lmc_media.h
index 7cc6c1650..ddcc00403 100644
--- a/drivers/net/wan/lmc/lmc_media.h
+++ b/drivers/net/wan/lmc/lmc_media.h
@@ -61,4 +61,5 @@ lmc_media_t lmc_t1_media = {
};
-#endif \ No newline at end of file
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_prot.h b/drivers/net/wan/lmc/lmc_prot.h
index 859ef0f00..f3b1df9e2 100644
--- a/drivers/net/wan/lmc/lmc_prot.h
+++ b/drivers/net/wan/lmc/lmc_prot.h
@@ -11,4 +11,5 @@ void lmc_proto_close(lmc_softc_t *sc const)
unsigned short lmc_proto_type(lmc_softc_t *sc const, struct skbuff *skb)
-#endif \ No newline at end of file
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index 6136dfad7..080a55773 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -12,4 +12,5 @@ unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
-#endif \ No newline at end of file
+#endif
+
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index ef489b1a6..1eb66b44e 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -744,7 +744,7 @@ static void sdla_receive(struct net_device *dev)
struct buf_entry *pbuf;
unsigned long flags;
- int i, received, success, addr, buf_base, buf_top;
+ int i=0, received, success, addr, buf_base, buf_top;
short dlci, len, len2, split;
flp = dev->priv;
diff --git a/drivers/pnp/isapnp_proc.c b/drivers/pnp/isapnp_proc.c
index ee4c2aee3..9f3cc0fc3 100644
--- a/drivers/pnp/isapnp_proc.c
+++ b/drivers/pnp/isapnp_proc.c
@@ -783,7 +783,10 @@ static int isapnp_set_card(char *line)
unsigned int id;
char index[16], value[32];
- isapnp_info_card = NULL;
+ if (isapnp_info_card) {
+ isapnp_cfg_end();
+ isapnp_info_card = NULL;
+ }
line = isapnp_get_str(index, line, sizeof(index));
isapnp_get_str(value, line, sizeof(value));
idx = idx1 = simple_strtoul(index, NULL, 0);
@@ -853,10 +856,7 @@ static int isapnp_set_device(char *line)
static int isapnp_autoconfigure(void)
{
- if (isapnp_info_device == NULL) {
- printk("isapnp: device is not set\n");
- return 0;
- }
+ isapnp_cfg_end();
if (isapnp_info_device->active)
isapnp_info_device->deactivate(isapnp_info_device);
if (isapnp_info_device->prepare(isapnp_info_device) < 0) {
@@ -867,6 +867,13 @@ static int isapnp_autoconfigure(void)
printk("isapnp: cannot activate device");
return 0;
}
+ if (isapnp_cfg_begin(isapnp_info_card->number, -1)<0) {
+ printk("isapnp: configuration start sequence for card %d failed\n", isapnp_info_card->number);
+ isapnp_info_card = NULL;
+ isapnp_info_device = NULL;
+ return 1;
+ }
+ isapnp_device(isapnp_info_device->devfn);
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 55e27c7d8..a48213aea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -952,7 +952,6 @@ do_dasd_request (request_queue_t *queue)
go = 1;
while (go && !list_empty(&queue->queue_head)) {
req = blkdev_entry_next_request(&queue->queue_head);
- req = blkdev_entry_next_request(&queue->queue_head);
di = DEVICE_NR (req->rq_dev);
dasd_debug ((unsigned long) req); /* req */
dasd_debug (0xc4d90000 + /* DR## */
diff --git a/drivers/sbus/audio/Config.in b/drivers/sbus/audio/Config.in
index a9419f78c..40a431f81 100644
--- a/drivers/sbus/audio/Config.in
+++ b/drivers/sbus/audio/Config.in
@@ -8,9 +8,11 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
comment 'Linux/SPARC audio subsystem (EXPERIMENTAL)'
tristate 'Audio support (EXPERIMENTAL)' CONFIG_SPARCAUDIO
- dep_tristate ' AMD7930 Lowlevel Driver' CONFIG_SPARCAUDIO_AMD7930 $CONFIG_SPARCAUDIO
+ if [ "$CONFIG_SPARC64" != "y" ]; then
+ dep_tristate ' AMD7930 Lowlevel Driver' CONFIG_SPARCAUDIO_AMD7930 $CONFIG_SPARCAUDIO
+ dep_tristate ' DBRI Lowlevel Driver' CONFIG_SPARCAUDIO_DBRI $CONFIG_SPARCAUDIO
+ fi
dep_tristate ' CS4231 Lowlevel Driver' CONFIG_SPARCAUDIO_CS4231 $CONFIG_SPARCAUDIO
- dep_tristate ' DBRI Lowlevel Driver' CONFIG_SPARCAUDIO_DBRI $CONFIG_SPARCAUDIO
dep_tristate ' Dummy Lowlevel Driver' CONFIG_SPARCAUDIO_DUMMY $CONFIG_SPARCAUDIO
endmenu
fi
diff --git a/drivers/sbus/audio/amd7930.c b/drivers/sbus/audio/amd7930.c
index f3f593bce..1ddfb5017 100644
--- a/drivers/sbus/audio/amd7930.c
+++ b/drivers/sbus/audio/amd7930.c
@@ -1,4 +1,4 @@
-/* $Id: amd7930.c,v 1.24 2000/01/22 05:10:27 anton Exp $
+/* $Id: amd7930.c,v 1.25 2001/01/08 04:19:16 davem Exp $
* drivers/sbus/audio/amd7930.c
*
* Copyright (C) 1996,1997 Thomas K. Dyas (tdyas@eden.rutgers.edu)
@@ -107,7 +107,7 @@ static __u8 mulaw2bilinear(__u8 data);
static __u8 linear2mulaw(__u16 data);
static __u16 mulaw2linear(__u8 data);
-#if defined (AMD79C30_ISDN) || defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
+#if defined (AMD79C30_ISDN) && defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
#include "../../isdn/hisax/hisax.h"
#include "../../isdn/hisax/isdnl1.h"
#include "../../isdn/hisax/foreign.h"
@@ -1131,7 +1131,7 @@ static int amd7930_ioctl(struct inode * inode, struct file * file,
*
*/
-#if defined (AMD79C30_ISDN) || defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
+#if defined (AMD79C30_ISDN) && defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
static int amd7930_get_irqnum(int dev)
{
struct amd7930_info *info;
diff --git a/drivers/sbus/audio/dbri.c b/drivers/sbus/audio/dbri.c
index ad0d879b1..665bb4b31 100644
--- a/drivers/sbus/audio/dbri.c
+++ b/drivers/sbus/audio/dbri.c
@@ -1,4 +1,4 @@
-/* $Id: dbri.c,v 1.22 2000/10/27 07:01:38 uzi Exp $
+/* $Id: dbri.c,v 1.23 2001/01/08 04:19:16 davem Exp $
* drivers/sbus/audio/dbri.c
*
* Copyright (C) 1997 Rudolf Koenig (rfkoenig@immd4.informatik.uni-erlangen.de)
@@ -61,7 +61,7 @@
#include <asm/audioio.h>
#include "dbri.h"
-#if defined(DBRI_ISDN) || defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
+#if defined(DBRI_ISDN) && defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
#include "../../isdn/hisax/hisax.h"
#include "../../isdn/hisax/isdnl1.h"
#include "../../isdn/hisax/foreign.h"
@@ -2227,7 +2227,7 @@ void dbri_brecv(int dev, unsigned int chan,
recv_on_pipe(dbri, 8+chan, buffer, size, callback, callback_arg);
}
-#if defined(DBRI_ISDN) || defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
+#if defined(DBRI_ISDN) && defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE > 0x200ff
struct foreign_interface dbri_foreign_interface = {
dbri_get_irqnum,
dbri_get_liu_state,
diff --git a/drivers/sbus/char/Makefile b/drivers/sbus/char/Makefile
index b3a736db5..437f2d28c 100644
--- a/drivers/sbus/char/Makefile
+++ b/drivers/sbus/char/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PCI) += su.o pcikbd.o
obj-$(CONFIG_SAB82532) += sab82532.o
obj-$(CONFIG_ENVCTRL) += envctrl.o
obj-$(CONFIG_DISPLAY7SEG) += display7seg.o
+obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwatchdog.o
obj-$(CONFIG_OBP_FLASH) += flash.o
obj-$(CONFIG_SUN_OPENPROMIO) += openprom.o
obj-$(CONFIG_SUN_MOSTEK_RTC) += rtc.o
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index c96a141a5..2be051503 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
@@ -432,6 +433,7 @@ static int terminate(unsigned minor)
return 0;
}
+static spinlock_t bpp_open_lock = SPIN_LOCK_UNLOCKED;
/*
* Allow only one process to open the device at a time.
@@ -439,13 +441,25 @@ static int terminate(unsigned minor)
static int bpp_open(struct inode *inode, struct file *f)
{
unsigned minor = MINOR(inode->i_rdev);
- if (minor >= BPP_NO) return -ENODEV;
- if (! instances[minor].present) return -ENODEV;
- if (instances[minor].opened) return -EBUSY;
-
- instances[minor].opened = 1;
+ int ret;
+
+ spin_lock(&bpp_open_lock);
+ ret = 0;
+ if (minor >= BPP_NO) {
+ ret = -ENODEV;
+ } else {
+ if (! instances[minor].present) {
+ ret = -ENODEV;
+ } else {
+ if (instances[minor].opened)
+ ret = -EBUSY;
+ else
+ instances[minor].opened = 1;
+ }
+ }
+ spin_unlock(&bpp_open_lock);
- return 0;
+ return ret;
}
/*
@@ -458,12 +472,14 @@ static int bpp_release(struct inode *inode, struct file *f)
{
unsigned minor = MINOR(inode->i_rdev);
- lock_kernel();
+ spin_lock(&bpp_open_lock);
instances[minor].opened = 0;
if (instances[minor].mode != COMPATIBILITY)
- terminate(minor);
- unlock_kernel();
+ terminate(minor);
+
+ spin_unlock(&bpp_open_lock);
+
return 0;
}
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
new file mode 100644
index 000000000..c319b4d33
--- /dev/null
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -0,0 +1,838 @@
+/* cpwatchdog.c - driver implementation for hardware watchdog
+ * timers found on Sun Microsystems CP1400 and CP1500 boards.
+ *
+ * This device supports both the generic Linux watchdog
+ * interface and Solaris-compatible ioctls as best it is
+ * able.
+ *
+ * NOTE: CP1400 systems appear to have a defective intr_mask
+ * register on the PLD, preventing the disabling of
+ * timer interrupts. We use a timer to periodically
+ * reset 'stopped' watchdogs on affected platforms.
+ *
+ * TODO: DevFS support (/dev/watchdogs/0 ... /dev/watchdogs/2)
+ *
+ * Copyright (c) 2000 Eric Brower (ebrower@usa.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/timer.h>
+#include <asm/irq.h>
+#include <asm/ebus.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+
+#include <asm/watchdog.h>
+
+#define WD_OBPNAME "watchdog"
+#define WD_BADMODEL "SUNW,501-5336"
+#define WD_BTIMEOUT (jiffies + (HZ * 1000))
+#define WD_BLIMIT 0xFFFF
+
+#define WD0_DEVNAME "watchdog0"
+#define WD1_DEVNAME "watchdog1"
+#define WD2_DEVNAME "watchdog2"
+
+#define WD0_MINOR 212
+#define WD1_MINOR 213
+#define WD2_MINOR 214
+
+
+/* Internal driver definitions
+ */
+#define WD0_ID 0 /* Watchdog0 */
+#define WD1_ID 1 /* Watchdog1 */
+#define WD2_ID 2 /* Watchdog2 */
+#define WD_NUMDEVS 3 /* Device contains 3 timers */
+
+#define WD_INTR_OFF 0 /* Interrupt disable value */
+#define WD_INTR_ON 1 /* Interrupt enable value */
+
+#define WD_STAT_INIT 0x01 /* Watchdog timer is initialized */
+#define WD_STAT_BSTOP 0x02 /* Watchdog timer is brokenstopped */
+#define WD_STAT_SVCD 0x04 /* Watchdog interrupt occurred */
+
+/* Register value definitions
+ */
+#define WD0_INTR_MASK 0x01 /* Watchdog device interrupt masks */
+#define WD1_INTR_MASK 0x02
+#define WD2_INTR_MASK 0x04
+
+#define WD_S_RUNNING 0x01 /* Watchdog device status running */
+#define WD_S_EXPIRED 0x02 /* Watchdog device status expired */
+
+/* Sun uses Altera PLD EPF8820ATC144-4
+ * providing three hardware watchdogs:
+ *
+ * 1) RIC - sends an interrupt when triggered
+ * 2) XIR - asserts XIR_B_RESET when triggered, resets CPU
+ * 3) POR - asserts POR_B_RESET when triggered, resets CPU, backplane, board
+ *
+ *** Timer register block definition (struct wd_timer_regblk)
+ *
+ * dcntr and limit registers (halfword access):
+ * -------------------
+ * | 15 | ...| 1 | 0 |
+ * -------------------
+ * |- counter val -|
+ * -------------------
+ * dcntr - Current 16-bit downcounter value.
+ * When downcounter reaches '0' watchdog expires.
+ * Reading this register resets downcounter with 'limit' value.
+ * limit - 16-bit countdown value in 1/10th second increments.
+ * Writing this register begins countdown with input value.
+ * Reading from this register does not affect counter.
+ * NOTES: After watchdog reset, dcntr and limit contain '1'
+ *
+ * status register (byte access):
+ * ---------------------------
+ * | 7 | ... | 2 | 1 | 0 |
+ * --------------+------------
+ * |- UNUSED -| EXP | RUN |
+ * ---------------------------
+ * status- Bit 0 - Watchdog is running
+ * Bit 1 - Watchdog has expired
+ *
+ *** PLD register block definition (struct wd_pld_regblk)
+ *
+ * intr_mask register (byte access):
+ * ---------------------------------
+ * | 7 | ... | 3 | 2 | 1 | 0 |
+ * +-------------+------------------
+ * |- UNUSED -| WD3 | WD2 | WD1 |
+ * ---------------------------------
+ * WD3 - 1 == Interrupt disabled for watchdog 3
+ * WD2 - 1 == Interrupt disabled for watchdog 2
+ * WD1 - 1 == Interrupt disabled for watchdog 1
+ *
+ * pld_status register (byte access):
+ * UNKNOWN, MAGICAL MYSTERY REGISTER
+ *
+ */
+struct wd_timer_regblk {
+ volatile __u16 dcntr; /* down counter - hw */
+ volatile __u16 dcntr_pad;
+ volatile __u16 limit; /* limit register - hw */
+ volatile __u16 limit_pad;
+ volatile __u8 status; /* status register - b */
+ volatile __u8 status_pad;
+ volatile __u16 status_pad2;
+ volatile __u32 pad32; /* yet more padding */
+};
+
+struct wd_pld_regblk {
+ volatile __u8 intr_mask; /* interrupt mask - b */
+ volatile __u8 intr_mask_pad;
+ volatile __u16 intr_mask_pad2;
+ volatile __u8 status; /* device status - b */
+ volatile __u8 status_pad;
+ volatile __u16 status_pad2;
+};
+
+struct wd_regblk {
+ volatile struct wd_timer_regblk wd0_regs;
+ volatile struct wd_timer_regblk wd1_regs;
+ volatile struct wd_timer_regblk wd2_regs;
+ volatile struct wd_pld_regblk pld_regs;
+};
+
+/* Individual timer structure
+ */
+struct wd_timer {
+ __u16 timeout;
+ __u8 intr_mask;
+ unsigned char runstatus;
+ volatile struct wd_timer_regblk* regs;
+};
+
+/* Device structure
+ */
+struct wd_device {
+ int irq;
+ spinlock_t lock;
+ unsigned char isbaddoggie; /* defective PLD */
+ unsigned char opt_enable;
+ unsigned char opt_reboot;
+ unsigned short opt_timeout;
+ unsigned char initialized;
+ struct wd_timer watchdog[WD_NUMDEVS];
+ volatile struct wd_regblk* regs;
+};
+
+static struct wd_device wd_dev = {
+ 0, SPIN_LOCK_UNLOCKED, 0, 0, 0, 0,
+};
+
+struct timer_list wd_timer;
+
+static int wd0_timeout = 0;
+static int wd1_timeout = 0;
+static int wd2_timeout = 0;
+
+#ifdef MODULE
+EXPORT_NO_SYMBOLS;
+
+MODULE_PARM (wd0_timeout, "i");
+MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs");
+MODULE_PARM (wd1_timeout, "i");
+MODULE_PARM_DESC(wd1_timeout, "Default watchdog1 timeout in 1/10secs");
+MODULE_PARM (wd2_timeout, "i");
+MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs");
+
+MODULE_AUTHOR
+ ("Eric Brower <ebrower@usa.net>");
+MODULE_DESCRIPTION
+ ("Hardware watchdog driver for Sun Microsystems CP1400/1500");
+MODULE_SUPPORTED_DEVICE
+ ("watchdog");
+#endif /* ifdef MODULE */
+
+/* Forward declarations of internal methods
+ */
+void wd_dumpregs(void);
+void wd_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+void wd_toggleintr(struct wd_timer* pTimer, int enable);
+void wd_pingtimer(struct wd_timer* pTimer);
+void wd_starttimer(struct wd_timer* pTimer);
+void wd_resetbrokentimer(struct wd_timer* pTimer);
+void wd_stoptimer(struct wd_timer* pTimer);
+void wd_brokentimer(unsigned long data);
+int wd_getstatus(struct wd_timer* pTimer);
+
+/* PLD expects words to be written in LSB format,
+ * so we must flip all words prior to writing them to regs
+ */
+inline unsigned short flip_word(unsigned short word)
+{
+ return ((word & 0xff) << 8) | ((word >> 8) & 0xff);
+}
+
+#define wd_writew(val, addr) (writew(flip_word(val), addr))
+#define wd_readw(addr) (flip_word(readw(addr)))
+#define wd_writeb(val, addr) (writeb(val, addr))
+#define wd_readb(addr) (readb(addr))
+
+
+/* CP1400s seem to have broken PLD implementations--
+ * the interrupt_mask register cannot be written, so
+ * no timer interrupts can be masked within the PLD.
+ */
+static inline int wd_isbroken(void)
+{
+ /* we could test this by read/write/read/restore
+ * on the interrupt mask register only if OBP
+ * 'watchdog-enable?' == FALSE, but it seems
+ * ubiquitous on CP1400s
+ */
+ char val[32];
+ prom_getproperty(prom_root_node, "model", val, sizeof(val));
+ return((!strcmp(val, WD_BADMODEL)) ? 1 : 0);
+}
+
+/* Retrieve watchdog-enable? option from OBP
+ * Returns 0 if false, 1 if true
+ */
+static inline int wd_opt_enable(void)
+{
+ int opt_node;
+
+ opt_node = prom_getchild(prom_root_node);
+ opt_node = prom_searchsiblings(opt_node, "options");
+ return((-1 == prom_getint(opt_node, "watchdog-enable?")) ? 0 : 1);
+}
+
+/* Retrieve watchdog-reboot? option from OBP
+ * Returns 0 if false, 1 if true
+ */
+static inline int wd_opt_reboot(void)
+{
+ int opt_node;
+
+ opt_node = prom_getchild(prom_root_node);
+ opt_node = prom_searchsiblings(opt_node, "options");
+ return((-1 == prom_getint(opt_node, "watchdog-reboot?")) ? 0 : 1);
+}
+
+/* Retrieve watchdog-timeout option from OBP
+ * Returns OBP value, or 0 if not located
+ */
+static inline int wd_opt_timeout(void)
+{
+ int opt_node;
+ char value[32];
+ char *p = value;
+
+ opt_node = prom_getchild(prom_root_node);
+ opt_node = prom_searchsiblings(opt_node, "options");
+ opt_node = prom_getproperty(opt_node,
+ "watchdog-timeout",
+ value,
+ sizeof(value));
+ if(-1 != opt_node) {
+ /* atoi implementation */
+ for(opt_node = 0; /* nop */; p++) {
+ if(*p >= '0' && *p <= '9') {
+ opt_node = (10*opt_node)+(*p-'0');
+ }
+ else {
+ break;
+ }
+ }
+ }
+ return((-1 == opt_node) ? (0) : (opt_node));
+}
+
+static int wd_open(struct inode *inode, struct file *f)
+{
+ switch(MINOR(inode->i_rdev))
+ {
+ case WD0_MINOR:
+ f->private_data = &wd_dev.watchdog[WD0_ID];
+ break;
+ case WD1_MINOR:
+ f->private_data = &wd_dev.watchdog[WD1_ID];
+ break;
+ case WD2_MINOR:
+ f->private_data = &wd_dev.watchdog[WD2_ID];
+ break;
+ default:
+ return(-ENODEV);
+ }
+
+ /* Register IRQ on first open of device */
+ if(0 == wd_dev.initialized)
+ {
+ if (request_irq(wd_dev.irq,
+ &wd_interrupt,
+ SA_SHIRQ,
+ WD_OBPNAME,
+ (void *)wd_dev.regs)) {
+ printk("%s: Cannot register IRQ %s\n",
+ WD_OBPNAME, __irq_itoa(wd_dev.irq));
+ return(-EBUSY);
+ }
+ wd_dev.initialized = 1;
+ }
+
+ MOD_INC_USE_COUNT;
+ return(0);
+}
+
+static int wd_release(struct inode *inode, struct file *file)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static int wd_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int setopt = 0;
+ struct wd_timer* pTimer = (struct wd_timer*)file->private_data;
+ struct watchdog_info info = {
+ 0,
+ 0,
+ "Altera EPF8820ATC144-4"
+ };
+
+ if(NULL == pTimer) {
+ return(-EINVAL);
+ }
+
+ switch(cmd)
+ {
+ /* Generic Linux IOCTLs */
+ case WDIOC_GETSUPPORT:
+ if(copy_to_user((struct watchdog_info *)arg,
+ (struct watchdog_info *)&info,
+ sizeof(struct watchdog_info *))) {
+ return(-EFAULT);
+ }
+ break;
+ case WDIOC_KEEPALIVE:
+ wd_pingtimer(pTimer);
+ break;
+ case WDIOC_SETOPTIONS:
+ if(copy_from_user(&setopt, (void*) arg, sizeof(unsigned int))) {
+ return -EFAULT;
+ }
+ if(setopt & WDIOS_DISABLECARD) {
+ if(wd_dev.opt_enable) {
+ printk(
+ "%s: cannot disable watchdog in ENABLED mode\n",
+ WD_OBPNAME);
+ return(-EINVAL);
+ }
+ wd_stoptimer(pTimer);
+ }
+ else if(setopt & WDIOS_ENABLECARD) {
+ wd_starttimer(pTimer);
+ }
+ else {
+ return(-EINVAL);
+ }
+ break;
+ /* Solaris-compatible IOCTLs */
+ case WIOCGSTAT:
+ setopt = wd_getstatus(pTimer);
+ if(copy_to_user((void*)arg, &setopt, sizeof(unsigned int))) {
+ return(-EFAULT);
+ }
+ break;
+ case WIOCSTART:
+ wd_starttimer(pTimer);
+ break;
+ case WIOCSTOP:
+ if(wd_dev.opt_enable) {
+ printk("%s: cannot disable watchdog in ENABLED mode\n",
+ WD_OBPNAME);
+ return(-EINVAL);
+ }
+ wd_stoptimer(pTimer);
+ break;
+ default:
+ return(-EINVAL);
+ }
+ return(0);
+}
+
+static ssize_t wd_write( struct file *file,
+ const char *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct wd_timer* pTimer = (struct wd_timer*)file->private_data;
+
+ if(NULL == pTimer) {
+ return(-EINVAL);
+ }
+
+ wd_pingtimer(pTimer);
+ return(count);
+}
+
+static ssize_t wd_read(struct file * file, char * buffer,
+ size_t count, loff_t *ppos)
+{
+#ifdef WD_DEBUG
+ wd_dumpregs();
+ return(0);
+#else
+ return(-EINVAL);
+#endif /* ifdef WD_DEBUG */
+}
+
+void wd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Only WD0 will interrupt-- others are NMI and we won't
+ * see them here....
+ */
+ spin_lock_irq(&wd_dev.lock);
+ if((unsigned long)wd_dev.regs == (unsigned long)dev_id)
+ {
+ wd_stoptimer(&wd_dev.watchdog[WD0_ID]);
+ wd_dev.watchdog[WD0_ID].runstatus |= WD_STAT_SVCD;
+ }
+ spin_unlock_irq(&wd_dev.lock);
+ return;
+}
+
+static struct file_operations wd_fops = {
+ owner: THIS_MODULE,
+ ioctl: wd_ioctl,
+ open: wd_open,
+ write: wd_write,
+ read: wd_read,
+ release: wd_release,
+};
+
+static struct miscdevice wd0_miscdev = { WD0_MINOR, WD0_DEVNAME, &wd_fops };
+static struct miscdevice wd1_miscdev = { WD1_MINOR, WD1_DEVNAME, &wd_fops };
+static struct miscdevice wd2_miscdev = { WD2_MINOR, WD2_DEVNAME, &wd_fops };
+
+void wd_dumpregs(void)
+{
+ /* Reading from downcounters initiates watchdog countdown--
+ * Example is included below for illustration purposes.
+ */
+ int i;
+ printk("%s: dumping register values\n", WD_OBPNAME);
+ for(i = WD0_ID; i < WD_NUMDEVS; ++i) {
+ /* printk("\t%s%i: dcntr at 0x%lx: 0x%x\n",
+ * WD_OBPNAME,
+ * i,
+ * (unsigned long)(&wd_dev.watchdog[i].regs->dcntr),
+ * readw(&wd_dev.watchdog[i].regs->dcntr));
+ */
+ printk("\t%s%i: limit at 0x%lx: 0x%x\n",
+ WD_OBPNAME,
+ i,
+ (unsigned long)(&wd_dev.watchdog[i].regs->limit),
+ readw(&wd_dev.watchdog[i].regs->limit));
+ printk("\t%s%i: status at 0x%lx: 0x%x\n",
+ WD_OBPNAME,
+ i,
+ (unsigned long)(&wd_dev.watchdog[i].regs->status),
+ readb(&wd_dev.watchdog[i].regs->status));
+ printk("\t%s%i: driver status: 0x%x\n",
+ WD_OBPNAME,
+ i,
+ wd_getstatus(&wd_dev.watchdog[i]));
+ }
+ printk("\tintr_mask at 0x%lx: 0x%x\n",
+ (unsigned long)(&wd_dev.regs->pld_regs.intr_mask),
+ readb(&wd_dev.regs->pld_regs.intr_mask));
+ printk("\tpld_status at 0x%lx: 0x%x\n",
+ (unsigned long)(&wd_dev.regs->pld_regs.status),
+ readb(&wd_dev.regs->pld_regs.status));
+}
+
+/* Enable or disable watchdog interrupts
+ * Because of the CP1400 defect this should only be
+ * called during initialzation or by wd_[start|stop]timer()
+ *
+ * pTimer - pointer to timer device, or NULL to indicate all timers
+ * enable - non-zero to enable interrupts, zero to disable
+ */
+void wd_toggleintr(struct wd_timer* pTimer, int enable)
+{
+ unsigned char curregs = wd_readb(&wd_dev.regs->pld_regs.intr_mask);
+ unsigned char setregs =
+ (NULL == pTimer) ?
+ (WD0_INTR_MASK | WD1_INTR_MASK | WD2_INTR_MASK) :
+ (pTimer->intr_mask);
+
+ (WD_INTR_ON == enable) ?
+ (curregs &= ~setregs):
+ (curregs |= setregs);
+
+ wd_writeb(curregs, &wd_dev.regs->pld_regs.intr_mask);
+ return;
+}
+
+/* Reset countdown timer with 'limit' value and continue countdown.
+ * This will not start a stopped timer.
+ *
+ * pTimer - pointer to timer device
+ */
+void wd_pingtimer(struct wd_timer* pTimer)
+{
+ if(wd_readb(&pTimer->regs->status) & WD_S_RUNNING) {
+ wd_readb(&pTimer->regs->dcntr);
+ }
+}
+
+/* Stop a running watchdog timer-- the timer actually keeps
+ * running, but the interrupt is masked so that no action is
+ * taken upon expiration.
+ *
+ * pTimer - pointer to timer device
+ */
+void wd_stoptimer(struct wd_timer* pTimer)
+{
+ if(wd_readb(&pTimer->regs->status) & WD_S_RUNNING) {
+ wd_toggleintr(pTimer, WD_INTR_OFF);
+
+ if(wd_dev.isbaddoggie) {
+ pTimer->runstatus |= WD_STAT_BSTOP;
+ wd_brokentimer((unsigned long)&wd_dev);
+ }
+ }
+}
+
+/* Start a watchdog timer with the specified limit value
+ * If the watchdog is running, it will be restarted with
+ * the provided limit value.
+ *
+ * This function will enable interrupts on the specified
+ * watchdog.
+ *
+ * pTimer - pointer to timer device
+ * limit - limit (countdown) value in 1/10th seconds
+ */
+void wd_starttimer(struct wd_timer* pTimer)
+{
+ if(wd_dev.isbaddoggie) {
+ pTimer->runstatus &= ~WD_STAT_BSTOP;
+ }
+ pTimer->runstatus &= ~WD_STAT_SVCD;
+
+ wd_writew(pTimer->timeout, &pTimer->regs->limit);
+ wd_toggleintr(pTimer, WD_INTR_ON);
+}
+
+/* Restarts timer with maximum limit value and
+ * does not unset 'brokenstop' value.
+ */
+void wd_resetbrokentimer(struct wd_timer* pTimer)
+{
+ wd_toggleintr(pTimer, WD_INTR_ON);
+ wd_writew(WD_BLIMIT, &pTimer->regs->limit);
+}
+
+/* Timer device initialization helper.
+ * Returns 0 on success, other on failure
+ */
+int wd_inittimer(int whichdog)
+{
+ struct miscdevice *whichmisc;
+ volatile struct wd_timer_regblk *whichregs;
+ char whichident[8];
+ int whichmask;
+ __u16 whichlimit;
+
+ switch(whichdog)
+ {
+ case WD0_ID:
+ whichmisc = &wd0_miscdev;
+ strcpy(whichident, "RIC");
+ whichregs = &wd_dev.regs->wd0_regs;
+ whichmask = WD0_INTR_MASK;
+ whichlimit= (0 == wd0_timeout) ?
+ (wd_dev.opt_timeout):
+ (wd0_timeout);
+ break;
+ case WD1_ID:
+ whichmisc = &wd1_miscdev;
+ strcpy(whichident, "XIR");
+ whichregs = &wd_dev.regs->wd1_regs;
+ whichmask = WD1_INTR_MASK;
+ whichlimit= (0 == wd1_timeout) ?
+ (wd_dev.opt_timeout):
+ (wd1_timeout);
+ break;
+ case WD2_ID:
+ whichmisc = &wd2_miscdev;
+ strcpy(whichident, "POR");
+ whichregs = &wd_dev.regs->wd2_regs;
+ whichmask = WD2_INTR_MASK;
+ whichlimit= (0 == wd2_timeout) ?
+ (wd_dev.opt_timeout):
+ (wd2_timeout);
+ break;
+ default:
+ printk("%s: %s: invalid watchdog id: %i\n",
+ WD_OBPNAME, __FUNCTION__, whichdog);
+ return(1);
+ }
+ if(0 != misc_register(whichmisc))
+ {
+ return(1);
+ }
+ wd_dev.watchdog[whichdog].regs = whichregs;
+ wd_dev.watchdog[whichdog].timeout = whichlimit;
+ wd_dev.watchdog[whichdog].intr_mask = whichmask;
+ wd_dev.watchdog[whichdog].runstatus &= ~WD_STAT_BSTOP;
+ wd_dev.watchdog[whichdog].runstatus |= WD_STAT_INIT;
+
+ printk("%s%i: %s hardware watchdog [%01i.%i sec] %s\n",
+ WD_OBPNAME,
+ whichdog,
+ whichident,
+ wd_dev.watchdog[whichdog].timeout / 10,
+ wd_dev.watchdog[whichdog].timeout % 10,
+ (0 != wd_dev.opt_enable) ? "in ENABLED mode" : "");
+ return(0);
+}
+
+/* Timer method called to reset stopped watchdogs--
+ * because of the PLD bug on CP1400, we cannot mask
+ * interrupts within the PLD so me must continually
+ * reset the timers ad infinitum.
+ */
+void wd_brokentimer(unsigned long data)
+{
+ struct wd_device* pDev = (struct wd_device*)data;
+ int id, tripped = 0;
+
+ /* kill a running timer instance, in case we
+ * were called directly instead of by kernel timer
+ */
+ if(timer_pending(&wd_timer)) {
+ del_timer(&wd_timer);
+ }
+
+ for(id = WD0_ID; id < WD_NUMDEVS; ++id) {
+ if(pDev->watchdog[id].runstatus & WD_STAT_BSTOP) {
+ ++tripped;
+ wd_resetbrokentimer(&pDev->watchdog[id]);
+ }
+ }
+
+ if(tripped) {
+ /* there is at least one timer brokenstopped-- reschedule */
+ wd_timer.expires = WD_BTIMEOUT;
+ add_timer(&wd_timer);
+ }
+}
+
+int wd_getstatus(struct wd_timer* pTimer)
+{
+ unsigned char stat = wd_readb(&pTimer->regs->status);
+ unsigned char intr = wd_readb(&wd_dev.regs->pld_regs.intr_mask);
+ unsigned char ret = WD_STOPPED;
+
+ /* determine STOPPED */
+ if(0 == stat ) {
+ return(ret);
+ }
+ /* determine EXPIRED vs FREERUN vs RUNNING */
+ else if(WD_S_EXPIRED & stat) {
+ ret = WD_EXPIRED;
+ }
+ else if(WD_S_RUNNING & stat) {
+ if(intr & pTimer->intr_mask) {
+ ret = WD_FREERUN;
+ }
+ else {
+ /* Fudge WD_EXPIRED status for defective CP1400--
+ * IF timer is running
+ * AND brokenstop is set
+ * AND an interrupt has been serviced
+ * we are WD_EXPIRED.
+ *
+ * IF timer is running
+ * AND brokenstop is set
+ * AND no interrupt has been serviced
+ * we are WD_FREERUN.
+ */
+ if(wd_dev.isbaddoggie && (pTimer->runstatus & WD_STAT_BSTOP)) {
+ if(pTimer->runstatus & WD_STAT_SVCD) {
+ ret = WD_EXPIRED;
+ }
+ else {
+ /* we could as well pretend we are expired */
+ ret = WD_FREERUN;
+ }
+ }
+ else {
+ ret = WD_RUNNING;
+ }
+ }
+ }
+
+ /* determine SERVICED */
+ if(pTimer->runstatus & WD_STAT_SVCD) {
+ ret |= WD_SERVICED;
+ }
+
+ return(ret);
+}
+
+static int __init wd_init(void)
+{
+ int id;
+ struct linux_ebus *ebus = NULL;
+ struct linux_ebus_device *edev = NULL;
+
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if (!strcmp(edev->prom_name, WD_OBPNAME))
+ goto ebus_done;
+ }
+ }
+
+ebus_done:
+ if(!edev) {
+ printk("%s: unable to locate device\n", WD_OBPNAME);
+ return -ENODEV;
+ }
+
+ wd_dev.regs =
+ ioremap(edev->resource[0].start, sizeof(struct wd_regblk));
+
+ if(NULL == wd_dev.regs) {
+ printk("%s: unable to map registers\n", WD_OBPNAME);
+ return(-ENODEV);
+ }
+
+ /* initialize device structure from OBP parameters */
+ wd_dev.irq = edev->irqs[0];
+ wd_dev.opt_enable = wd_opt_enable();
+ wd_dev.opt_reboot = wd_opt_reboot();
+ wd_dev.opt_timeout = wd_opt_timeout();
+ wd_dev.isbaddoggie = wd_isbroken();
+
+ /* disable all interrupts unless watchdog-enabled? == true */
+ if(! wd_dev.opt_enable) {
+ wd_toggleintr(NULL, WD_INTR_OFF);
+ }
+
+ /* register miscellaneous devices */
+ for(id = WD0_ID; id < WD_NUMDEVS; ++id) {
+ if(0 != wd_inittimer(id)) {
+ printk("%s%i: unable to initialize\n", WD_OBPNAME, id);
+ }
+ }
+
+ /* warn about possible defective PLD */
+ if(wd_dev.isbaddoggie) {
+ init_timer(&wd_timer);
+ wd_timer.function = wd_brokentimer;
+ wd_timer.data = (unsigned long)&wd_dev;
+ wd_timer.expires = WD_BTIMEOUT;
+
+ printk("%s: PLD defect workaround enabled for model %s\n",
+ WD_OBPNAME, WD_BADMODEL);
+ }
+ return(0);
+}
+
+static void __exit wd_cleanup(void)
+{
+ int id;
+
+ /* if 'watchdog-enable?' == TRUE, timers are not stopped
+ * when module is unloaded. All brokenstopped timers will
+ * also now eventually trip.
+ */
+ for(id = WD0_ID; id < WD_NUMDEVS; ++id) {
+ if(WD_S_RUNNING == wd_readb(&wd_dev.watchdog[id].regs->status)) {
+ if(wd_dev.opt_enable) {
+ printk(KERN_WARNING "%s%i: timer not stopped at release\n",
+ WD_OBPNAME, id);
+ }
+ else {
+ wd_stoptimer(&wd_dev.watchdog[id]);
+ if(wd_dev.watchdog[id].runstatus & WD_STAT_BSTOP) {
+ wd_resetbrokentimer(&wd_dev.watchdog[id]);
+ printk(KERN_WARNING
+ "%s%i: defect workaround disabled at release, "\
+ "timer expires in ~%01i sec\n",
+ WD_OBPNAME, id,
+ wd_readw(&wd_dev.watchdog[id].regs->limit) / 10);
+ }
+ }
+ }
+ }
+
+ if(wd_dev.isbaddoggie && timer_pending(&wd_timer)) {
+ del_timer(&wd_timer);
+ }
+ if(0 != (wd_dev.watchdog[WD0_ID].runstatus & WD_STAT_INIT)) {
+ misc_deregister(&wd0_miscdev);
+ }
+ if(0 != (wd_dev.watchdog[WD1_ID].runstatus & WD_STAT_INIT)) {
+ misc_deregister(&wd1_miscdev);
+ }
+ if(0 != (wd_dev.watchdog[WD2_ID].runstatus & WD_STAT_INIT)) {
+ misc_deregister(&wd2_miscdev);
+ }
+ if(0 != wd_dev.initialized) {
+ free_irq(wd_dev.irq, (void *)wd_dev.regs);
+ }
+ iounmap(wd_dev.regs);
+}
+
+module_init(wd_init);
+module_exit(wd_cleanup);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 71809b59d..c6c927712 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -1,4 +1,4 @@
-/* $Id: flash.c,v 1.20 2000/11/08 04:57:49 davem Exp $
+/* $Id: flash.c,v 1.21 2001/01/11 15:29:36 davem Exp $
* flash.c: Allow mmap access to the OBP Flash, for OBP updates.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -14,6 +14,7 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -22,6 +23,7 @@
#include <asm/sbus.h>
#include <asm/ebus.h>
+static spinlock_t flash_lock = SPIN_LOCK_UNLOCKED;
static struct {
unsigned long read_base; /* Physical read address */
unsigned long write_base; /* Physical write address */
@@ -38,14 +40,14 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long addr;
unsigned long size;
- lock_kernel();
+ spin_lock(&flash_lock);
if (flash.read_base == flash.write_base) {
addr = flash.read_base;
size = flash.read_size;
} else {
if ((vma->vm_flags & VM_READ) &&
(vma->vm_flags & VM_WRITE)) {
- unlock_kernel();
+ spin_unlock(&flash_lock);
return -EINVAL;
}
if (vma->vm_flags & VM_READ) {
@@ -55,11 +57,11 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
addr = flash.write_base;
size = flash.write_size;
} else {
- unlock_kernel();
+ spin_unlock(&flash_lock);
return -ENXIO;
}
}
- unlock_kernel();
+ spin_unlock(&flash_lock);
if ((vma->vm_pgoff << PAGE_SHIFT) > size)
return -ENXIO;
@@ -127,9 +129,10 @@ flash_open(struct inode *inode, struct file *file)
static int
flash_release(struct inode *inode, struct file *file)
{
- lock_kernel();
+ spin_lock(&flash_lock);
flash.busy = 0;
- unlock_kernel();
+ spin_unlock(&flash_lock);
+
return 0;
}
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index f87850b2c..8faec19dd 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -505,9 +505,7 @@ static int jsfd_open(struct inode *inode, struct file *file)
static int jsf_release(struct inode *inode, struct file *file)
{
- lock_kernel();
jsf0.busy = 0;
- unlock_kernel();
return 0;
}
diff --git a/drivers/sbus/char/pcikbd.c b/drivers/sbus/char/pcikbd.c
index 6ddccf114..ea37949fa 100644
--- a/drivers/sbus/char/pcikbd.c
+++ b/drivers/sbus/char/pcikbd.c
@@ -1,4 +1,4 @@
-/* $Id: pcikbd.c,v 1.49 2000/07/13 08:06:40 davem Exp $
+/* $Id: pcikbd.c,v 1.50 2001/01/11 15:29:36 davem Exp $
* pcikbd.c: Ultra/AX PC keyboard support.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -746,13 +746,13 @@ static int aux_release(struct inode * inode, struct file * file)
{
unsigned long flags;
- lock_kernel();
aux_fasync(-1, file, 0);
- if (--aux_count)
- goto out;
spin_lock_irqsave(&pcikbd_lock, flags);
+ if (--aux_count)
+ goto out;
+
/* Disable controller ints */
aux_write_cmd(AUX_INTS_OFF);
poll_aux_status();
@@ -761,9 +761,8 @@ static int aux_release(struct inode * inode, struct file * file)
pcimouse_outb(KBD_CCMD_MOUSE_DISABLE, pcimouse_iobase + KBD_CNTL_REG);
poll_aux_status();
- spin_unlock_irqrestore(&pcikbd_lock, flags);
out:
- unlock_kernel();
+ spin_unlock_irqrestore(&pcikbd_lock, flags);
return 0;
}
@@ -780,11 +779,13 @@ static int aux_open(struct inode * inode, struct file * file)
if (!aux_present)
return -ENODEV;
- if (aux_count++)
- return 0;
-
spin_lock_irqsave(&pcikbd_lock, flags);
+ if (aux_count++) {
+ spin_unlock_irqrestore(&pcikbd_lock, flags);
+ return 0;
+ }
+
if (!poll_aux_status()) {
aux_count--;
spin_unlock_irqrestore(&pcikbd_lock, flags);
diff --git a/drivers/sbus/char/rtc.c b/drivers/sbus/char/rtc.c
index d8454cf25..6105e4229 100644
--- a/drivers/sbus/char/rtc.c
+++ b/drivers/sbus/char/rtc.c
@@ -1,4 +1,4 @@
-/* $Id: rtc.c,v 1.23 2000/08/29 07:01:55 davem Exp $
+/* $Id: rtc.c,v 1.24 2001/01/11 15:07:09 davem Exp $
*
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
@@ -31,11 +31,9 @@ static int rtc_busy = 0;
void get_rtc_time(struct rtc_time *t)
{
unsigned long regs = mstk48t02_regs;
- unsigned long flags;
u8 tmp;
- save_flags(flags);
- cli();
+ spin_lock_irq(&mostek_lock);
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_READ;
@@ -52,18 +50,18 @@ void get_rtc_time(struct rtc_time *t)
tmp = mostek_read(regs + MOSTEK_CREG);
tmp &= ~MSTK_CREG_READ;
mostek_write(regs + MOSTEK_CREG, tmp);
- restore_flags(flags);
+
+ spin_unlock_irq(&mostek_lock);
}
/* Set the current date and time inthe real time clock. */
void set_rtc_time(struct rtc_time *t)
{
unsigned long regs = mstk48t02_regs;
- unsigned long flags;
u8 tmp;
- save_flags(flags);
- cli();
+ spin_lock_irq(&mostek_lock);
+
tmp = mostek_read(regs + MOSTEK_CREG);
tmp |= MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
@@ -79,7 +77,8 @@ void set_rtc_time(struct rtc_time *t)
tmp = mostek_read(regs + MOSTEK_CREG);
tmp &= ~MSTK_CREG_WRITE;
mostek_write(regs + MOSTEK_CREG, tmp);
- restore_flags(flags);
+
+ spin_unlock_irq(&mostek_lock);
}
static long long rtc_lseek(struct file *file, long long offset, int origin)
@@ -121,20 +120,24 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
static int rtc_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ spin_lock_irq(&mostek_lock);
+ if (rtc_busy) {
+ ret = -EBUSY;
+ } else {
+ rtc_busy = 1;
+ ret = 0;
+ }
+ spin_unlock_irq(&mostek_lock);
- if (rtc_busy)
- return -EBUSY;
-
- rtc_busy = 1;
-
- return 0;
+ return ret;
}
static int rtc_release(struct inode *inode, struct file *file)
{
- lock_kernel();
rtc_busy = 0;
- unlock_kernel();
+
return 0;
}
@@ -150,11 +153,7 @@ static struct miscdevice rtc_dev = { RTC_MINOR, "rtc", &rtc_fops };
EXPORT_NO_SYMBOLS;
-#ifdef MODULE
-int init_module(void)
-#else
-int __init rtc_sun_init(void)
-#endif
+static int __init rtc_sun_init(void)
{
int error;
@@ -173,9 +172,10 @@ int __init rtc_sun_init(void)
return 0;
}
-#ifdef MODULE
-void cleanup_module(void)
+static void __exit rtc_sun_cleanup(void)
{
misc_deregister(&rtc_dev);
}
-#endif
+
+module_init(rtc_sun_init);
+module_exit(rtc_sun_cleanup);
diff --git a/drivers/sbus/char/sunkbd.c b/drivers/sbus/char/sunkbd.c
index f34c12250..467200699 100644
--- a/drivers/sbus/char/sunkbd.c
+++ b/drivers/sbus/char/sunkbd.c
@@ -1521,15 +1521,17 @@ kbd_ioctl (struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
static int
kbd_open (struct inode *i, struct file *f)
{
+ spin_lock_irq(&kbd_queue_lock);
kbd_active++;
if (kbd_opened)
- return 0;
+ goto out;
kbd_opened = fg_console + 1;
- spin_lock_irq(&kbd_queue_lock);
kbd_head = kbd_tail = 0;
+
+ out:
spin_unlock_irq(&kbd_queue_lock);
return 0;
@@ -1538,7 +1540,7 @@ kbd_open (struct inode *i, struct file *f)
static int
kbd_close (struct inode *i, struct file *f)
{
- lock_kernel();
+ spin_lock_irq(&kbd_queue_lock);
if (!--kbd_active) {
if (kbd_redirected)
kbd_table [kbd_redirected-1].kbdmode = VC_XLATE;
@@ -1546,7 +1548,8 @@ kbd_close (struct inode *i, struct file *f)
kbd_opened = 0;
kbd_fasync (-1, f, 0);
}
- unlock_kernel();
+ spin_unlock_irq(&kbd_queue_lock);
+
return 0;
}
diff --git a/drivers/sbus/char/sunmouse.c b/drivers/sbus/char/sunmouse.c
index fd92bc434..8bb45cbde 100644
--- a/drivers/sbus/char/sunmouse.c
+++ b/drivers/sbus/char/sunmouse.c
@@ -391,11 +391,14 @@ sun_mouse_inbyte(unsigned char byte, int is_break)
static int
sun_mouse_open(struct inode * inode, struct file * file)
{
+ spin_lock_irq(&sunmouse.lock);
if (sunmouse.active++)
- return 0;
+ goto out;
sunmouse.delta_x = sunmouse.delta_y = 0;
sunmouse.button_state = 0x80;
sunmouse.vuid_mode = VUID_NATIVE;
+out:
+ spin_unlock_irq(&sunmouse.lock);
return 0;
}
@@ -412,10 +415,12 @@ static int sun_mouse_fasync (int fd, struct file *filp, int on)
static int
sun_mouse_close(struct inode *inode, struct file *file)
{
- lock_kernel();
sun_mouse_fasync (-1, file, 0);
+
+ spin_lock_irq(&sunmouse.lock);
sunmouse.active--;
- unlock_kernel();
+ spin_unlock_irq(&sunmouse.lock);
+
return 0;
}
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 73158f351..a4c0f347e 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
@@ -181,17 +182,26 @@ struct vfc_dev *vfc_get_dev_ptr(int instance)
return vfc_dev_lst[instance];
}
+static spinlock_t vfc_dev_lock = SPIN_LOCK_UNLOCKED;
+
static int vfc_open(struct inode *inode, struct file *file)
{
struct vfc_dev *dev;
+ spin_lock(&vfc_dev_lock);
dev = vfc_get_dev_ptr(MINOR(inode->i_rdev));
- if (dev == NULL)
+ if (dev == NULL) {
+ spin_unlock(&vfc_dev_lock);
return -ENODEV;
- if (dev->busy)
+ }
+ if (dev->busy) {
+ spin_unlock(&vfc_dev_lock);
return -EBUSY;
+ }
dev->busy = 1;
+ spin_unlock(&vfc_dev_lock);
+
vfc_lock_device(dev);
vfc_csr_init(dev);
@@ -209,14 +219,14 @@ static int vfc_release(struct inode *inode,struct file *file)
{
struct vfc_dev *dev;
- lock_kernel();
+ spin_lock(&vfc_dev_lock);
dev = vfc_get_dev_ptr(MINOR(inode->i_rdev));
if (!dev || !dev->busy) {
- unlock_kernel();
+ spin_unlock(&vfc_dev_lock);
return -EINVAL;
}
dev->busy = 0;
- unlock_kernel();
+ spin_unlock(&vfc_dev_lock);
return 0;
}
@@ -611,12 +621,10 @@ static int vfc_mmap(struct inode *inode, struct file *file,
unsigned int map_size, ret, map_offset;
struct vfc_dev *dev;
- lock_kernel();
dev = vfc_get_dev_ptr(MINOR(inode->i_rdev));
- if(dev == NULL) {
- unlock_kernel();
+ if(dev == NULL)
return -ENODEV;
- }
+
map_size = vma->vm_end - vma->vm_start;
if(map_size > sizeof(struct vfc_regs))
map_size = sizeof(struct vfc_regs);
@@ -626,7 +634,7 @@ static int vfc_mmap(struct inode *inode, struct file *file,
map_offset = (unsigned int) (long)dev->phys_regs;
ret = io_remap_page_range(vma->vm_start, map_offset, map_size,
vma->vm_page_prot, dev->which_io);
- unlock_kernel();
+
if(ret)
return -EAGAIN;
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 942a6c59b..032776c70 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -1,4 +1,4 @@
-/* $Id: sbus.c,v 1.91 2000/11/08 05:04:06 davem Exp $
+/* $Id: sbus.c,v 1.92 2001/01/25 17:15:59 davem Exp $
* sbus.c: SBus support routines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -238,10 +238,14 @@ static void __init __apply_ranges_to_regs(struct linux_prom_ranges *ranges,
break;
}
if (rngnum == num_ranges) {
- prom_printf("sbus_apply_ranges: Cannot find matching "
- "range nregs[%d] nranges[%d].\n",
- num_regs, num_ranges);
- prom_halt();
+ /* We used to flag this as an error. Actually
+ * some devices do not report the regs as we expect.
+ * For example, see SUNW,pln device. In that case
+ * the reg property is in a format internal to that
+ * node, ie. it is not in the SBUS register space
+ * per se. -DaveM
+ */
+ return;
}
regs[regnum].which_io = ranges[rngnum].ot_parent_space;
regs[regnum].phys_addr += ranges[rngnum].ot_parent_base;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 5a74bc2df..73bd8029e 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -776,7 +776,7 @@ void print_sense_internal(const char * devclass,
printk("%s%s: sns = %2x %2x\n", devclass,
kdevname(dev), sense_buffer[0], sense_buffer[2]);
- printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+ printk("Non-extended sense class %d code 0x%0x\n", sense_class, code);
s = 4;
}
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index f966bf5a7..0e5ee6935 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -16,6 +16,7 @@
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
#error "This driver works only with kernel 2.4.0 or higher!"
#endif
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
@@ -38,7 +39,7 @@
#include <linux/config.h>
/* current version of this driver-source: */
-#define IBMMCA_SCSI_DRIVER_VERSION "4.0a"
+#define IBMMCA_SCSI_DRIVER_VERSION "4.0b"
#define IBMLOCK spin_lock_irqsave(&io_request_lock, flags);
#define IBMUNLOCK spin_unlock_irqrestore(&io_request_lock, flags);
@@ -443,7 +444,6 @@ static int scsi_id[IM_MAX_HOSTS] = { 7, 7, 7, 7, 7, 7, 7, 7 };
(that is kernel version 2.1.x) */
#if defined(MODULE)
static char *boot_options = NULL;
-#include <linux/module.h>
MODULE_PARM(boot_options, "s");
MODULE_PARM(io_port, "1-" __MODULE_STRING(IM_MAX_HOSTS) "i");
MODULE_PARM(scsi_id, "1-" __MODULE_STRING(IM_MAX_HOSTS) "i");
@@ -2563,9 +2563,6 @@ static int option_setup(char *str)
__setup("ibmmcascsi=", option_setup);
-#ifdef MODULE
-/* Eventually this will go into an include file, but this will be later */
-Scsi_Host_Template driver_template = IBMMCA;
+static Scsi_Host_Template driver_template = IBMMCA;
#include "scsi_module.c"
-#endif
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 264ae2fe0..106e39b8a 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -149,7 +149,6 @@
#include <linux/version.h>
#ifdef MODULE
-#include <linux/modversions.h>
#include <linux/module.h>
char kernel_version[] = UTS_RELEASE;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index d82eed0b7..fc0af0cbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -222,8 +222,8 @@ int ppa_detect(Scsi_Host_Template * host)
printk(" supported by the imm (ZIP Plus) driver. If the\n");
printk(" cable is marked with \"AutoDetect\", this is what has\n");
printk(" happened.\n");
- return 0;
spin_lock_irq(&io_request_lock);
+ return 0;
}
try_again = 1;
goto retry_entry;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ace7c7e2c..dcde48437 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -50,6 +50,50 @@
* This entire source file deals with the new queueing code.
*/
+/*
+ * Function: __scsi_insert_special()
+ *
+ * Purpose: worker for scsi_insert_special_*()
+ *
+ * Arguments: q - request queue where request should be inserted
+ * rq - request to be inserted
+ * data - private data
+ * at_head - insert request at head or tail of queue
+ *
+ * Lock status: Assumed that io_request_lock is not held upon entry.
+ *
+ * Returns: Nothing
+ */
+static void __scsi_insert_special(request_queue_t *q, struct request *rq,
+ void *data, int at_head)
+{
+ unsigned long flags;
+
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ rq->cmd = SPECIAL;
+ rq->special = data;
+ rq->q = NULL;
+ rq->nr_segments = 0;
+ rq->elevator_sequence = 0;
+
+ /*
+ * We have the option of inserting the head or the tail of the queue.
+ * Typically we use the tail for new ioctls and so forth. We use the
+ * head of the queue for things like a QUEUE_FULL message from a
+ * device, or a host that is unable to accept a particular command.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+
+ if (at_head)
+ list_add(&rq->queue, &q->queue_head);
+ else
+ list_add_tail(&rq->queue, &q->queue_head);
+
+ q->request_fn(q);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
/*
* Function: scsi_insert_special_cmd()
@@ -73,52 +117,9 @@
*/
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{
- unsigned long flags;
- request_queue_t *q;
-
- ASSERT_LOCK(&io_request_lock, 0);
-
- /*
- * The SCpnt already contains a request structure - we will doctor the
- * thing up with the appropriate values and use that in the actual
- * request queue.
- */
- q = &SCpnt->device->request_queue;
- SCpnt->request.cmd = SPECIAL;
- SCpnt->request.special = (void *) SCpnt;
- SCpnt->request.q = NULL;
- SCpnt->request.free_list = NULL;
- SCpnt->request.nr_segments = 0;
-
- /*
- * We have the option of inserting the head or the tail of the queue.
- * Typically we use the tail for new ioctls and so forth. We use the
- * head of the queue for things like a QUEUE_FULL message from a
- * device, or a host that is unable to accept a particular command.
- */
- spin_lock_irqsave(&io_request_lock, flags);
-
- if (at_head) {
- list_add(&SCpnt->request.queue, &q->queue_head);
- } else {
- /*
- * FIXME(eric) - we always insert at the tail of the
- * list. Otherwise ioctl commands would always take
- * precedence over normal I/O. An ioctl on a busy
- * disk might be delayed indefinitely because the
- * request might not float high enough in the queue
- * to be scheduled.
- */
- list_add_tail(&SCpnt->request.queue, &q->queue_head);
- }
+ request_queue_t *q = &SCpnt->device->request_queue;
- /*
- * Now hit the requeue function for the queue. If the host is
- * already busy, so be it - we have nothing special to do. If
- * the host can queue it, then send it off.
- */
- q->request_fn(q);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ __scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
return 0;
}
@@ -144,51 +145,9 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
*/
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{
- unsigned long flags;
- request_queue_t *q;
-
- ASSERT_LOCK(&io_request_lock, 0);
-
- /*
- * The SCpnt already contains a request structure - we will doctor the
- * thing up with the appropriate values and use that in the actual
- * request queue.
- */
- q = &SRpnt->sr_device->request_queue;
- SRpnt->sr_request.cmd = SPECIAL;
- SRpnt->sr_request.special = (void *) SRpnt;
- SRpnt->sr_request.q = NULL;
- SRpnt->sr_request.nr_segments = 0;
-
- /*
- * We have the option of inserting the head or the tail of the queue.
- * Typically we use the tail for new ioctls and so forth. We use the
- * head of the queue for things like a QUEUE_FULL message from a
- * device, or a host that is unable to accept a particular command.
- */
- spin_lock_irqsave(&io_request_lock, flags);
+ request_queue_t *q = &SRpnt->sr_device->request_queue;
- if (at_head) {
- list_add(&SRpnt->sr_request.queue, &q->queue_head);
- } else {
- /*
- * FIXME(eric) - we always insert at the tail of the
- * list. Otherwise ioctl commands would always take
- * precedence over normal I/O. An ioctl on a busy
- * disk might be delayed indefinitely because the
- * request might not float high enough in the queue
- * to be scheduled.
- */
- list_add_tail(&SRpnt->sr_request.queue, &q->queue_head);
- }
-
- /*
- * Now hit the requeue function for the queue. If the host is
- * already busy, so be it - we have nothing special to do. If
- * the host can queue it, then send it off.
- */
- q->request_fn(q);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ __scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
return 0;
}
@@ -403,6 +362,7 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
struct request *req;
struct buffer_head *bh;
Scsi_Device * SDpnt;
+ int nsect;
ASSERT_LOCK(&io_request_lock, 0);
@@ -414,11 +374,13 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
}
do {
if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ blk_finished_io(nsect);
req->bh = bh->b_reqnext;
- req->nr_sectors -= bh->b_size >> 9;
- req->sector += bh->b_size >> 9;
+ req->nr_sectors -= nsect;
+ req->sector += nsect;
bh->b_reqnext = NULL;
- sectors -= bh->b_size >> 9;
+ sectors -= nsect;
bh->b_end_io(bh, uptodate);
if ((bh = req->bh) != NULL) {
req->current_nr_sectors = bh->b_size >> 9;
@@ -863,17 +825,6 @@ void scsi_request_fn(request_queue_t * q)
SHpnt = SDpnt->host;
/*
- * If the host for this device is in error recovery mode, don't
- * do anything at all here. When the host leaves error recovery
- * mode, it will automatically restart things and start queueing
- * commands again. Same goes if the queue is actually plugged,
- * if the device itself is blocked, or if the host is fully
- * occupied.
- */
- if (SHpnt->in_recovery || q->plugged)
- return;
-
- /*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
*/
@@ -896,10 +847,11 @@ void scsi_request_fn(request_queue_t * q)
|| (SHpnt->host_blocked)
|| (SHpnt->host_self_blocked)) {
/*
- * If we are unable to process any commands at all for this
- * device, then we consider it to be starved. What this means
- * is that there are no outstanding commands for this device
- * and hence we need a little help getting it started again
+ * If we are unable to process any commands at all for
+ * this device, then we consider it to be starved.
+ * What this means is that there are no outstanding
+ * commands for this device and hence we need a
+ * little help getting it started again
* once the host isn't quite so busy.
*/
if (SDpnt->device_busy == 0) {
@@ -1000,8 +952,8 @@ void scsi_request_fn(request_queue_t * q)
}
/*
* If so, we are ready to do something. Bump the count
- * while the queue is locked and then break out of the loop.
- * Otherwise loop around and try another request.
+ * while the queue is locked and then break out of the
+ * loop. Otherwise loop around and try another request.
*/
if (!SCpnt) {
break;
@@ -1029,8 +981,9 @@ void scsi_request_fn(request_queue_t * q)
memcpy(&SCpnt->request, req, sizeof(struct request));
/*
- * We have copied the data out of the request block - it is now in
- * a field in SCpnt. Release the request block.
+ * We have copied the data out of the request block -
+ * it is now in a field in SCpnt. Release the request
+ * block.
*/
blkdev_release_request(req);
}
@@ -1047,12 +1000,14 @@ void scsi_request_fn(request_queue_t * q)
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
- * 2) Fill in any other upper-level specific fields (timeout).
+ * 2) Fill in any other upper-level specific fields
+ * (timeout).
*
- * If this returns 0, it means that the request failed (reading
- * past end of disk, reading offline device, etc). This won't
- * actually talk to the device, but some kinds of consistency
- * checking may cause the request to be rejected immediately.
+ * If this returns 0, it means that the request failed
+ * (reading past end of disk, reading offline device,
+ * etc). This won't actually talk to the device, but
+ * some kinds of consistency checking may cause the
+ * request to be rejected immediately.
*/
if (STpnt == NULL) {
STpnt = scsi_get_request_dev(req);
@@ -1103,8 +1058,8 @@ void scsi_request_fn(request_queue_t * q)
scsi_dispatch_cmd(SCpnt);
/*
- * Now we need to grab the lock again. We are about to mess with
- * the request queue and try to find another command.
+ * Now we need to grab the lock again. We are about to mess
+ * with the request queue and try to find another command.
*/
spin_lock_irq(&io_request_lock);
}
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 7483eb970..4ebadd868 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -324,7 +324,6 @@ static inline int scsi_new_mergeable(request_queue_t * q,
req->nr_segments >= SHpnt->sg_tablesize)
return 0;
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
@@ -341,11 +340,8 @@ static inline int scsi_new_segment(request_queue_t * q,
if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
req->nr_segments >= SHpnt->sg_tablesize)
return 0;
- if (req->nr_segments >= max_segments)
- return 0;
req->nr_hw_segments++;
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
}
#else
@@ -361,7 +357,6 @@ static inline int scsi_new_segment(request_queue_t * q,
* counter.
*/
req->nr_segments++;
- q->elevator.nr_segments++;
return 1;
} else {
return 0;
@@ -417,8 +412,10 @@ __inline static int __scsi_back_merge_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
+#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
+#endif
if (use_clustering) {
/*
@@ -471,8 +468,10 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
+#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
+#endif
if (use_clustering) {
/*
@@ -601,10 +600,10 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
SDpnt = (Scsi_Device *) q->queuedata;
SHpnt = SDpnt->host;
+#ifdef DMA_CHUNK_SIZE
if (max_segments > 64)
max_segments = 64;
-#ifdef DMA_CHUNK_SIZE
/* If it would not fit into prepared memory space for sg chain,
* then don't allow the merge.
*/
@@ -664,7 +663,6 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
* This one is OK. Let it go.
*/
req->nr_segments += next->nr_segments - 1;
- q->elevator.nr_segments--;
#ifdef DMA_CHUNK_SIZE
req->nr_hw_segments += next->nr_hw_segments - 1;
#endif
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 04e4a6a28..c064dfde4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -694,6 +694,7 @@ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
(void *)SRpnt->sr_buffer, hp->dxfer_len,
sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
+ generic_unplug_device(&SRpnt->sr_device->request_queue);
return 0;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 71ec104f4..7a2cde23b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -671,12 +671,14 @@ void get_capabilities(int i)
cmd[3] = cmd[5] = 0;
rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
- if (-EINVAL == rc) {
- /* failed, drive has'nt this mode page */
+ if (rc) {
+ /* failed, drive doesn't have capabilities mode page */
scsi_CDs[i].cdi.speed = 1;
- /* disable speed select, drive probably can't do this either */
- scsi_CDs[i].cdi.mask |= CDC_SELECT_SPEED;
+ scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+ CDC_DVD | CDC_DVD_RAM |
+ CDC_SELECT_DISC | CDC_SELECT_SPEED);
scsi_free(buffer, 512);
+ printk("sr%i: scsi-1 drive\n", i);
return;
}
n = buffer[3] + 4;
diff --git a/drivers/sound/Config.in b/drivers/sound/Config.in
index 9fd35efcb..10235dd2e 100644
--- a/drivers/sound/Config.in
+++ b/drivers/sound/Config.in
@@ -142,9 +142,9 @@ if [ "$CONFIG_SOUND_OSS" = "y" -o "$CONFIG_SOUND_OSS" = "m" ]; then
dep_tristate ' Yamaha FM synthesizer (YM3812/OPL-3) support' CONFIG_SOUND_YM3812 $CONFIG_SOUND_OSS
dep_tristate ' Yamaha OPL3-SA1 audio controller' CONFIG_SOUND_OPL3SA1 $CONFIG_SOUND_OSS
dep_tristate ' Yamaha OPL3-SA2, SA3, and SAx based PnP cards' CONFIG_SOUND_OPL3SA2 $CONFIG_SOUND_OSS
- dep_tristate ' Yamaha YMF7xx PCI audio (legacy mode)' CONFIG_SOUND_YMPCI $CONFIG_SOUND_OSS $CONFIG_PCI
- if [ "$CONFIG_SOUND_YMPCI" = "n" ]; then
- dep_tristate ' Yamaha YMF7xx PCI audio (native mode) (EXPERIMENTAL)' CONFIG_SOUND_YMFPCI $CONFIG_SOUND_OSS $CONFIG_PCI $CONFIG_EXPERIMENTAL
+ dep_tristate ' Yamaha YMF7xx PCI audio (native mode)' CONFIG_SOUND_YMFPCI $CONFIG_SOUND_OSS $CONFIG_PCI
+ if [ "$CONFIG_SOUND_YMFPCI" != "n" ]; then
+ bool ' Yamaha PCI legacy ports support' CONFIG_SOUND_YMFPCI_LEGACY
fi
dep_tristate ' 6850 UART support' CONFIG_SOUND_UART6850 $CONFIG_SOUND_OSS
diff --git a/drivers/sound/Makefile b/drivers/sound/Makefile
index e7ee317e6..824f08ec8 100644
--- a/drivers/sound/Makefile
+++ b/drivers/sound/Makefile
@@ -47,8 +47,10 @@ obj-$(CONFIG_SOUND_ACI_MIXER) += aci.o
obj-$(CONFIG_SOUND_AWE32_SYNTH) += awe_wave.o
obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx_audio.o ac97_codec.o
-obj-$(CONFIG_SOUND_YMPCI) += ymf_sb.o sb_lib.o uart401.o
obj-$(CONFIG_SOUND_YMFPCI) += ymfpci.o ac97_codec.o
+ifeq ($(CONFIG_SOUND_YMFPCI_LEGACY),y)
+ obj-$(CONFIG_SOUND_YMFPCI) += opl3.o uart401.o
+endif
obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o
obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o
obj-$(CONFIG_SOUND_VWSND) += vwsnd.o
diff --git a/drivers/sound/emu10k1/audio.c b/drivers/sound/emu10k1/audio.c
index 9623dcb2e..2280ca236 100644
--- a/drivers/sound/emu10k1/audio.c
+++ b/drivers/sound/emu10k1/audio.c
@@ -375,8 +375,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.samplingrate = val;
- if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
+ if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
+ }
val = wiinst->format.samplingrate;
@@ -393,8 +395,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.samplingrate = val;
- if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
+ if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
+ }
val = woinst->format.samplingrate;
@@ -430,8 +434,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.channels = val ? 2 : 1;
- if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
+ if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
+ }
val = wiinst->format.channels - 1;
@@ -447,8 +453,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.channels = val ? 2 : 1;
- if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
+ if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
+ }
val = woinst->format.channels - 1;
@@ -478,8 +486,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.channels = val;
- if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
+ if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
+ }
val = wiinst->format.channels;
@@ -495,8 +505,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.channels = val;
- if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
+ if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
+ }
val = woinst->format.channels;
@@ -542,8 +554,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.bitsperchannel = val;
- if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
+ if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
+ }
val = wiinst->format.bitsperchannel;
@@ -559,8 +573,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.bitsperchannel = val;
- if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
+ if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
+ spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
+ }
val = woinst->format.bitsperchannel;
@@ -968,6 +984,7 @@ static int emu10k1_audio_mmap(struct file *file, struct vm_area_struct *vma)
for (i = 0; i < woinst->buffer.pages; i++) {
if (remap_page_range(vma->vm_start + (i * PAGE_SIZE), virt_to_phys(woinst->buffer.addr[i]), PAGE_SIZE, vma->vm_page_prot)) {
spin_unlock_irqrestore(&woinst->lock, flags);
+ unlock_kernel();
return -EAGAIN;
}
}
diff --git a/drivers/sound/trix.c b/drivers/sound/trix.c
index 73c9dcf54..03b4b1999 100644
--- a/drivers/sound/trix.c
+++ b/drivers/sound/trix.c
@@ -17,7 +17,6 @@
* Arnaldo C. de Melo Got rid of attach_uart401
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/sound/via82cxxx_audio.c b/drivers/sound/via82cxxx_audio.c
index f609d1e2b..fbe02bffd 100644
--- a/drivers/sound/via82cxxx_audio.c
+++ b/drivers/sound/via82cxxx_audio.c
@@ -15,7 +15,7 @@
*/
-#define VIA_VERSION "1.1.14"
+#define VIA_VERSION "1.1.14a"
#include <linux/config.h>
@@ -76,8 +76,16 @@
#define VIA_COUNTER_LIMIT 100000
/* size of DMA buffers */
-#define VIA_DMA_BUFFERS 16
-#define VIA_DMA_BUF_SIZE PAGE_SIZE
+#define VIA_MAX_BUFFER_DMA_PAGES 32
+
+/* buffering default values in ms */
+#define VIA_DEFAULT_FRAG_TIME 20
+#define VIA_DEFAULT_BUFFER_TIME 500
+
+#define VIA_MAX_FRAG_SIZE PAGE_SIZE
+#define VIA_MIN_FRAG_SIZE 64
+
+#define VIA_MIN_FRAG_NUMBER 2
#ifndef AC97_PCM_LR_ADC_RATE
# define AC97_PCM_LR_ADC_RATE AC97_PCM_LR_DAC_RATE
@@ -102,7 +110,6 @@
#define VIA_BASE0_PCM_OUT_CHAN_STATUS 0x00
#define VIA_BASE0_PCM_OUT_CHAN_CTRL 0x01
#define VIA_BASE0_PCM_OUT_CHAN_TYPE 0x02
-#define VIA_BASE0_PCM_OUT_BLOCK_COUNT 0x0C
#define VIA_BASE0_PCM_IN_CHAN 0x10 /* input PCM from user */
#define VIA_BASE0_PCM_IN_CHAN_STATUS 0x10
@@ -114,6 +121,7 @@
#define VIA_PCM_CONTROL 0x01
#define VIA_PCM_TYPE 0x02
#define VIA_PCM_TABLE_ADDR 0x04
+#define VIA_PCM_BLOCK_COUNT 0x0C
/* XXX unused DMA channel for FM PCM data */
#define VIA_BASE0_FM_OUT_CHAN 0x20
@@ -223,14 +231,14 @@ enum via_channel_states {
};
-struct via_sgd_data {
+struct via_buffer_pgtbl {
dma_addr_t handle;
void *cpuaddr;
};
struct via_channel {
- atomic_t n_bufs;
+ atomic_t n_frags;
atomic_t hw_ptr;
wait_queue_head_t wait;
@@ -246,11 +254,14 @@ struct via_channel {
u8 pcm_fmt; /* VIA_PCM_FMT_xxx */
unsigned rate; /* sample rate */
+ unsigned int frag_size;
+ unsigned int frag_number;
volatile struct via_sgd_table *sgtable;
dma_addr_t sgt_handle;
- struct via_sgd_data sgbuf [VIA_DMA_BUFFERS];
+ unsigned int page_number;
+ struct via_buffer_pgtbl pgtbl[VIA_MAX_BUFFER_DMA_PAGES];
long iobase;
@@ -301,17 +312,16 @@ static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wa
static int via_dsp_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
static int via_dsp_open (struct inode *inode, struct file *file);
static int via_dsp_release(struct inode *inode, struct file *file);
-#ifdef VIA_SUPPORT_MMAP
static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma);
-#endif
static u16 via_ac97_read_reg (struct ac97_codec *codec, u8 reg);
static void via_ac97_write_reg (struct ac97_codec *codec, u8 reg, u16 value);
static u8 via_ac97_wait_idle (struct via_info *card);
static void via_chan_free (struct via_info *card, struct via_channel *chan);
-static void via_chan_clear (struct via_channel *chan);
+static void via_chan_clear (struct via_info *card, struct via_channel *chan);
static void via_chan_pcm_fmt (struct via_channel *chan, int reset);
+static void via_chan_buffer_free (struct via_info *card, struct via_channel *chan);
#ifdef VIA_PROC_FS
static int via_init_proc (void);
@@ -569,25 +579,53 @@ static void via_chan_init_defaults (struct via_info *card, struct via_channel *c
chan->pcm_fmt = VIA_PCM_FMT_MASK;
chan->is_enabled = 1;
- if (chan->is_record)
- atomic_set (&chan->n_bufs, 0);
- else
- atomic_set (&chan->n_bufs, VIA_DMA_BUFFERS);
+ chan->frag_number = 0;
+ chan->frag_size = 0;
+ atomic_set(&chan->n_frags, 0);
atomic_set (&chan->hw_ptr, 0);
}
+/**
+ * via_chan_init - Initialize PCM channel
+ * @card: Private audio chip info
+ * @chan: Channel to be initialized
+ *
+ * Performs some of the preparations necessary to begin
+ * using a PCM channel.
+ *
+ * Currently the preparations consist in
+ * setting the
+ * PCM channel to a known state.
+ */
+
+
+static void via_chan_init (struct via_info *card, struct via_channel *chan)
+{
+
+ DPRINTK ("ENTER\n");
+
+ /* bzero channel structure, and init members to defaults */
+ via_chan_init_defaults (card, chan);
+
+ /* stop any existing channel output */
+ via_chan_clear (card, chan);
+ via_chan_status_clear (chan->iobase);
+ via_chan_pcm_fmt (chan, 1);
+
+ DPRINTK ("EXIT\n");
+}
/**
- * via_chan_init - Initialize PCM channel
+ * via_chan_buffer_init - Initialize PCM channel buffer
* @card: Private audio chip info
* @chan: Channel to be initialized
*
- * Performs all the preparations necessary to begin
+ * Performs some of the preparations necessary to begin
* using a PCM channel.
*
* Currently the preparations include allocating the
- * scatter-gather DMA table and buffers, setting the
- * PCM channel to a known state, and passing the
+ * scatter-gather DMA table and buffers,
+ * and passing the
* address of the DMA table to the hardware.
*
* Note that special care is taken when passing the
@@ -596,18 +634,21 @@ static void via_chan_init_defaults (struct via_info *card, struct via_channel *c
* always "take" the address.
*/
-static int via_chan_init (struct via_info *card, struct via_channel *chan)
+static int via_chan_buffer_init (struct via_info *card, struct via_channel *chan)
{
+ int page, offset;
int i;
DPRINTK ("ENTER\n");
- /* bzero channel structure, and init members to defaults */
- via_chan_init_defaults (card, chan);
+ if (chan->sgtable != NULL) {
+ DPRINTK ("EXIT\n");
+ return 0;
+ }
/* alloc DMA-able memory for scatter-gather table */
chan->sgtable = pci_alloc_consistent (card->pdev,
- (sizeof (struct via_sgd_table) * VIA_DMA_BUFFERS),
+ (sizeof (struct via_sgd_table) * chan->frag_number),
&chan->sgt_handle);
if (!chan->sgtable) {
printk (KERN_ERR PFX "DMA table alloc fail, aborting\n");
@@ -616,45 +657,54 @@ static int via_chan_init (struct via_info *card, struct via_channel *chan)
}
memset ((void*)chan->sgtable, 0,
- (sizeof (struct via_sgd_table) * VIA_DMA_BUFFERS));
+ (sizeof (struct via_sgd_table) * chan->frag_number));
/* alloc DMA-able memory for scatter-gather buffers */
- for (i = 0; i < VIA_DMA_BUFFERS; i++) {
- chan->sgbuf[i].cpuaddr =
- pci_alloc_consistent (card->pdev, VIA_DMA_BUF_SIZE,
- &chan->sgbuf[i].handle);
- if (!chan->sgbuf[i].cpuaddr)
- goto err_out_nomem;
+ chan->page_number = (chan->frag_number * chan->frag_size) / PAGE_SIZE +
+ (((chan->frag_number * chan->frag_size) % PAGE_SIZE) ? 1 : 0);
+
+ for (i = 0; i < chan->page_number; i++) {
+ chan->pgtbl[i].cpuaddr = pci_alloc_consistent (card->pdev, PAGE_SIZE,
+ &chan->pgtbl[i].handle);
- if (i < (VIA_DMA_BUFFERS - 1))
- chan->sgtable[i].count = cpu_to_le32 (VIA_DMA_BUF_SIZE | VIA_FLAG);
- else
- chan->sgtable[i].count = cpu_to_le32 (VIA_DMA_BUF_SIZE | VIA_EOL);
- chan->sgtable[i].addr = cpu_to_le32 (chan->sgbuf[i].handle);
+ if (!chan->pgtbl[i].cpuaddr) {
+ chan->page_number = i;
+ goto err_out_nomem;
+ }
#ifndef VIA_NDEBUG
- memset (chan->sgbuf[i].cpuaddr, 0xBC, VIA_DMA_BUF_SIZE);
+ memset (chan->pgtbl[i].cpuaddr, 0xBC, chan->frag_size);
#endif
#if 1
- DPRINTK ("dmabuf #%d (h=%lx, 32(h)=%lx, v2p=%lx, a=%p)\n",
- i, (long)chan->sgbuf[i].handle,
- (long)chan->sgtable[i].addr,
- virt_to_phys(chan->sgbuf[i].cpuaddr),
- chan->sgbuf[i].cpuaddr);
+ DPRINTK ("dmabuf_pg #%d (h=%lx, v2p=%lx, a=%p)\n",
+ i, (long)chan->pgtbl[i].handle,
+ virt_to_phys(chan->pgtbl[i].cpuaddr),
+ chan->pgtbl[i].cpuaddr);
#endif
-
- assert ((VIA_DMA_BUF_SIZE % PAGE_SIZE) == 0);
}
- /* stop any existing channel output */
- via_chan_clear (chan);
- via_chan_status_clear (chan->iobase);
- via_chan_pcm_fmt (chan, 1);
+ for (i = 0; i < chan->frag_number; i++) {
+
+ page = i / (PAGE_SIZE / chan->frag_size);
+ offset = (i % (PAGE_SIZE / chan->frag_size)) * chan->frag_size;
+
+ chan->sgtable[i].count = cpu_to_le32 (chan->frag_size | VIA_FLAG);
+ chan->sgtable[i].addr = cpu_to_le32 (chan->pgtbl[page].handle + offset);
+
+#if 1
+ DPRINTK ("dmabuf #%d (32(h)=%lx)\n",
+ i,
+ (long)chan->sgtable[i].addr);
+#endif
+ }
+
+ /* overwrite the last buffer information */
+ chan->sgtable[chan->frag_number - 1].count = cpu_to_le32 (chan->frag_size | VIA_EOL);
/* set location of DMA-able scatter-gather info table */
- DPRINTK("outl (0x%X, 0x%04lX)\n",
+ DPRINTK ("outl (0x%X, 0x%04lX)\n",
cpu_to_le32 (chan->sgt_handle),
chan->iobase + VIA_PCM_TABLE_ADDR);
@@ -664,7 +714,7 @@ static int via_chan_init (struct via_info *card, struct via_channel *chan)
udelay (20);
via_ac97_wait_idle (card);
- DPRINTK("inl (0x%lX) = %x\n",
+ DPRINTK ("inl (0x%lX) = %x\n",
chan->iobase + VIA_PCM_TABLE_ADDR,
inl(chan->iobase + VIA_PCM_TABLE_ADDR));
@@ -673,7 +723,7 @@ static int via_chan_init (struct via_info *card, struct via_channel *chan)
err_out_nomem:
printk (KERN_ERR PFX "DMA buffer alloc fail, aborting\n");
- via_chan_free (card, chan);
+ via_chan_buffer_free (card, chan);
DPRINTK ("EXIT\n");
return -ENOMEM;
}
@@ -695,8 +745,6 @@ err_out_nomem:
static void via_chan_free (struct via_info *card, struct via_channel *chan)
{
- int i;
-
DPRINTK ("ENTER\n");
synchronize_irq();
@@ -710,23 +758,33 @@ static void via_chan_free (struct via_info *card, struct via_channel *chan)
spin_unlock_irq (&card->lock);
+ DPRINTK ("EXIT\n");
+}
+
+static void via_chan_buffer_free (struct via_info *card, struct via_channel *chan)
+{
+ int i;
+
+ DPRINTK ("ENTER\n");
+
/* zero location of DMA-able scatter-gather info table */
via_ac97_wait_idle(card);
outl (0, chan->iobase + VIA_PCM_TABLE_ADDR);
- for (i = 0; i < VIA_DMA_BUFFERS; i++)
- if (chan->sgbuf[i].cpuaddr) {
- assert ((VIA_DMA_BUF_SIZE % PAGE_SIZE) == 0);
- pci_free_consistent (card->pdev, VIA_DMA_BUF_SIZE,
- chan->sgbuf[i].cpuaddr,
- chan->sgbuf[i].handle);
- chan->sgbuf[i].cpuaddr = NULL;
- chan->sgbuf[i].handle = 0;
+ for (i = 0; i < chan->page_number; i++)
+ if (chan->pgtbl[i].cpuaddr) {
+ pci_free_consistent (card->pdev, PAGE_SIZE,
+ chan->pgtbl[i].cpuaddr,
+ chan->pgtbl[i].handle);
+ chan->pgtbl[i].cpuaddr = NULL;
+ chan->pgtbl[i].handle = 0;
}
+ chan->page_number = 0;
+
if (chan->sgtable) {
pci_free_consistent (card->pdev,
- (sizeof (struct via_sgd_table) * VIA_DMA_BUFFERS),
+ (sizeof (struct via_sgd_table) * chan->frag_number),
(void*)chan->sgtable, chan->sgt_handle);
chan->sgtable = NULL;
}
@@ -771,11 +829,11 @@ static void via_chan_pcm_fmt (struct via_channel *chan, int reset)
if (!chan->is_record)
chan->pcm_fmt |= VIA_CHAN_TYPE_INT_SELECT;
- outb (chan->pcm_fmt, chan->iobase + 2);
+ outb (chan->pcm_fmt, chan->iobase + VIA_PCM_TYPE);
DPRINTK ("EXIT, pcm_fmt = 0x%02X, reg = 0x%02X\n",
chan->pcm_fmt,
- inb (chan->iobase + 2));
+ inb (chan->iobase + VIA_PCM_TYPE));
}
@@ -787,10 +845,11 @@ static void via_chan_pcm_fmt (struct via_channel *chan, int reset)
* all software pointers which track DMA operation.
*/
-static void via_chan_clear (struct via_channel *chan)
+static void via_chan_clear (struct via_info *card, struct via_channel *chan)
{
DPRINTK ("ENTER\n");
via_chan_stop (chan->iobase);
+ via_chan_buffer_free(card, chan);
chan->is_active = 0;
chan->is_mapped = 0;
chan->is_enabled = 1;
@@ -798,10 +857,6 @@ static void via_chan_clear (struct via_channel *chan)
chan->sw_ptr = 0;
chan->n_irqs = 0;
atomic_set (&chan->hw_ptr, 0);
- if (chan->is_record)
- atomic_set (&chan->n_bufs, 0);
- else
- atomic_set (&chan->n_bufs, VIA_DMA_BUFFERS);
DPRINTK ("EXIT\n");
}
@@ -826,7 +881,7 @@ static int via_chan_set_speed (struct via_info *card,
{
DPRINTK ("ENTER, requested rate = %d\n", val);
- via_chan_clear (chan);
+ via_chan_clear (card, chan);
val = via_set_rate (&card->ac97, chan, val);
@@ -858,7 +913,7 @@ static int via_chan_set_fmt (struct via_info *card,
val == AFMT_S16_LE ? "AFMT_S16_LE" :
"unknown");
- via_chan_clear (chan);
+ via_chan_clear (card, chan);
assert (val != AFMT_QUERY); /* this case is handled elsewhere */
@@ -907,7 +962,7 @@ static int via_chan_set_stereo (struct via_info *card,
{
DPRINTK ("ENTER, channels = %d\n", val);
- via_chan_clear (chan);
+ via_chan_clear (card, chan);
switch (val) {
@@ -934,6 +989,78 @@ static int via_chan_set_stereo (struct via_info *card,
return val;
}
+static int via_chan_set_buffering (struct via_info *card,
+ struct via_channel *chan, int val)
+{
+ int shift;
+
+ DPRINTK ("ENTER\n");
+
+ /* in both cases the buffer cannot be changed */
+ if (chan->is_active || chan->is_mapped) {
+ DPRINTK ("EXIT\n");
+ return -EINVAL;
+ }
+
+ /* called outside SETFRAGMENT */
+ /* set defaults or do nothing */
+ if (val < 0) {
+
+ if (chan->frag_size && chan->frag_number)
+ goto out;
+
+ DPRINTK ("\n");
+
+ chan->frag_size = (VIA_DEFAULT_FRAG_TIME * chan->rate *
+ ((chan->pcm_fmt & VIA_PCM_FMT_STEREO) ? 2 : 1) *
+ ((chan->pcm_fmt & VIA_PCM_FMT_16BIT) ? 2 : 1)) / 1000 - 1;
+
+ shift = 0;
+ while (chan->frag_size) {
+ chan->frag_size >>= 1;
+ shift++;
+ }
+ chan->frag_size = 1 << shift;
+
+ chan->frag_number = (VIA_DEFAULT_BUFFER_TIME / VIA_DEFAULT_FRAG_TIME);
+
+ DPRINTK ("setting default values %d %d\n", chan->frag_size, chan->frag_number);
+ } else {
+ chan->frag_size = 1 << (val & 0xFFFF);
+ chan->frag_number = (val >> 16) & 0xFFFF;
+
+ DPRINTK ("using user values %d %d\n", chan->frag_size, chan->frag_number);
+ }
+
+ /* quake3 wants frag_number to be a power of two */
+ shift = 0;
+ while (chan->frag_number) {
+ chan->frag_number >>= 1;
+ shift++;
+ }
+ chan->frag_number = 1 << shift;
+
+ if (chan->frag_size > VIA_MAX_FRAG_SIZE)
+ chan->frag_size = VIA_MAX_FRAG_SIZE;
+ else if (chan->frag_size < VIA_MIN_FRAG_SIZE)
+ chan->frag_size = VIA_MIN_FRAG_SIZE;
+
+ if (chan->frag_number < VIA_MIN_FRAG_NUMBER)
+ chan->frag_number = VIA_MIN_FRAG_NUMBER;
+
+ if ((chan->frag_number * chan->frag_size) / PAGE_SIZE > VIA_MAX_BUFFER_DMA_PAGES)
+ chan->frag_number = (VIA_MAX_BUFFER_DMA_PAGES * PAGE_SIZE) / chan->frag_size;
+
+out:
+ if (chan->is_record)
+ atomic_set (&chan->n_frags, 0);
+ else
+ atomic_set (&chan->n_frags, chan->frag_number);
+
+ DPRINTK ("EXIT\n");
+
+ return 0;
+}
#ifdef VIA_CHAN_DUMP_BUFS
/**
@@ -948,7 +1075,7 @@ static void via_chan_dump_bufs (struct via_channel *chan)
{
int i;
- for (i = 0; i < VIA_DMA_BUFFERS; i++) {
+ for (i = 0; i < chan->frag_number; i++) {
DPRINTK ("#%02d: addr=%x, count=%u, flag=%d, eol=%d\n",
i, chan->sgtable[i].addr,
chan->sgtable[i].count & 0x00FFFFFF,
@@ -975,15 +1102,15 @@ static void via_chan_flush_frag (struct via_channel *chan)
assert (chan->slop_len > 0);
- if (chan->sw_ptr == (VIA_DMA_BUFFERS - 1))
+ if (chan->sw_ptr == (chan->frag_number - 1))
chan->sw_ptr = 0;
else
chan->sw_ptr++;
chan->slop_len = 0;
- assert (atomic_read (&chan->n_bufs) > 0);
- atomic_dec (&chan->n_bufs);
+ assert (atomic_read (&chan->n_frags) > 0);
+ atomic_dec (&chan->n_frags);
DPRINTK ("EXIT\n");
}
@@ -1003,7 +1130,7 @@ static inline void via_chan_maybe_start (struct via_channel *chan)
if (!chan->is_active && chan->is_enabled) {
chan->is_active = 1;
sg_begin (chan);
- DPRINTK("starting channel %s\n", chan->name);
+ DPRINTK ("starting channel %s\n", chan->name);
}
}
@@ -1213,7 +1340,7 @@ static loff_t via_llseek(struct file *file, loff_t offset, int origin)
{
DPRINTK ("ENTER\n");
- DPRINTK("EXIT, returning -ESPIPE\n");
+ DPRINTK ("EXIT, returning -ESPIPE\n");
return -ESPIPE;
}
@@ -1245,7 +1372,7 @@ static int __init via_ac97_reset (struct via_info *card)
pci_read_config_byte (card->pdev, 0x43, &r43);
pci_read_config_byte (card->pdev, 0x44, &r44);
pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK("PCI config: %02X %02X %02X %02X %02X %02X\n",
+ DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
r40,r41,r42,r43,r44,r48);
spin_lock_irq (&card->lock);
@@ -1334,7 +1461,7 @@ static int __init via_ac97_init (struct via_info *card)
card->ac97.dev_mixer = register_sound_mixer (&via_mixer_fops, -1);
if (card->ac97.dev_mixer < 0) {
printk (KERN_ERR PFX "unable to register AC97 mixer, aborting\n");
- DPRINTK("EXIT, returning -EIO\n");
+ DPRINTK ("EXIT, returning -EIO\n");
return -EIO;
}
@@ -1359,21 +1486,21 @@ static int __init via_ac97_init (struct via_info *card)
err_out:
unregister_sound_mixer (card->ac97.dev_mixer);
- DPRINTK("EXIT, returning %d\n", rc);
+ DPRINTK ("EXIT, returning %d\n", rc);
return rc;
}
static void via_ac97_cleanup (struct via_info *card)
{
- DPRINTK("ENTER\n");
+ DPRINTK ("ENTER\n");
assert (card != NULL);
assert (card->ac97.dev_mixer >= 0);
unregister_sound_mixer (card->ac97.dev_mixer);
- DPRINTK("EXIT\n");
+ DPRINTK ("EXIT\n");
}
@@ -1414,24 +1541,24 @@ static void via_intr_channel (struct via_channel *chan)
/* sanity check: make sure our h/w ptr doesn't have a weird value */
assert (n >= 0);
- assert (n < VIA_DMA_BUFFERS);
+ assert (n < chan->frag_number);
/* reset SGD data structure in memory to reflect a full buffer,
* and advance the h/w ptr, wrapping around to zero if needed
*/
- if (n == (VIA_DMA_BUFFERS - 1)) {
- chan->sgtable[n].count = (VIA_DMA_BUF_SIZE | VIA_EOL);
+ if (n == (chan->frag_number - 1)) {
+ chan->sgtable[n].count = (chan->frag_size | VIA_EOL);
atomic_set (&chan->hw_ptr, 0);
} else {
- chan->sgtable[n].count = (VIA_DMA_BUF_SIZE | VIA_FLAG);
+ chan->sgtable[n].count = (chan->frag_size | VIA_FLAG);
atomic_inc (&chan->hw_ptr);
}
/* accounting crap for SNDCTL_DSP_GETxPTR */
chan->n_irqs++;
- chan->bytes += VIA_DMA_BUF_SIZE;
+ chan->bytes += chan->frag_size;
if (chan->bytes < 0) /* handle overflow of 31-bit value */
- chan->bytes = VIA_DMA_BUF_SIZE;
+ chan->bytes = chan->frag_size;
/* wake up anyone listening to see when interrupts occur */
if (waitqueue_active (&chan->wait))
@@ -1445,25 +1572,25 @@ static void via_intr_channel (struct via_channel *chan)
if (chan->is_mapped)
return;
- /* If we are recording, then n_bufs represents the number
- * of buffers waiting to be handled by userspace.
- * If we are playback, then n_bufs represents the number
- * of buffers remaining to be filled by userspace.
- * We increment here. If we reach max buffers (VIA_DMA_BUFFERS),
+ /* If we are recording, then n_frags represents the number
+ * of fragments waiting to be handled by userspace.
+ * If we are playback, then n_frags represents the number
+ * of fragments remaining to be filled by userspace.
+ * We increment here. If we reach max number of fragments,
* this indicates an underrun/overrun. For this case under OSS,
* we stop the record/playback process.
*/
- if (atomic_read (&chan->n_bufs) < VIA_DMA_BUFFERS)
- atomic_inc (&chan->n_bufs);
- assert (atomic_read (&chan->n_bufs) <= VIA_DMA_BUFFERS);
+ if (atomic_read (&chan->n_frags) < chan->frag_number)
+ atomic_inc (&chan->n_frags);
+ assert (atomic_read (&chan->n_frags) <= chan->frag_number);
- if (atomic_read (&chan->n_bufs) == VIA_DMA_BUFFERS) {
+ if (atomic_read (&chan->n_frags) == chan->frag_number) {
chan->is_active = 0;
via_chan_stop (chan->iobase);
}
- DPRINTK ("%s intr, channel n_bufs == %d\n", chan->name,
- atomic_read (&chan->n_bufs));
+ DPRINTK ("%s intr, channel n_frags == %d\n", chan->name,
+ atomic_read (&chan->n_frags));
}
@@ -1618,9 +1745,7 @@ static struct file_operations via_dsp_fops = {
poll: via_dsp_poll,
llseek: via_llseek,
ioctl: via_dsp_ioctl,
-#ifdef VIA_SUPPORT_MMAP
mmap: via_dsp_mmap,
-#endif
};
@@ -1668,7 +1793,6 @@ static void via_dsp_cleanup (struct via_info *card)
}
-#ifdef VIA_SUPPORT_MMAP
static struct page * via_mm_nopage (struct vm_area_struct * vma,
unsigned long address, int write_access)
{
@@ -1685,8 +1809,6 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
address,
write_access);
- assert (VIA_DMA_BUF_SIZE == PAGE_SIZE);
-
if (address > vma->vm_end) {
DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
return NOPAGE_SIGBUS; /* Disallow mremap */
@@ -1702,7 +1824,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
#ifndef VIA_NDEBUG
{
- unsigned long max_bufs = VIA_DMA_BUFFERS;
+ unsigned long max_bufs = chan->frag_number;
if (rd && wr) max_bufs *= 2;
/* via_dsp_mmap() should ensure this */
assert (pgoff < max_bufs);
@@ -1711,17 +1833,17 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
/* if full-duplex (read+write) and we have two sets of bufs,
* then the playback buffers come first, sez soundcard.c */
- if (pgoff >= VIA_DMA_BUFFERS) {
- pgoff -= VIA_DMA_BUFFERS;
+ if (pgoff >= chan->page_number) {
+ pgoff -= chan->page_number;
chan = &card->ch_in;
} else if (!wr)
chan = &card->ch_in;
- assert ((((unsigned long)chan->sgbuf[pgoff].cpuaddr) % PAGE_SIZE) == 0);
+ assert ((((unsigned long)chan->pgtbl[pgoff].cpuaddr) % PAGE_SIZE) == 0);
- dmapage = virt_to_page (chan->sgbuf[pgoff].cpuaddr);
+ dmapage = virt_to_page (chan->pgtbl[pgoff].cpuaddr);
DPRINTK ("EXIT, returning page %p for cpuaddr %lXh\n",
- dmapage, (unsigned long) chan->sgbuf[pgoff].cpuaddr);
+ dmapage, (unsigned long) chan->pgtbl[pgoff].cpuaddr);
get_page (dmapage);
return dmapage;
}
@@ -1761,16 +1883,18 @@ static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start,
vma->vm_pgoff);
- assert (VIA_DMA_BUF_SIZE == PAGE_SIZE);
-
max_size = 0;
- if (file->f_mode & FMODE_READ) {
+ if (vma->vm_flags & VM_READ) {
rd = 1;
- max_size += (VIA_DMA_BUFFERS * VIA_DMA_BUF_SIZE);
+ via_chan_set_buffering(card, &card->ch_in, -1);
+ via_chan_buffer_init (card, &card->ch_in);
+ max_size += card->ch_in.page_number << PAGE_SHIFT;
}
- if (file->f_mode & FMODE_WRITE) {
+ if (vma->vm_flags & VM_WRITE) {
wr = 1;
- max_size += (VIA_DMA_BUFFERS * VIA_DMA_BUF_SIZE);
+ via_chan_set_buffering(card, &card->ch_out, -1);
+ via_chan_buffer_init (card, &card->ch_out);
+ max_size += card->ch_out.page_number << PAGE_SHIFT;
}
start = vma->vm_start;
@@ -1802,10 +1926,9 @@ static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma)
rc = 0;
out:
- DPRINTK("EXIT, returning %d\n", rc);
+ DPRINTK ("EXIT, returning %d\n", rc);
return rc;
}
-#endif /* VIA_SUPPORT_MMAP */
static ssize_t via_dsp_do_read (struct via_info *card,
@@ -1831,13 +1954,13 @@ handle_one_block:
*/
n = chan->sw_ptr;
- /* n_bufs represents the number of buffers waiting
+ /* n_frags represents the number of fragments waiting
* to be copied to userland. sleep until at least
* one buffer has been read from the audio hardware.
*/
- tmp = atomic_read (&chan->n_bufs);
+ tmp = atomic_read (&chan->n_frags);
assert (tmp >= 0);
- assert (tmp <= VIA_DMA_BUFFERS);
+ assert (tmp <= chan->frag_number);
while (tmp == 0) {
if (nonblock || !chan->is_active)
return -EAGAIN;
@@ -1848,18 +1971,18 @@ handle_one_block:
if (signal_pending (current))
return -ERESTARTSYS;
- tmp = atomic_read (&chan->n_bufs);
+ tmp = atomic_read (&chan->n_frags);
}
/* Now that we have a buffer we can read from, send
* as much as sample data possible to userspace.
*/
- while ((count > 0) && (chan->slop_len < VIA_DMA_BUF_SIZE)) {
- size_t slop_left = VIA_DMA_BUF_SIZE - chan->slop_len;
+ while ((count > 0) && (chan->slop_len < chan->frag_size)) {
+ size_t slop_left = chan->frag_size - chan->slop_len;
size = (count < slop_left) ? count : slop_left;
if (copy_to_user (userbuf,
- chan->sgbuf[n].cpuaddr + chan->slop_len,
+ chan->pgtbl[n / (PAGE_SIZE / chan->frag_size)].cpuaddr + n % (PAGE_SIZE / chan->frag_size) + chan->slop_len,
size))
return -EFAULT;
@@ -1871,7 +1994,7 @@ handle_one_block:
/* If we didn't copy the buffer completely to userspace,
* stop now.
*/
- if (chan->slop_len < VIA_DMA_BUF_SIZE)
+ if (chan->slop_len < chan->frag_size)
goto out;
/*
@@ -1882,20 +2005,20 @@ handle_one_block:
/* advance channel software pointer to point to
* the next buffer from which we will copy
*/
- if (chan->sw_ptr == (VIA_DMA_BUFFERS - 1))
+ if (chan->sw_ptr == (chan->frag_number - 1))
chan->sw_ptr = 0;
else
chan->sw_ptr++;
/* mark one less buffer waiting to be processed */
- assert (atomic_read (&chan->n_bufs) > 0);
- atomic_dec (&chan->n_bufs);
+ assert (atomic_read (&chan->n_frags) > 0);
+ atomic_dec (&chan->n_frags);
/* we are at a block boundary, there is no fragment data */
chan->slop_len = 0;
- DPRINTK("Flushed block %u, sw_ptr now %u, n_bufs now %d\n",
- n, chan->sw_ptr, atomic_read (&chan->n_bufs));
+ DPRINTK ("Flushed block %u, sw_ptr now %u, n_frags now %d\n",
+ n, chan->sw_ptr, atomic_read (&chan->n_frags));
DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
inb (card->baseaddr + 0x00),
@@ -1941,12 +2064,18 @@ static ssize_t via_dsp_read(struct file *file, char *buffer, size_t count, loff_
goto out_up;
}
+ via_chan_set_buffering(card, &card->ch_in, -1);
+ rc = via_chan_buffer_init (card, &card->ch_in);
+
+ if (rc)
+ goto out_up;
+
rc = via_dsp_do_read (card, buffer, count, nonblock);
out_up:
up (&card->syscall_sem);
out:
- DPRINTK("EXIT, returning %ld\n",(long) rc);
+ DPRINTK ("EXIT, returning %ld\n",(long) rc);
return rc;
}
@@ -1966,40 +2095,40 @@ handle_one_block:
if (current->need_resched)
schedule ();
- /* grab current channel software pointer. In the case of
- * playback, this is pointing to the next buffer that
+ /* grab current channel fragment pointer. In the case of
+ * playback, this is pointing to the next fragment that
* should receive data from userland.
*/
n = chan->sw_ptr;
- /* n_bufs represents the number of buffers remaining
+ /* n_frags represents the number of fragments remaining
* to be filled by userspace. Sleep until
- * at least one buffer is available for our use.
+ * at least one fragment is available for our use.
*/
- tmp = atomic_read (&chan->n_bufs);
+ tmp = atomic_read (&chan->n_frags);
assert (tmp >= 0);
- assert (tmp <= VIA_DMA_BUFFERS);
+ assert (tmp <= chan->frag_number);
while (tmp == 0) {
if (nonblock || !chan->is_enabled)
return -EAGAIN;
- DPRINTK ("Sleeping on block %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record);
+ DPRINTK ("Sleeping on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record);
interruptible_sleep_on (&chan->wait);
if (signal_pending (current))
return -ERESTARTSYS;
- tmp = atomic_read (&chan->n_bufs);
+ tmp = atomic_read (&chan->n_frags);
}
- /* Now that we have a buffer we can write to, fill it up
+ /* Now that we have at least one fragment we can write to, fill the buffer
* as much as possible with data from userspace.
*/
- while ((count > 0) && (chan->slop_len < VIA_DMA_BUF_SIZE)) {
- size_t slop_left = VIA_DMA_BUF_SIZE - chan->slop_len;
+ while ((count > 0) && (chan->slop_len < chan->frag_size)) {
+ size_t slop_left = chan->frag_size - chan->slop_len;
size = (count < slop_left) ? count : slop_left;
- if (copy_from_user (chan->sgbuf[n].cpuaddr + chan->slop_len,
+ if (copy_from_user (chan->pgtbl[n / (PAGE_SIZE / chan->frag_size)].cpuaddr + (n % (PAGE_SIZE / chan->frag_size)) * chan->frag_size + chan->slop_len,
userbuf, size))
return -EFAULT;
@@ -2009,36 +2138,36 @@ handle_one_block:
}
/* If we didn't fill up the buffer with data, stop now.
- * Put a 'stop' marker in the DMA table too, to tell the
- * audio hardware to stop if it gets here.
- */
- if (chan->slop_len < VIA_DMA_BUF_SIZE) {
+ * Put a 'stop' marker in the DMA table too, to tell the
+ * audio hardware to stop if it gets here.
+ */
+ if (chan->slop_len < chan->frag_size) {
sgtable[n].count = cpu_to_le32 (chan->slop_len | VIA_EOL | VIA_STOP);
goto out;
}
/*
- * If we get to this point, we have filled a buffer with
- * audio data, flush the buffer to audio hardware.
- */
+ * If we get to this point, we have filled a buffer with
+ * audio data, flush the buffer to audio hardware.
+ */
/* Record the true size for the audio hardware to notice */
- if (n == (VIA_DMA_BUFFERS - 1))
- sgtable[n].count = cpu_to_le32 (VIA_DMA_BUF_SIZE | VIA_EOL);
- else
- sgtable[n].count = cpu_to_le32 (VIA_DMA_BUF_SIZE | VIA_FLAG);
+ if (n == (chan->frag_number - 1))
+ sgtable[n].count = cpu_to_le32 (chan->frag_size | VIA_EOL);
+ else
+ sgtable[n].count = cpu_to_le32 (chan->frag_size | VIA_FLAG);
/* advance channel software pointer to point to
* the next buffer we will fill with data
*/
- if (chan->sw_ptr == (VIA_DMA_BUFFERS - 1))
+ if (chan->sw_ptr == (chan->frag_number - 1))
chan->sw_ptr = 0;
else
chan->sw_ptr++;
/* mark one less buffer as being available for userspace consumption */
- assert (atomic_read (&chan->n_bufs) > 0);
- atomic_dec (&chan->n_bufs);
+ assert (atomic_read (&chan->n_frags) > 0);
+ atomic_dec (&chan->n_frags);
/* we are at a block boundary, there is no fragment data */
chan->slop_len = 0;
@@ -2046,8 +2175,8 @@ handle_one_block:
/* if SGD has not yet been started, start it */
via_chan_maybe_start (chan);
- DPRINTK("Flushed block %u, sw_ptr now %u, n_bufs now %d\n",
- n, chan->sw_ptr, atomic_read (&chan->n_bufs));
+ DPRINTK ("Flushed block %u, sw_ptr now %u, n_frags now %d\n",
+ n, chan->sw_ptr, atomic_read (&chan->n_frags));
DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
inb (card->baseaddr + 0x00),
@@ -2093,12 +2222,18 @@ static ssize_t via_dsp_write(struct file *file, const char *buffer, size_t count
goto out_up;
}
+ via_chan_set_buffering(card, &card->ch_out, -1);
+ rc = via_chan_buffer_init (card, &card->ch_out);
+
+ if (rc)
+ goto out_up;
+
rc = via_dsp_do_write (card, buffer, count, nonblock);
out_up:
up (&card->syscall_sem);
out:
- DPRINTK("EXIT, returning %ld\n",(long) rc);
+ DPRINTK ("EXIT, returning %ld\n",(long) rc);
return rc;
}
@@ -2117,23 +2252,27 @@ static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wa
rd = (file->f_mode & FMODE_READ);
wr = (file->f_mode & FMODE_WRITE);
- if (wr && (atomic_read (&card->ch_out.n_bufs) == 0)) {
+ if (wr && (atomic_read (&card->ch_out.n_frags) == 0)) {
assert (card->ch_out.is_active);
poll_wait(file, &card->ch_out.wait, wait);
}
if (rd) {
/* XXX is it ok, spec-wise, to start DMA here? */
+ if (!card->ch_in.is_active) {
+ via_chan_set_buffering(card, &card->ch_in, -1);
+ via_chan_buffer_init(card, &card->ch_in);
+ }
via_chan_maybe_start (&card->ch_in);
- if (atomic_read (&card->ch_in.n_bufs) == 0)
+ if (atomic_read (&card->ch_in.n_frags) == 0)
poll_wait(file, &card->ch_in.wait, wait);
}
- if (wr && (atomic_read (&card->ch_out.n_bufs) > 0))
+ if (wr && ((atomic_read (&card->ch_out.n_frags) > 0) || !card->ch_out.is_active))
mask |= POLLOUT | POLLWRNORM;
- if (rd && (atomic_read (&card->ch_in.n_bufs) > 0))
+ if (rd && (atomic_read (&card->ch_in.n_frags) > 0))
mask |= POLLIN | POLLRDNORM;
- DPRINTK("EXIT, returning %u\n", mask);
+ DPRINTK ("EXIT, returning %u\n", mask);
return mask;
}
@@ -2158,12 +2297,12 @@ static int via_dsp_drain_playback (struct via_info *card,
if (chan->slop_len > 0)
via_chan_flush_frag (chan);
- if (atomic_read (&chan->n_bufs) == VIA_DMA_BUFFERS)
+ if (atomic_read (&chan->n_frags) == chan->frag_number)
goto out;
via_chan_maybe_start (chan);
- while (atomic_read (&chan->n_bufs) < VIA_DMA_BUFFERS) {
+ while (atomic_read (&chan->n_frags) < chan->frag_number) {
if (nonblock) {
DPRINTK ("EXIT, returning -EAGAIN\n");
return -EAGAIN;
@@ -2178,7 +2317,7 @@ static int via_dsp_drain_playback (struct via_info *card,
pci_read_config_byte (card->pdev, 0x43, &r43);
pci_read_config_byte (card->pdev, 0x44, &r44);
pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK("PCI config: %02X %02X %02X %02X %02X %02X\n",
+ DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
r40,r41,r42,r43,r44,r48);
DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
@@ -2195,7 +2334,7 @@ static int via_dsp_drain_playback (struct via_info *card,
printk (KERN_ERR "sleeping but not active\n");
#endif
- DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_bufs));
+ DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags));
interruptible_sleep_on (&chan->wait);
if (signal_pending (current)) {
@@ -2213,7 +2352,7 @@ static int via_dsp_drain_playback (struct via_info *card,
pci_read_config_byte (card->pdev, 0x43, &r43);
pci_read_config_byte (card->pdev, 0x44, &r44);
pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK("PCI config: %02X %02X %02X %02X %02X %02X\n",
+ DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
r40,r41,r42,r43,r44,r48);
DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
@@ -2225,7 +2364,7 @@ static int via_dsp_drain_playback (struct via_info *card,
inl (card->baseaddr + 0x80),
inl (card->baseaddr + 0x84));
- DPRINTK ("final nbufs=%d\n", atomic_read (&chan->n_bufs));
+ DPRINTK ("final nbufs=%d\n", atomic_read (&chan->n_frags));
}
#endif
@@ -2252,21 +2391,23 @@ static int via_dsp_ioctl_space (struct via_info *card,
{
audio_buf_info info;
- info.fragstotal = VIA_DMA_BUFFERS;
- info.fragsize = VIA_DMA_BUF_SIZE;
+ via_chan_set_buffering(card, chan, -1);
+
+ info.fragstotal = chan->frag_number;
+ info.fragsize = chan->frag_size;
/* number of full fragments we can read/write without blocking */
- info.fragments = atomic_read (&chan->n_bufs);
+ info.fragments = atomic_read (&chan->n_frags);
- if ((chan->slop_len > 0) && (info.fragments > 0))
+ if ((chan->slop_len % chan->frag_size > 0) && (info.fragments > 0))
info.fragments--;
/* number of bytes that can be read or written immediately
* without blocking.
*/
- info.bytes = (info.fragments * VIA_DMA_BUF_SIZE);
- if (chan->slop_len > 0)
- info.bytes += VIA_DMA_BUF_SIZE - chan->slop_len;
+ info.bytes = (info.fragments * chan->frag_size);
+ if (chan->slop_len % chan->frag_size > 0)
+ info.bytes += chan->frag_size - (chan->slop_len % chan->frag_size);
DPRINTK ("EXIT, returning fragstotal=%d, fragsize=%d, fragments=%d, bytes=%d\n",
info.fragstotal,
@@ -2305,8 +2446,8 @@ static int via_dsp_ioctl_ptr (struct via_info *card,
if (chan->is_active) {
unsigned long extra;
- info.ptr = atomic_read (&chan->hw_ptr) * VIA_DMA_BUF_SIZE;
- extra = VIA_DMA_BUF_SIZE - inl (chan->iobase + VIA_BASE0_PCM_OUT_BLOCK_COUNT);
+ info.ptr = atomic_read (&chan->hw_ptr) * chan->frag_size;
+ extra = chan->frag_size - inl (chan->iobase + VIA_PCM_BLOCK_COUNT);
info.ptr += extra;
info.bytes += extra;
} else {
@@ -2386,13 +2527,13 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
/* OSS API version. XXX unverified */
case OSS_GETVERSION:
- DPRINTK("ioctl OSS_GETVERSION, EXIT, returning SOUND_VERSION\n");
+ DPRINTK ("ioctl OSS_GETVERSION, EXIT, returning SOUND_VERSION\n");
rc = put_user (SOUND_VERSION, (int *)arg);
break;
/* list of supported PCM data formats */
case SNDCTL_DSP_GETFMTS:
- DPRINTK("DSP_GETFMTS, EXIT, returning AFMT U8|S16_LE\n");
+ DPRINTK ("DSP_GETFMTS, EXIT, returning AFMT U8|S16_LE\n");
rc = put_user (AFMT_U8 | AFMT_S16_LE, (int *)arg);
break;
@@ -2402,20 +2543,19 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_SETFMT, val==%d\n", val);
+ DPRINTK ("DSP_SETFMT, val==%d\n", val);
if (val != AFMT_QUERY) {
rc = 0;
- if (rc == 0 && rd)
+ if (rd)
rc = via_chan_set_fmt (card, &card->ch_in, val);
- if (rc == 0 && wr)
+
+ if (rc >= 0 && wr)
rc = via_chan_set_fmt (card, &card->ch_out, val);
- if (rc <= 0) {
- if (rc == 0)
- rc = -EINVAL;
+ if (rc < 0)
break;
- }
+
val = rc;
} else {
if ((rd && (card->ch_in.pcm_fmt & VIA_PCM_FMT_16BIT)) ||
@@ -2424,7 +2564,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
else
val = AFMT_U8;
}
- DPRINTK("SETFMT EXIT, returning %d\n", val);
+ DPRINTK ("SETFMT EXIT, returning %d\n", val);
rc = put_user (val, (int *)arg);
break;
@@ -2434,18 +2574,19 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_CHANNELS, val==%d\n", val);
+ DPRINTK ("DSP_CHANNELS, val==%d\n", val);
if (val != 0) {
rc = 0;
- if (rc == 0 && rd)
+
+ if (rd)
rc = via_chan_set_stereo (card, &card->ch_in, val);
- if (rc == 0 && wr)
+
+ if (rc >= 0 && wr)
rc = via_chan_set_stereo (card, &card->ch_out, val);
- if (rc <= 0) {
- if (rc == 0)
- rc = -EINVAL;
+
+ if (rc < 0)
break;
- }
+
val = rc;
} else {
if ((rd && (card->ch_in.pcm_fmt & VIA_PCM_FMT_STEREO)) ||
@@ -2454,7 +2595,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
else
val = 1;
}
- DPRINTK("CHANNELS EXIT, returning %d\n", val);
+ DPRINTK ("CHANNELS EXIT, returning %d\n", val);
rc = put_user (val, (int *)arg);
break;
@@ -2464,21 +2605,21 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_STEREO, val==%d\n", val);
+ DPRINTK ("DSP_STEREO, val==%d\n", val);
rc = 0;
- if (rc == 0 && rd)
+ if (rd)
rc = via_chan_set_stereo (card, &card->ch_in, val ? 2 : 1);
- if (rc == 0 && wr)
+ if (rc >= 0 && wr)
rc = via_chan_set_stereo (card, &card->ch_out, val ? 2 : 1);
- if (rc <= 0) {
- if (rc == 0)
- rc = -EINVAL;
+ if (rc < 0)
break;
- }
- DPRINTK("STEREO EXIT, returning %d\n", val);
- rc = 0;
+
+ val = rc - 1;
+
+ DPRINTK ("STEREO EXIT, returning %d\n", val);
+ rc = put_user(val, (int *) arg);
break;
/* query or set sampling rate */
@@ -2487,7 +2628,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_SPEED, val==%d\n", val);
+ DPRINTK ("DSP_SPEED, val==%d\n", val);
if (val < 0) {
rc = -EINVAL;
break;
@@ -2495,16 +2636,14 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
if (val > 0) {
rc = 0;
- if (rc == 0 && rd)
+ if (rd)
rc = via_chan_set_speed (card, &card->ch_in, val);
- if (rc == 0 && wr)
+ if (rc >= 0 && wr)
rc = via_chan_set_speed (card, &card->ch_out, val);
- if (rc <= 0) {
- if (rc == 0)
- rc = -EINVAL;
+ if (rc < 0)
break;
- }
+
val = rc;
} else {
if (rd)
@@ -2514,7 +2653,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
else
val = 0;
}
- DPRINTK("SPEED EXIT, returning %d\n", val);
+ DPRINTK ("SPEED EXIT, returning %d\n", val);
rc = put_user (val, (int *)arg);
break;
@@ -2522,7 +2661,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
case SNDCTL_DSP_SYNC:
DPRINTK ("DSP_SYNC\n");
if (wr) {
- DPRINTK("SYNC EXIT (after calling via_dsp_drain_playback)\n");
+ DPRINTK ("SYNC EXIT (after calling via_dsp_drain_playback)\n");
rc = via_dsp_drain_playback (card, &card->ch_out, nonblock);
}
break;
@@ -2531,12 +2670,19 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
case SNDCTL_DSP_RESET:
DPRINTK ("DSP_RESET\n");
if (rd) {
- via_chan_clear (&card->ch_in);
+ via_chan_clear (card, &card->ch_in);
via_chan_pcm_fmt (&card->ch_in, 1);
+ card->ch_in.frag_number = 0;
+ card->ch_in.frag_size = 0;
+ atomic_set(&card->ch_in.n_frags, 0);
}
+
if (wr) {
- via_chan_clear (&card->ch_out);
+ via_chan_clear (card, &card->ch_out);
via_chan_pcm_fmt (&card->ch_out, 1);
+ card->ch_out.frag_number = 0;
+ card->ch_out.frag_size = 0;
+ atomic_set(&card->ch_out.n_frags, 0);
}
rc = 0;
@@ -2544,40 +2690,47 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
/* obtain bitmask of device capabilities, such as mmap, full duplex, etc. */
case SNDCTL_DSP_GETCAPS:
- DPRINTK("DSP_GETCAPS\n");
+ DPRINTK ("DSP_GETCAPS\n");
rc = put_user(VIA_DSP_CAP, (int *)arg);
break;
- /* obtain bitmask of device capabilities, such as mmap, full duplex, etc. */
+ /* obtain buffer fragment size */
case SNDCTL_DSP_GETBLKSIZE:
- DPRINTK("DSP_GETBLKSIZE\n");
- rc = put_user(VIA_DMA_BUF_SIZE, (int *)arg);
+ DPRINTK ("DSP_GETBLKSIZE\n");
+
+ if (rd) {
+ via_chan_set_buffering(card, &card->ch_in, -1);
+ rc = put_user(card->ch_in.frag_size, (int *)arg);
+ } else if (wr) {
+ via_chan_set_buffering(card, &card->ch_out, -1);
+ rc = put_user(card->ch_out.frag_size, (int *)arg);
+ }
break;
/* obtain information about input buffering */
case SNDCTL_DSP_GETISPACE:
- DPRINTK("DSP_GETISPACE\n");
+ DPRINTK ("DSP_GETISPACE\n");
if (rd)
rc = via_dsp_ioctl_space (card, &card->ch_in, (void*) arg);
break;
/* obtain information about output buffering */
case SNDCTL_DSP_GETOSPACE:
- DPRINTK("DSP_GETOSPACE\n");
+ DPRINTK ("DSP_GETOSPACE\n");
if (wr)
rc = via_dsp_ioctl_space (card, &card->ch_out, (void*) arg);
break;
/* obtain information about input hardware pointer */
case SNDCTL_DSP_GETIPTR:
- DPRINTK("DSP_GETIPTR\n");
+ DPRINTK ("DSP_GETIPTR\n");
if (rd)
rc = via_dsp_ioctl_ptr (card, &card->ch_in, (void*) arg);
break;
/* obtain information about output hardware pointer */
case SNDCTL_DSP_GETOPTR:
- DPRINTK("DSP_GETOPTR\n");
+ DPRINTK ("DSP_GETOPTR\n");
if (wr)
rc = via_dsp_ioctl_ptr (card, &card->ch_out, (void*) arg);
break;
@@ -2585,25 +2738,29 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
/* return number of bytes remaining to be played by DMA engine */
case SNDCTL_DSP_GETODELAY:
{
- DPRINTK("DSP_GETODELAY\n");
+ DPRINTK ("DSP_GETODELAY\n");
chan = &card->ch_out;
if (!wr)
break;
- val = VIA_DMA_BUFFERS - atomic_read (&chan->n_bufs);
+ if (chan->is_active) {
- if (val > 0) {
- val *= VIA_DMA_BUF_SIZE;
- val -= VIA_DMA_BUF_SIZE -
- inl (chan->iobase + VIA_BASE0_PCM_OUT_BLOCK_COUNT);
- }
- val += chan->slop_len;
+ val = chan->frag_number - atomic_read (&chan->n_frags);
- assert (val <= (VIA_DMA_BUF_SIZE * VIA_DMA_BUFFERS));
+ if (val > 0) {
+ val *= chan->frag_size;
+ val -= chan->frag_size -
+ inl (chan->iobase + VIA_PCM_BLOCK_COUNT);
+ }
+ val += chan->slop_len % chan->frag_size;
+ } else
+ val = 0;
- DPRINTK("GETODELAY EXIT, val = %d bytes\n", val);
+ assert (val <= (chan->frag_size * chan->frag_number));
+
+ DPRINTK ("GETODELAY EXIT, val = %d bytes\n", val);
rc = put_user (val, (int *)arg);
break;
}
@@ -2617,7 +2774,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_SETTRIGGER, rd=%d, wr=%d, act=%d/%d, en=%d/%d\n",
+ DPRINTK ("DSP_SETTRIGGER, rd=%d, wr=%d, act=%d/%d, en=%d/%d\n",
rd, wr, card->ch_in.is_active, card->ch_out.is_active,
card->ch_in.is_enabled, card->ch_out.is_enabled);
@@ -2625,6 +2782,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
if (rd)
rc = via_dsp_ioctl_trigger (&card->ch_in, val);
+
if (!rc && wr)
rc = via_dsp_ioctl_trigger (&card->ch_out, val);
@@ -2634,7 +2792,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
* with O_RDWR, this is mainly a no-op that always returns success.
*/
case SNDCTL_DSP_SETDUPLEX:
- DPRINTK("DSP_SETDUPLEX\n");
+ DPRINTK ("DSP_SETDUPLEX\n");
if (!rd || !wr)
break;
rc = 0;
@@ -2646,7 +2804,13 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
rc = -EFAULT;
break;
}
- DPRINTK("DSP_SETFRAGMENT, val==%d\n", val);
+ DPRINTK ("DSP_SETFRAGMENT, val==%d\n", val);
+
+ if (rd)
+ rc = via_chan_set_buffering(card, &card->ch_in, val);
+
+ if (wr)
+ rc = via_chan_set_buffering(card, &card->ch_out, val);
DPRINTK ("SNDCTL_DSP_SETFRAGMENT (fragshift==0x%04X (%d), maxfrags==0x%04X (%d))\n",
val & 0xFFFF,
@@ -2654,13 +2818,12 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
(val >> 16) & 0xFFFF,
(val >> 16) & 0xFFFF);
- /* just to shut up some programs */
rc = 0;
break;
/* inform device of an upcoming pause in input (or output). */
case SNDCTL_DSP_POST:
- DPRINTK("DSP_POST\n");
+ DPRINTK ("DSP_POST\n");
if (wr) {
if (card->ch_out.slop_len > 0)
via_chan_flush_frag (&card->ch_out);
@@ -2678,15 +2841,14 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
}
up (&card->syscall_sem);
- DPRINTK("EXIT, returning %d\n", rc);
+ DPRINTK ("EXIT, returning %d\n", rc);
return rc;
}
static int via_dsp_open (struct inode *inode, struct file *file)
{
- int rc, minor = MINOR(inode->i_rdev);
- int got_read_chan = 0;
+ int minor = MINOR(inode->i_rdev);
struct via_info *card;
struct pci_dev *pdev;
struct via_channel *chan;
@@ -2733,17 +2895,13 @@ match:
}
file->private_data = card;
- DPRINTK("file->f_mode == 0x%x\n", file->f_mode);
+ DPRINTK ("file->f_mode == 0x%x\n", file->f_mode);
/* handle input from analog source */
if (file->f_mode & FMODE_READ) {
chan = &card->ch_in;
- rc = via_chan_init (card, chan);
- if (rc)
- goto err_out;
-
- got_read_chan = 1;
+ via_chan_init (card, chan);
/* why is this forced to 16-bit stereo in all drivers? */
chan->pcm_fmt = VIA_PCM_FMT_16BIT | VIA_PCM_FMT_STEREO;
@@ -2756,30 +2914,28 @@ match:
if (file->f_mode & FMODE_WRITE) {
chan = &card->ch_out;
- rc = via_chan_init (card, chan);
- if (rc)
- goto err_out_read_chan;
+ via_chan_init (card, chan);
- if ((minor & 0xf) == SND_DEV_DSP16) {
- chan->pcm_fmt |= VIA_PCM_FMT_16BIT;
- via_set_rate (&card->ac97, chan, 44100);
+ if (file->f_mode & FMODE_READ) {
+ /* if in duplex mode make the recording and playback channels
+ have the same settings */
+ chan->pcm_fmt = VIA_PCM_FMT_16BIT | VIA_PCM_FMT_STEREO;
+ via_chan_pcm_fmt (chan, 0);
+ via_set_rate (&card->ac97, chan, 44100);
} else {
- via_set_rate (&card->ac97, chan, 8000);
+ if ((minor & 0xf) == SND_DEV_DSP16) {
+ chan->pcm_fmt = VIA_PCM_FMT_16BIT;
+ via_chan_pcm_fmt (chan, 0);
+ via_set_rate (&card->ac97, chan, 44100);
+ } else {
+ via_chan_pcm_fmt (chan, 0);
+ via_set_rate (&card->ac97, chan, 8000);
+ }
}
-
- via_chan_pcm_fmt (chan, 0);
}
DPRINTK ("EXIT, returning 0\n");
return 0;
-
-err_out_read_chan:
- if (got_read_chan)
- via_chan_free (card, &card->ch_in);
-err_out:
- up (&card->open_sem);
- DPRINTK("ERROR EXIT, returning %d\n", rc);
- return rc;
}
@@ -2807,15 +2963,18 @@ static int via_dsp_release(struct inode *inode, struct file *file)
printk (KERN_DEBUG "via_audio: ignoring drain playback error %d\n", rc);
via_chan_free (card, &card->ch_out);
+ via_chan_buffer_free(card, &card->ch_out);
}
- if (file->f_mode & FMODE_READ)
+ if (file->f_mode & FMODE_READ) {
via_chan_free (card, &card->ch_in);
+ via_chan_buffer_free (card, &card->ch_in);
+ }
up (&card->syscall_sem);
up (&card->open_sem);
- DPRINTK("EXIT, returning 0\n");
+ DPRINTK ("EXIT, returning 0\n");
return 0;
}
@@ -2934,9 +3093,9 @@ static int __init via_init_one (struct pci_dev *pdev, const struct pci_device_id
tmp &= 0xF0;
tmp |= pdev->irq;
pci_write_config_byte (pdev, 0x3C, tmp);
- DPRINTK("new 0x3c==0x%02x\n", tmp);
+ DPRINTK ("new 0x3c==0x%02x\n", tmp);
} else {
- DPRINTK("IRQ reg 0x3c==0x%02x, irq==%d\n",
+ DPRINTK ("IRQ reg 0x3c==0x%02x, irq==%d\n",
tmp, tmp & 0x0F);
}
@@ -3036,12 +3195,12 @@ static int __init init_via82cxxx_audio(void)
static void __exit cleanup_via82cxxx_audio(void)
{
- DPRINTK("ENTER\n");
+ DPRINTK ("ENTER\n");
pci_unregister_driver (&via_driver);
via_cleanup_proc ();
- DPRINTK("EXIT\n");
+ DPRINTK ("EXIT\n");
}
@@ -3133,7 +3292,7 @@ static int via_info_read_proc (char *page, char **start, off_t off,
);
- DPRINTK("EXIT, returning %d\n", len);
+ DPRINTK ("EXIT, returning %d\n", len);
return len;
#undef YN
diff --git a/drivers/sound/ymfpci.c b/drivers/sound/ymfpci.c
index a8cdc58f2..9670951f8 100644
--- a/drivers/sound/ymfpci.c
+++ b/drivers/sound/ymfpci.c
@@ -24,19 +24,28 @@
*
* TODO:
* - Use P44Slot for 44.1 playback.
- * - Capture and duplex
* - 96KHz playback for DVD - use pitch of 2.0.
* - uLaw for Sun apps.
+ * : Alan says firmly "no software format conversion in kernel".
* - Retain DMA buffer on close, do not wait the end of frame.
* - Cleanup
- * ? merge ymf_pcm and state
- * ? pcm interrupt no pointer
* ? underused structure members
* - Remove remaining P3 tags (debug messages).
* - Resolve XXX tagged questions.
* - Cannot play 5133Hz.
+ * - 2001/01/07 Consider if we can remove voice_lock, like so:
+ * : Allocate/deallocate voices in open/close under semafore.
+ * : We access voices in interrupt, that only for pcms that open.
+ * voice_lock around playback_prepare closes interrupts for insane duration.
+ * - Revisit the way voice_alloc is done - too confusing, overcomplicated.
+ * Should support various channel types, however.
+ * - Remove prog_dmabuf from read/write, leave it in open.
+ * - 2001/01/07 Replace the OPL3 part of CONFIG_SOUND_YMFPCI_LEGACY code with
+ * native synthesizer through a playback slot.
+ * - Use new 2.3.x cache coherent PCI DMA routines instead of virt_to_bus.
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -50,19 +59,19 @@
#include <asm/dma.h>
#include <asm/uaccess.h>
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+# include "sound_config.h"
+# include "mpu401.h"
+#endif
#include "ymfpci.h"
-#define snd_magic_cast(t, p, err) ((t *)(p))
-
-/* Channels, such as play and record. I do only play a.t.m. XXX */
-#define NR_HW_CH 1
-
-static int ymf_playback_trigger(ymfpci_t *codec, ymfpci_pcm_t *ypcm, int cmd);
-static int ymfpci_voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type,
- int pair, ymfpci_voice_t **rvoice);
-static int ymfpci_voice_free(ymfpci_t *codec, ymfpci_voice_t *pvoice);
-static int ymf_playback_prepare(ymfpci_t *codec, struct ymf_state *state);
-static int ymf_state_alloc(ymfpci_t *unit, int nvirt);
+static int ymf_playback_trigger(ymfpci_t *codec, struct ymf_pcm *ypcm, int cmd);
+static void ymf_capture_trigger(ymfpci_t *unit, struct ymf_pcm *ypcm, int cmd);
+static void ymfpci_voice_free(ymfpci_t *codec, ymfpci_voice_t *pvoice);
+static int ymf_capture_alloc(struct ymf_unit *unit, int *pbank);
+static int ymf_playback_prepare(struct ymf_state *state);
+static int ymf_capture_prepare(struct ymf_state *state);
+static struct ymf_state *ymf_state_alloc(ymfpci_t *unit);
static LIST_HEAD(ymf_devs);
@@ -87,7 +96,7 @@ MODULE_DEVICE_TABLE(pci, ymf_id_tbl);
/*
* Mindlessly copied from cs46xx XXX
*/
-extern __inline__ unsigned ld2(unsigned int x)
+static inline unsigned ld2(unsigned int x)
{
unsigned r = 0;
@@ -281,18 +290,13 @@ static void ymf_pcm_update_shift(struct ymf_pcm_format *f)
f->shift++;
}
-/*
- * Whole OSS-style DMA machinery is taken from cs46xx.
- */
-
/* Are you sure 32K is not too much? See if mpg123 skips on loaded systems. */
#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
#define DMABUF_MINORDER 1
/* allocate DMA buffer, playback and recording buffer should be allocated seperately */
-static int alloc_dmabuf(struct ymf_state *state)
+static int alloc_dmabuf(struct ymf_dmabuf *dmabuf)
{
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
void *rawbuf = NULL;
int order;
struct page * map, * mapend;
@@ -323,9 +327,8 @@ static int alloc_dmabuf(struct ymf_state *state)
}
/* free DMA buffer */
-static void dealloc_dmabuf(struct ymf_state *state)
+static void dealloc_dmabuf(struct ymf_dmabuf *dmabuf)
{
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
struct page *map, *mapend;
if (dmabuf->rawbuf) {
@@ -339,9 +342,9 @@ static void dealloc_dmabuf(struct ymf_state *state)
dmabuf->mapped = dmabuf->ready = 0;
}
-static int prog_dmabuf(struct ymf_state *state, unsigned rec)
+static int prog_dmabuf(struct ymf_state *state, int rec)
{
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_dmabuf *dmabuf;
int w_16;
unsigned bytepersec;
unsigned bufsize;
@@ -350,6 +353,7 @@ static int prog_dmabuf(struct ymf_state *state, unsigned rec)
int ret;
w_16 = ymf_pcm_format_width(state->format.format) == 16;
+ dmabuf = rec ? &state->rpcm.dmabuf : &state->wpcm.dmabuf;
spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->hwptr = dmabuf->swptr = 0;
@@ -359,7 +363,7 @@ static int prog_dmabuf(struct ymf_state *state, unsigned rec)
/* allocate DMA buffer if not allocated yet */
if (!dmabuf->rawbuf)
- if ((ret = alloc_dmabuf(state)))
+ if ((ret = alloc_dmabuf(dmabuf)))
return ret;
bytepersec = state->format.rate << state->format.shift;
@@ -383,7 +387,6 @@ static int prog_dmabuf(struct ymf_state *state, unsigned rec)
dmabuf->numfrag = bufsize >> dmabuf->fragshift;
}
dmabuf->fragsize = 1 << dmabuf->fragshift;
- dmabuf->fragsamples = dmabuf->fragsize >> state->format.shift;
dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
/*
@@ -414,15 +417,20 @@ static int prog_dmabuf(struct ymf_state *state, unsigned rec)
* Now set up the ring
*/
- spin_lock_irqsave(&state->unit->reg_lock, flags);
+ /* XXX ret = rec? cap_pre(): pbk_pre(); */
+ spin_lock_irqsave(&state->unit->voice_lock, flags);
if (rec) {
- /* ymf_rec_setup(state); */
+ if ((ret = ymf_capture_prepare(state)) != 0) {
+ spin_unlock_irqrestore(&state->unit->voice_lock, flags);
+ return ret;
+ }
} else {
- if ((ret = ymf_playback_prepare(state->unit, state)) != 0) {
+ if ((ret = ymf_playback_prepare(state)) != 0) {
+ spin_unlock_irqrestore(&state->unit->voice_lock, flags);
return ret;
}
}
- spin_unlock_irqrestore(&state->unit->reg_lock, flags);
+ spin_unlock_irqrestore(&state->unit->voice_lock, flags);
/* set the ready flag for the dma buffer (this comment is not stupid) */
dmabuf->ready = 1;
@@ -439,9 +447,14 @@ static int prog_dmabuf(struct ymf_state *state, unsigned rec)
static void ymf_start_dac(struct ymf_state *state)
{
- ymf_playback_trigger(state->unit, &state->ypcm, 1);
+ ymf_playback_trigger(state->unit, &state->wpcm, 1);
}
+// static void ymf_start_adc(struct ymf_state *state)
+// {
+// ymf_capture_trigger(state->unit, &state->rpcm, 1);
+// }
+
/*
* Wait until output is drained.
* This does not kill the hardware for the sake of ioctls.
@@ -449,14 +462,14 @@ static void ymf_start_dac(struct ymf_state *state)
static void ymf_wait_dac(struct ymf_state *state)
{
struct ymf_unit *unit = state->unit;
- ymfpci_pcm_t *ypcm = &state->ypcm;
+ struct ymf_pcm *ypcm = &state->wpcm;
DECLARE_WAITQUEUE(waita, current);
unsigned long flags;
- add_wait_queue(&state->dmabuf.wait, &waita);
+ add_wait_queue(&ypcm->dmabuf.wait, &waita);
spin_lock_irqsave(&unit->reg_lock, flags);
- if (state->dmabuf.count != 0 && !state->ypcm.running) {
+ if (ypcm->dmabuf.count != 0 && !ypcm->running) {
ymf_playback_trigger(unit, ypcm, 1);
}
@@ -479,7 +492,7 @@ static void ymf_wait_dac(struct ymf_state *state)
spin_unlock_irqrestore(&unit->reg_lock, flags);
set_current_state(TASK_RUNNING);
- remove_wait_queue(&state->dmabuf.wait, &waita);
+ remove_wait_queue(&ypcm->dmabuf.wait, &waita);
/*
* This function may take up to 4 seconds to reach this point
@@ -487,6 +500,17 @@ static void ymf_wait_dac(struct ymf_state *state)
*/
}
+/* Can just stop, without wait. Or can we? */
+static void ymf_stop_adc(struct ymf_state *state)
+{
+ struct ymf_unit *unit = state->unit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&unit->reg_lock, flags);
+ ymf_capture_trigger(unit, &state->rpcm, 0);
+ spin_unlock_irqrestore(&unit->reg_lock, flags);
+}
+
/*
* Hardware start management
*/
@@ -523,12 +547,11 @@ static void ymfpci_hw_stop(ymfpci_t *codec)
* Playback voice management
*/
-static int voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type, int pair, ymfpci_voice_t **rvoice)
+static int voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type, int pair, ymfpci_voice_t *rvoice[])
{
ymfpci_voice_t *voice, *voice2;
int idx;
-
- *rvoice = NULL;
+
for (idx = 0; idx < 64; idx += pair ? 2 : 1) {
voice = &codec->voices[idx];
voice2 = pair ? &codec->voices[idx+1] : NULL;
@@ -551,52 +574,29 @@ static int voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type, int pair, ymfp
break;
}
ymfpci_hw_start(codec);
- if (voice2)
+ rvoice[0] = voice;
+ if (voice2) {
ymfpci_hw_start(codec);
- *rvoice = voice;
+ rvoice[1] = voice2;
+ }
return 0;
}
- return -ENOMEM;
+ return -EBUSY; /* Your audio channel is open by someone else. */
}
-static int ymfpci_voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type,
- int pair, ymfpci_voice_t **rvoice)
+static void ymfpci_voice_free(ymfpci_t *unit, ymfpci_voice_t *pvoice)
{
- unsigned long flags;
- int result;
-
- spin_lock_irqsave(&codec->voice_lock, flags);
- for (;;) {
- result = voice_alloc(codec, type, pair, rvoice);
- if (result == 0 || type != YMFPCI_PCM)
- break;
- /* TODO: synth/midi voice deallocation */
- break;
- }
- spin_unlock_irqrestore(&codec->voice_lock, flags);
- return result;
-}
-
-static int ymfpci_voice_free(ymfpci_t *codec, ymfpci_voice_t *pvoice)
-{
- unsigned long flags;
-
- ymfpci_hw_stop(codec);
- spin_lock_irqsave(&codec->voice_lock, flags);
+ ymfpci_hw_stop(unit);
pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0;
pvoice->ypcm = NULL;
- pvoice->interrupt = NULL;
- spin_unlock_irqrestore(&codec->voice_lock, flags);
- return 0;
}
/*
- * PCM part
*/
static void ymf_pcm_interrupt(ymfpci_t *codec, ymfpci_voice_t *voice)
{
- ymfpci_pcm_t *ypcm;
+ struct ymf_pcm *ypcm;
int redzone;
int pos, delta, swptr;
int played, distance;
@@ -611,7 +611,7 @@ static void ymf_pcm_interrupt(ymfpci_t *codec, ymfpci_voice_t *voice)
ypcm->running = 0; // lock it
return;
}
- dmabuf = &state->dmabuf;
+ dmabuf = &ypcm->dmabuf;
spin_lock(&codec->reg_lock);
if (ypcm->running) {
/* P3 */ /** printk("ymfpci: %d, intr bank %d count %d start 0x%x:%x\n",
@@ -627,10 +627,9 @@ static void ymf_pcm_interrupt(ymfpci_t *codec, ymfpci_voice_t *voice)
pos = voice->bank[codec->active_bank].start;
pos <<= state->format.shift;
if (pos < 0 || pos >= dmabuf->dmasize) { /* ucode bug */
- printk(KERN_ERR
- "ymfpci%d: %d: runaway: hwptr %d dmasize %d\n",
+ printk(KERN_ERR "ymfpci%d: runaway voice %d: hwptr %d=>%d dmasize %d\n",
codec->dev_audio, voice->number,
- dmabuf->hwptr, dmabuf->dmasize);
+ dmabuf->hwptr, pos, dmabuf->dmasize);
pos = 0;
}
if (pos < dmabuf->hwptr) {
@@ -706,36 +705,66 @@ static void ymf_pcm_interrupt(ymfpci_t *codec, ymfpci_voice_t *voice)
spin_unlock(&codec->reg_lock);
}
-#if HAVE_RECORD
-static void ymfpci_pcm_capture_interrupt(snd_pcm_subchn_t *substream)
+static void ymf_cap_interrupt(ymfpci_t *unit, struct ymf_capture *cap)
{
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, runtime->private_data, );
- ymfpci_t *codec = ypcm->codec;
- u32 pos, delta;
-
- spin_lock(&codec->reg_lock);
+ struct ymf_pcm *ypcm;
+ int redzone;
+ struct ymf_state *state;
+ struct ymf_dmabuf *dmabuf;
+ int pos, delta;
+ int cnt;
+
+ if ((ypcm = cap->ypcm) == NULL) {
+ return;
+ }
+ if ((state = ypcm->state) == NULL) {
+ ypcm->running = 0; // lock it
+ return;
+ }
+ dmabuf = &ypcm->dmabuf;
+ spin_lock(&unit->reg_lock);
if (ypcm->running) {
- pos = codec->bank_capture[ypcm->capture_bank_number][codec->active_bank]->start << ypcm->shift_offset;
- if (pos < ypcm->last_pos) // <-- dmabuf->hwptr
- delta = pos + (ypcm->buffer_size - ypcm->last_pos);
- else
- delta = pos - ypcm->last_pos;
- ypcm->frag_pos += delta;
- ypcm->last_pos = pos;
- while (ypcm->frag_pos >= ypcm->frag_size) {
- ypcm->frag_pos -= ypcm->frag_size;
- // printk("done - active_bank = 0x%x, start = 0x%x\n", codec->active_bank, voice->bank[codec->active_bank].start);
- spin_unlock(&codec->reg_lock);
- snd_pcm_transfer_done(substream);
- spin_lock(&codec->reg_lock);
+ redzone = ymf_calc_lend(state->format.rate);
+ redzone <<= (state->format.shift + 1);
+
+ pos = cap->bank[unit->active_bank].start;
+ // pos <<= state->format.shift;
+ if (pos < 0 || pos >= dmabuf->dmasize) { /* ucode bug */
+ printk(KERN_ERR "ymfpci%d: runaway capture %d: hwptr %d=>%d dmasize %d\n",
+ unit->dev_audio, ypcm->capture_bank_number,
+ dmabuf->hwptr, pos, dmabuf->dmasize);
+ pos = 0;
+ }
+ if (pos < dmabuf->hwptr) {
+ delta = dmabuf->dmasize - dmabuf->hwptr;
+ delta += pos;
+ } else {
+ delta = pos - dmabuf->hwptr;
+ }
+ dmabuf->hwptr = pos;
+
+ cnt = dmabuf->count;
+ cnt += delta;
+ if (cnt + redzone > dmabuf->dmasize) {
+ /* Overflow - bump swptr */
+ dmabuf->count = dmabuf->dmasize - redzone;
+ dmabuf->swptr = dmabuf->hwptr + redzone;
+ if (dmabuf->swptr >= dmabuf->dmasize) {
+ dmabuf->swptr -= dmabuf->dmasize;
+ }
+ } else {
+ dmabuf->count = cnt;
+ }
+
+ dmabuf->total_bytes += delta;
+ if (dmabuf->count) { /* && is_sleeping XXX */
+ wake_up(&dmabuf->wait);
}
}
- spin_unlock(&codec->reg_lock);
+ spin_unlock(&unit->reg_lock);
}
-#endif
-static int ymf_playback_trigger(ymfpci_t *codec, ymfpci_pcm_t *ypcm, int cmd)
+static int ymf_playback_trigger(ymfpci_t *codec, struct ymf_pcm *ypcm, int cmd)
{
if (ypcm->voices[0] == NULL) {
@@ -755,40 +784,29 @@ static int ymf_playback_trigger(ymfpci_t *codec, ymfpci_pcm_t *ypcm, int cmd)
return 0;
}
-#if HAVE_RECORD
-static int ymfpci_capture_trigger(void *private_data,
- snd_pcm_subchn_t * substream,
- int cmd)
+static void ymf_capture_trigger(ymfpci_t *codec, struct ymf_pcm *ypcm, int cmd)
{
- unsigned long flags;
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, substream->runtime->private_data, -ENXIO);
- int result = 0;
u32 tmp;
- spin_lock_irqsave(&codec->reg_lock, flags);
- if (cmd == SND_PCM_TRIGGER_GO) {
+ if (cmd != 0) {
tmp = ymfpci_readl(codec, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number);
ymfpci_writel(codec, YDSXGR_MAPOFREC, tmp);
ypcm->running = 1;
- } else if (cmd == SND_PCM_TRIGGER_STOP) {
+ } else {
tmp = ymfpci_readl(codec, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number);
ymfpci_writel(codec, YDSXGR_MAPOFREC, tmp);
ypcm->running = 0;
- } else {
- result = -EINVAL;
}
- spin_unlock_irqrestore(&codec->reg_lock, flags);
- return result;
}
-#endif
-static int ymfpci_pcm_voice_alloc(ymfpci_pcm_t *ypcm, int voices)
+static int ymfpci_pcm_voice_alloc(struct ymf_pcm *ypcm, int voices)
{
+ struct ymf_unit *unit;
int err;
+ unit = ypcm->state->unit;
if (ypcm->voices[1] != NULL && voices < 2) {
- ymfpci_voice_free(ypcm->codec, ypcm->voices[1]);
+ ymfpci_voice_free(unit, ypcm->voices[1]);
ypcm->voices[1] = NULL;
}
if (voices == 1 && ypcm->voices[0] != NULL)
@@ -797,18 +815,17 @@ static int ymfpci_pcm_voice_alloc(ymfpci_pcm_t *ypcm, int voices)
return 0; /* already allocated */
if (voices > 1) {
if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) {
- ymfpci_voice_free(ypcm->codec, ypcm->voices[0]);
+ ymfpci_voice_free(unit, ypcm->voices[0]);
ypcm->voices[0] = NULL;
}
- }
- err = ymfpci_voice_alloc(ypcm->codec, YMFPCI_PCM, voices > 1, &ypcm->voices[0]);
- if (err < 0)
- return err;
- ypcm->voices[0]->ypcm = ypcm;
- ypcm->voices[0]->interrupt = ymf_pcm_interrupt;
- if (voices > 1) {
- ypcm->voices[1] = &ypcm->codec->voices[ypcm->voices[0]->number + 1];
+ if ((err = voice_alloc(unit, YMFPCI_PCM, 1, ypcm->voices)) < 0)
+ return err;
+ ypcm->voices[0]->ypcm = ypcm;
ypcm->voices[1]->ypcm = ypcm;
+ } else {
+ if ((err = voice_alloc(unit, YMFPCI_PCM, 0, ypcm->voices)) < 0)
+ return err;
+ ypcm->voices[0]->ypcm = ypcm;
}
return 0;
}
@@ -901,17 +918,32 @@ static void ymf_pcm_init_voice(ymfpci_voice_t *voice, int stereo,
}
/*
- * XXX Use new cache coherent PCI DMA routines instead of virt_to_bus.
+ * XXX Capture channel allocation is entirely fake at the moment.
+ * We use only one channel and mark it busy as required.
*/
-static int ymf_playback_prepare(ymfpci_t *codec, struct ymf_state *state)
+static int ymf_capture_alloc(struct ymf_unit *unit, int *pbank)
{
- ymfpci_pcm_t *ypcm = &state->ypcm;
+ struct ymf_capture *cap;
+ int cbank;
+
+ cbank = 1; /* Only ADC slot is used for now. */
+ cap = &unit->capture[cbank];
+ if (cap->use)
+ return -EBUSY;
+ cap->use = 1;
+ *pbank = cbank;
+ return 0;
+}
+
+static int ymf_playback_prepare(struct ymf_state *state)
+{
+ struct ymf_pcm *ypcm = &state->wpcm;
int err, nvoice;
if ((err = ymfpci_pcm_voice_alloc(ypcm, state->format.voices)) < 0) {
- /* Cannot be unless we leak voices in ymf_release! */
- printk(KERN_ERR "ymfpci%d: cannot allocate voice!\n",
- codec->dev_audio);
+ /* Somebody started 32 mpg123's in parallel? */
+ /* P3 */ printk("ymfpci%d: cannot allocate voice\n",
+ state->unit->dev_audio);
return err;
}
@@ -919,101 +951,76 @@ static int ymf_playback_prepare(ymfpci_t *codec, struct ymf_state *state)
ymf_pcm_init_voice(ypcm->voices[nvoice],
state->format.voices == 2, state->format.rate,
ymf_pcm_format_width(state->format.format) == 16,
- virt_to_bus(state->dmabuf.rawbuf), state->dmabuf.dmasize,
+ virt_to_bus(ypcm->dmabuf.rawbuf), ypcm->dmabuf.dmasize,
ypcm->spdif);
}
return 0;
}
-#if 0 /* old */
-static int ymfpci_capture_prepare(void *private_data,
- snd_pcm_subchn_t * substream)
+static int ymf_capture_prepare(struct ymf_state *state)
{
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, runtime->private_data, -ENXIO);
+ ymfpci_t *unit = state->unit;
+ struct ymf_pcm *ypcm = &state->rpcm;
ymfpci_capture_bank_t * bank;
- int nbank;
+ /* XXX This is confusing, gotta rename one of them banks... */
+ int nbank; /* flip-flop bank */
+ int cbank; /* input [super-]bank */
+ struct ymf_capture *cap;
u32 rate, format;
- ypcm->frag_size = snd_pcm_lib_transfer_fragment(substream);
- ypcm->buffer_size = snd_pcm_lib_transfer_size(substream);
- ypcm->frag_pos = 0;
- ypcm->last_pos = 0;
- ypcm->shift_offset = 0;
- rate = ((48000 * 4096) / runtime->format.rate) - 1;
+ if (ypcm->capture_bank_number == -1) {
+ if (ymf_capture_alloc(unit, &cbank) != 0)
+ return -EBUSY;
+
+ ypcm->capture_bank_number = cbank;
+
+ cap = &unit->capture[cbank];
+ cap->bank = unit->bank_capture[cbank][0];
+ cap->ypcm = ypcm;
+ ymfpci_hw_start(unit);
+ }
+
+ // ypcm->frag_size = snd_pcm_lib_transfer_fragment(substream);
+ // frag_size is replaced with nonfragged byte-aligned rolling buffer
+ rate = ((48000 * 4096) / state->format.rate) - 1;
format = 0;
- if (runtime->format.voices == 2)
+ if (state->format.voices == 2)
format |= 2;
- if (snd_pcm_format_width(runtime->format.format) == 8)
+ if (ymf_pcm_format_width(state->format.format) == 8)
format |= 1;
switch (ypcm->capture_bank_number) {
case 0:
- ymfpci_writel(codec, YDSXGR_RECFORMAT, format);
- ymfpci_writel(codec, YDSXGR_RECSLOTSR, rate);
+ ymfpci_writel(unit, YDSXGR_RECFORMAT, format);
+ ymfpci_writel(unit, YDSXGR_RECSLOTSR, rate);
break;
case 1:
- ymfpci_writel(codec, YDSXGR_ADCFORMAT, format);
- ymfpci_writel(codec, YDSXGR_ADCSLOTSR, rate);
+ ymfpci_writel(unit, YDSXGR_ADCFORMAT, format);
+ ymfpci_writel(unit, YDSXGR_ADCSLOTSR, rate);
break;
}
for (nbank = 0; nbank < 2; nbank++) {
- bank = codec->bank_capture[ypcm->capture_bank_number][nbank];
- bank->base = virt_to_bus(runtime->dma_area->buf);
- bank->loop_end = ypcm->buffer_size;
+ bank = unit->bank_capture[ypcm->capture_bank_number][nbank];
+ bank->base = virt_to_bus(ypcm->dmabuf.rawbuf);
+ // bank->loop_end = ypcm->dmabuf.dmasize >> state->format.shift;
+ bank->loop_end = ypcm->dmabuf.dmasize;
bank->start = 0;
bank->num_of_loops = 0;
}
- if (runtime->digital.dig_valid)
- /*runtime->digital.type == SND_PCM_DIG_AES_IEC958*/
- ymfpci_writew(codec, YDSXGR_SPDIFOUTSTATUS, runtime->digital.dig_status[0] |
- (runtime->digital.dig_status[1] << 8));
+#if 0 /* s/pdif */
+ if (state->digital.dig_valid)
+ /*state->digital.type == SND_PCM_DIG_AES_IEC958*/
+ ymfpci_writew(codec, YDSXGR_SPDIFOUTSTATUS,
+ state->digital.dig_status[0] | (state->digital.dig_status[1] << 8));
+#endif
return 0;
}
-static unsigned int ymfpci_playback_pointer(void *private_data,
- snd_pcm_subchn_t * substream)
-{
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, runtime->private_data, -ENXIO);
- ymfpci_voice_t *voice = ypcm->voices[0];
- unsigned long flags;
- unsigned int result;
-
- spin_lock_irqsave(&codec->reg_lock, flags);
- if (ypcm->running && voice)
- result = voice->bank[codec->active_bank].start << ypcm->shift_offset;
- else
- result = 0;
- spin_unlock_irqrestore(&codec->reg_lock, flags);
- return result;
-}
-
-static unsigned int ymfpci_capture_pointer(void *private_data,
- snd_pcm_subchn_t * substream)
-{
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, runtime->private_data, -ENXIO);
- unsigned long flags;
- unsigned int result;
-
- spin_lock_irqsave(&codec->reg_lock, flags);
- if (ypcm->running)
- result = codec->bank_capture[ypcm->capture_bank_number][codec->active_bank]->start << ypcm->shift_offset;
- else
- result = 0;
- spin_unlock_irqrestore(&codec->reg_lock, flags);
- return result;
-}
-#endif /* old */
-
void ymf_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
ymfpci_t *codec = dev_id;
u32 status, nvoice, mode;
- ymfpci_voice_t *voice;
+ struct ymf_voice *voice;
+ struct ymf_capture *cap;
status = ymfpci_readl(codec, YDSXGR_STATUS);
if (status & 0x80000000) {
@@ -1026,8 +1033,13 @@ void ymf_interrupt(int irq, void *dev_id, struct pt_regs *regs)
spin_lock(&codec->voice_lock);
for (nvoice = 0; nvoice < 64; nvoice++) {
voice = &codec->voices[nvoice];
- if (voice->interrupt)
- voice->interrupt(codec, voice);
+ if (voice->use)
+ ymf_pcm_interrupt(codec, voice);
+ }
+ for (nvoice = 0; nvoice < 5; nvoice++) {
+ cap = &codec->capture[nvoice];
+ if (cap->use)
+ ymf_cap_interrupt(codec, cap);
}
spin_unlock(&codec->voice_lock);
}
@@ -1039,22 +1051,32 @@ void ymf_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
}
-static void ymf_pcm_free_substream(ymfpci_pcm_t *ypcm)
+static void ymf_pcm_free_substream(struct ymf_pcm *ypcm)
{
- ymfpci_t *codec;
+ unsigned long flags;
+ struct ymf_unit *unit;
+
+ unit = ypcm->state->unit;
- if (ypcm) {
- codec = ypcm->codec;
+ if (ypcm->type == PLAYBACK_VOICE) {
+ spin_lock_irqsave(&unit->voice_lock, flags);
if (ypcm->voices[1])
- ymfpci_voice_free(codec, ypcm->voices[1]);
+ ymfpci_voice_free(unit, ypcm->voices[1]);
if (ypcm->voices[0])
- ymfpci_voice_free(codec, ypcm->voices[0]);
+ ymfpci_voice_free(unit, ypcm->voices[0]);
+ spin_unlock_irqrestore(&unit->voice_lock, flags);
+ } else {
+ if (ypcm->capture_bank_number != -1) {
+ unit->capture[ypcm->capture_bank_number].use = 0;
+ ypcm->capture_bank_number = -1;
+ ymfpci_hw_stop(unit);
+ }
}
}
-static int ymf_state_alloc(ymfpci_t *unit, int nvirt)
+static struct ymf_state *ymf_state_alloc(ymfpci_t *unit)
{
- ymfpci_pcm_t *ypcm;
+ struct ymf_pcm *ypcm;
struct ymf_state *state;
if ((state = kmalloc(sizeof(struct ymf_state), GFP_KERNEL)) == NULL) {
@@ -1062,61 +1084,31 @@ static int ymf_state_alloc(ymfpci_t *unit, int nvirt)
}
memset(state, 0, sizeof(struct ymf_state));
- init_waitqueue_head(&state->dmabuf.wait);
-
- ypcm = &state->ypcm;
+ ypcm = &state->wpcm;
ypcm->state = state;
- ypcm->codec = unit;
ypcm->type = PLAYBACK_VOICE;
+ ypcm->capture_bank_number = -1;
+ init_waitqueue_head(&ypcm->dmabuf.wait);
+
+ ypcm = &state->rpcm;
+ ypcm->state = state;
+ ypcm->type = CAPTURE_AC97;
+ ypcm->capture_bank_number = -1;
+ init_waitqueue_head(&ypcm->dmabuf.wait);
state->unit = unit;
- state->virt = nvirt;
state->format.format = AFMT_U8;
state->format.rate = 8000;
state->format.voices = 1;
ymf_pcm_update_shift(&state->format);
- unit->states[nvirt] = state;
- return 0;
+ return state;
out0:
- return -ENOMEM;
+ return NULL;
}
-#if HAVE_RECORD
-
-static int ymfpci_capture_open(void *private_data,
- snd_pcm_subchn_t * substream,
- u32 capture_bank_number)
-{
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm;
- int err;
-
- if ((err = snd_pcm_dma_alloc(substream, !capture_bank_number ? codec->dma2ptr : codec->dma3ptr, "YMFPCI - ADC")) < 0)
- return err;
- ypcm = snd_magic_kcalloc(ymfpci_pcm_t, 0, GFP_KERNEL);
- if (ypcm == NULL) {
- snd_pcm_dma_free(substream);
- return -ENOMEM;
- }
- ypcm->codec = codec;
- ypcm->type = capture_bank_number + CAPTURE_REC;
- ypcm->substream = substream;
- ypcm->capture_bank_number = capture_bank_number;
- codec->capture_substream[capture_bank_number] = substream;
- runtime->hw = &ymfpci_capture;
- snd_pcm_set_mixer(substream, codec->mixer->device, codec->ac97->me_capture);
- runtime->private_data = ypcm;
- runtime->private_free = ymfpci_pcm_free_substream;
- ymfpci_hw_start(codec);
- return 0;
-}
-
-#endif /* old */
-
/* AES/IEC958 channel status bits */
#define SND_PCM_AES0_PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
#define SND_PCM_AES0_NONAUDIO (1<<1) /* 0 = audio, 1 = non-audio */
@@ -1202,24 +1194,6 @@ static int ymfpci_capture_open(void *private_data,
#define SND_PCM_AES3_CON_CLOCK_50PPM (1<<4) /* 50 ppm */
#define SND_PCM_AES3_CON_CLOCK_VARIABLE (2<<4) /* variable pitch */
-#if HAVE_RECORD /* old */
-
-static int ymfpci_capture_close(void *private_data,
- snd_pcm_subchn_t * substream)
-{
- ymfpci_t *codec = snd_magic_cast(ymfpci_t, private_data, -ENXIO);
- snd_pcm_runtime_t *runtime = substream->runtime;
- ymfpci_pcm_t *ypcm = snd_magic_cast(ymfpci_pcm_t, runtime->private_data, -ENXIO);
-
- if (ypcm != NULL) {
- codec->capture_substream[ypcm->capture_bank_number] = NULL;
- ymfpci_hw_stop(codec);
- }
- snd_pcm_dma_free(substream);
- return 0;
-}
-#endif
-
/*
* User interface
*/
@@ -1229,21 +1203,21 @@ static loff_t ymf_llseek(struct file *file, loff_t offset, int origin)
return -ESPIPE;
}
-/* in this loop, dmabuf.count signifies the amount of data that is waiting to be copied to
- the user's buffer. it is filled by the dma machine and drained by this loop. */
-static ssize_t ymf_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
+/*
+ * in this loop, dmabuf.count signifies the amount of data that is
+ * waiting to be copied to the user's buffer. it is filled by the dma
+ * machine and drained by this loop.
+ */
+static ssize_t
+ymf_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
{
-#if HAVE_RECORD
- struct cs_state *state = (struct cs_state *)file->private_data;
- struct dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_state *state = (struct ymf_state *)file->private_data;
+ struct ymf_dmabuf *dmabuf = &state->rpcm.dmabuf;
+ DECLARE_WAITQUEUE(waita, current);
ssize_t ret;
unsigned long flags;
- unsigned swptr;
- int cnt;
-
-#ifdef DEBUG
- printk("cs461x: cs_read called, count = %d\n", count);
-#endif
+ unsigned int swptr;
+ int cnt; /* This many to go in this revolution */
if (ppos != &file->f_pos)
return -ESPIPE;
@@ -1255,19 +1229,14 @@ static ssize_t ymf_read(struct file *file, char *buffer, size_t count, loff_t *p
return -EFAULT;
ret = 0;
+ add_wait_queue(&dmabuf->wait, &waita);
while (count > 0) {
- spin_lock_irqsave(&state->card->lock, flags);
- if (dmabuf->count > (signed) dmabuf->dmasize) {
- /* buffer overrun, we are recovering from sleep_on_timeout,
- resync hwptr and swptr, make process flush the buffer */
- dmabuf->count = dmabuf->dmasize;
- dmabuf->swptr = dmabuf->hwptr;
- }
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
swptr = dmabuf->swptr;
cnt = dmabuf->dmasize - swptr;
if (dmabuf->count < cnt)
cnt = dmabuf->count;
- spin_unlock_irqrestore(&state->card->lock, flags);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
if (cnt > count)
cnt = count;
@@ -1275,14 +1244,18 @@ static ssize_t ymf_read(struct file *file, char *buffer, size_t count, loff_t *p
unsigned long tmo;
/* buffer is empty, start the dma machine and wait for data to be
recorded */
- start_adc(state);
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ if (!state->rpcm.running) {
+ ymf_capture_trigger(state->unit, &state->rpcm, 1);
+ }
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
if (file->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
- return ret;
+ break;
}
/* This isnt strictly right for the 810 but it'll do */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
- tmo >>= sample_shift[dmabuf->fmt];
+ tmo = (dmabuf->dmasize * HZ) / (state->format.rate * 2);
+ tmo >>= state->format.shift;
/* There are two situations when sleep_on_timeout returns, one is when
the interrupt is serviced correctly and the process is waked up by
ISR ON TIME. Another is when timeout is expired, which means that
@@ -1290,50 +1263,56 @@ static ssize_t ymf_read(struct file *file, char *buffer, size_t count, loff_t *p
is TOO LATE for the process to be scheduled to run (scheduler latency)
which results in a (potential) buffer overrun. And worse, there is
NOTHING we can do to prevent it. */
- if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
-#ifdef DEBUG
- printk(KERN_ERR "cs461x: recording schedule timeout, "
- "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
- dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
- dmabuf->hwptr, dmabuf->swptr);
-#endif
- /* a buffer overrun, we delay the recovery untill next time the
- while loop begin and we REALLY have space to record */
+ set_current_state(TASK_INTERRUPTIBLE);
+ tmo = schedule_timeout(tmo);
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ if (tmo == 0 && dmabuf->count == 0) {
+ printk(KERN_ERR "ymfpci%d: recording schedule timeout, "
+ "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
+ state->unit->dev_audio,
+ dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
+ dmabuf->hwptr, dmabuf->swptr);
}
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
if (signal_pending(current)) {
ret = ret ? ret : -ERESTARTSYS;
- return ret;
+ break;
}
continue;
}
if (copy_to_user(buffer, dmabuf->rawbuf + swptr, cnt)) {
if (!ret) ret = -EFAULT;
- return ret;
+ break;
}
swptr = (swptr + cnt) % dmabuf->dmasize;
- spin_lock_irqsave(&state->card->lock, flags);
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->swptr = swptr;
dmabuf->count -= cnt;
- spin_unlock_irqrestore(&state->card->lock, flags);
+ // spin_unlock_irqrestore(&state->unit->reg_lock, flags);
count -= cnt;
buffer += cnt;
ret += cnt;
- start_adc(state);
+ // spin_lock_irqsave(&state->unit->reg_lock, flags);
+ if (!state->rpcm.running) {
+ ymf_capture_trigger(state->unit, &state->rpcm, 1);
+ }
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dmabuf->wait, &waita);
+
return ret;
-#else
- return -EINVAL;
-#endif
}
-static ssize_t ymf_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
+static ssize_t
+ymf_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
{
struct ymf_state *state = (struct ymf_state *)file->private_data;
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_dmabuf *dmabuf = &state->wpcm.dmabuf;
DECLARE_WAITQUEUE(waita, current);
ssize_t ret;
unsigned long flags;
@@ -1375,7 +1354,7 @@ static ssize_t ymf_write(struct file *file, const char *buffer, size_t count, lo
}
if (dmabuf->count == 0) {
swptr = dmabuf->hwptr;
- if (state->ypcm.running) {
+ if (state->wpcm.running) {
/*
* Add uncertainty reserve.
*/
@@ -1410,8 +1389,8 @@ static ssize_t ymf_write(struct file *file, const char *buffer, size_t count, lo
* wait for data to be played
*/
spin_lock_irqsave(&state->unit->reg_lock, flags);
- if (!state->ypcm.running) {
- ymf_playback_trigger(state->unit, &state->ypcm, 1);
+ if (!state->wpcm.running) {
+ ymf_playback_trigger(state->unit, &state->wpcm, 1);
}
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
if (file->f_flags & O_NONBLOCK) {
@@ -1448,8 +1427,8 @@ static ssize_t ymf_write(struct file *file, const char *buffer, size_t count, lo
*/
delay = state->format.rate / 20; /* 50ms */
delay <<= state->format.shift;
- if (dmabuf->count >= delay && !state->ypcm.running) {
- ymf_playback_trigger(state->unit, &state->ypcm, 1);
+ if (dmabuf->count >= delay && !state->wpcm.running) {
+ ymf_playback_trigger(state->unit, &state->wpcm, 1);
}
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
@@ -1469,19 +1448,23 @@ static ssize_t ymf_write(struct file *file, const char *buffer, size_t count, lo
static unsigned int ymf_poll(struct file *file, struct poll_table_struct *wait)
{
struct ymf_state *state = (struct ymf_state *)file->private_data;
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_dmabuf *dmabuf;
unsigned long flags;
unsigned int mask = 0;
- if (file->f_mode & (FMODE_WRITE | FMODE_READ))
- poll_wait(file, &dmabuf->wait, wait);
+ if (file->f_mode & FMODE_WRITE)
+ poll_wait(file, &state->wpcm.dmabuf.wait, wait);
+ // if (file->f_mode & FMODE_READ)
+ // poll_wait(file, &dmabuf->wait, wait);
spin_lock_irqsave(&state->unit->reg_lock, flags);
if (file->f_mode & FMODE_READ) {
+ dmabuf = &state->rpcm.dmabuf;
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLIN | POLLRDNORM;
}
if (file->f_mode & FMODE_WRITE) {
+ dmabuf = &state->wpcm.dmabuf;
if (dmabuf->mapped) {
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLOUT | POLLWRNORM;
@@ -1498,10 +1481,9 @@ static unsigned int ymf_poll(struct file *file, struct poll_table_struct *wait)
static int ymf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct ymf_state *state = (struct ymf_state *)file->private_data;
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_dmabuf *dmabuf = &state->wpcm.dmabuf;
int ret;
unsigned long size;
-
if (vma->vm_flags & VM_WRITE) {
if ((ret = prog_dmabuf(state, 0)) != 0)
@@ -1529,7 +1511,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ymf_state *state = (struct ymf_state *)file->private_data;
- struct ymf_dmabuf *dmabuf = &state->dmabuf;
+ struct ymf_dmabuf *dmabuf;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
@@ -1542,28 +1524,30 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_RESET:
if (file->f_mode & FMODE_WRITE) {
ymf_wait_dac(state);
+ dmabuf = &state->wpcm.dmabuf;
spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
+ dmabuf->swptr = dmabuf->hwptr;
dmabuf->count = dmabuf->total_bytes = 0;
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#if HAVE_RECORD
if (file->f_mode & FMODE_READ) {
- stop_adc(state);
- synchronize_irq();
+ ymf_stop_adc(state);
+ dmabuf = &state->rpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
+ dmabuf->swptr = dmabuf->hwptr;
dmabuf->count = dmabuf->total_bytes = 0;
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#endif
return 0;
case SNDCTL_DSP_SYNC:
if (file->f_mode & FMODE_WRITE) {
+ dmabuf = &state->wpcm.dmabuf;
if (file->f_flags & O_NONBLOCK) {
spin_lock_irqsave(&state->unit->reg_lock, flags);
- if (dmabuf->count != 0 && !state->ypcm.running) {
+ if (dmabuf->count != 0 && !state->wpcm.running) {
ymf_start_dac(state);
}
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
@@ -1571,6 +1555,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
ymf_wait_dac(state);
}
}
+ /* XXX What does this do for reading? dmabuf->count=0; ? */
return 0;
case SNDCTL_DSP_SPEED: /* set smaple rate */
@@ -1579,17 +1564,22 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
if (val >= 8000 && val <= 48000) {
if (file->f_mode & FMODE_WRITE) {
ymf_wait_dac(state);
+ dmabuf = &state->wpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf->ready = 0;
+ state->format.rate = val;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#if HAVE_RECORD
if (file->f_mode & FMODE_READ) {
- stop_adc(state);
+ ymf_stop_adc(state);
+ dmabuf = &state->rpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf->ready = 0;
+ state->format.rate = val;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#endif
- spin_lock_irqsave(&state->unit->reg_lock, flags);
- dmabuf->ready = 0;
- state->format.rate = val;
- ymf_pcm_update_shift(&state->format);
- spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
return put_user(state->format.rate, (int *)arg);
@@ -1597,41 +1587,41 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
* OSS manual does not mention SNDCTL_DSP_STEREO at all.
* All channels are mono and if you want stereo, you
* play into two channels with SNDCTL_DSP_CHANNELS.
- * However, mpg123 uses it. I wonder, why Michael Hipp uses it.
+ * However, mpg123 calls it. I wonder, why Michael Hipp used it.
*/
case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
if (get_user(val, (int *)arg))
return -EFAULT;
if (file->f_mode & FMODE_WRITE) {
ymf_wait_dac(state);
+ dmabuf = &state->wpcm.dmabuf;
spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->ready = 0;
state->format.voices = val ? 2 : 1;
ymf_pcm_update_shift(&state->format);
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#if HAVE_RECORD
if (file->f_mode & FMODE_READ) {
- /* stop_adc(state); */
+ ymf_stop_adc(state);
+ dmabuf = &state->rpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
dmabuf->ready = 0;
- if(val)
- dmabuf->fmt |= CS_FMT_STEREO;
- else
- dmabuf->fmt &= ~CS_FMT_STEREO;
+ state->format.voices = val ? 2 : 1;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#endif
return 0;
case SNDCTL_DSP_GETBLKSIZE:
if (file->f_mode & FMODE_WRITE) {
if ((val = prog_dmabuf(state, 0)))
return val;
- return put_user(dmabuf->fragsize, (int *)arg);
+ return put_user(state->wpcm.dmabuf.fragsize, (int *)arg);
}
if (file->f_mode & FMODE_READ) {
if ((val = prog_dmabuf(state, 1)))
return val;
- return put_user(dmabuf->fragsize, (int *)arg);
+ return put_user(state->rpcm.dmabuf.fragsize, (int *)arg);
}
return -EINVAL;
@@ -1644,17 +1634,22 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
if (val == AFMT_S16_LE || val == AFMT_U8) {
if (file->f_mode & FMODE_WRITE) {
ymf_wait_dac(state);
+ dmabuf = &state->wpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf->ready = 0;
+ state->format.format = val;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#if HAVE_RECORD
if (file->f_mode & FMODE_READ) {
- stop_adc(state);
+ ymf_stop_adc(state);
+ dmabuf = &state->rpcm.dmabuf;
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf->ready = 0;
+ state->format.format = val;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
-#endif
- spin_lock_irqsave(&state->unit->reg_lock, flags);
- dmabuf->ready = 0;
- state->format.format = val;
- ymf_pcm_update_shift(&state->format);
- spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
return put_user(state->format.format, (int *)arg);
@@ -1667,20 +1662,24 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
ymf_wait_dac(state);
if (val == 1 || val == 2) {
spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf = &state->wpcm.dmabuf;
dmabuf->ready = 0;
state->format.voices = val;
ymf_pcm_update_shift(&state->format);
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
}
}
-#if HAVE_RECORD
if (file->f_mode & FMODE_READ) {
- spin_lock_irqsave(&state->unit->reg_lock, flags);
- stop_adc(state);
- dmabuf->ready = 0;
- spin_unlock_irqrestore(&state->unit->reg_lock, flags);
+ ymf_stop_adc(state);
+ if (val == 1 || val == 2) {
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
+ dmabuf = &state->rpcm.dmabuf;
+ dmabuf->ready = 0;
+ state->format.voices = val;
+ ymf_pcm_update_shift(&state->format);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
+ }
}
-#endif
}
return put_user(state->format.voices, (int *)arg);
@@ -1696,15 +1695,16 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
* The paragraph above is a clumsy way to say "flush ioctl".
* This ioctl is used by mpg123.
*/
- /* P3 */ /* printk("ymfpci: ioctl SNDCTL_DSP_POST\n"); */
spin_lock_irqsave(&state->unit->reg_lock, flags);
- if (dmabuf->count != 0 && !state->ypcm.running) {
+ if (state->wpcm.dmabuf.count != 0 && !state->wpcm.running) {
ymf_start_dac(state);
}
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
return 0;
+#if 0 /* XXX Was dummy implementation anyways. Make sense of this. */
case SNDCTL_DSP_SUBDIVIDE:
+ dmabuf = &state->wpcm.dmabuf;
if (dmabuf->subdivision)
return -EINVAL;
if (get_user(val, (int *)arg))
@@ -1713,6 +1713,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
dmabuf->subdivision = val;
return 0;
+#endif
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(val, (int *)arg))
@@ -1720,6 +1721,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
/* P3: these frags are for Doom. Amasingly, it sets [2,2**11]. */
/* P3 */ // printk("ymfpci: ioctl SNDCTL_DSP_SETFRAGMENT 0x%x\n", val);
+ dmabuf = &state->wpcm.dmabuf;
dmabuf->ossfragshift = val & 0xffff;
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
switch (dmabuf->ossmaxfrags) {
@@ -1736,10 +1738,10 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_GETOSPACE:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
+ dmabuf = &state->wpcm.dmabuf;
if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->unit->reg_lock, flags);
- /* cs_update_ptr(state); */ /* XXX Always up to date? */
abinfo.fragsize = dmabuf->fragsize;
abinfo.bytes = dmabuf->dmasize - dmabuf->count;
abinfo.fragstotal = dmabuf->numfrag;
@@ -1747,21 +1749,19 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
return copy_to_user((void *)arg, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-#if HAVE_RECORD
case SNDCTL_DSP_GETISPACE:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
+ dmabuf = &state->rpcm.dmabuf;
if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
return val;
- spin_lock_irqsave(&state->card->lock, flags);
- cs_update_ptr(state);
+ spin_lock_irqsave(&state->unit->reg_lock, flags);
abinfo.fragsize = dmabuf->fragsize;
abinfo.bytes = dmabuf->count;
abinfo.fragstotal = dmabuf->numfrag;
abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
- spin_unlock_irqrestore(&state->card->lock, flags);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
return copy_to_user((void *)arg, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-#endif
case SNDCTL_DSP_NONBLOCK:
file->f_flags |= O_NONBLOCK;
@@ -1772,9 +1772,10 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
(int *)arg); */
return put_user(0, (int *)arg);
-#if 0 /* old */
+#if 0 /* not implememnted, yet? */
case SNDCTL_DSP_GETTRIGGER:
val = 0;
+ dmabuf = &state->wpcm.dmabuf;
if (file->f_mode & FMODE_READ && dmabuf->enable)
val |= PCM_ENABLE_INPUT;
if (file->f_mode & FMODE_WRITE && dmabuf->enable)
@@ -1785,6 +1786,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
if (get_user(val, (int *)arg))
return -EFAULT;
if (file->f_mode & FMODE_READ) {
+ dmabuf = &state->rpcm.dmabuf;
if (val & PCM_ENABLE_INPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
return ret;
@@ -1793,6 +1795,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
stop_adc(state);
}
if (file->f_mode & FMODE_WRITE) {
+ dmabuf = &state->wpcm.dmabuf;
if (val & PCM_ENABLE_OUTPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
return ret;
@@ -1801,48 +1804,48 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
stop_dac(state);
}
return 0;
-
#endif
-#if HAVE_RECORD
case SNDCTL_DSP_GETIPTR:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
+ dmabuf = &state->rpcm.dmabuf;
spin_lock_irqsave(&state->unit->reg_lock, flags);
- cs_update_ptr(state);
cinfo.bytes = dmabuf->total_bytes;
cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
cinfo.ptr = dmabuf->hwptr;
+ /* XXX fishy - breaks invariant count=hwptr-swptr */
if (dmabuf->mapped)
dmabuf->count &= dmabuf->fragsize-1;
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
- return copy_to_user((void *)arg, &cinfo, sizeof(cinfo));
-#endif
+ return copy_to_user((void *)arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
case SNDCTL_DSP_GETOPTR:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
+ dmabuf = &state->wpcm.dmabuf;
spin_lock_irqsave(&state->unit->reg_lock, flags);
- /* cs_update_ptr(state); */ /* Always up to date */
cinfo.bytes = dmabuf->total_bytes;
cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
cinfo.ptr = dmabuf->hwptr;
+ /* XXX fishy - breaks invariant count=swptr-hwptr */
if (dmabuf->mapped)
dmabuf->count &= dmabuf->fragsize-1;
spin_unlock_irqrestore(&state->unit->reg_lock, flags);
- return copy_to_user((void *)arg, &cinfo, sizeof(cinfo));
+ return copy_to_user((void *)arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
case SNDCTL_DSP_SETDUPLEX: /* XXX TODO */
return -EINVAL;
-#if 0 /* old */
+#if 0 /* XXX implement when an app found that uses it. */
case SNDCTL_DSP_GETODELAY:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
spin_lock_irqsave(&state->unit->reg_lock, flags);
cs_update_ptr(state);
+ dmabuf = &state->wpcm.dmabuf;
val = dmabuf->count;
- spin_unlock_irqrestore(&state->card->lock, flags);
+ spin_unlock_irqrestore(&state->unit->reg_lock, flags);
return put_user(val, (int *)arg);
#endif
@@ -1850,7 +1853,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
return put_user(state->format.rate, (int *)arg);
case SOUND_PCM_READ_CHANNELS:
- return put_user(state->format.voices, (int *)arg);
+ return put_user(state->format.voices, (int *)arg);
case SOUND_PCM_READ_BITS:
return put_user(AFMT_S16_LE, (int *)arg);
@@ -1873,28 +1876,25 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
return -ENOTTY;
}
+/*
+ * open(2)
+ * We use upper part of the minor to distinguish between soundcards.
+ * Channels are opened with a clone open.
+ */
static int ymf_open(struct inode *inode, struct file *file)
{
struct list_head *list;
ymfpci_t *unit;
int minor;
struct ymf_state *state;
- int nvirt;
int err;
- /*
- * This is how we do it currently: only one channel is used
- * in every board, so that we could use several boards in one machine.
- * We waste 63 out of 64 playback slots, but so what.
- * OSS model is constructed for devices with single playback channel.
- */
minor = MINOR(inode->i_rdev);
if ((minor & 0x0F) == 3) { /* /dev/dspN */
;
} else {
return -ENXIO;
}
- nvirt = 0; /* Such is the partitioning of minor */
for (list = ymf_devs.next; list != &ymf_devs; list = list->next) {
unit = list_entry(list, ymfpci_t, ymf_devs);
@@ -1905,34 +1905,36 @@ static int ymf_open(struct inode *inode, struct file *file)
return -ENODEV;
down(&unit->open_sem);
- if (unit->states[nvirt] != NULL) {
- up(&unit->open_sem);
- return -EBUSY;
- }
- if ((err = ymf_state_alloc(unit, nvirt)) != 0) {
+ if ((state = ymf_state_alloc(unit)) == NULL) {
up(&unit->open_sem);
- return err;
+ return -ENOMEM;
}
- state = unit->states[nvirt];
+ list_add_tail(&state->chain, &unit->states);
file->private_data = state;
/*
- * XXX This ymf_playback_prepare is totally unneeded here.
- * The question is if we want to allow write to fail if
- * prog_dmabuf fails... Say, no memory in DMA zone?
+ * ymf_read and ymf_write that we borrowed from cs46xx
+ * allocate buffers with prog_dmabuf(). We call prog_dmabuf
+ * here so that in case of DMA memory exhaustion open
+ * fails rather than write.
+ *
+ * XXX prog_dmabuf allocates voice. Should allocate explicitly, above.
*/
- if ((err = ymf_playback_prepare(unit, state)) != 0) {
- /* XXX This recovery is ugly as hell. */
-
- ymf_pcm_free_substream(&state->ypcm);
-
- unit->states[state->virt] = NULL;
- kfree(state);
-
- up(&unit->open_sem);
- return err;
+ if (file->f_mode & FMODE_WRITE) {
+ if (!state->wpcm.dmabuf.ready) {
+ if ((err = prog_dmabuf(state, 0)) != 0) {
+ goto out_nodma;
+ }
+ }
+ }
+ if (file->f_mode & FMODE_READ) {
+ if (!state->rpcm.dmabuf.ready) {
+ if ((err = prog_dmabuf(state, 1)) != 0) {
+ goto out_nodma;
+ }
+ }
}
#if 0 /* test if interrupts work */
@@ -1941,10 +1943,26 @@ static int ymf_open(struct inode *inode, struct file *file)
(YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN));
#endif
up(&unit->open_sem);
- /* XXX Is it correct to have MOD_INC_USE_COUNT outside of sem.? */
MOD_INC_USE_COUNT;
return 0;
+
+out_nodma:
+ /*
+ * XXX Broken custom: "goto out_xxx" in other place is
+ * a nestable exception, but here it is not nestable due to semaphore.
+ * XXX Doubtful technique of self-describing objects....
+ */
+ dealloc_dmabuf(&state->wpcm.dmabuf);
+ dealloc_dmabuf(&state->rpcm.dmabuf);
+ ymf_pcm_free_substream(&state->wpcm);
+ ymf_pcm_free_substream(&state->rpcm);
+
+ list_del(&state->chain);
+ kfree(state);
+
+ up(&unit->open_sem);
+ return err;
}
static int ymf_release(struct inode *inode, struct file *file)
@@ -1956,12 +1974,6 @@ static int ymf_release(struct inode *inode, struct file *file)
ymfpci_writeb(codec, YDSXGR_TIMERCTRL, 0);
#endif
- if (state != codec->states[state->virt]) {
- printk(KERN_ERR "ymfpci%d.%d: state mismatch\n",
- state->unit->dev_audio, state->virt);
- return -EIO;
- }
-
down(&codec->open_sem);
/*
@@ -1969,10 +1981,14 @@ static int ymf_release(struct inode *inode, struct file *file)
* Deallocate when unloading the driver and we can wait.
*/
ymf_wait_dac(state);
- dealloc_dmabuf(state);
- ymf_pcm_free_substream(&state->ypcm);
-
- codec->states[state->virt] = NULL;
+ ymf_stop_adc(state); /* fortunately, it's immediate */
+ dealloc_dmabuf(&state->wpcm.dmabuf);
+ dealloc_dmabuf(&state->rpcm.dmabuf);
+ ymf_pcm_free_substream(&state->wpcm);
+ ymf_pcm_free_substream(&state->rpcm);
+
+ list_del(&state->chain);
+ file->private_data = NULL; /* Can you tell I programmed Solaris */
kfree(state);
up(&codec->open_sem);
@@ -2045,6 +2061,83 @@ static /*const*/ struct file_operations ymf_mixer_fops = {
* initialization routines
*/
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+
+static int ymfpci_setup_legacy(ymfpci_t *unit, struct pci_dev *pcidev)
+{
+ int v;
+ int mpuio = -1, oplio = -1;
+
+ switch (unit->iomidi) {
+ case 0x330:
+ mpuio = 0;
+ break;
+ case 0x300:
+ mpuio = 1;
+ break;
+ case 0x332:
+ mpuio = 2;
+ break;
+ case 0x334:
+ mpuio = 3;
+ break;
+ default: ;
+ }
+
+ switch (unit->iosynth) {
+ case 0x388:
+ oplio = 0;
+ break;
+ case 0x398:
+ oplio = 1;
+ break;
+ case 0x3a0:
+ oplio = 2;
+ break;
+ case 0x3a8:
+ oplio = 3;
+ break;
+ default: ;
+ }
+
+ if (mpuio >= 0 || oplio >= 0) {
+ v = 0x003e;
+ pci_write_config_word(pcidev, PCIR_LEGCTRL, v);
+
+ switch (pcidev->device) {
+ case PCI_DEVICE_ID_YAMAHA_724:
+ case PCI_DEVICE_ID_YAMAHA_740:
+ case PCI_DEVICE_ID_YAMAHA_724F:
+ case PCI_DEVICE_ID_YAMAHA_740C:
+ v = 0x8800;
+ if (mpuio >= 0) { v |= mpuio<<4; }
+ if (oplio >= 0) { v |= oplio; }
+ pci_write_config_word(pcidev, PCIR_ELEGCTRL, v);
+ break;
+
+ case PCI_DEVICE_ID_YAMAHA_744:
+ case PCI_DEVICE_ID_YAMAHA_754:
+ v = 0x8800;
+ pci_write_config_word(pcidev, PCIR_ELEGCTRL, v);
+ if (oplio >= 0) {
+ pci_write_config_word(pcidev, PCIR_OPLADR, unit->iosynth);
+ }
+ if (mpuio >= 0) {
+ pci_write_config_word(pcidev, PCIR_MPUADR, unit->iomidi);
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "ymfpci: Unknown device ID: 0x%x\n",
+ pcidev->device);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
+
static void ymfpci_aclink_reset(struct pci_dev * pci)
{
u8 cmd;
@@ -2149,6 +2242,12 @@ static int ymfpci_memalloc(ymfpci_t *codec)
ptr += 0x00ff;
(long)ptr &= ~0x00ff;
+ /*
+ * Hardware requires only ptr[playback_ctrl_size] zeroed,
+ * but in our judgement it is a wrong kind of savings, so clear it all.
+ */
+ memset(ptr, 0, size);
+
codec->bank_base_playback = ptr;
codec->ctrl_playback = (u32 *)ptr;
codec->ctrl_playback[0] = YDSXG_PLAYBACK_VOICES;
@@ -2213,7 +2312,7 @@ static void ymfpci_memfree(ymfpci_t *codec)
kfree(codec->work_ptr);
}
-static int ymf_ac97_init(ymfpci_t *card, int num_ac97)
+static int ymf_ac97_init(ymfpci_t *unit, int num_ac97)
{
struct ac97_codec *codec;
u16 eid;
@@ -2224,7 +2323,7 @@ static int ymf_ac97_init(ymfpci_t *card, int num_ac97)
/* initialize some basic codec information, other fields will be filled
in ac97_probe_codec */
- codec->private_data = card;
+ codec->private_data = unit;
codec->id = num_ac97;
codec->codec_read = ymfpci_codec_read;
@@ -2241,14 +2340,14 @@ static int ymf_ac97_init(ymfpci_t *card, int num_ac97)
goto out_kfree;
}
- card->ac97_features = eid;
+ unit->ac97_features = eid;
if ((codec->dev_mixer = register_sound_mixer(&ymf_mixer_fops, -1)) < 0) {
printk(KERN_ERR "ymfpci: couldn't register mixer!\n");
goto out_kfree;
}
- card->ac97_codec[num_ac97] = codec;
+ unit->ac97_codec[num_ac97] = codec;
return 0;
out_kfree:
@@ -2256,6 +2355,19 @@ static int ymf_ac97_init(ymfpci_t *card, int num_ac97)
return -ENODEV;
}
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+# ifdef MODULE
+static int mpu_io = 0;
+static int synth_io = 0;
+MODULE_PARM(mpu_io, "i");
+MODULE_PARM(synth_io, "i");
+# else
+static int mpu_io = 0x330;
+static int synth_io = 0x388;
+# endif
+static int assigned;
+#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
+
static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
u16 ctrl;
@@ -2277,10 +2389,15 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
spin_lock_init(&codec->reg_lock);
spin_lock_init(&codec->voice_lock);
init_MUTEX(&codec->open_sem);
+ INIT_LIST_HEAD(&codec->states);
codec->pci = pcidev;
pci_read_config_byte(pcidev, PCI_REVISION_ID, &codec->rev);
codec->reg_area_virt = ioremap(pci_resource_start(pcidev, 0), 0x8000);
+ if (codec->reg_area_virt == NULL) {
+ printk(KERN_ERR "ymfpci: unable to map registers\n");
+ goto out_free;
+ }
printk(KERN_INFO "ymfpci: %s at 0x%lx IRQ %d\n",
(char *)ent->driver_data, pci_resource_start(pcidev, 0), pcidev->irq);
@@ -2289,6 +2406,16 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
if (ymfpci_codec_ready(codec, 0, 1) < 0)
goto out_unmap;
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+ if (assigned == 0) {
+ codec->iomidi = mpu_io;
+ codec->iosynth = synth_io;
+ if (ymfpci_setup_legacy(codec, pcidev) < 0)
+ goto out_unmap;
+ assigned = 1;
+ }
+#endif
+
ymfpci_download_image(codec);
udelay(100); /* seems we need some delay after downloading image.. */
@@ -2296,11 +2423,11 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
if (ymfpci_memalloc(codec) < 0)
goto out_disable_dsp;
- /* ymfpci_proc_init(card, codec); */
+ /* ymfpci_proc_init(unit, codec); */
if (request_irq(pcidev->irq, ymf_interrupt, SA_SHIRQ, "ymfpci", codec) != 0) {
- printk(KERN_ERR "ymfpci%d: unable to request IRQ %d\n",
- codec->dev_audio, pcidev->irq);
+ printk(KERN_ERR "ymfpci: unable to request IRQ %d\n",
+ pcidev->irq);
goto out_memfree;
}
@@ -2317,6 +2444,23 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
if ((err = ymf_ac97_init(codec, 0)) != 0)
goto out_unregister_sound_dsp;
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+ codec->opl3_data.name = "ymfpci";
+ codec->mpu_data.name = "ymfpci";
+
+ codec->opl3_data.io_base = codec->iosynth;
+ codec->opl3_data.irq = -1;
+
+ codec->mpu_data.io_base = codec->iomidi;
+ codec->mpu_data.irq = -1; /* XXX Make it ours. */
+
+ if (codec->iomidi) {
+ if (!probe_uart401(&codec->mpu_data, THIS_MODULE)) {
+ codec->iomidi = 0; /* XXX kludge */
+ }
+ }
+#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
+
/* put it into driver list */
list_add_tail(&codec->ymf_devs, &ymf_devs);
pci_set_drvdata(pcidev, codec);
@@ -2336,6 +2480,7 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
ymfpci_writel(codec, YDSXGR_STATUS, ~0);
out_unmap:
iounmap(codec->reg_area_virt);
+ out_free:
kfree(codec);
return -ENODEV;
}
@@ -2358,6 +2503,11 @@ static void __devexit ymf_remove_one(struct pci_dev *pcidev)
ctrl = ymfpci_readw(codec, YDSXGR_GLOBALCTRL);
ymfpci_writew(codec, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);
iounmap(codec->reg_area_virt);
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+ if (codec->iomidi) {
+ unload_uart401(&codec->mpu_data);
+ }
+#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
kfree(codec);
}
diff --git a/drivers/sound/ymfpci.h b/drivers/sound/ymfpci.h
index 6e1a8d5f3..bd2f4c2a3 100644
--- a/drivers/sound/ymfpci.h
+++ b/drivers/sound/ymfpci.h
@@ -21,6 +21,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+#include <linux/config.h>
/*
* Direct registers
@@ -131,7 +132,12 @@
#define YDSXG_AC97READCMD 0x8000
#define YDSXG_AC97WRITECMD 0x0000
+#define PCIR_LEGCTRL 0x40
+#define PCIR_ELEGCTRL 0x42
#define PCIR_DSXGCTRL 0x48
+#define PCIR_OPLADR 0x60
+#define PCIR_SBADR 0x62
+#define PCIR_MPUADR 0x64
#define YDSXG_DSPLENGTH 0x0080
#define YDSXG_CTRLLENGTH 0x3000
@@ -185,8 +191,8 @@ typedef struct stru_ymfpci_playback_bank {
} ymfpci_playback_bank_t;
typedef struct stru_ymfpci_capture_bank {
- u32 base; /* 32-bit address */
- u32 loop_end; /* 32-bit offset */
+ u32 base; /* 32-bit address (aligned at 4) */
+ u32 loop_end; /* size in BYTES (aligned at 4) */
u32 start; /* 32-bit offset */
u32 num_of_loops; /* counter */
} ymfpci_capture_bank_t;
@@ -198,8 +204,7 @@ typedef struct stru_ymfpci_effect_bank {
u32 temp;
} ymfpci_effect_bank_t;
-typedef struct stru_ymfpci_voice ymfpci_voice_t;
-typedef struct ymf_pcm ymfpci_pcm_t;
+typedef struct ymf_voice ymfpci_voice_t;
/*
* Throughout the code Yaroslav names YMF unit pointer "codec"
* even though it does not correspond to any codec. Must be historic.
@@ -214,52 +219,35 @@ typedef enum {
YMFPCI_MIDI
} ymfpci_voice_type_t;
-struct stru_ymfpci_voice {
- ymfpci_t *codec;
+struct ymf_voice {
+ // ymfpci_t *codec;
int number;
- int use: 1,
- pcm: 1,
- synth: 1,
- midi: 1;
+ char use, pcm, synth, midi; // bool
ymfpci_playback_bank_t *bank;
- void (*interrupt)(ymfpci_t *codec, ymfpci_voice_t *voice);
- ymfpci_pcm_t *ypcm;
+ struct ymf_pcm *ypcm;
};
-typedef enum {
- PLAYBACK_VOICE,
- CAPTURE_REC,
- CAPTURE_AC97,
- EFFECT_DRY_LEFT,
- EFFECT_DRY_RIGHT,
- EFFECT_EFF1,
- EFFECT_EFF2,
- EFFECT_EFF3
-} ymfpci_pcm_type_t;
-
-struct ymf_pcm {
- ymfpci_t *codec;
- ymfpci_pcm_type_t type;
- struct ymf_state *state;
- ymfpci_voice_t *voices[2]; /* playback only */
- int running; // +
- int spdif;
+struct ymf_capture {
+ // struct ymf_unit *unit;
+ int use;
+ ymfpci_capture_bank_t *bank;
+ struct ymf_pcm *ypcm;
};
struct ymf_unit {
u8 rev; /* PCI revision */
void *reg_area_virt;
- void *work_ptr; // +
+ void *work_ptr;
unsigned int bank_size_playback;
unsigned int bank_size_capture;
unsigned int bank_size_effect;
unsigned int work_size;
- void *bank_base_playback; // +
- void *bank_base_capture; // +
- void *bank_base_effect; // +
- void *work_base; // +
+ void *bank_base_playback;
+ void *bank_base_capture;
+ void *bank_base_effect;
+ void *work_base;
u32 *ctrl_playback;
ymfpci_playback_bank_t *bank_playback[YDSXG_PLAYBACK_VOICES][2];
@@ -269,13 +257,20 @@ struct ymf_unit {
int start_count;
u32 active_bank;
- ymfpci_voice_t voices[64];
+ struct ymf_voice voices[64];
+ struct ymf_capture capture[5];
struct ac97_codec *ac97_codec[NR_AC97];
u16 ac97_features;
struct pci_dev *pci;
+#ifdef CONFIG_SOUND_YMFPCI_LEGACY
+ /* legacy hardware resources */
+ unsigned int iosynth, iomidi;
+ struct address_info opl3_data, mpu_data;
+#endif
+
spinlock_t reg_lock;
spinlock_t voice_lock;
@@ -284,14 +279,11 @@ struct ymf_unit {
struct semaphore open_sem;
struct list_head ymf_devs;
- struct ymf_state *states[1]; // *
- /* ypcm may be the same thing as state, but not for record, effects. */
+ struct list_head states; /* List of states for this unit */
+ /* For the moment we do not traverse list of states so it is
+ * entirely useless. Will be used (PM) or killed. XXX */
};
-/*
- * "Software" or virtual channel, an instance of opened /dev/dsp.
- */
-
struct ymf_dmabuf {
/* OSS buffer management stuff */
@@ -312,7 +304,6 @@ struct ymf_dmabuf {
/* redundant, but makes calculations easier */
unsigned fragsize;
unsigned dmasize; /* Total rawbuf[] size */
- unsigned fragsamples;
/* OSS stuff */
unsigned mapped:1;
@@ -329,15 +320,40 @@ struct ymf_pcm_format {
int shift; /* redundant, computed from the above */
};
-struct ymf_state {
- struct ymf_unit *unit; /* backpointer */
+typedef enum {
+ PLAYBACK_VOICE,
+ CAPTURE_REC,
+ CAPTURE_AC97,
+ EFFECT_DRY_LEFT,
+ EFFECT_DRY_RIGHT,
+ EFFECT_EFF1,
+ EFFECT_EFF2,
+ EFFECT_EFF3
+} ymfpci_pcm_type_t;
- /* virtual channel number */
- int virt; // * unused a.t.m.
+/* This is variant record, but we hate unions. Little waste on pointers []. */
+struct ymf_pcm {
+ ymfpci_pcm_type_t type;
+ struct ymf_state *state;
+
+ ymfpci_voice_t *voices[2];
+ int capture_bank_number;
+
+ struct ymf_dmabuf dmabuf;
+ int running;
+ int spdif;
+};
- struct ymf_pcm ypcm; // *
- struct ymf_dmabuf dmabuf; // *
- struct ymf_pcm_format format; // *
+/*
+ * "Software" or virtual channel, an instance of opened /dev/dsp.
+ * It may have two physical channels (pcms) for duplex operations.
+ */
+
+struct ymf_state {
+ struct list_head chain;
+ struct ymf_unit *unit; /* backpointer */
+ struct ymf_pcm rpcm, wpcm;
+ struct ymf_pcm_format format;
};
#endif /* __YMFPCI_H */
diff --git a/drivers/usb/rio500.c b/drivers/usb/rio500.c
index 6d7b9043d..29b926056 100644
--- a/drivers/usb/rio500.c
+++ b/drivers/usb/rio500.c
@@ -247,6 +247,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd,
return 0;
err_out:
+ up(&(rio->lock));
return retval;
}
@@ -389,7 +390,7 @@ read_rio(struct file *file, char *buffer, size_t count, loff_t * ppos)
result, partial, this_read);
return -EIO;
} else {
- unlock_kernel();
+ up(&(rio->lock));
return (0);
}
diff --git a/drivers/usb/serial/Config.in b/drivers/usb/serial/Config.in
index 6d949bc0b..0032f6e15 100644
--- a/drivers/usb/serial/Config.in
+++ b/drivers/usb/serial/Config.in
@@ -22,6 +22,7 @@ if [ "$CONFIG_USB_SERIAL" != "n" ]; then
bool ' USB Keyspan USA-19 Firmware' CONFIG_USB_SERIAL_KEYSPAN_USA19
bool ' USB Keyspan USA-18X Firmware' CONFIG_USB_SERIAL_KEYSPAN_USA18X
bool ' USB Keyspan USA-19W Firmware' CONFIG_USB_SERIAL_KEYSPAN_USA19W
+ bool ' USB Keyspan USA-49W Firmware' CONFIG_USB_SERIAL_KEYSPAN_USA49W
fi
dep_tristate ' USB MCT Single Port Serial Driver (EXPERIMENTAL)' CONFIG_USB_SERIAL_MCT_U232 $CONFIG_USB_SERIAL $CONFIG_EXPERIMENTAL
dep_tristate ' USB ZyXEL omni.net LCD Plus Driver (EXPERIMENTAL)' CONFIG_USB_SERIAL_OMNINET $CONFIG_USB_SERIAL $CONFIG_EXPERIMENTAL
diff --git a/drivers/usb/serial/usbserial.c b/drivers/usb/serial/usbserial.c
index 1a07a03d5..3a3451e49 100644
--- a/drivers/usb/serial/usbserial.c
+++ b/drivers/usb/serial/usbserial.c
@@ -14,8 +14,14 @@
* based on a driver by Brad Keryan)
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
- *
- * (12/29/2000) gkh
+ *
+ * (12/12/2000) gkh
+ * Removed MOD_INC and MOD_DEC from poll and disconnect functions, and
+ * moved them to the serial_open and serial_close functions.
+ * Also fixed bug with there not being a MOD_DEC for the generic driver
+ * (thanks to Gary Brubaker for finding this.)
+ *
+ * (11/29/2000) gkh
* Small NULL pointer initialization cleanup which saves a bit of disk image
*
* (11/01/2000) Adam J. Richter
@@ -471,6 +477,8 @@ static int serial_open (struct tty_struct *tty, struct file * filp)
return -ENODEV;
}
+ MOD_INC_USE_COUNT;
+
/* set up our port structure making the tty driver remember our port object, and us it */
portNumber = MINOR(tty->device) - serial->minor;
port = &serial->port[portNumber];
@@ -508,6 +516,8 @@ static void serial_close(struct tty_struct *tty, struct file * filp)
} else {
generic_close(port, filp);
}
+
+ MOD_DEC_USE_COUNT;
}
@@ -721,12 +731,13 @@ static int generic_open (struct usb_serial_port *port, struct file *filp)
if (port_paranoia_check (port, __FUNCTION__))
return -ENODEV;
+ MOD_INC_USE_COUNT;
+
dbg(__FUNCTION__ " - port %d", port->number);
spin_lock_irqsave (&port->port_lock, flags);
++port->open_count;
- MOD_INC_USE_COUNT;
if (!port->active) {
port->active = 1;
@@ -776,6 +787,7 @@ static void generic_close (struct usb_serial_port *port, struct file * filp)
}
spin_unlock_irqrestore (&port->port_lock, flags);
+ MOD_DEC_USE_COUNT;
}
@@ -1069,7 +1081,6 @@ static void * usb_serial_probe(struct usb_device *dev, unsigned int ifnum,
}
/* found all that we need */
- MOD_INC_USE_COUNT;
info("%s converter detected", type->name);
#ifdef CONFIG_USB_SERIAL_GENERIC
@@ -1077,7 +1088,6 @@ static void * usb_serial_probe(struct usb_device *dev, unsigned int ifnum,
num_ports = num_bulk_out;
if (num_ports == 0) {
err("Generic device with no bulk out, not allowed.");
- MOD_DEC_USE_COUNT;
return NULL;
}
} else
@@ -1087,7 +1097,6 @@ static void * usb_serial_probe(struct usb_device *dev, unsigned int ifnum,
serial = get_free_serial (num_ports, &minor);
if (serial == NULL) {
err("No more free serial devices");
- MOD_DEC_USE_COUNT;
return NULL;
}
@@ -1233,7 +1242,6 @@ probe_error:
/* free up any memory that we allocated */
kfree (serial);
- MOD_DEC_USE_COUNT;
return NULL;
}
@@ -1300,7 +1308,6 @@ static void usb_serial_disconnect(struct usb_device *dev, void *ptr)
info("device disconnected");
}
- MOD_DEC_USE_COUNT;
}
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index e363916b9..d8a60a2a7 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -10,6 +10,10 @@
* (at your option) any later version.
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
+ *
+ * (12/12/2000) gkh
+ * Moved MOD_DEC to end of visor_close to be nicer, as the final write
+ * message can sleep.
*
* (11/12/2000) gkh
* Fixed bug with data being dropped on the floor by forcing tty->low_latency
@@ -214,7 +218,6 @@ static void visor_close (struct usb_serial_port *port, struct file * filp)
spin_lock_irqsave (&port->port_lock, flags);
--port->open_count;
- MOD_DEC_USE_COUNT;
if (port->open_count <= 0) {
transfer_buffer = kmalloc (0x12, GFP_KERNEL);
@@ -237,6 +240,8 @@ static void visor_close (struct usb_serial_port *port, struct file * filp)
/* Uncomment the following line if you want to see some statistics in your syslog */
/* info ("Bytes In = %d Bytes Out = %d", bytes_in, bytes_out); */
+
+ MOD_DEC_USE_COUNT;
}
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 5123f85f8..c5477dcaf 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,7 +1,7 @@
/* Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
*
- * $Id: debug.h,v 1.5 2000/09/04 02:12:47 groovyjava Exp $
+ * $Id: debug.h,v 1.6 2001/01/12 23:51:04 mdharm Exp $
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 241f7d5f6..5c50b578c 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -248,7 +248,7 @@ static int bus_reset( Scsi_Cmnd *srb )
for (i = 0; i < us->pusb_dev->actconfig->bNumInterfaces; i++) {
struct usb_interface *intf =
&us->pusb_dev->actconfig->interface[i];
- const struct usb_device_id *id;
+ struct usb_device_id *id;
/* if this is an unclaimed interface, skip it */
if (!intf->driver) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
new file mode 100644
index 000000000..d29307433
--- /dev/null
+++ b/drivers/usb/storage/unusual_devs.h
@@ -0,0 +1,232 @@
+/* Driver for USB Mass Storage compliant devices
+ * Unusual Devices File
+ *
+ * $Id: unusual_devs.h,v 1.1 2000/12/05 05:38:31 mdharm Exp $
+ *
+ * Current development and maintenance by:
+ * (c) 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
+ *
+ * Initial work by:
+ * (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
+ *
+ * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
+ * information about this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* IMPORTANT NOTE: This file must be included in another file which does
+ * the following thing for it to work:
+ * The macro UNUSUAL_DEV() must be defined before this file is included
+ */
+#include <linux/config.h>
+
+/* If you edit this file, please try to keep it sorted first by VendorID,
+ * then by ProductID.
+ */
+
+UNUSUAL_DEV( 0x03ee, 0x0000, 0x0000, 0x0245,
+ "Mitsumi",
+ "CD-R/RW Drive",
+ US_SC_8020, US_PR_CBI, NULL, 0),
+
+UNUSUAL_DEV( 0x03f0, 0x0107, 0x0200, 0x0200,
+ "HP",
+ "CD-Writer+",
+ US_SC_8070, US_PR_CB, NULL, 0),
+
+#ifdef CONFIG_USB_STORAGE_HP8200e
+UNUSUAL_DEV( 0x03f0, 0x0207, 0x0001, 0x0001,
+ "HP",
+ "CD-Writer+ 8200e",
+ US_SC_8070, US_PR_SCM_ATAPI, init_8200e, 0),
+#endif
+
+UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200,
+ "Matshita",
+ "LS-120",
+ US_SC_8020, US_PR_CB, NULL, 0),
+
+UNUSUAL_DEV( 0x04e6, 0x0002, 0x0100, 0x0100,
+ "Shuttle",
+ "eUSCSI Bridge",
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+#ifdef CONFIG_USB_STORAGE_SDDR09
+UNUSUAL_DEV( 0x04e6, 0x0003, 0x0000, 0x9999,
+ "Sandisk",
+ "ImageMate SDDR09",
+ US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP ),
+#endif
+
+#ifdef CONFIG_USB_STORAGE_DPCM
+UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
+ "Microtech",
+ "CameraMate (DPCM_USB)",
+ US_SC_SCSI, US_PR_DPCM_USB, NULL,
+ US_FL_START_STOP ),
+#endif
+
+UNUSUAL_DEV( 0x04e6, 0x0006, 0x0100, 0x0200,
+ "Shuttle",
+ "eUSB MMC Adapter",
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN),
+
+UNUSUAL_DEV( 0x04e6, 0x0007, 0x0100, 0x0200,
+ "Sony",
+ "Hifd",
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN),
+
+UNUSUAL_DEV( 0x04e6, 0x0009, 0x0200, 0x0200,
+ "Shuttle",
+ "eUSB ATA/ATAPI Adapter",
+ US_SC_8020, US_PR_CB, NULL, 0),
+
+UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200,
+ "Shuttle",
+ "eUSB CompactFlash Adapter",
+ US_SC_8020, US_PR_CB, NULL, 0),
+
+UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100,
+ "Shuttle",
+ "eUSCSI Bridge",
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100,
+ "Shuttle",
+ "eUSCSI Bridge",
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
+ "Shuttle",
+ "CD-RW Device",
+ US_SC_8020, US_PR_CB, NULL, 0),
+
+UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0210,
+ "Sony",
+ "DSC-S30/S70/505V/F505",
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP | US_FL_MODE_XLATE ),
+
+UNUSUAL_DEV( 0x054c, 0x002d, 0x0100, 0x0100,
+ "Sony",
+ "Memorystick MSAC-US1",
+ US_SC_UFI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP ),
+
+UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
+ "Y-E Data",
+ "Flashbuster-U",
+ US_SC_UFI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN),
+
+UNUSUAL_DEV( 0x057b, 0x0000, 0x0300, 0x9999,
+ "Y-E Data",
+ "Flashbuster-U",
+ US_SC_UFI, US_PR_CBI, NULL,
+ US_FL_SINGLE_LUN),
+
+UNUSUAL_DEV( 0x059f, 0xa601, 0x0200, 0x0200,
+ "LaCie",
+ "USB Hard Disk",
+ US_SC_RBC, US_PR_CB, NULL, 0 ),
+
+UNUSUAL_DEV( 0x05ab, 0x0031, 0x0100, 0x0100,
+ "In-System",
+ "USB/IDE Bridge (ATAPI ONLY!)",
+ US_SC_8070, US_PR_BULK, NULL, 0 ),
+
+UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100,
+ "TEAC",
+ "Floppy Drive",
+ US_SC_UFI, US_PR_CB, NULL, 0 ),
+
+#ifdef CONFIG_USB_STORAGE_SDDR09
+UNUSUAL_DEV( 0x066b, 0x0105, 0x0100, 0x0100,
+ "Olympus",
+ "Camedia MAUSB-2",
+ US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP ),
+#endif
+
+UNUSUAL_DEV( 0x0693, 0x0002, 0x0100, 0x0100,
+ "Hagiwara",
+ "FlashGate SmartMedia",
+ US_SC_SCSI, US_PR_BULK, NULL, 0 ),
+
+UNUSUAL_DEV( 0x0693, 0x0005, 0x0100, 0x0100,
+ "Hagiwara",
+ "Flashgate",
+ US_SC_SCSI, US_PR_BULK, NULL, 0 ),
+
+UNUSUAL_DEV( 0x0781, 0x0001, 0x0200, 0x0200,
+ "Sandisk",
+ "ImageMate SDDR-05a",
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP),
+
+UNUSUAL_DEV( 0x0781, 0x0100, 0x0100, 0x0100,
+ "Sandisk",
+ "ImageMate SDDR-12",
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN ),
+
+#ifdef CONFIG_USB_STORAGE_SDDR09
+UNUSUAL_DEV( 0x0781, 0x0200, 0x0100, 0x0100,
+ "Sandisk",
+ "ImageMate SDDR-09",
+ US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
+ US_FL_SINGLE_LUN | US_FL_START_STOP ),
+#endif
+
+UNUSUAL_DEV( 0x0781, 0x0002, 0x0009, 0x0009,
+ "Sandisk",
+ "ImageMate SDDR-31",
+ US_SC_SCSI, US_PR_BULK, NULL,
+ US_FL_IGNORE_SER),
+
+UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0100,
+ "Microtech",
+ "USB-SCSI-DB25",
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+#ifdef CONFIG_USB_STORAGE_FREECOM
+UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999,
+ "Freecom",
+ "USB-IDE",
+ US_SC_QIC, US_PR_FREECOM, freecom_init, 0),
+#endif
+
+UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
+ "Microtech",
+ "USB-SCSI-HD50",
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+#ifdef CONFIG_USB_STORAGE_DPCM
+UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
+ "Microtech",
+ "CameraMate (DPCM_USB)",
+ US_SC_SCSI, US_PR_DPCM_USB, NULL,
+ US_FL_START_STOP ),
+#endif
+
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 6c254c701..1e0f40f50 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1,6 +1,6 @@
/* Driver for USB Mass Storage compliant devices
*
- * $Id: usb.c,v 1.57 2000/11/21 02:56:41 mdharm Exp $
+ * $Id: usb.c,v 1.61 2001/01/13 00:10:59 mdharm Exp $
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -11,6 +11,9 @@
* Initial work by:
* (c) 1999 Michael Gee (michael@linuxspecific.com)
*
+ * usb_device_id support by Adam J. Richter (adam@yggdrasil.com):
+ * (c) 2000 Yggdrasil Computing, Inc.
+ *
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
@@ -95,11 +98,133 @@ struct semaphore us_list_semaphore;
static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
const struct usb_device_id *id);
+
static void storage_disconnect(struct usb_device *dev, void *ptr);
+
+/* The entries in this table, except for final ones here
+ * (USB_MASS_STORAGE_CLASS and the empty entry), correspond,
+ * line for line with the entries of us_unusual_dev_list[].
+ * For now, we duplicate idVendor and idProduct in us_unusual_dev_list,
+ * just to avoid alignment bugs.
+ */
+
+#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName,useProtocol, useTransport, \
+ initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax) }
+
+static struct usb_device_id storage_usb_ids [] = {
+
+# include "unusual_devs.h"
+#undef UNUSUAL_DEV
+ /* Control/Bulk transport for all SubClass values */
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_RBC, US_PR_CB) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8020, US_PR_CB) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_QIC, US_PR_CB) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_UFI, US_PR_CB) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8070, US_PR_CB) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_CB) },
+
+ /* Control/Bulk/Interrupt transport for all SubClass values */
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_RBC, US_PR_CBI) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8020, US_PR_CBI) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_QIC, US_PR_CBI) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_UFI, US_PR_CBI) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8070, US_PR_CBI) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_CBI) },
+
+ /* Bulk-only transport for all SubClass values */
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_RBC, US_PR_BULK) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8020, US_PR_BULK) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_QIC, US_PR_BULK) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_UFI, US_PR_BULK) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_8070, US_PR_BULK) },
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
+
+ /* Terminating entry */
+ { }
+};
+
+MODULE_DEVICE_TABLE (usb, storage_usb_ids);
+
+/* This is the list of devices we recognize, along with their flag data */
+
+/* The vendor name should be kept at eight characters or less, and
+ * the product name should be kept at 16 characters or less. If a device
+ * has the US_FL_DUMMY_INQUIRY flag, then the vendor and product names
+ * normally generated by a device through the INQUIRY response will be
+ * taken from this list, and this is the reason for the above size
+ * restriction. However, if the flag is not present, then you
+ * are free to use as many characters as you like.
+ */
+
+#undef UNUSUAL_DEV
+#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
+ vendor_name, product_name, use_protocol, use_transport, \
+ init_function, Flags) \
+{ \
+ vendorName: vendor_name, \
+ productName: product_name, \
+ useProtocol: use_protocol, \
+ useTransport: use_transport, \
+ initFunction : init_function, \
+ flags: Flags, \
+}
+
+static struct us_unusual_dev us_unusual_dev_list[] = {
+# include "unusual_devs.h"
+# undef UNUSUAL_DEV
+ /* Control/Bulk transport for all SubClass values */
+ { useProtocol: US_SC_RBC,
+ useTransport: US_PR_CB},
+ { useProtocol: US_SC_8020,
+ useTransport: US_PR_CB},
+ { useProtocol: US_SC_QIC,
+ useTransport: US_PR_CB},
+ { useProtocol: US_SC_UFI,
+ useTransport: US_PR_CB},
+ { useProtocol: US_SC_8070,
+ useTransport: US_PR_CB},
+ { useProtocol: US_SC_SCSI,
+ useTransport: US_PR_CB},
+
+ /* Control/Bulk/Interrupt transport for all SubClass values */
+ { useProtocol: US_SC_RBC,
+ useTransport: US_PR_CBI},
+ { useProtocol: US_SC_8020,
+ useTransport: US_PR_CBI},
+ { useProtocol: US_SC_QIC,
+ useTransport: US_PR_CBI},
+ { useProtocol: US_SC_UFI,
+ useTransport: US_PR_CBI},
+ { useProtocol: US_SC_8070,
+ useTransport: US_PR_CBI},
+ { useProtocol: US_SC_SCSI,
+ useTransport: US_PR_CBI},
+
+ /* Bulk-only transport for all SubClass values */
+ { useProtocol: US_SC_RBC,
+ useTransport: US_PR_BULK},
+ { useProtocol: US_SC_8020,
+ useTransport: US_PR_BULK},
+ { useProtocol: US_SC_QIC,
+ useTransport: US_PR_BULK},
+ { useProtocol: US_SC_UFI,
+ useTransport: US_PR_BULK},
+ { useProtocol: US_SC_8070,
+ useTransport: US_PR_BULK},
+ { useProtocol: US_SC_SCSI,
+ useTransport: US_PR_BULK},
+
+ /* Terminating entry */
+ { 0 }
+};
+
struct usb_driver usb_storage_driver = {
name: "usb-storage",
probe: storage_probe,
disconnect: storage_disconnect,
+ id_table: storage_usb_ids,
};
/*
@@ -325,246 +450,6 @@ static int usb_stor_control_thread(void * __us)
return 0;
}
-/* This is the list of devices we recognize, along with their flag data */
-
-/* The vendor name should be kept at eight characters or less, and
- * the product name should be kept at 16 characters or less. If a device
- * has the US_FL_DUMMY_INQUIRY flag, then the vendor and product names
- * normally generated by a device thorugh the INQUIRY response will be
- * taken from this list, and this is the reason for the above size
- * restriction. However, if the flag is not present, then you
- * are free to use as many characters as you like.
- */
-static struct us_unusual_dev us_unusual_dev_list[] = {
-
- { 0x03ee, 0x0000, 0x0000, 0x0245,
- "Mitsumi",
- "CD-R/RW Drive",
- US_SC_8020, US_PR_CBI, NULL, 0},
-
- { 0x03f0, 0x0107, 0x0200, 0x0200,
- "HP",
- "CD-Writer+",
- US_SC_8070, US_PR_CB, NULL, 0},
-
-#ifdef CONFIG_USB_STORAGE_HP8200e
- { 0x03f0, 0x0207, 0x0001, 0x0001,
- "HP",
- "CD-Writer+ 8200e",
- US_SC_8070, US_PR_SCM_ATAPI, init_8200e, 0},
-#endif
-
- { 0x04e6, 0x0001, 0x0200, 0x0200,
- "Matshita",
- "LS-120",
- US_SC_8020, US_PR_CB, NULL, 0},
-
- { 0x04e6, 0x0002, 0x0100, 0x0100,
- "Shuttle",
- "eUSCSI Bridge",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG },
-
-#ifdef CONFIG_USB_STORAGE_SDDR09
- { 0x04e6, 0x0003, 0x0000, 0x9999,
- "Sandisk",
- "ImageMate SDDR09",
- US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP },
-#endif
-
-#ifdef CONFIG_USB_STORAGE_DPCM
- { 0x0436, 0x0005, 0x0100, 0x0100,
- "Microtech",
- "CameraMate (DPCM_USB)",
- US_SC_SCSI, US_PR_DPCM_USB, NULL,
- US_FL_START_STOP },
-#endif
-
- { 0x04e6, 0x0006, 0x0100, 0x0200,
- "Shuttle",
- "eUSB MMC Adapter",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN},
-
- { 0x04e6, 0x0007, 0x0100, 0x0200,
- "Sony",
- "Hifd",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN},
-
- { 0x04e6, 0x0009, 0x0200, 0x0200,
- "Shuttle",
- "eUSB ATA/ATAPI Adapter",
- US_SC_8020, US_PR_CB, NULL, 0},
-
- { 0x04e6, 0x000a, 0x0200, 0x0200,
- "Shuttle",
- "eUSB CompactFlash Adapter",
- US_SC_8020, US_PR_CB, NULL, 0},
-
- { 0x04e6, 0x000B, 0x0100, 0x0100,
- "Shuttle",
- "eUSCSI Bridge",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG },
-
- { 0x04e6, 0x000C, 0x0100, 0x0100,
- "Shuttle",
- "eUSCSI Bridge",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG },
-
- { 0x04e6, 0x0101, 0x0200, 0x0200,
- "Shuttle",
- "CD-RW Device",
- US_SC_8020, US_PR_CB, NULL, 0},
-
- { 0x054c, 0x0010, 0x0106, 0x0210,
- "Sony",
- "DSC-S30/S70/505V/F505",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP | US_FL_MODE_XLATE },
-
- { 0x054c, 0x002d, 0x0100, 0x0100,
- "Sony",
- "Memorystick MSAC-US1",
- US_SC_UFI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP },
-
- { 0x057b, 0x0000, 0x0000, 0x0299,
- "Y-E Data",
- "Flashbuster-U",
- US_SC_UFI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN},
-
- { 0x057b, 0x0000, 0x0300, 0x9999,
- "Y-E Data",
- "Flashbuster-U",
- US_SC_UFI, US_PR_CBI, NULL,
- US_FL_SINGLE_LUN},
-
- { 0x059f, 0xa601, 0x0200, 0x0200,
- "LaCie",
- "USB Hard Disk",
- US_SC_RBC, US_PR_CB, NULL, 0 },
-
- { 0x05ab, 0x0031, 0x0100, 0x0100,
- "In-System",
- "USB/IDE Bridge (ATAPI ONLY!)",
- US_SC_8070, US_PR_BULK, NULL, 0 },
-
- { 0x0644, 0x0000, 0x0100, 0x0100,
- "TEAC",
- "Floppy Drive",
- US_SC_UFI, US_PR_CB, NULL, 0 },
-
-#ifdef CONFIG_USB_STORAGE_SDDR09
- { 0x066b, 0x0105, 0x0100, 0x0100,
- "Olympus",
- "Camedia MAUSB-2",
- US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP },
-#endif
-
- { 0x0693, 0x0002, 0x0100, 0x0100,
- "Hagiwara",
- "FlashGate SmartMedia",
- US_SC_SCSI, US_PR_BULK, NULL, 0 },
-
- { 0x0693, 0x0005, 0x0100, 0x0100,
- "Hagiwara",
- "Flashgate",
- US_SC_SCSI, US_PR_BULK, NULL, 0 },
-
- { 0x0781, 0x0001, 0x0200, 0x0200,
- "Sandisk",
- "ImageMate SDDR-05a",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP},
-
- { 0x0781, 0x0100, 0x0100, 0x0100,
- "Sandisk",
- "ImageMate SDDR-12",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN },
-
-#ifdef CONFIG_USB_STORAGE_SDDR09
- { 0x0781, 0x0200, 0x0100, 0x0208,
- "Sandisk",
- "ImageMate SDDR-09",
- US_SC_SCSI, US_PR_EUSB_SDDR09, NULL,
- US_FL_SINGLE_LUN | US_FL_START_STOP },
-#endif
-
- { 0x0781, 0x0002, 0x0009, 0x0009,
- "Sandisk",
- "ImageMate SDDR-31",
- US_SC_SCSI, US_PR_BULK, NULL,
- US_FL_IGNORE_SER},
-
- { 0x07af, 0x0004, 0x0100, 0x0100,
- "Microtech",
- "USB-SCSI-DB25",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG },
-
-#ifdef CONFIG_USB_STORAGE_FREECOM
- { 0x07ab, 0xfc01, 0x0000, 0x9999,
- "Freecom",
- "USB-IDE",
- US_SC_QIC, US_PR_FREECOM, freecom_init, 0},
-#endif
-
- { 0x07af, 0x0005, 0x0100, 0x0100,
- "Microtech",
- "USB-SCSI-HD50",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG },
-
-#ifdef CONFIG_USB_STORAGE_DPCM
- { 0x07af, 0x0006, 0x0100, 0x0100,
- "Microtech",
- "CameraMate (DPCM_USB)",
- US_SC_SCSI, US_PR_DPCM_USB, NULL,
- US_FL_START_STOP },
-#endif
- { 0 }
-};
-
-/* Search our ususual device list, based on vendor/product combinations
- * to see if we can support this device. Returns a pointer to a structure
- * defining how we should support this device, or NULL if it's not in the
- * list
- */
-static struct us_unusual_dev* us_find_dev(u16 idVendor, u16 idProduct,
- u16 bcdDevice)
-{
- struct us_unusual_dev* ptr;
-
- US_DEBUGP("Searching unusual device list for (0x%x, 0x%x, 0x%x)...\n",
- idVendor, idProduct, bcdDevice);
-
- ptr = us_unusual_dev_list;
- while ((ptr->idVendor != 0x0000) &&
- !((ptr->idVendor == idVendor) &&
- (ptr->idProduct == idProduct) &&
- (ptr->bcdDeviceMin <= bcdDevice) &&
- (ptr->bcdDeviceMax >= bcdDevice)))
- ptr++;
-
- /* if the search ended because we hit the end record, we failed */
- if (ptr->idVendor == 0x0000) {
- US_DEBUGP("-- did not find a matching device\n");
- return NULL;
- }
-
- /* otherwise, we found one! */
- US_DEBUGP("-- found matching device: %s %s\n", ptr->vendorName,
- ptr->productName);
- return ptr;
-}
-
/* Set up the IRQ pipe and handler
* Note that this function assumes that all the data in the us_data
* strucuture is current. This includes the ep_int field, which gives us
@@ -620,6 +505,7 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
const struct usb_device_id *id)
{
int i;
+ const int id_index = id - storage_usb_ids;
char mf[USB_STOR_STRING_LEN]; /* manufacturer */
char prod[USB_STOR_STRING_LEN]; /* product */
char serial[USB_STOR_STRING_LEN]; /* serial number */
@@ -640,47 +526,48 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
u8 subclass = 0;
u8 protocol = 0;
- /* the altsettting 0 on the interface we're probing */
- struct usb_interface_descriptor *altsetting =
- &(dev->actconfig->interface[ifnum].altsetting[0]);
+ /* the altsettting on the interface we're probing that matched our
+ * usb_match_id table
+ */
+ struct usb_interface *intf = dev->actconfig->interface;
+ struct usb_interface_descriptor *altsetting =
+ intf[ifnum].altsetting + intf[ifnum].act_altsetting;
+ US_DEBUGP("act_altsettting is %d\n", intf[ifnum].act_altsetting);
/* clear the temporary strings */
memset(mf, 0, sizeof(mf));
memset(prod, 0, sizeof(prod));
memset(serial, 0, sizeof(serial));
- /* search for this device in our unusual device list */
- unusual_dev = us_find_dev(dev->descriptor.idVendor,
- dev->descriptor.idProduct,
- dev->descriptor.bcdDevice);
-
/*
* Can we support this device, either because we know about it
* from our unusual device list, or because it advertises that it's
* compliant to the specification?
+ *
+ * id_index is calculated in the declaration to be the index number
+ * of the match from the usb_device_id table, so we can find the
+ * corresponding entry in the private table.
*/
- if (!unusual_dev &&
- !(dev->descriptor.bDeviceClass == 0 &&
- altsetting->bInterfaceClass == USB_CLASS_MASS_STORAGE &&
- altsetting->bInterfaceSubClass >= US_SC_MIN &&
- altsetting->bInterfaceSubClass <= US_SC_MAX)) {
- /* if it's not a mass storage, we go no further */
+ US_DEBUGP("id_index calculated to be: %d\n", id_index);
+ US_DEBUGP("Array length appears to be: %d\n", sizeof(us_unusual_dev_list) / sizeof(us_unusual_dev_list[0]));
+ if (id_index <
+ sizeof(us_unusual_dev_list) / sizeof(us_unusual_dev_list[0])) {
+ unusual_dev = &us_unusual_dev_list[id_index];
+ if (unusual_dev->vendorName)
+ US_DEBUGP("Vendor: %s\n", unusual_dev->vendorName);
+ if (unusual_dev->productName)
+ US_DEBUGP("Product: %s\n", unusual_dev->productName);
+ } else
+ /* no, we can't support it */
return NULL;
- }
/* At this point, we know we've got a live one */
US_DEBUGP("USB Mass Storage device detected\n");
/* Determine subclass and protocol, or copy from the interface */
- if (unusual_dev) {
- subclass = unusual_dev->useProtocol;
- protocol = unusual_dev->useTransport;
- flags = unusual_dev->flags;
- } else {
- subclass = altsetting->bInterfaceSubClass;
- protocol = altsetting->bInterfaceProtocol;
- flags = 0;
- }
+ subclass = unusual_dev->useProtocol;
+ protocol = unusual_dev->useTransport;
+ flags = unusual_dev->flags;
/*
* Find the endpoints we need
@@ -728,7 +615,7 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
/* Do some basic sanity checks, and bail if we find a problem */
if (!ep_in || !ep_out || (protocol == US_PR_CBI && !ep_int)) {
- US_DEBUGP("Sanity check failed. Rejecting device.\n");
+ US_DEBUGP("Endpoint sanity check failed! Rejecting dev.\n");
return NULL;
}
@@ -861,7 +748,7 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
strncpy(ss->product, prod, USB_STOR_STRING_LEN);
strncpy(ss->serial, serial, USB_STOR_STRING_LEN);
if (strlen(ss->vendor) == 0) {
- if (unusual_dev)
+ if (unusual_dev->vendorName)
strncpy(ss->vendor, unusual_dev->vendorName,
USB_STOR_STRING_LEN);
else
@@ -869,7 +756,7 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
USB_STOR_STRING_LEN);
}
if (strlen(ss->product) == 0) {
- if (unusual_dev)
+ if (unusual_dev->productName)
strncpy(ss->product, unusual_dev->productName,
USB_STOR_STRING_LEN);
else
@@ -1118,6 +1005,8 @@ static void storage_disconnect(struct usb_device *dev, void *ptr)
int __init usb_stor_init(void)
{
+ printk(KERN_INFO "Initializing USB Mass Storage driver...\n");
+
/* initialize internal global data elements */
us_list = NULL;
init_MUTEX(&us_list_semaphore);
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index e0dac568d..9c8076c8d 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,7 +1,7 @@
/* Driver for USB Mass Storage compliant devices
* Main Header File
*
- * $Id: usb.h,v 1.11 2000/11/13 22:38:55 mdharm Exp $
+ * $Id: usb.h,v 1.12 2000/12/05 03:33:49 mdharm Exp $
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -84,13 +84,6 @@ struct us_data;
*/
struct us_unusual_dev {
- /* we search the list based on these parameters */
- __u16 idVendor;
- __u16 idProduct;
- __u16 bcdDeviceMin;
- __u16 bcdDeviceMax;
-
- /* the list specifies these parameters */
const char* vendorName;
const char* productName;
__u8 useProtocol;
diff --git a/drivers/video/sbusfb.c b/drivers/video/sbusfb.c
index f9fa0d213..4d5b66386 100644
--- a/drivers/video/sbusfb.c
+++ b/drivers/video/sbusfb.c
@@ -678,7 +678,7 @@ static int sbusfb_ioctl(struct inode *inode, struct file *file, u_int cmd,
int end, count, index;
struct fbcmap *cmap;
- if (!fb->loadcmap)
+ if (!fb->loadcmap || !fb->color_map)
return -EINVAL;
i = verify_area (VERIFY_READ, (void *) arg, sizeof (struct fbcmap));
if (i) return i;
@@ -1110,6 +1110,8 @@ sizechange:
}
if (!p) {
+ if (fb->color_map)
+ kfree(fb->color_map);
kfree(fb);
return;
}
@@ -1147,6 +1149,8 @@ sizechange:
sbusfb_set_var(var, -1, &fb->info);
if (register_framebuffer(&fb->info) < 0) {
+ if (fb->color_map)
+ kfree(fb->color_map);
kfree(fb);
return;
}
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 8833cbeac..b494e25c6 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -461,7 +461,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
{
u_long length;
- length = (xres_virtual+bpp-1)/bpp;
+ length = xres_virtual*bpp;
length = (length+31)&-32;
length >>= 3;
return(length);
diff --git a/fs/Config.in b/fs/Config.in
index 53380d067..19430baa6 100644
--- a/fs/Config.in
+++ b/fs/Config.in
@@ -8,6 +8,8 @@ bool 'Quota support' CONFIG_QUOTA
tristate 'Kernel automounter support' CONFIG_AUTOFS_FS
tristate 'Kernel automounter version 4 support (also supports v3)' CONFIG_AUTOFS4_FS
+dep_tristate 'Reiserfs support' CONFIG_REISERFS_FS $CONFIG_EXPERIMENTAL
+dep_mbool ' Have reiserfs do extra internal checking' CONFIG_REISERFS_CHECK $CONFIG_REISERFS_FS $CONFIG_EXPERIMENTAL
dep_tristate 'ADFS file system support' CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
dep_mbool ' ADFS write support (DANGEROUS)' CONFIG_ADFS_FS_RW $CONFIG_ADFS_FS $CONFIG_EXPERIMENTAL
diff --git a/fs/Makefile b/fs/Makefile
index d63fd61bd..738e4418c 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -58,6 +58,7 @@ subdir-$(CONFIG_UDF_FS) += udf
subdir-$(CONFIG_AUTOFS_FS) += autofs
subdir-$(CONFIG_AUTOFS4_FS) += autofs4
subdir-$(CONFIG_ADFS_FS) += adfs
+subdir-$(CONFIG_REISERFS_FS) += reiserfs
subdir-$(CONFIG_DEVPTS_FS) += devpts
subdir-$(CONFIG_SUN_OPENPROMFS) += openpromfs
diff --git a/fs/buffer.c b/fs/buffer.c
index 0096524bf..bea5f09e5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -834,6 +834,10 @@ still_busy:
return;
}
+void set_buffer_async_io(struct buffer_head *bh) {
+ bh->b_end_io = end_buffer_io_async ;
+}
+
/*
* Synchronise all the inode's dirty buffers to the disk.
*
@@ -1151,7 +1155,7 @@ void __bforget(struct buffer_head * buf)
/* grab the lru lock here to block bdflush. */
spin_lock(&lru_list_lock);
write_lock(&hash_table_lock);
- if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf))
+ if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf) || buffer_protected(buf))
goto in_use;
__hash_unlink(buf);
remove_inode_queue(buf);
@@ -2411,6 +2415,7 @@ busy_buffer_page:
loop = 1;
goto cleaned_buffers_try_again;
}
+ wakeup_bdflush(0);
}
return 0;
}
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index c8dc3dd8f..12de14e7b 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -14,17 +14,21 @@
extern int coda_debug;
extern int coda_print_entry;
+static ViceFid NullFID = { 0, 0, 0 };
+
inline int coda_fideq(ViceFid *fid1, ViceFid *fid2)
{
- if (fid1->Vnode != fid2->Vnode)
- return 0;
- if (fid1->Volume != fid2->Volume)
- return 0;
- if (fid1->Unique != fid2->Unique)
- return 0;
+ if (fid1->Vnode != fid2->Vnode) return 0;
+ if (fid1->Volume != fid2->Volume) return 0;
+ if (fid1->Unique != fid2->Unique) return 0;
return 1;
}
+static int coda_inocmp(struct inode *inode, unsigned long ino, void *opaque)
+{
+ return (coda_fideq((ViceFid *)opaque, &(ITOC(inode)->c_fid)));
+}
+
static struct inode_operations coda_symlink_inode_operations = {
readlink: page_readlink,
follow_link: page_follow_link,
@@ -62,7 +66,8 @@ struct inode * coda_iget(struct super_block * sb, ViceFid * fid,
struct coda_inode_info *cii;
ino_t ino = attr->va_fileid;
- inode = iget(sb, ino);
+ inode = iget4(sb, ino, coda_inocmp, fid);
+
if ( !inode ) {
CDEBUG(D_CNODE, "coda_iget: no inode\n");
return ERR_PTR(-ENOMEM);
@@ -70,25 +75,16 @@ struct inode * coda_iget(struct super_block * sb, ViceFid * fid,
/* check if the inode is already initialized */
cii = ITOC(inode);
- if (cii->c_fid.Volume != 0 || cii->c_fid.Vnode != 0 || cii->c_fid.Unique != 0) {
- /* see if it is the right one (might have an inode collision) */
- if ( !coda_fideq(fid, &cii->c_fid) ) {
- printk("coda_iget: initialized inode old %s new %s!\n",
- coda_f2s(&cii->c_fid), coda_f2s2(fid));
- iput(inode);
- return ERR_PTR(-ENOENT);
- }
- /* we will still replace the attributes, type might have changed */
- goto out;
+ if (coda_fideq(&cii->c_fid, &NullFID)) {
+ /* new, empty inode found... initializing */
+ cii->c_fid = *fid;
+ cii->c_vnode = inode;
}
- /* new, empty inode found... initializing */
-
- /* Initialize the Coda inode info structure */
- cii->c_fid = *fid;
- cii->c_vnode = inode;
+ /* we shouldnt see inode collisions anymore */
+ if ( !coda_fideq(fid, &cii->c_fid) ) BUG();
-out:
+ /* always replace the attributes, type might have changed */
coda_fill_inode(inode, attr);
return inode;
}
@@ -107,7 +103,6 @@ int coda_cnode_make(struct inode **inode, ViceFid *fid, struct super_block *sb)
ENTRY;
/* We get inode numbers from Venus -- see venus source */
-
error = venus_getattr(sb, fid, &attr);
if ( error ) {
CDEBUG(D_CNODE,
@@ -183,7 +178,7 @@ struct inode *coda_fid_to_inode(ViceFid *fid, struct super_block *sb)
if ( coda_fideq(&cii->c_fid, fid) ) {
inode = cii->c_vnode;
CDEBUG(D_INODE, "volume root, found %ld\n", inode->i_ino);
- iget(sb, inode->i_ino);
+ iget4(sb, inode->i_ino, coda_inocmp, fid);
return inode;
}
}
@@ -192,7 +187,7 @@ struct inode *coda_fid_to_inode(ViceFid *fid, struct super_block *sb)
/* fid is not weird: ino should be computable */
nr = coda_f2i(fid);
- inode = iget(sb, nr);
+ inode = iget4(sb, nr, coda_inocmp, fid);
if ( !inode ) {
printk("coda_fid_to_inode: null from iget, sb %p, nr %ld.\n",
sb, (long)nr);
@@ -202,18 +197,11 @@ struct inode *coda_fid_to_inode(ViceFid *fid, struct super_block *sb)
/* check if this inode is linked to a cnode */
cii = ITOC(inode);
- /* make sure this is the one we want */
- if ( coda_fideq(fid, &cii->c_fid) ) {
- CDEBUG(D_INODE, "found %ld\n", inode->i_ino);
- return inode;
- }
-
-#if 0
- printk("coda_fid2inode: bad cnode (ino %ld, fid %s)", nr, coda_f2s(fid));
-#endif
- iput(inode);
- return NULL;
+ /* we shouldn't have inode collisions anymore */
+ if ( !coda_fideq(fid, &cii->c_fid) ) BUG();
+ CDEBUG(D_INODE, "found %ld\n", inode->i_ino);
+ return inode;
}
/* the CONTROL inode is made without asking attributes from Venus */
diff --git a/fs/exec.c b/fs/exec.c
index 07ee80d0c..e618a3f18 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -388,10 +388,8 @@ static int exec_mmap(void)
old_mm = current->mm;
if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
- flush_cache_mm(old_mm);
mm_release();
exit_mmap(old_mm);
- flush_tlb_mm(old_mm);
return 0;
}
@@ -407,6 +405,7 @@ static int exec_mmap(void)
/* Add it to the list of mm's */
spin_lock(&mmlist_lock);
list_add(&mm->mmlist, &init_mm.mmlist);
+ mmlist_nr++;
spin_unlock(&mmlist_lock);
task_lock(current);
@@ -938,12 +937,14 @@ int do_coredump(long signr, struct pt_regs * regs)
#else
corename[4] = '\0';
#endif
- file = filp_open(corename, O_CREAT | 2 | O_TRUNC | O_NOFOLLOW, 0600);
+ file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
if (IS_ERR(file))
goto fail;
inode = file->f_dentry->d_inode;
if (inode->i_nlink > 1)
goto close_fail; /* multiple links - don't dump */
+ if (d_unhashed(file->f_dentry))
+ goto close_fail;
if (!S_ISREG(inode->i_mode))
goto close_fail;
@@ -951,6 +952,8 @@ int do_coredump(long signr, struct pt_regs * regs)
goto close_fail;
if (!file->f_op->write)
goto close_fail;
+ if (do_truncate(file->f_dentry, 0) != 0)
+ goto close_fail;
if (!binfmt->core_dump(signr, regs, file))
goto close_fail;
unlock_kernel();
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 0cc0ee242..35874bf50 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -24,6 +24,7 @@
#endif
static struct fat_cache *fat_cache,cache[FAT_CACHE];
+static spinlock_t fat_cache_lock = SPIN_LOCK_UNLOCKED;
/* Returns the this'th FAT entry, -1 if it is an end-of-file entry. If
new_value is != -1, that FAT entry is replaced by it. */
@@ -133,13 +134,16 @@ int default_fat_access(struct super_block *sb,int nr,int new_value)
return next;
}
-
void fat_cache_init(void)
{
static int initialized = 0;
int count;
- if (initialized) return;
+ spin_lock(&fat_cache_lock);
+ if (initialized) {
+ spin_unlock(&fat_cache_lock);
+ return;
+ }
fat_cache = &cache[0];
for (count = 0; count < FAT_CACHE; count++) {
cache[count].device = 0;
@@ -147,6 +151,7 @@ void fat_cache_init(void)
&cache[count+1];
}
initialized = 1;
+ spin_unlock(&fat_cache_lock);
}
@@ -157,6 +162,7 @@ void fat_cache_lookup(struct inode *inode,int cluster,int *f_clu,int *d_clu)
if (!first)
return;
+ spin_lock(&fat_cache_lock);
for (walk = fat_cache; walk; walk = walk->next)
if (inode->i_dev == walk->device
&& walk->start_cluster == first
@@ -166,8 +172,12 @@ void fat_cache_lookup(struct inode *inode,int cluster,int *f_clu,int *d_clu)
#ifdef DEBUG
printk("cache hit: %d (%d)\n",walk->file_cluster,*d_clu);
#endif
- if ((*f_clu = walk->file_cluster) == cluster) return;
+ if ((*f_clu = walk->file_cluster) == cluster) {
+ spin_unlock(&fat_cache_lock);
+ return;
+ }
}
+ spin_unlock(&fat_cache_lock);
#ifdef DEBUG
printk("cache miss\n");
#endif
@@ -197,6 +207,7 @@ void fat_cache_add(struct inode *inode,int f_clu,int d_clu)
int first = MSDOS_I(inode)->i_start;
last = NULL;
+ spin_lock(&fat_cache_lock);
for (walk = fat_cache; walk->next; walk = (last = walk)->next)
if (inode->i_dev == walk->device
&& walk->start_cluster == first
@@ -204,17 +215,22 @@ void fat_cache_add(struct inode *inode,int f_clu,int d_clu)
if (walk->disk_cluster != d_clu) {
printk("FAT cache corruption inode=%ld\n",
inode->i_ino);
+ spin_unlock(&fat_cache_lock);
fat_cache_inval_inode(inode);
return;
}
/* update LRU */
- if (last == NULL) return;
+ if (last == NULL) {
+ spin_unlock(&fat_cache_lock);
+ return;
+ }
last->next = walk->next;
walk->next = fat_cache;
fat_cache = walk;
#ifdef DEBUG
list_cache();
#endif
+ spin_unlock(&fat_cache_lock);
return;
}
walk->device = inode->i_dev;
@@ -224,6 +240,7 @@ list_cache();
last->next = NULL;
walk->next = fat_cache;
fat_cache = walk;
+ spin_unlock(&fat_cache_lock);
#ifdef DEBUG
list_cache();
#endif
@@ -238,10 +255,12 @@ void fat_cache_inval_inode(struct inode *inode)
struct fat_cache *walk;
int first = MSDOS_I(inode)->i_start;
+ spin_lock(&fat_cache_lock);
for (walk = fat_cache; walk; walk = walk->next)
if (walk->device == inode->i_dev
&& walk->start_cluster == first)
walk->device = 0;
+ spin_unlock(&fat_cache_lock);
}
@@ -249,9 +268,11 @@ void fat_cache_inval_dev(kdev_t device)
{
struct fat_cache *walk;
+ spin_lock(&fat_cache_lock);
for (walk = fat_cache; walk; walk = walk->next)
if (walk->device == device)
walk->device = 0;
+ spin_unlock(&fat_cache_lock);
}
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 06f5d0783..aa09b0dd9 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -299,6 +299,7 @@ int hpfs_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error;
+ if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) return -EINVAL;
if (inode->i_sb->s_hpfs_root == inode->i_ino) return -EINVAL;
if ((error = inode_change_ok(inode, attr))) return error;
inode_setattr(inode, attr);
diff --git a/fs/inode.c b/fs/inode.c
index 1d2b23b70..d51bbddbe 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -136,6 +136,16 @@ void __mark_inode_dirty(struct inode *inode, int flags)
struct super_block * sb = inode->i_sb;
if (sb) {
+ /* Don't do this for I_DIRTY_PAGES - that doesn't actually dirty the inode itself */
+ if (flags & (I_DIRTY | I_DIRTY_SYNC)) {
+ if (sb->s_op && sb->s_op->dirty_inode)
+ sb->s_op->dirty_inode(inode);
+ }
+
+ /* avoid the locking if we can */
+ if ((inode->i_state & flags) == flags)
+ return;
+
spin_lock(&inode_lock);
if ((inode->i_state & flags) != flags) {
inode->i_state |= flags;
@@ -676,7 +686,17 @@ static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, s
spin_unlock(&inode_lock);
clean_inode(inode);
- sb->s_op->read_inode(inode);
+
+ /* reiserfs specific hack right here. We don't
+ ** want this to last, and are looking for VFS changes
+ ** that will allow us to get rid of it.
+ ** -- mason@suse.com
+ */
+ if (sb->s_op->read_inode2) {
+ sb->s_op->read_inode2(inode, opaque) ;
+ } else {
+ sb->s_op->read_inode(inode);
+ }
/*
* This is special! We do not need the spinlock
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index a187b1199..c16d93614 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -871,6 +871,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
goto out;
error = -EACCES;
+
+ if (S_ISREG(mode) &&
+ (server->m.flags & NCP_MOUNT_EXTRAS) &&
+ (mode & S_IXUGO))
+ attributes |= aSYSTEM;
+
result = ncp_open_create_file_or_subdir(server, dir, __name,
OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
attributes, AR_READ | AR_WRITE, &finfo);
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 171f0cb51..470ea5e96 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -456,7 +456,10 @@ static int ncp_do_request(struct ncp_server *server, int size,
spin_lock_irqsave(&current->sigmask_lock, flags);
old_set = current->blocked;
- mask = sigmask(SIGKILL) | sigmask(SIGSTOP);
+ if (current->flags & PF_EXITING)
+ mask = 0;
+ else
+ mask = sigmask(SIGKILL);
if (server->m.flags & NCP_MOUNT_INTR) {
/* FIXME: This doesn't seem right at all. So, like,
we can't handle SIGINT and get whatever to stop?
diff --git a/fs/nfs/flushd.c b/fs/nfs/flushd.c
index 700504ded..502b490b8 100644
--- a/fs/nfs/flushd.c
+++ b/fs/nfs/flushd.c
@@ -34,7 +34,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
-#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
#include <linux/nfs.h>
#include <linux/nfs_fs.h>
@@ -53,11 +53,6 @@
static struct rpc_wait_queue flushd_queue = RPC_INIT_WAITQ("nfs_flushd");
/*
- * Spinlock
- */
-spinlock_t nfs_flushd_lock = SPIN_LOCK_UNLOCKED;
-
-/*
* Local function declarations.
*/
static void nfs_flushd(struct rpc_task *);
@@ -68,19 +63,20 @@ int nfs_reqlist_init(struct nfs_server *server)
{
struct nfs_reqlist *cache;
struct rpc_task *task;
- int status = 0;
+ int status;
dprintk("NFS: writecache_init\n");
- spin_lock(&nfs_flushd_lock);
- cache = server->rw_requests;
- if (cache->task)
+ lock_kernel();
+ status = -ENOMEM;
+ /* Create the RPC task */
+ if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
goto out_unlock;
- /* Create the RPC task */
- status = -ENOMEM;
- task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
- if (!task)
+ cache = server->rw_requests;
+
+ status = 0;
+ if (cache->task)
goto out_unlock;
task->tk_calldata = server;
@@ -94,11 +90,13 @@ int nfs_reqlist_init(struct nfs_server *server)
task->tk_action = nfs_flushd;
task->tk_exit = nfs_flushd_exit;
- spin_unlock(&nfs_flushd_lock);
rpc_execute(task);
+ unlock_kernel();
return 0;
out_unlock:
- spin_unlock(&nfs_flushd_lock);
+ if (task)
+ rpc_release_task(task);
+ unlock_kernel();
return status;
}
@@ -106,23 +104,24 @@ void nfs_reqlist_exit(struct nfs_server *server)
{
struct nfs_reqlist *cache;
+ lock_kernel();
cache = server->rw_requests;
if (!cache)
- return;
+ goto out;
dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task);
+
while (cache->task || cache->inodes) {
- spin_lock(&nfs_flushd_lock);
if (!cache->task) {
- spin_unlock(&nfs_flushd_lock);
nfs_reqlist_init(server);
} else {
cache->task->tk_status = -ENOMEM;
rpc_wake_up_task(cache->task);
- spin_unlock(&nfs_flushd_lock);
}
interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ);
}
+ out:
+ unlock_kernel();
}
int nfs_reqlist_alloc(struct nfs_server *server)
@@ -161,7 +160,6 @@ static void inode_append_flushd(struct inode *inode)
struct nfs_reqlist *cache = NFS_REQUESTLIST(inode);
struct inode **q;
- spin_lock(&nfs_flushd_lock);
if (NFS_FLAGS(inode) & NFS_INO_FLUSH)
goto out;
inode->u.nfs_i.hash_next = NULL;
@@ -177,7 +175,6 @@ static void inode_append_flushd(struct inode *inode)
NFS_FLAGS(inode) |= NFS_INO_FLUSH;
atomic_inc(&inode->i_count);
out:
- spin_unlock(&nfs_flushd_lock);
}
void inode_remove_flushd(struct inode *inode)
@@ -185,7 +182,7 @@ void inode_remove_flushd(struct inode *inode)
struct nfs_reqlist *cache = NFS_REQUESTLIST(inode);
struct inode **q;
- spin_lock(&nfs_flushd_lock);
+ lock_kernel();
if (!(NFS_FLAGS(inode) & NFS_INO_FLUSH))
goto out;
@@ -198,7 +195,7 @@ void inode_remove_flushd(struct inode *inode)
iput(inode);
}
out:
- spin_unlock(&nfs_flushd_lock);
+ unlock_kernel();
}
void inode_schedule_scan(struct inode *inode, unsigned long time)
@@ -207,6 +204,7 @@ void inode_schedule_scan(struct inode *inode, unsigned long time)
struct rpc_task *task;
unsigned long mintimeout;
+ lock_kernel();
if (time_after(NFS_NEXTSCAN(inode), time))
NFS_NEXTSCAN(inode) = time;
mintimeout = jiffies + 1 * HZ;
@@ -214,16 +212,14 @@ void inode_schedule_scan(struct inode *inode, unsigned long time)
mintimeout = NFS_NEXTSCAN(inode);
inode_append_flushd(inode);
- spin_lock(&nfs_flushd_lock);
task = cache->task;
if (!task) {
- spin_unlock(&nfs_flushd_lock);
nfs_reqlist_init(NFS_SERVER(inode));
} else {
if (time_after(cache->runat, mintimeout))
rpc_wake_up_task(task);
- spin_unlock(&nfs_flushd_lock);
}
+ unlock_kernel();
}
@@ -240,10 +236,8 @@ nfs_flushd(struct rpc_task *task)
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
- spin_lock(&nfs_flushd_lock);
next = cache->inodes;
cache->inodes = NULL;
- spin_unlock(&nfs_flushd_lock);
while ((inode = next) != NULL) {
next = next->u.nfs_i.hash_next;
@@ -280,13 +274,11 @@ nfs_flushd(struct rpc_task *task)
task->tk_timeout = delay;
cache->runat = jiffies + task->tk_timeout;
- spin_lock(&nfs_flushd_lock);
if (!atomic_read(&cache->nr_requests) && !cache->inodes) {
cache->task = NULL;
task->tk_action = NULL;
} else
rpc_sleep_on(&flushd_queue, task, NULL, NULL);
- spin_unlock(&nfs_flushd_lock);
}
static void
@@ -297,10 +289,8 @@ nfs_flushd_exit(struct rpc_task *task)
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
- spin_lock(&nfs_flushd_lock);
if (cache->task == task)
cache->task = NULL;
- spin_unlock(&nfs_flushd_lock);
wake_up(&cache->request_wait);
}
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 69f6f0ce9..0d72631c8 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -36,7 +36,7 @@
#include "check.h"
#include "msdos.h"
-#if CONFIG_BLK_DEV_MD && CONFIG_AUTODETECT_RAID
+#if CONFIG_BLK_DEV_MD
extern void md_autodetect_dev(kdev_t dev);
#endif
@@ -136,7 +136,7 @@ static void extended_partition(struct gendisk *hd, kdev_t dev)
add_gd_partition(hd, current_minor,
this_sector+START_SECT(p)*sector_size,
NR_SECTS(p)*sector_size);
-#if CONFIG_BLK_DEV_MD && CONFIG_AUTODETECT_RAID
+#if CONFIG_BLK_DEV_MD
if (SYS_IND(p) == LINUX_RAID_PARTITION) {
md_autodetect_dev(MKDEV(hd->major,current_minor));
}
@@ -448,7 +448,7 @@ check_table:
continue;
add_gd_partition(hd, minor, first_sector+START_SECT(p)*sector_size,
NR_SECTS(p)*sector_size);
-#if CONFIG_BLK_DEV_MD && CONFIG_AUTODETECT_RAID
+#if CONFIG_BLK_DEV_MD
if (SYS_IND(p) == LINUX_RAID_PARTITION) {
md_autodetect_dev(MKDEV(hd->major,minor));
}
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 00c16769e..f95e9f624 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -17,6 +17,7 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
+#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 898ab8178..69ea5fb23 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -81,6 +81,7 @@ static int ramfs_readpage(struct file *file, struct page * page)
static int ramfs_writepage(struct page *page)
{
SetPageDirty(page);
+ UnlockPage(page);
return 0;
}
diff --git a/fs/reiserfs/.cvsignore b/fs/reiserfs/.cvsignore
new file mode 100644
index 000000000..857dd22e9
--- /dev/null
+++ b/fs/reiserfs/.cvsignore
@@ -0,0 +1,2 @@
+.depend
+.*.flags
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
new file mode 100644
index 000000000..fda0b17ec
--- /dev/null
+++ b/fs/reiserfs/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the linux reiser-filesystem routines.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+O_TARGET := reiserfs.o
+obj-y := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o super.o prints.o objectid.o \
+lbalance.o ibalance.o stree.o hashes.o buffer2.o tail_conversion.o journal.o resize.o tail_conversion.o version.o item_ops.o ioctl.o
+
+obj-m := $(O_TARGET)
+
+include $(TOPDIR)/Rules.make
+
+TAGS:
+ etags *.c
+
diff --git a/fs/reiserfs/README b/fs/reiserfs/README
new file mode 100644
index 000000000..37b23959c
--- /dev/null
+++ b/fs/reiserfs/README
@@ -0,0 +1,157 @@
+[LICENSING]
+
+ReiserFS is hereby licensed under the GNU General
+Public License version 2.
+
+Source code files that contain the phrase "licensing governed by
+reiserfs/README" are "governed files" throughout this file. Governed
+files are licensed under the GPL. The portions of them owned by Hans
+Reiser, or authorized to be licensed by him, have been in the past,
+and likely will be in the future, licensed to other parties under
+other licenses. If you add your code to governed files, and don't
+want it to be owned by Hans Reiser, put your copyright label on that
+code so the poor blight and his customers can keep things straight.
+All portions of governed files not labeled otherwise are owned by Hans
+Reiser, and by adding your code to it, widely distributing it to
+others or sending us a patch, and leaving the sentence in stating that
+licensing is governed by the statement in this file, you accept this.
+It will be a kindness if you identify whether Hans Reiser is allowed
+to license code labeled as owned by you on your behalf other than
+under the GPL, because he wants to know if it is okay to do so and put
+a check in the mail to you (for non-trivial improvements) when he
+makes his next sale. He makes no guarantees as to the amount if any,
+though he feels motivated to motivate contributors, and you can surely
+discuss this with him before or after contributing. You have the
+right to decline to allow him to license your code contribution other
+than under the GPL.
+
+Further licensing options are available for commercial and/or other
+interests directly from Hans Reiser: hans@reiser.to. If you interpret
+the GPL as not allowing those additional licensing options, you read
+it wrongly, and Richard Stallman agrees with me, when carefully read
+you can see that those restrictions on additional terms do not apply
+to the owner of the copyright, and my interpretation of this shall
+govern for this license.
+
+Finally, nothing in this license shall be interpreted to allow you to
+fail to fairly credit me, or to remove my credits, without my
+permission, unless you are an end user not redistributing to others.
+If you have doubts about how to properly do that, or about what is
+fair, ask. (Last I spoke with him Richard was contemplating how best
+to address the fair crediting issue in the next GPL version.)
+
+[END LICENSING]
+
+Reiserfs is a file system based on balanced tree algorithms, which is
+described at http://devlinux.com/namesys.
+
+Stop reading here. Go there, then return.
+
+Send bug reports to yura@namesys.botik.ru.
+
+mkreiserfs and other utilities are in reiserfs/utils, or wherever your
+Linux provider put them. There is some disagreement about how useful
+it is for users to get their fsck and mkreiserfs out of sync with the
+version of reiserfs that is in their kernel, with many important
+distributors wanting them out of sync.:-) Please try to remember to
+recompile and reinstall fsck and mkreiserfs with every update of
+reiserfs, this is a common source of confusion. Note that some of the
+utilities cannot be compiled without accessing the balancing code
+which is in the kernel code, and relocating the utilities may require
+you to specify where that code can be found.
+
+Yes, if you update your reiserfs kernel module you do have to
+recompile your kernel, most of the time. The errors you get will be
+quite cryptic if your forget to do so.
+
+Real users, as opposed to folks who want to hack and then understand
+what went wrong, will want REISERFS_CHECK off.
+
+Hideous Commercial Pitch: Spread your development costs across other OS
+vendors. Select from the best in the world, not the best in your
+building, by buying from third party OS component suppliers. Leverage
+the software component development power of the internet. Be the most
+aggressive in taking advantage of the commercial possibilities of
+decentralized internet development, and add value through your branded
+integration that you sell as an operating system. Let your competitors
+be the ones to compete against the entire internet by themselves. Be
+hip, get with the new economic trend, before your competitors do. Send
+email to hans@reiser.to.
+
+To understand the code, after reading the website, start reading the
+code by reading reiserfs_fs.h first.
+
+Hans Reiser was the project initiator, primary architect, source of all
+funding for the first 5.5 years, and one of the programmers. He owns
+the copyright.
+
+Vladimir Saveljev was one of the programmers, and he worked long hours
+writing the cleanest code. He always made the effort to be the best he
+could be, and to make his code the best that it could be. What resulted
+was quite remarkable. I don't think that money can ever motivate someone
+to work the way he did, he is one of the most selfless men I know.
+
+Yura helps with benchmarking, coding hashes, and block pre-allocation
+code.
+
+Anatoly Pinchuk is a former member of our team who worked closely with
+Vladimir throughout the project's development. He wrote a quite
+substantial portion of the total code. He realized that there was a
+space problem with packing tails of files for files larger than a node
+that start on a node aligned boundary (there are reasons to want to node
+align files), and he invented and implemented indirect items and
+unformatted nodes as the solution.
+
+Konstantin Shvachko, with the help of the Russian version of a VC,
+tried to put me in a position where I was forced into giving control
+of the project to him. (Fortunately, as the person paying the money
+for all salaries from my dayjob I owned all copyrights, and you can't
+really force takeovers of sole proprietorships.) This was something
+curious, because he never really understood the value of our project,
+why we should do what we do, or why innovation was possible in
+general, but he was sure that he ought to be controlling it. Every
+innovation had to be forced past him while he was with us. He added
+two years to the time required to complete reiserfs, and was a net
+loss for me. Mikhail Gilula was a brilliant innovator who also left
+in a destructive way that erased the value of his contributions, and
+that he was shown much generosity just makes it more painful.
+
+Grigory Zaigralin was an extremely effective system administrator for
+our group.
+
+Igor Krasheninnikov was wonderful at hardware procurement, repair, and
+network installation.
+
+Jeremy Fitzhardinge wrote the teahash.c code, and he gives credit to a
+textbook he got the algorithm from in the code. Note that his analysis
+of how we could use the hashing code in making 32 bit NFS cookies work
+was probably more important than the actual algorithm. Colin Plumb also
+contributed to it.
+
+Chris Mason dived right into our code, and in just a few months produced
+the journaling code that dramatically increased the value of ReiserFS.
+He is just an amazing programmer.
+
+Igor Zagorovsky is writing much of the new item handler and extent code
+for our next major release.
+
+Alexander Zarochentcev (sometimes known as zam, or sasha), wrote the
+resizer, and is hard at work on implementing allocate on flush. SGI
+implemented allocate on flush before us for XFS, and generously took
+the time to convince me we should do it also. They are great people,
+and a great company.
+
+Yuri Shevchuk and Nikita Danilov are doing squid cache optimization.
+
+Vitaly Fertman is doing fsck.
+
+SuSE, IntegratedLinux.com, Ecila, MP3.com, bigstorage.com, and the
+Alpha PC Company made it possible for me to not have a day job
+anymore, and to dramatically increase our staffing. Ecila funded
+hypertext feature development, MP3.com funded journaling, SuSE funded
+core development, IntegratedLinux.com funded squid web cache
+appliances, bigstorage.com funded HSM, and the alpha PC company funded
+the alpha port. Many of these tasks were helped by sponsors other
+than the ones just named. SuSE has helped in much more than just
+funding....
+
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
new file mode 100644
index 000000000..ea83fd549
--- /dev/null
+++ b/fs/reiserfs/bitmap.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/locks.h>
+#include <asm/bitops.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+/* this is a safety check to make sure
+** blocks are reused properly. used for debugging only.
+**
+** this checks, that block can be reused, and it has correct state
+** (free or busy)
+*/
+int is_reusable (struct super_block * s, unsigned long block, int bit_value) /* returns 1 if block may have state bit_value, 0 (with a warning) on any inconsistency */
+{
+ int i, j;
+
+ if (block == 0 || block >= SB_BLOCK_COUNT (s)) { /* block 0 and blocks past the device end are never valid */
+ reiserfs_warning ("vs-4010: is_reusable: block number is out of range %lu (%u)\n",
+ block, SB_BLOCK_COUNT (s));
+ return 0;
+ }
+
+ /* it can't be one of the bitmap blocks */
+ for (i = 0; i < SB_BMAP_NR (s); i ++)
+ if (block == SB_AP_BITMAP (s)[i]->b_blocknr) {
+ reiserfs_warning ("vs: 4020: is_reusable: "
+ "bitmap block %lu(%u) can't be freed or reused\n",
+ block, SB_BMAP_NR (s));
+ return 0;
+ }
+
+ i = block / (s->s_blocksize << 3); /* index of the bitmap block covering this block (s_blocksize*8 bits per bitmap) */
+ if (i >= SB_BMAP_NR (s)) {
+ reiserfs_warning ("vs-4030: is_reusable: there is no so many bitmap blocks: "
+ "block=%lu, bitmap_nr=%d\n", block, i);
+ return 0;
+ }
+
+ j = block % (s->s_blocksize << 3); /* bit offset within that bitmap block */
+ if ((bit_value == 0 &&
+ reiserfs_test_le_bit(j, SB_AP_BITMAP(s)[i]->b_data)) ||
+ (bit_value == 1 &&
+ reiserfs_test_le_bit(j, SB_AP_BITMAP (s)[i]->b_data) == 0)) { /* on-disk bit must agree with the caller's expectation */
+ reiserfs_warning ("vs-4040: is_reusable: corresponding bit of block %lu does not "
+ "match required value (i==%d, j==%d) test_bit==%d\n",
+ block, i, j, reiserfs_test_le_bit (j, SB_AP_BITMAP (s)[i]->b_data));
+ return 0;
+ }
+
+ if (bit_value == 0 && block == SB_ROOT_BLOCK (s)) { /* the tree root can never be free */
+ reiserfs_warning ("vs-4050: is_reusable: this is root block (%u), "
+ "it must be busy", SB_ROOT_BLOCK (s));
+ return 0;
+ }
+
+ return 1;
+}
+
+
+
+
+#endif /* CONFIG_REISERFS_CHECK */
+
+#if 0
+/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+int is_used (struct super_block * s, unsigned long block) /* dead code (#if 0): returns 1 iff block's bit is set in the on-disk bitmap */
+{
+ int i, j;
+
+ i = block / (s->s_blocksize << 3); /* bitmap block index */
+ j = block % (s->s_blocksize << 3); /* bit offset within it */
+ if (reiserfs_test_le_bit(j, SB_AP_BITMAP (s)[i]->b_data))
+ return 1;
+ return 0;
+
+}
+/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+#endif
+
+
+/* get address of corresponding bit (bitmap block number and offset in it) */
+static inline void get_bit_address (struct super_block * s, unsigned long block, int * bmap_nr, int * offset) /* out: *bmap_nr = bitmap block index, *offset = bit within it */
+{
+ /* It is in the bitmap block number equal to the block number divided by the number of
+ bits in a block. */
+ *bmap_nr = block / (s->s_blocksize << 3); /* s_blocksize << 3 == bits per bitmap block */
+ /* Within that bitmap block it is located at bit offset *offset. */
+ *offset = block % (s->s_blocksize << 3);
+ return;
+}
+
+
+/* There would be a modest performance benefit if we write a version
+ to free a list of blocks at once. -Hans */
+ /* I wonder if it would be less modest
+ now that we use journaling. -Hans */
+void reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long block) /* clears block's bitmap bit and bumps the free-block count, all journaled via th */
+{
+ struct super_block * s = th->t_super;
+ struct reiserfs_super_block * rs;
+ struct buffer_head * sbh;
+ struct buffer_head ** apbh;
+ int nr, offset;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!s)
+ reiserfs_panic (s, "vs-4060: reiserfs_free_block: trying to free block on nonexistent device"); /* NOTE(review): s is NULL here; verify reiserfs_panic tolerates a NULL sb */
+
+ if (is_reusable (s, block, 1) == 0)
+ reiserfs_panic (s, "vs-4070: reiserfs_free_block: can not free such block");
+#endif
+
+ rs = SB_DISK_SUPER_BLOCK (s);
+ sbh = SB_BUFFER_WITH_SB (s);
+ apbh = SB_AP_BITMAP (s);
+
+ get_bit_address (s, block, &nr, &offset); /* nr = bitmap block index, offset = bit within it */
+
+ /* mark it before we clear it, just in case */
+ journal_mark_freed(th, s, block) ;
+
+ reiserfs_prepare_for_journal(s, apbh[nr], 1 ) ;
+
+ /* clear bit for the given block in bit map */
+ if (!reiserfs_test_and_clear_le_bit (offset, apbh[nr]->b_data)) {
+ reiserfs_warning ("vs-4080: reiserfs_free_block: "
+ "free_block (%04x:%lu)[dev:blocknr]: bit already cleared\n",
+ s->s_dev, block);
+ }
+ journal_mark_dirty (th, s, apbh[nr]);
+
+ reiserfs_prepare_for_journal(s, sbh, 1) ;
+ /* update super block */
+ rs->s_free_blocks = cpu_to_le32 (le32_to_cpu (rs->s_free_blocks) + 1);
+
+ journal_mark_dirty (th, s, sbh);
+ s->s_dirt = 1; /* superblock buffer must be written back */
+}
+
+
+
+/* beginning from offset-th bit in bmap_nr-th bitmap block,
+ find_forward finds the closest zero bit. It returns 1 and zero
+ bit address (bitmap, offset) if zero bit found or 0 if there is no
+ zero bit in the forward direction */
+/* The function is NOT SCHEDULE-SAFE! */
+static int find_forward (struct super_block * s, int * bmap_nr, int * offset, int for_unformatted)
+{
+ int i, j;
+ struct buffer_head * bh;
+ unsigned long block_to_try = 0;
+ unsigned long next_block_to_try = 0 ;
+
+ for (i = *bmap_nr; i < SB_BMAP_NR (s); i ++, *offset = 0) { /* later bitmap blocks are scanned from bit 0 */
+ /* get corresponding bitmap block */
+ bh = SB_AP_BITMAP (s)[i];
+ if (buffer_locked (bh)) {
+ __wait_on_buffer (bh); /* may sleep: bitmap block could be under I/O */
+ }
+retry:
+ j = reiserfs_find_next_zero_le_bit ((unsigned long *)bh->b_data,
+ s->s_blocksize << 3, *offset);
+
+ /* wow, this really needs to be redone. We can't allocate a block if
+ ** it is in the journal somehow. reiserfs_in_journal makes a suggestion
+ ** for a good block if the one you ask for is in the journal. Note,
+ ** reiserfs_in_journal might reject the block it suggests. The big
+ ** gain from the suggestion is when a big file has been deleted, and
+ ** many blocks show free in the real bitmap, but are all not free
+ ** in the journal list bitmaps.
+ **
+ ** this whole system sucks. The bitmaps should reflect exactly what
+ ** can and can't be allocated, and the journal should update them as
+ ** it goes. TODO.
+ */
+ if (j < (s->s_blocksize << 3)) { /* a zero bit was found in this bitmap block */
+ block_to_try = (i * (s->s_blocksize << 3)) + j;
+
+ /* the block is not in the journal, we can proceed */
+ if (!(reiserfs_in_journal(s, s->s_dev, block_to_try, s->s_blocksize, for_unformatted, &next_block_to_try))) {
+ *bmap_nr = i;
+ *offset = j;
+ return 1;
+ }
+ /* the block is in the journal */
+ else if ((j+1) < (s->s_blocksize << 3)) { /* try again */
+ /* reiserfs_in_journal suggested a new block to try */
+ if (next_block_to_try > 0) {
+ int new_i ;
+ get_bit_address (s, next_block_to_try, &new_i, offset);
+
+ /* block is not in this bitmap. reset i and continue
+ ** we only reset i if new_i is in a later bitmap.
+ */
+ if (new_i > i) {
+ i = (new_i - 1 ); /* i gets incremented by the for loop */
+ continue ;
+ }
+ } else {
+ /* no suggestion was made, just try the next block */
+ *offset = j+1 ;
+ }
+ goto retry ;
+ }
+ }
+ }
+ /* zero bit not found */
+ return 0;
+}
+
+/* return 0 if no free blocks, else return 1 */
+/* The function is NOT SCHEDULE-SAFE!
+** because the bitmap block we want to change could be locked, and on its
+** way to the disk when we want to read it, and because of the
+** flush_async_commits. Per bitmap block locks won't help much, and
+** really aren't needed, as we retry later on if we try to set the bit
+** and it is already set.
+*/
+static int find_zero_bit_in_bitmap (struct super_block * s,
+ unsigned long search_start,
+ int * bmap_nr, int * offset,
+ int for_unformatted)
+{
+ int retry_count = 0 ;
+ /* get bit location (bitmap number and bit offset) of search_start block */
+ get_bit_address (s, search_start, bmap_nr, offset);
+
+ /* note that we search forward in the bitmap, benchmarks have shown that it is better to allocate in increasing
+ sequence, which is probably due to the disk spinning in the forward direction.. */
+ if (find_forward (s, bmap_nr, offset, for_unformatted) == 0) {
+ /* there wasn't a free block with number greater than our
+ starting point, so we are going to go to the beginning of the disk */
+
+retry:
+ search_start = 0; /* caller will reset search_start for itself also. */
+ get_bit_address (s, search_start, bmap_nr, offset);
+ if (find_forward (s, bmap_nr,offset,for_unformatted) == 0) {
+ if (for_unformatted) { /* why only unformatted nodes? -Hans */
+ if (retry_count == 0) {
+ /* we've got a chance that flushing async commits will free up
+ ** some space. Sync then retry
+ */
+ flush_async_commits(s) ;
+ retry_count++ ;
+ goto retry ; /* one full re-scan from block 0 after the flush */
+ } else if (retry_count > 0) {
+ /* nothing more we can do. Make the others wait, flush
+ ** all log blocks to disk, and flush to their home locations.
+ ** this will free up any blocks held by the journal
+ */
+ SB_JOURNAL(s)->j_must_wait = 1 ; /* caller may retry; do_reiserfs_new_blocknrs calls us twice */
+ }
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* get amount_needed free block numbers from scanning the bitmap of
+ free/used blocks.
+
+ Optimize layout by trying to find them starting from search_start
+ and moving in increasing blocknr direction. (This was found to be
+ faster than using a bi-directional elevator_direction, in part
+ because of disk spin direction, in part because by the time one
+ reaches the end of the disk the beginning of the disk is the least
+ congested).
+
+ search_start is the block number of the left
+ semantic neighbor of the node we create.
+
+ return CARRY_ON if everything is ok
+ return NO_DISK_SPACE if out of disk space
+ return NO_MORE_UNUSED_CONTIGUOUS_BLOCKS if the block we found is not contiguous to the last one
+
+ return block numbers found, in the array free_blocknrs. assumes
+ that any non-zero entries already present in the array are valid.
+ This feature is perhaps convenient coding when one might not have
+ used all blocknrs from the last time one called this function, or
+ perhaps it is an archaism from the days of schedule tracking, one
+ of us ought to reread the code that calls this, and analyze whether
+ it is still the right way to code it.
+
+ spare space is used only when priority is set to 1. reiserfsck has
+ its own reiserfs_new_blocknrs, which can use reserved space
+
+ exactly what reserved space? the SPARE_SPACE? if so, please comment reiserfs.h.
+
+ Give example of who uses spare space, and say that it is a deadlock
+ avoidance mechanism. -Hans */
+
+/* This function is NOT SCHEDULE-SAFE! */
+
+static int do_reiserfs_new_blocknrs (struct reiserfs_transaction_handle *th,
+ unsigned long * free_blocknrs,
+ unsigned long search_start,
+ int amount_needed, int priority,
+ int for_unformatted,
+ int for_prealloc)
+{
+ struct super_block * s = th->t_super;
+ int i, j;
+ unsigned long * block_list_start = free_blocknrs; /* remembered so we can roll back on failure */
+ int init_amount_needed = amount_needed;
+ unsigned long new_block = 0 ;
+
+ if (SB_FREE_BLOCKS (s) < SPARE_SPACE && !priority)
+ /* we can answer NO_DISK_SPACE being asked for new block with
+ priority 0 */
+ return NO_DISK_SPACE;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!s)
+ reiserfs_panic (s, "vs-4090: reiserfs_new_blocknrs: trying to get new block from nonexistent device");
+
+ if (search_start == MAX_B_NUM)
+ reiserfs_panic (s, "vs-4100: reiserfs_new_blocknrs: we are optimizing location based on "
+ "the bogus location of a temp buffer (%lu).", search_start);
+
+ if (amount_needed < 1 || amount_needed > 2)
+ reiserfs_panic (s, "vs-4110: reiserfs_new_blocknrs: amount_needed parameter incorrect (%d)", amount_needed);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* We continue the while loop if another process snatches our found
+ * free block from us after we find it but before we successfully
+ * mark it as in use, or if we need to use sync to free up some
+ * blocks on the preserve list. */
+
+ while (amount_needed--) {
+ /* skip over any blocknrs already gotten last time. */
+ if (*(free_blocknrs) != 0) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (is_reusable (s, *free_blocknrs, 1) == 0)
+ reiserfs_panic(s, "vs-4120: reiserfs_new_blocknrs: bad blocknr on free_blocknrs list");
+#endif /* CONFIG_REISERFS_CHECK */
+ free_blocknrs++;
+ continue;
+ }
+ /* look for zero bits in bitmap */
+ if (find_zero_bit_in_bitmap(s,search_start, &i, &j,for_unformatted) == 0) { /* second try: the first call may have set j_must_wait */
+ if (find_zero_bit_in_bitmap(s,search_start,&i,&j, for_unformatted) == 0) {
+ /* recode without the goto and without
+ the if. It will require a
+ duplicate for. This is worth the
+ code clarity. Your way was
+ admirable, and just a bit too
+ clever in saving instructions.:-)
+ I'd say create a new function, but
+ that would slow things also, yes?
+ -Hans */
+free_and_return:
+ for ( ; block_list_start != free_blocknrs; block_list_start++) { /* undo: free every block allocated in this call */
+ reiserfs_free_block (th, *block_list_start);
+ *block_list_start = 0;
+ }
+ if (for_prealloc)
+ return NO_MORE_UNUSED_CONTIGUOUS_BLOCKS;
+ else
+ return NO_DISK_SPACE;
+ }
+ }
+
+ /* i and j now contain the results of the search. i = bitmap block
+ number containing free block, j = offset in this block. we
+ compute the blocknr which is our result, store it in
+ free_blocknrs, and increment the pointer so that on the next
+ loop we will insert into the next location in the array. Also
+ in preparation for the next loop, search_start is changed so
+ that the next search will not rescan the same range but will
+ start where this search finished. Note that while it is
+ possible that schedule has occurred and blocks have been freed
+ in that range, it is perhaps more important that the blocks
+ returned be near each other than that they be near their other
+ neighbors, and it also simplifies and speeds the code this way. */
+
+ /* journal: we need to make sure the block we are giving out is not
+ ** a log block, horrible things would happen there.
+ */
+ new_block = (i * (s->s_blocksize << 3)) + j;
+ if (for_prealloc && (new_block - 1) != search_start) {
+ /* preallocated blocks must be contiguous, bail if we didnt find one.
+ ** this is not a bug. We want to do the check here, before the
+ ** bitmap block is prepared, and before we set the bit and log the
+ ** bitmap.
+ **
+ ** If we do the check after this function returns, we have to
+ ** call reiserfs_free_block for new_block, which would be pure
+ ** overhead.
+ **
+ ** for_prealloc should only be set if the caller can deal with the
+ ** NO_MORE_UNUSED_CONTIGUOUS_BLOCKS return value. This can be
+ ** returned before the disk is actually full
+ */
+ goto free_and_return ;
+ }
+ search_start = new_block ;
+ if (search_start >= reiserfs_get_journal_block(s) &&
+ search_start < (reiserfs_get_journal_block(s) + JOURNAL_BLOCK_COUNT)) { /* never hand out a block inside the journal area */
+ reiserfs_warning("vs-4130: reiserfs_new_blocknrs: trying to allocate log block %lu\n",
+ search_start) ;
+ search_start++ ;
+ amount_needed++ ; /* this iteration produced nothing; redo it */
+ continue ;
+ }
+
+
+ reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[i], 1) ;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (buffer_locked (SB_AP_BITMAP (s)[i]) || is_reusable (s, search_start, 0) == 0)
+ reiserfs_panic (s, "vs-4140: reiserfs_new_blocknrs: bitmap block is locked or bad block number found");
+#endif
+
+ /* if this bit was already set, we've scheduled, and someone else
+ ** has allocated it. loop around and try again
+ */
+ if (reiserfs_test_and_set_le_bit (j, SB_AP_BITMAP (s)[i]->b_data)) {
+ reiserfs_warning("vs-4150: reiserfs_new_blocknrs, block not free");
+ reiserfs_restore_prepared_buffer(s, SB_AP_BITMAP(s)[i]) ;
+ amount_needed++ ;
+ continue ;
+ }
+ journal_mark_dirty (th, s, SB_AP_BITMAP (s)[i]);
+ *free_blocknrs = search_start ;
+ free_blocknrs ++;
+ }
+
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+ /* update free block count in super block */
+ s->u.reiserfs_sb.s_rs->s_free_blocks = cpu_to_le32 (SB_FREE_BLOCKS (s) - init_amount_needed);
+ journal_mark_dirty (th, s, SB_BUFFER_WITH_SB (s));
+ s->s_dirt = 1;
+
+ return CARRY_ON;
+}
+
+// this is called only by get_empty_nodes with for_preserve_list==0
+// thin wrapper: allocate amount_needed formatted-node blocks, no priority, no prealloc
+int reiserfs_new_blocknrs (struct reiserfs_transaction_handle *th, unsigned long * free_blocknrs,
+ unsigned long search_start, int amount_needed) {
+ return do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, amount_needed, 0/*for_preserve_list-priority*/, 0/*for_formatted*/, 0/*for_prealloc */) ;
+}
+
+
+// called by get_new_buffer and by reiserfs_get_block with amount_needed == 1 and for_preserve_list == 0
+// thin wrapper: allocate exactly one unformatted-node block, no preallocation
+int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle *th, unsigned long * free_blocknrs,
+ unsigned long search_start) {
+ return do_reiserfs_new_blocknrs(th, free_blocknrs, search_start,
+ 1/*amount_needed*/,
+ 0/*for_preserve_list-priority*/,
+ 1/*for_unformatted*/,
+ 0/*for prealloc */) ;
+}
+
+#ifdef REISERFS_PREALLOCATE
+
+/*
+** We pre-allocate 8 blocks. Pre-allocation is used for files > 16 KB only.
+** This lowers fragmentation on large files by grabbing a contiguous set of
+** blocks at once. It also limits the number of times the bitmap block is
+** logged by making X number of allocation changes in a single transaction.
+**
+** We are using a border to divide the disk into two parts. The first part
+** is used for tree blocks, which have a very high turnover rate (they
+** are constantly allocated then freed)
+**
+** The second part of the disk is for the unformatted nodes of larger files.
+** Putting them away from the tree blocks lowers fragmentation, and makes
+** it easier to group files together. There are a number of different
+** allocation schemes being tried right now, each is documented below.
+**
+** A great deal of the allocator's speed comes because reiserfs_get_block
+** sends us the block number of the last unformatted node in the file. Once
+** a given block is allocated past the border, we don't collide with the
+** blocks near the search_start again.
+**
+*/
+int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th,
+ struct inode * p_s_inode,
+ unsigned long * free_blocknrs,
+ unsigned long search_start)
+{
+ int ret=0, blks_gotten=0;
+ unsigned long border = 0; /* block number dividing tree blocks from unformatted-node blocks */
+ unsigned long bstart = 0;
+ unsigned long hash_in, hash_out;
+ int allocated[PREALLOCATION_SIZE];
+ int blks;
+
+ if (!reiserfs_no_border(th->t_super)) {
+ /* we default to having the border at the 10% mark of the disk. This
+ ** is an arbitrary decision and it needs tuning. It also needs a limit
+ ** to prevent it from taking too much space on huge drives.
+ */
+ bstart = (SB_BLOCK_COUNT(th->t_super) / 10);
+ }
+ if (!reiserfs_no_unhashed_relocation(th->t_super)) {
+ /* this is a very simple first attempt at preventing too much grouping
+ ** around the border value. Since k_dir_id is never larger than the
+ ** highest allocated oid, it is far from perfect, and files will tend
+ ** to be grouped towards the start of the border
+ */
+ border = (INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
+ } else {
+ /* why would we want to delcare a local variable to this if statement
+ ** name border????? -chris
+ ** unsigned long border = 0;
+ */
+ if (!reiserfs_hashed_relocation(th->t_super)) {
+ hash_in = (INODE_PKEY(p_s_inode))->k_dir_id;
+ /* I wonder if the CPU cost of the
+ hash will obscure the layout
+ effect? Of course, whether that
+ effect is good or bad we don't
+ know.... :-) */
+
+ hash_out = keyed_hash(((char *) (&hash_in)), 4);
+ border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
+ }
+ }
+ border += bstart ;
+ allocated[0] = 0 ; /* important. Allows a check later on to see if at
+ * least one block was allocated. This prevents false
+ * no disk space returns
+ */
+
+ if ( (p_s_inode->i_size < 4 * 4096) ||
+ !(S_ISREG(p_s_inode->i_mode)) ) /* small files and non-regular files get no preallocation */
+ {
+ if ( search_start < border
+ || (
+ /* allow us to test whether it is a
+ good idea to prevent files from
+ getting too far away from their
+ packing locality by some unexpected
+ means. This might be poor code for
+ directories whose files total
+ larger than 1/10th of the disk, and
+ it might be good code for
+ suffering from old insertions when the disk
+ was almost full. */
+ /* changed from !reiserfs_test3(th->t_super), which doesn't
+ ** seem like a good idea. Think about adding blocks to
+ ** a large file. If you've allocated 10% of the disk
+ ** in contiguous blocks, you start over at the border value
+ ** for every new allocation. This throws away all the
+ ** information sent in about the last block that was allocated
+ ** in the file. Not a good general case at all.
+ ** -chris
+ */
+ reiserfs_test4(th->t_super) &&
+ (search_start > border + (SB_BLOCK_COUNT(th->t_super) / 10))
+ )
+ )
+ search_start=border;
+
+ ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start,
+ 1/*amount_needed*/,
+ 0/*use reserved blocks for root */,
+ 1/*for_unformatted*/,
+ 0/*for prealloc */) ;
+ return ret;
+ }
+
+ /* take a block off the prealloc list and return it -Hans */
+ if (p_s_inode->u.reiserfs_i.i_prealloc_count > 0) {
+ p_s_inode->u.reiserfs_i.i_prealloc_count--;
+ *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block++;
+ return ret; /* ret is still 0 here */
+ }
+
+ /* else get a new preallocation for the file */
+ reiserfs_discard_prealloc (th, p_s_inode);
+ /* this uses the last preallocated block as the search_start. discard
+ ** prealloc does not zero out this number.
+ */
+ if (search_start <= p_s_inode->u.reiserfs_i.i_prealloc_block) {
+ search_start = p_s_inode->u.reiserfs_i.i_prealloc_block;
+ }
+
+ /* doing the compare again forces search_start to be >= the border,
+ ** even if the file already had prealloction done. This seems extra,
+ ** and should probably be removed
+ */
+ if ( search_start < border ) search_start=border;
+
+ *free_blocknrs = 0;
+ blks = PREALLOCATION_SIZE-1; /* blocks kept on the prealloc list; one is returned to the caller */
+ for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
+ ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start,
+ 1/*amount_needed*/,
+ 0/*for root reserved*/,
+ 1/*for_unformatted*/,
+ (blks_gotten > 0)/*must_be_contiguous*/) ;
+ /* if we didn't find a block this time, adjust blks to reflect
+ ** the actual number of blocks allocated
+ */
+ if (ret != CARRY_ON) {
+ blks = blks_gotten > 0 ? (blks_gotten - 1) : 0 ;
+ break ;
+ }
+ allocated[blks_gotten]= *free_blocknrs;
+#ifdef CONFIG_REISERFS_CHECK
+ if ( (blks_gotten>0) && (allocated[blks_gotten] - allocated[blks_gotten-1]) != 1 ) {
+ /* this should be caught by new_blocknrs now, checking code */
+ reiserfs_warning("yura-1, reiserfs_new_unf_blocknrs2: pre-allocated not contiguous set of blocks!\n") ;
+ reiserfs_free_block(th, allocated[blks_gotten]);
+ blks = blks_gotten-1;
+ break;
+ }
+#endif
+ if (blks_gotten==0) {
+ p_s_inode->u.reiserfs_i.i_prealloc_block = *free_blocknrs; /* first block anchors the prealloc run */
+ }
+ search_start = *free_blocknrs;
+ *free_blocknrs = 0;
+ }
+ p_s_inode->u.reiserfs_i.i_prealloc_count = blks;
+ *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block;
+ p_s_inode->u.reiserfs_i.i_prealloc_block++;
+
+ /* we did actually manage to get 1 block */
+ if (ret != CARRY_ON && allocated[0] > 0) {
+ return CARRY_ON ;
+ }
+ /* NO_MORE_UNUSED_CONTIGUOUS_BLOCKS should only mean something to
+ ** the preallocation code. The rest of the filesystem asks for a block
+ ** and should either get it, or know the disk is full. The code
+ ** above should never allow ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCK,
+ ** as it doesn't send for_prealloc = 1 to do_reiserfs_new_blocknrs
+ ** unless it has already successfully allocated at least one block.
+ ** Just in case, we translate into a return value the rest of the
+ ** filesystem can understand.
+ **
+ ** It is an error to change this without making the
+ ** rest of the filesystem understand NO_MORE_UNUSED_CONTIGUOUS_BLOCKS
+ ** If you consider it a bug to return NO_DISK_SPACE here, fix the rest
+ ** of the fs first.
+ */
+ if (ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning("reiser-2015: this shouldn't happen, may cause false out of disk space error");
+#endif
+ return NO_DISK_SPACE;
+ }
+ return ret;
+}
+
+//
+// a portion of this function, was derived from minix or ext2's
+// analog. You should be able to tell which portion by looking at the
+// ext2 code and comparing.
+
+void reiserfs_discard_prealloc (struct reiserfs_transaction_handle *th,
+ struct inode * inode) /* returns all unused preallocated blocks of inode to the free bitmap */
+{
+ if (inode->u.reiserfs_i.i_prealloc_count > 0) {
+ while (inode->u.reiserfs_i.i_prealloc_count--) {
+ reiserfs_free_block(th,inode->u.reiserfs_i.i_prealloc_block);
+ inode->u.reiserfs_i.i_prealloc_block++; /* i_prealloc_block ends past the run; callers use it as a search hint */
+ }
+ }
+ inode->u.reiserfs_i.i_prealloc_count = 0; /* loop leaves it at -1; normalize */
+}
+#endif
diff --git a/fs/reiserfs/buffer2.c b/fs/reiserfs/buffer2.c
new file mode 100644
index 000000000..c05854ce5
--- /dev/null
+++ b/fs/reiserfs/buffer2.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+
+/*
+ * Contains code from
+ *
+ * linux/include/linux/lock.h and linux/fs/buffer.c /linux/fs/minix/fsync.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/locks.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+/*
+ * wait_buffer_until_released
+ * reiserfs_bread
+ * reiserfs_getblk
+ * get_new_buffer
+ */
+
+
+
+/* when we allocate a new block (get_new_buffer, get_empty_nodes) and
+ get buffer for it, it is possible that it is held by someone else
+ or even by this process. In this function we wait until all other
+ holders release buffer. To make sure, that current process does not
+ hold we did free all buffers in tree balance structure
+ (get_empty_nodes and get_nodes_for_preserving) or in path structure
+ only (get_new_buffer) just before calling this */
+void wait_buffer_until_released (struct buffer_head * bh) /* busy-waits (yielding) until we are the only holder of bh */
+{
+ int repeat_counter = 0;
+
+ while (atomic_read (&(bh->b_count)) > 1) {
+
+ if ( !(++repeat_counter % 30000000) ) { /* warn periodically, not every pass */
+ reiserfs_warning ("vs-3050: wait_buffer_until_released: nobody releases buffer (%b). Still waiting (%d) %cJDIRTY %cJWAIT\n",
+ bh, repeat_counter, buffer_journaled(bh) ? ' ' : '!',
+ buffer_journal_dirty(bh) ? ' ' : '!');
+ }
+ run_task_queue(&tq_disk); /* kick pending disk I/O so the other holder can finish */
+ current->policy |= SCHED_YIELD;
+ /*current->counter = 0;*/
+ schedule();
+ }
+ if (repeat_counter > 30000000) {
+ reiserfs_warning("vs-3051: done waiting, ignore vs-3050 messages for (%b)\n", bh) ;
+ }
+}
+
+/*
+ * reiserfs_bread() reads a specified block and returns the buffer that contains
+ * it. It returns NULL if the block was unreadable.
+ */
+/* It first tries to find the block in cache, and if it cannot do so
+ then it creates a new buffer and schedules I/O to read the
+ block. */
+/* The function is NOT SCHEDULE-SAFE! */
+
+struct buffer_head * reiserfs_bread (kdev_t n_dev, int n_block, int n_size) /* wrapper around the generic buffer cache read; NULL on I/O failure */
+{
+ return bread (n_dev, n_block, n_size);
+}
+
+/* This function looks for a buffer which contains a given block. If
+ the block is in cache it returns it, otherwise it returns a new
+ buffer which is not uptodate. This is called by reiserfs_bread and
+ other functions. Note that get_new_buffer ought to be called this
+ and this ought to be called get_new_buffer, since this doesn't
+ actually get the block off of the disk. */
+/* The function is NOT SCHEDULE-SAFE! */
+
+struct buffer_head * reiserfs_getblk (kdev_t n_dev, int n_block, int n_size) /* wrapper around getblk: returns buffer (possibly not uptodate), no disk read */
+{
+ return getblk (n_dev, n_block, n_size);
+}
+
+#ifdef NEW_GET_NEW_BUFFER
+
+/* returns one buffer with a blocknr near blocknr. */
+static int get_new_buffer_near_blocknr(
+ struct super_block * p_s_sb,
+ int blocknr,
+ struct buffer_head ** pp_s_new_bh,
+ struct path * p_s_path
+ ) { /* dead code: only compiled under NEW_GET_NEW_BUFFER */
+ unsigned long n_new_blocknumber = 0;
+ int n_ret_value,
+ n_repeat = CARRY_ON;
+
+#ifdef CONFIG_REISERFS_CHECK
+ int repeat_counter = 0;
+
+ if (!blocknr)
+ printk ("blocknr passed to get_new_buffer_near_blocknr was 0");
+#endif
+
+
+ if ( (n_ret_value = reiserfs_new_blocknrs (p_s_sb, &n_new_blocknumber, /* NOTE(review): visible reiserfs_new_blocknrs takes a transaction handle first, not a super_block — verify before enabling NEW_GET_NEW_BUFFER */
+ blocknr, 1)) == NO_DISK_SPACE )
+ return NO_DISK_SPACE;
+
+ *pp_s_new_bh = reiserfs_getblk(p_s_sb->s_dev, n_new_blocknumber, p_s_sb->s_blocksize);
+ if ( buffer_uptodate(*pp_s_new_bh) ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( buffer_dirty(*pp_s_new_bh) || (*pp_s_new_bh)->b_dev == NODEV ) {
+ reiserfs_panic(p_s_sb, "PAP-14080: get_new_buffer: invalid uptodate buffer %b for the new block", *pp_s_new_bh);
+ }
+#endif
+
+ /* Free path buffers to prevent deadlock. */
+ /* It is possible that this process has the buffer, which this function is getting, already in
+ its path, and is responsible for double incrementing the value of b_count. If we recalculate
+ the path after schedule we can avoid risking an endless loop. This problematic situation is
+ possible in a multiple processing environment. Suppose process 1 has acquired a path P; then
+ process 2 balanced and remove block A from the tree. Process 1 continues and runs
+ get_new_buffer, that returns buffer with block A. If node A was on the path P, then it will
+ have b_count == 2. If we now will simply wait in while ( (*pp_s_new_bh)->b_count > 1 ) we get
+ into an endless loop, as nobody will release this buffer and the current process holds buffer
+ twice. That is why we do decrement_counters_in_path(p_s_path) before waiting until b_count
+ becomes 1. (it there were other processes holding node A, then eventually we will get a
+ moment, when all of them released a buffer). */
+ if ( atomic_read (&((*pp_s_new_bh)->b_count)) > 1 ) {
+ decrement_counters_in_path(p_s_path);
+ n_ret_value |= SCHEDULE_OCCURRED;
+ }
+
+ while ( atomic_read (&((*pp_s_new_bh)->b_count)) > 1 ) {
+
+#ifdef REISERFS_INFO
+ printk("get_new_buffer() calls schedule to decrement b_count\n");
+#endif
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! (++repeat_counter % 10000) )
+ printk("get_new_buffer(%u): counter(%d) too big", current->pid, repeat_counter);
+#endif
+
+ current->counter = 0; /* give up our timeslice so the other holder can release */
+ schedule();
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( buffer_dirty(*pp_s_new_bh) || (*pp_s_new_bh)->b_dev == NODEV ) {
+ print_buffer_head(*pp_s_new_bh,"get_new_buffer");
+ reiserfs_panic(p_s_sb, "PAP-14090: get_new_buffer: invalid uptodate buffer %b for the new block(case 2)", *pp_s_new_bh);
+ }
+#endif
+
+ }
+ else {
+ ; /* buffer not uptodate: freshly created, nothing to wait for */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (atomic_read (&((*pp_s_new_bh)->b_count)) != 1) {
+ reiserfs_panic(p_s_sb,"PAP-14100: get_new_buffer: not uptodate buffer %b for the new block has b_count more than one",
+ *pp_s_new_bh);
+ }
+#endif
+
+ }
+ return (n_ret_value | n_repeat);
+}
+
+
+/* returns the block number of the last unformatted node, assumes p_s_key_to_search.k_offset is a byte in the tail of
+ the file, Useful for when you want to append to a file, and convert a direct item into an unformatted node near the
+ last unformatted node of the file. Putting the unformatted node near the direct item is potentially very bad to do.
+ If there is no unformatted node in the file, then we return the block number of the direct item. */
+/* The function is NOT SCHEDULE-SAFE! */
+inline int get_last_unformatted_node_blocknr_of_file( struct key * p_s_key_to_search, struct super_block * p_s_sb,
+ struct buffer_head * p_s_bh, /* FIX: comma was missing here — parameter list did not parse; latent because NEW_GET_NEW_BUFFER is never defined */
+ struct path * p_unf_search_path, struct inode * p_s_inode)
+
+{
+ struct key unf_key_to_search;
+ struct item_head * p_s_ih;
+ int n_pos_in_item;
+ struct buffer_head * p_indirect_item_bh;
+
+ copy_key(&unf_key_to_search,p_s_key_to_search); /* search for the indirect item just before the first direct byte */
+ unf_key_to_search.k_uniqueness = TYPE_INDIRECT;
+ unf_key_to_search.k_offset = p_s_inode->u.reiserfs_i.i_first_direct_byte - 1;
+
+ /* p_s_key_to_search->k_offset - MAX_ITEM_LEN(p_s_sb->s_blocksize); */
+ if (search_for_position_by_key (p_s_sb, &unf_key_to_search, p_unf_search_path, &n_pos_in_item) == POSITION_FOUND)
+ {
+ p_s_ih = B_N_PITEM_HEAD(p_indirect_item_bh = PATH_PLAST_BUFFER(p_unf_search_path), PATH_LAST_POSITION(p_unf_search_path));
+ return (B_I_POS_UNFM_POINTER(p_indirect_item_bh, p_s_ih, n_pos_in_item));
+ }
+ /* else */
+ printk("reiser-1800: search for unformatted node failed, p_s_key_to_search->k_offset = %u, unf_key_to_search.k_offset = %u, MAX_ITEM_LEN(p_s_sb->s_blocksize) = %ld, debug this\n", p_s_key_to_search->k_offset, unf_key_to_search.k_offset, MAX_ITEM_LEN(p_s_sb->s_blocksize) );
+ print_buffer_head(PATH_PLAST_BUFFER(p_unf_search_path), "the buffer holding the item before the key we failed to find");
+ print_block_head(PATH_PLAST_BUFFER(p_unf_search_path), "the block head");
+ return 0; /* keeps the compiler quiet */ /* NOTE(review): p_s_bh is unused; caller comments suggest p_s_bh->b_blocknr was the intended fallback — confirm */
+}
+
+
+ /* hasn't been out of disk space tested */
+/* The function is NOT SCHEDULE-SAFE! */
+static int get_buffer_near_last_unf ( struct super_block * p_s_sb, struct key * p_s_key_to_search,
+ struct inode * p_s_inode, struct buffer_head * p_s_bh,
+ struct buffer_head ** pp_s_un_bh, struct path * p_s_search_path)
+{
+ int unf_blocknr = 0, /* blocknr from which we start search for a free block for an unformatted node, if 0
+ then we didn't find an unformatted node though we might have found a file hole */
+ n_repeat = CARRY_ON;
+ struct key unf_key_to_search;
+ struct path unf_search_path;
+
+ copy_key(&unf_key_to_search,p_s_key_to_search); /* same object, but we will look for indirect items */
+ unf_key_to_search.k_uniqueness = TYPE_INDIRECT;
+
+ if (
+ (p_s_inode->u.reiserfs_i.i_first_direct_byte > 4095) /* i_first_direct_byte gets used for all sorts of
+ crap other than what the name indicates, thus
+ testing to see if it is 0 is not enough */
+ && (p_s_inode->u.reiserfs_i.i_first_direct_byte < MAX_KEY_OFFSET) /* if there is no direct item then
+ i_first_direct_byte = MAX_KEY_OFFSET */
+ )
+ {
+ /* actually, we don't want the last unformatted node, we want the last unformatted node
+ which is before the current file offset */
+ unf_key_to_search.k_offset = ((p_s_inode->u.reiserfs_i.i_first_direct_byte -1) < unf_key_to_search.k_offset) ? p_s_inode->u.reiserfs_i.i_first_direct_byte -1 : unf_key_to_search.k_offset; /* min(first_direct_byte - 1, current offset) */
+
+ while (unf_key_to_search.k_offset > -1) /* NOTE(review): if k_offset is an unsigned type this is always false and the loop never runs -- confirm k_offset's type */
+ {
+ /* This is our poorly documented way of initializing paths. -Hans */
+ init_path (&unf_search_path);
+ /* get the blocknr from which we start the search for a free block. */
+ unf_blocknr = get_last_unformatted_node_blocknr_of_file( p_s_key_to_search, /* assumes this points to the file tail */
+ p_s_sb, /* lets us figure out the block size */
+ p_s_bh, /* if there is no unformatted node in the file,
+ then it returns p_s_bh->b_blocknr */
+ &unf_search_path,
+ p_s_inode
+ );
+/* printk("in while loop: unf_blocknr = %d, *pp_s_un_bh = %p\n", unf_blocknr, *pp_s_un_bh); */
+ if (unf_blocknr)
+ break;
+ else /* release the path and search again, this could be really slow for huge
+ holes.....better to spend the coding time adding compression though.... -Hans */
+ {
+ /* Vladimir, is it a problem that I don't brelse these buffers ?-Hans */
+ decrement_counters_in_path(&unf_search_path);
+ unf_key_to_search.k_offset -= 4096; /* step back one page and retry -- presumably 4096 matches the block size; verify */
+ }
+ }
+ if (unf_blocknr) {
+ n_repeat |= get_new_buffer_near_blocknr(p_s_sb, unf_blocknr, pp_s_un_bh, p_s_search_path); /* allocate near the last unformatted node */
+ }
+ else { /* all unformatted nodes are holes */
+ n_repeat |= get_new_buffer_near_blocknr(p_s_sb, p_s_bh->b_blocknr, pp_s_un_bh, p_s_search_path); /* fall back: allocate near the formatted node */
+ }
+ }
+ else { /* file has no unformatted nodes */
+ n_repeat |= get_new_buffer_near_blocknr(p_s_sb, p_s_bh->b_blocknr, pp_s_un_bh, p_s_search_path);
+/* printk("in else: unf_blocknr = %d, *pp_s_un_bh = %p\n", unf_blocknr, *pp_s_un_bh); */
+/* print_path (0, p_s_search_path); */
+ }
+
+ return n_repeat; /* CARRY_ON possibly OR-ed with flags from get_new_buffer_near_blocknr */
+}
+
+#endif /* NEW_GET_NEW_BUFFER */
+
+
+#ifdef OLD_GET_NEW_BUFFER
+
+/* The function is NOT SCHEDULE-SAFE! */
+int get_new_buffer(
+ struct reiserfs_transaction_handle *th,
+ struct buffer_head * p_s_bh,
+ struct buffer_head ** pp_s_new_bh,
+ struct path * p_s_path
+ ) {
+ unsigned long n_new_blocknumber = 0;
+ int n_repeat;
+ struct super_block * p_s_sb = th->t_super;
+
+ if ( (n_repeat = reiserfs_new_unf_blocknrs (th, &n_new_blocknumber, p_s_bh->b_blocknr)) == NO_DISK_SPACE )
+ return NO_DISK_SPACE; /* allocation near p_s_bh failed: nothing to clean up yet */
+
+ *pp_s_new_bh = reiserfs_getblk(p_s_sb->s_dev, n_new_blocknumber, p_s_sb->s_blocksize); /* buffer for the new block */
+ if (atomic_read (&(*pp_s_new_bh)->b_count) > 1) {
+ /* Free path buffers to prevent deadlock which can occur in the
+ situation like : this process holds p_s_path; Block
+ (*pp_s_new_bh)->b_blocknr is on the path p_s_path, but it is
+ not necessary, that *pp_s_new_bh is in the tree; process 2
+ could remove it from the tree and freed block
+ (*pp_s_new_bh)->b_blocknr. Reiserfs_new_blocknrs in above
+ returns block (*pp_s_new_bh)->b_blocknr. Reiserfs_getblk gets
+ buffer for it, and it has b_count > 1. If we now will simply
+ wait in while ( (*pp_s_new_bh)->b_count > 1 ) we get into an
+ endless loop, as nobody will release this buffer and the
+ current process holds buffer twice. That is why we do
+ decrement_counters_in_path(p_s_path) before waiting until
+ b_count becomes 1. (it there were other processes holding node
+ pp_s_new_bh, then eventually we will get a moment, when all of
+ them released a buffer). */
+ decrement_counters_in_path(p_s_path);
+ wait_buffer_until_released (*pp_s_new_bh);
+ n_repeat |= SCHEDULE_OCCURRED; /* tell the caller the tree may have changed while we slept */
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( atomic_read (&((*pp_s_new_bh)->b_count)) != 1 || buffer_dirty (*pp_s_new_bh)) {
+ reiserfs_panic(p_s_sb,"PAP-14100: get_new_buffer: not free or dirty buffer %b for the new block",
+ *pp_s_new_bh);
+ }
+#endif
+
+ return n_repeat; /* allocation status, possibly OR-ed with SCHEDULE_OCCURRED */
+}
+
+#endif /* OLD_GET_NEW_BUFFER */
+
+
+#ifdef GET_MANY_BLOCKNRS
+ /* code not yet functional */
+get_next_blocknr (
+ unsigned long * p_blocknr_array, /* we get a whole bunch of blocknrs all at once for
+ the write. This is better than getting them one at
+ a time. */
+ unsigned long ** p_blocknr_index, /* pointer to current offset into the array. */
+ unsigned long blocknr_array_length
+)
+{
+ unsigned long return_value;
+
+ if (*p_blocknr_index < p_blocknr_array + blocknr_array_length) {
+ return_value = **p_blocknr_index;
+ **p_blocknr_index = 0;
+ (*p_blocknr_index)++; /* fixed: "*p_blocknr_index++" advanced the local argument, not the caller's cursor */
+ return (return_value);
+ }
+ else
+ {
+ kfree (p_blocknr_array); return 0; /* fixed: array exhausted; previously fell off the end without returning a value */
+ }
+}
+#endif /* GET_MANY_BLOCKNRS */
+
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
new file mode 100644
index 000000000..48ca45c0c
--- /dev/null
+++ b/fs/reiserfs/dir.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/stat.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+extern struct key MIN_KEY;
+
+static int reiserfs_readdir (struct file *, void *, filldir_t);
+int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) ;
+
+struct file_operations reiserfs_dir_operations = {
+ read: generic_read_dir,
+ readdir: reiserfs_readdir,
+ fsync: reiserfs_dir_fsync,
+};
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations reiserfs_dir_inode_operations = {
+ //&reiserfs_dir_operations, /* default_file_ops */
+ create: reiserfs_create,
+ lookup: reiserfs_lookup,
+ link: reiserfs_link,
+ unlink: reiserfs_unlink,
+ symlink: reiserfs_symlink,
+ mkdir: reiserfs_mkdir,
+ rmdir: reiserfs_rmdir,
+ mknod: reiserfs_mknod,
+ rename: reiserfs_rename,
+};
+
+int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) { /* NOTE(review): datasync is ignored; a sync transaction is always committed */
+ int ret = 0 ;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+
+ journal_begin(&th, dentry->d_inode->i_sb, 1) ; /* one-block transaction */
+ windex = push_journal_writer("dir_fsync") ;
+ reiserfs_prepare_for_journal(th.t_super, SB_BUFFER_WITH_SB(th.t_super), 1) ;
+ journal_mark_dirty(&th, dentry->d_inode->i_sb, SB_BUFFER_WITH_SB (dentry->d_inode->i_sb)) ; /* log the superblock buffer */
+ pop_journal_writer(windex) ;
+ journal_end_sync(&th, dentry->d_inode->i_sb, 1) ; /* synchronous commit gives fsync semantics */
+
+ return ret ; /* always 0: journal calls' results are not checked */
+}
+
+
+#define store_ih(where,what) copy_item_head (where, what)
+
+//
+static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
+ INITIALIZE_PATH (path_to_entry);
+ struct buffer_head * bh;
+ int item_num, entry_num;
+ struct key * rkey;
+ struct item_head * ih, tmp_ih;
+ int search_res;
+ char * local_buf;
+ loff_t next_pos;
+ char small_buf[32] ; /* avoid kmalloc if we can */
+ struct reiserfs_dir_entry de;
+
+
+ reiserfs_check_lock_depth("readdir") ;
+
+ /* form key for search the next directory entry using f_pos field of
+ file structure */
+ make_cpu_key (&pos_key, inode, (filp->f_pos) ? (filp->f_pos) : DOT_OFFSET,
+ TYPE_DIRENTRY, 3);
+ next_pos = cpu_key_k_offset (&pos_key);
+
+ /* reiserfs_warning ("reiserfs_readdir 1: f_pos = %Ld\n", filp->f_pos);*/
+
+ while (1) {
+ research:
+ /* search the directory item, containing entry with specified key */
+ search_res = search_by_entry_key (inode->i_sb, &pos_key, &path_to_entry, &de);
+ if (search_res == IO_ERROR) {
+ // FIXME: we could just skip part of directory which could
+ // not be read
+ return -EIO;
+ }
+ entry_num = de.de_entry_num;
+ bh = de.de_bh;
+ item_num = de.de_item_num;
+ ih = de.de_ih;
+ store_ih (&tmp_ih, ih);
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* we must have found item, that is item of this directory, */
+ if (COMP_SHORT_KEYS (&(ih->ih_key), &pos_key))
+ reiserfs_panic (inode->i_sb, "vs-9000: reiserfs_readdir: "
+ "found item %h does not match to dir we readdir %k",
+ ih, &pos_key);
+
+ if (item_num > B_NR_ITEMS (bh) - 1)
+ reiserfs_panic (inode->i_sb, "vs-9005: reiserfs_readdir: "
+ "item_num == %d, item amount == %d",
+ item_num, B_NR_ITEMS (bh));
+
+ /* and entry must be not more than number of entries in the item */
+ if (I_ENTRY_COUNT (ih) < entry_num)
+ reiserfs_panic (inode->i_sb, "vs-9010: reiserfs_readdir: "
+ "entry number is too big %d (%d)",
+ entry_num, I_ENTRY_COUNT (ih));
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if (search_res == POSITION_FOUND || entry_num < I_ENTRY_COUNT (ih)) {
+ /* go through all entries in the directory item beginning from the entry, that has been found */
+ struct reiserfs_de_head * deh = B_I_DEH (bh, ih) + entry_num;
+
+ for (; entry_num < I_ENTRY_COUNT (ih); entry_num ++, deh ++) {
+ int d_reclen;
+ char * d_name;
+ off_t d_off;
+ ino_t d_ino;
+
+ if (!de_visible (deh))
+ /* it is hidden entry */
+ continue;
+ d_reclen = entry_length (bh, ih, entry_num);
+ d_name = B_I_DEH_ENTRY_FILE_NAME (bh, ih, deh);
+ if (!d_name[d_reclen - 1])
+ d_reclen = strlen (d_name);
+
+ if (d_reclen > REISERFS_MAX_NAME_LEN(inode->i_sb->s_blocksize)){
+ /* too big to send back to VFS */
+ continue ;
+ }
+ d_off = deh_offset (deh);
+ filp->f_pos = d_off ;
+ d_ino = deh_objectid (deh);
+ if (d_reclen <= 32) {
+ local_buf = small_buf ;
+ } else {
+ local_buf = kmalloc(d_reclen, GFP_BUFFER) ;
+ if (!local_buf) {
+ pathrelse (&path_to_entry);
+ return -ENOMEM ;
+ }
+ if (item_moved (&tmp_ih, &path_to_entry)) {
+ kfree(local_buf) ;
+ goto research;
+ }
+ }
+ // Note, that we copy name to user space via temporary
+ // buffer (local_buf) because filldir will block if
+ // user space buffer is swapped out. At that time
+ // entry can move to somewhere else
+ memcpy (local_buf, d_name, d_reclen);
+ if (filldir (dirent, local_buf, d_reclen, d_off, d_ino, /* fixed: passed d_name, defeating the local_buf copy made above */
+ DT_UNKNOWN) < 0) {
+ if (local_buf != small_buf) {
+ kfree(local_buf) ;
+ }
+ goto end;
+ }
+ if (local_buf != small_buf) {
+ kfree(local_buf) ;
+ }
+
+ // next entry should be looked for with such offset
+ next_pos = deh_offset (deh) + 1;
+
+ if (item_moved (&tmp_ih, &path_to_entry)) {
+ reiserfs_warning ("vs-9020: reiserfs_readdir "
+ "things are moving under hands. Researching..\n");
+ goto research;
+ }
+ } /* for */
+ }
+
+ if (item_num != B_NR_ITEMS (bh) - 1)
+ // end of directory has been reached
+ goto end;
+
+ /* item we went through is last item of node. Using right
+ delimiting key check is it directory end */
+ rkey = get_rkey (&path_to_entry, inode->i_sb);
+ if (! comp_le_keys (rkey, &MIN_KEY)) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning ("vs-9025: reiserfs_readdir:"
+ "get_rkey failed. Researching..\n");
+#endif
+ /* set pos_key to key, that is the smallest and greater
+ that key of the last entry in the item */
+ set_cpu_key_k_offset (&pos_key, next_pos);
+ continue;
+ }
+
+ if ( COMP_SHORT_KEYS (rkey, &pos_key)) {
+ // end of directory has been reached
+ goto end;
+ }
+
+ /* directory continues in the right neighboring block */
+ set_cpu_key_k_offset (&pos_key, le_key_k_offset (ITEM_VERSION_1, rkey));
+
+ } /* while */
+
+
+ end:
+ // FIXME: ext2_readdir does not reset f_pos
+ filp->f_pos = next_pos;
+ pathrelse (&path_to_entry);
+ reiserfs_check_path(&path_to_entry) ;
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
new file mode 100644
index 000000000..852135254
--- /dev/null
+++ b/fs/reiserfs/do_balan.c
@@ -0,0 +1,2043 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/* Now we have all buffers that must be used in balancing of the tree */
+/* Further calculations can not cause schedule(), and thus the buffer */
+/* tree will be stable until the balancing will be finished */
+/* balance the tree according to the analysis made before, */
+/* and using buffers obtained after all above. */
+
+
+/**
+ ** balance_leaf_when_delete
+ ** balance_leaf
+ ** do_balance
+ **
+ **/
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+struct tree_balance * cur_tb = NULL; /* detects whether more than one
+ copy of tb exists as a means
+ of checking whether schedule
+ is interrupting do_balance */
+#endif
+
+
+inline void do_balance_mark_leaf_dirty (struct tree_balance * tb,
+ struct buffer_head * bh, int flag)
+{
+ if (reiserfs_dont_log(tb->tb_sb)) { /* journaling disabled: dirty the buffer directly */
+ if (!test_and_set_bit(BH_Dirty, &bh->b_state)) { /* act only on the clean->dirty transition */
+ __mark_buffer_dirty(bh) ;
+ tb->need_balance_dirty = 1; /* remember that balancing dirtied a buffer outside the journal */
+ }
+ } else { /* journaled: record the buffer in the running transaction instead */
+ int windex = push_journal_writer("do_balance") ;
+ journal_mark_dirty(tb->transaction_handle, tb->transaction_handle->t_super, bh) ;
+ pop_journal_writer(windex) ;
+ }
+}
+
+#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
+#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
+
+
+/* summary:
+ if deleting something ( tb->insert_size[0] < 0 )
+ return(balance_leaf_when_delete()); (flag d handled here)
+ else
+ if lnum is larger than 0 we put items into the left node
+ if rnum is larger than 0 we put items into the right node
+ if snum1 is larger than 0 we put items into the new node s1
+ if snum2 is larger than 0 we put items into the new node s2
+Note that all *num* count new items being created.
+
+It would be easier to read balance_leaf() if each of these summary
+lines was a separate procedure rather than being inlined. I think
+that there are many passages here and in balance_leaf_when_delete() in
+which two calls to one procedure can replace two passages, and it
+might save cache space and improve software maintenance costs to do so.
+
+Vladimir made the perceptive comment that we should offload most of
+the decision making in this function into fix_nodes/check_balance, and
+then create some sort of structure in tb that says what actions should
+be performed by do_balance.
+
+-Hans */
+
+
+
+/* Balance leaf node in case of delete or cut: insert_size[0] < 0
+ *
+ * lnum, rnum can have values >= -1
+ * -1 means that the neighbor must be joined with S
+ * 0 means that nothing should be done with the neighbor
+ * >0 means to shift entirely or partly the specified number of items to the neighbor
+ */
+static int balance_leaf_when_delete (struct tree_balance * tb, int flag)
+{
+ struct buffer_head * tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ int item_pos = PATH_LAST_POSITION (tb->tb_path);
+ int pos_in_item = tb->tb_path->pos_in_item;
+ struct buffer_info bi;
+ int n;
+ struct item_head * ih;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->FR[0] && B_LEVEL (tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1)
+ reiserfs_panic (tb->tb_sb,
+ "vs- 12000: balance_leaf_when_delete:level: wrong FR %z\n", tb->FR[0]);
+ if ( tb->blknum[0] > 1 )
+ reiserfs_panic (tb->tb_sb,
+ "PAP-12005: balance_leaf_when_delete: "
+ "tb->blknum == %d, can not be > 1", tb->blknum[0]);
+
+ if ( ! tb->blknum[0] && ! PATH_H_PPARENT(tb->tb_path, 0))
+ reiserfs_panic (tb->tb_sb, "PAP-12010: balance_leaf_when_delete: tree can not be empty");
+#endif
+
+ ih = B_N_PITEM_HEAD (tbS0, item_pos);
+
+ /* Delete or truncate the item */
+
+ switch (flag) {
+ case M_DELETE: /* delete item in S[0] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (le16_to_cpu (ih->ih_item_len) + IH_SIZE != -tb->insert_size [0])
+ reiserfs_panic (tb->tb_sb, "vs-12013: balance_leaf_when_delete: "
+ "mode Delete, insert size %d, ih to be deleted %h", -tb->insert_size [0], ih); /* fixed: format has %d and %h but only ih was passed */
+
+#if 0 /* rigth delim key not supported */
+ if ( ! item_pos && (! tb->L[0] || COMP_KEYS(B_PRIGHT_DELIM_KEY(tb->L[0]), B_N_PKEY(tbS0, 0))) ) {
+ print_cur_tb ("12015");
+ reiserfs_panic (tb->tb_sb, "PAP-12015: balance_leaf_when_delete: L0's rkey does not match to 1st key of S0: "
+ "rkey in L %k, first key in S0 %k, rkey in CFL %k",
+ tb->L[0] ? B_PRIGHT_DELIM_KEY(tb->L[0]) : 0,
+ B_N_PKEY(tbS0, 0),
+ tb->CFL[0] ? B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0]) : 0);
+ }
+#endif
+
+#endif
+
+ bi.tb = tb;
+ bi.bi_bh = tbS0;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+ leaf_delete_items (&bi, 0, item_pos, 1, -1);
+
+ if ( ! item_pos && tb->CFL[0] ) {
+ if ( B_NR_ITEMS(tbS0) ) {
+ replace_key(tb, tb->CFL[0],tb->lkey[0],tbS0,0);
+#if 0 /* right delim key support */
+ copy_key(B_PRIGHT_DELIM_KEY(tb->L[0]), B_N_PKEY(tbS0, 0));
+ reiserfs_mark_buffer_dirty (tb->L[0], 0);
+#endif
+ }
+ else {
+ if ( ! PATH_H_POSITION (tb->tb_path, 1) )
+ replace_key(tb, tb->CFL[0],tb->lkey[0],PATH_H_PPARENT(tb->tb_path, 0),0);
+#if 0 /* right delim key support */
+ copy_key(B_PRIGHT_DELIM_KEY(tb->L[0]), B_PRIGHT_DELIM_KEY(tbS0));
+ reiserfs_mark_buffer_dirty (tb->L[0], 0);
+#endif
+ }
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+#if 0
+ if (! item_pos && (!tb->CFL[0] || !tb->L[0]))
+#endif
+ if (! item_pos && !tb->CFL[0])
+ reiserfs_panic (tb->tb_sb, "PAP-12020: balance_leaf_when_delete: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0], tb->L[0]);
+#endif
+
+ break;
+
+ case M_CUT: { /* cut item in S[0] */
+ bi.tb = tb;
+ bi.bi_bh = tbS0;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+ if (is_direntry_le_ih (ih)) {
+
+#ifdef CONFIG_REISERFS_CHECK
+#if 0 /* right delim key support */
+ if ( ! item_pos && ! pos_in_item && (! tb->L[0] || COMP_KEYS(B_PRIGHT_DELIM_KEY(tb->L[0]),
+ B_N_PKEY(tbS0, 0))) )
+ reiserfs_panic(tb->tb_sb, "PAP-12025: balance_leaf_when_delete: illegal right delimiting key");
+#endif
+#endif
+
+ /* UFS unlink semantics are such that you can only delete one directory entry at a time. */
+ /* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
+ tb->insert_size[0] = -1;
+ leaf_cut_from_buffer (&bi, item_pos, pos_in_item, -tb->insert_size[0]);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (! item_pos && ! pos_in_item && ! tb->CFL[0])
+ reiserfs_panic (tb->tb_sb, "PAP-12030: balance_leaf_when_delete: can not change delimiting key. CFL[0]=%p", tb->CFL[0]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if ( ! item_pos && ! pos_in_item && tb->CFL[0] ) {
+ replace_key(tb, tb->CFL[0],tb->lkey[0],tbS0,0);
+#if 0/* right delim key support */
+ copy_key(B_PRIGHT_DELIM_KEY(tb->L[0]), B_N_PKEY(tbS0, 0));
+ reiserfs_mark_buffer_dirty (tb->L[0], 0);
+#endif
+ }
+ } else {
+ leaf_cut_from_buffer (&bi, item_pos, pos_in_item, -tb->insert_size[0]);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (! ih->ih_item_len)
+ reiserfs_panic (tb->tb_sb, "PAP-12035: balance_leaf_when_delete: cut must leave non-zero dynamic length of item");
+#endif /* CONFIG_REISERFS_CHECK */
+ }
+ break;
+ }
+
+ default:
+ print_cur_tb ("12040");
+ reiserfs_panic (tb->tb_sb, "PAP-12040: balance_leaf_when_delete: unexpectable mode: %s(%d)",
+ (flag == M_PASTE) ? "PASTE" : ((flag == M_INSERT) ? "INSERT" : "UNKNOWN"), flag);
+ }
+
+ /* the rule is that no shifting occurs unless by shifting a node can be freed */
+ n = B_NR_ITEMS(tbS0);
+ if ( tb->lnum[0] ) /* L[0] takes part in balancing */
+ {
+ if ( tb->lnum[0] == -1 ) /* L[0] must be joined with S[0] */
+ {
+ if ( tb->rnum[0] == -1 ) /* R[0] must be also joined with S[0] */
+ {
+ if ( tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0) )
+ {
+ /* all contents of all the 3 buffers will be in L[0] */
+ if ( PATH_H_POSITION (tb->tb_path, 1) == 0 && 1 < B_NR_ITEMS(tb->FR[0]) )
+ replace_key(tb, tb->CFL[0],tb->lkey[0],tb->FR[0],1);
+
+ /* update right_delimiting_key field */
+#if 0
+ copy_key (B_PRIGHT_DELIM_KEY (tb->L[0]), B_PRIGHT_DELIM_KEY (tb->R[0]));
+#endif
+ leaf_move_items (LEAF_FROM_S_TO_L, tb, n, -1, 0);
+ leaf_move_items (LEAF_FROM_R_TO_L, tb, B_NR_ITEMS(tb->R[0]), -1, 0);
+
+#if 0/*preserve list*/
+ preserve_invalidate(tb, tbS0, tb->L[0]);
+ preserve_invalidate(tb, tb->R[0], tb->L[0]);
+#endif
+ reiserfs_invalidate_buffer (tb, tbS0);
+ reiserfs_invalidate_buffer (tb, tb->R[0]);
+
+ return 0;
+ }
+ /* all contents of all the 3 buffers will be in R[0] */
+ leaf_move_items (LEAF_FROM_S_TO_R, tb, n, -1, 0);
+ leaf_move_items (LEAF_FROM_L_TO_R, tb, B_NR_ITEMS(tb->L[0]), -1, 0);
+
+ /* right_delimiting_key is correct in R[0] */
+ replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+#if 0
+ /* mark tb->R[0] as suspected recipient */
+ preserve_invalidate(tb,tbS0, tb->R[0]);
+ preserve_invalidate(tb,tb->L[0], tb->R[0]);
+#endif
+ reiserfs_invalidate_buffer (tb, tbS0);
+ reiserfs_invalidate_buffer (tb, tb->L[0]);
+
+ return -1;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rnum[0] != 0 )
+ reiserfs_panic (tb->tb_sb, "PAP-12045: balance_leaf_when_delete: "
+ "rnum must be 0 (%d)", tb->rnum[0]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* all contents of L[0] and S[0] will be in L[0] */
+ leaf_shift_left(tb, n, -1);
+
+#if 0/*preserve list*/
+ preserve_invalidate(tb, tbS0, tb->L[0]); /* preserved, shifting */
+#endif
+ reiserfs_invalidate_buffer (tb, tbS0);
+
+ return 0;
+ }
+ /* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (( tb->lnum[0] + tb->rnum[0] < n ) || ( tb->lnum[0] + tb->rnum[0] > n+1 ))
+ reiserfs_panic (tb->tb_sb, "PAP-12050: balance_leaf_when_delete: "
+ "rnum(%d) and lnum(%d) and item number in S[0] are not consistent",
+ tb->rnum[0], tb->lnum[0], n);
+
+ if (( tb->lnum[0] + tb->rnum[0] == n ) && (tb->lbytes != -1 || tb->rbytes != -1))
+ reiserfs_panic (tb->tb_sb, "PAP-12055: balance_leaf_when_delete: "
+ "bad rbytes (%d)/lbytes (%d) parameters when items are not split",
+ tb->rbytes, tb->lbytes);
+ if (( tb->lnum[0] + tb->rnum[0] == n + 1 ) && (tb->lbytes < 1 || tb->rbytes != -1))
+ reiserfs_panic (tb->tb_sb, "PAP-12060: balance_leaf_when_delete: "
+ "bad rbytes (%d)/lbytes (%d) parameters when items are split",
+ tb->rbytes, tb->lbytes);
+#endif
+
+ leaf_shift_left (tb, tb->lnum[0], tb->lbytes);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+
+#if 0/*preserve list*/
+ preserve_invalidate (tb, tbS0, tb->L[0]);
+ mark_suspected_recipient (tb->tb_sb, tb->R[0]);
+#endif
+ reiserfs_invalidate_buffer (tb, tbS0);
+
+ return 0;
+ }
+
+ if ( tb->rnum[0] == -1 ) {
+ /* all contents of R[0] and S[0] will be in R[0] */
+ leaf_shift_right(tb, n, -1);
+#if 0/*preserve list*/
+ preserve_invalidate(tb, tbS0, tb->R[0]);
+#endif
+ reiserfs_invalidate_buffer (tb, tbS0);
+ return 0;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rnum[0] )
+ reiserfs_panic (tb->tb_sb, "PAP-12065: balance_leaf_when_delete: "
+ "bad rnum parameter must be 0 (%d)", tb->rnum[0]);
+#endif
+
+ return 0;
+}
+
+
+static int balance_leaf (struct tree_balance * tb,
+ struct item_head * ih, /* item header of inserted item (this is on little endian) */
+ const char * body, /* body of inserted item or bytes to paste */
+ int flag, /* i - insert, d - delete, c - cut, p - paste
+ (see comment to do_balance) */
+ struct item_head * insert_key, /* in our processing of one level we sometimes determine what
+ must be inserted into the next higher level. This insertion
+ consists of a key or two keys and their corresponding
+ pointers */
+ struct buffer_head ** insert_ptr /* inserted node-ptrs for the next level */
+ )
+{
+ struct buffer_head * tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#if 0/*preserve list*/
+ struct buffer_head * tbF0 = PATH_H_PPARENT (tb->tb_path, 0);
+ int S0_b_item_order = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);
+#endif
+ int item_pos = PATH_LAST_POSITION (tb->tb_path); /* index into the array of item headers in S[0]
+ of the affected item */
+ struct buffer_info bi;
+ struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */
+ int snum[2]; /* number of items that will be placed
+ into S_new (includes partially shifted
+ items) */
+ int sbytes[2]; /* if an item is partially shifted into S_new then
+ if it is a directory item
+ it is the number of entries from the item that are shifted into S_new
+ else
+ it is the number of bytes from the item that are shifted into S_new
+ */
+ int n, i;
+ int ret_val;
+ int pos_in_item;
+ int zeros_num;
+
+#if 0
+ if (tb->insert_size [0] % 4) {
+ reiserfs_panic (tb->tb_sb, "balance_leaf: wrong insert_size %d",
+ tb->insert_size [0]);
+ }
+#endif
+ /* Make balance in case insert_size[0] < 0 */
+ if ( tb->insert_size[0] < 0 )
+ return balance_leaf_when_delete (tb, flag);
+
+ zeros_num = 0;
+ if (flag == M_INSERT && body == 0)
+ zeros_num = le16_to_cpu (ih->ih_item_len);
+
+ pos_in_item = tb->tb_path->pos_in_item;
+ /* for indirect item pos_in_item is measured in unformatted node
+ pointers. Recalculate to bytes */
+ if (flag != M_INSERT && is_indirect_le_ih (B_N_PITEM_HEAD (tbS0, item_pos)))
+ pos_in_item *= UNFM_P_SIZE;
+
+ if ( tb->lnum[0] > 0 ) {
+ /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
+ if ( item_pos < tb->lnum[0] ) {
+ /* new item or it part falls to L[0], shift it too */
+ n = B_NR_ITEMS(tb->L[0]);
+
+ switch (flag) {
+ case M_INSERT: /* insert item into L[0] */
+
+ if ( item_pos == tb->lnum[0] - 1 && tb->lbytes != -1 ) {
+ /* part of new item falls into L[0] */
+ int new_item_len;
+ int version;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!is_direct_le_ih (ih))
+ reiserfs_panic (tb->tb_sb, "PAP-12075: balance_leaf: "
+ "only direct inserted item can be broken. %h", ih);
+#endif
+ ret_val = leaf_shift_left (tb, tb->lnum[0]-1, -1);
+ /* when reading the if conditions preceding the subsequent preserve_shifted
+ lines understand that their goal is to determine if all that we are
+ shifting is the new data being added */
+#if 0/*preserve list*/
+ if (tb->lnum[0] - 1 > 0) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+
+ /* Calculate item length to insert to S[0] */
+ new_item_len = le16_to_cpu (ih->ih_item_len) - tb->lbytes;
+ /* Calculate and check item length to insert to L[0] */
+ ih->ih_item_len -= new_item_len;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( (int)(ih->ih_item_len) <= 0 )
+ reiserfs_panic(tb->tb_sb, "PAP-12080: balance_leaf: "
+ "there is nothing to insert into L[0]: ih_item_len=%d",
+ (int)ih->ih_item_len);
+#endif
+
+ /* Insert new item into L[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[0];
+ bi.bi_parent = tb->FL[0];
+ bi.bi_position = get_left_neighbor_position (tb, 0);
+ leaf_insert_into_buf (&bi, n + item_pos - ret_val, ih, body,
+ zeros_num > ih->ih_item_len ? ih->ih_item_len : zeros_num);
+
+ version = ih_version (ih);
+
+ /* Calculate key component, item length and body to insert into S[0] */
+ set_le_key_k_offset (ih_version (ih), &(ih->ih_key),
+ le_key_k_offset (ih_version (ih), &(ih->ih_key)) + tb->lbytes);
+ ih->ih_item_len = cpu_to_le16 (new_item_len);
+ if ( tb->lbytes > zeros_num ) {
+ body += (tb->lbytes - zeros_num);
+ zeros_num = 0;
+ }
+ else
+ zeros_num -= tb->lbytes;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( (int)(ih->ih_item_len) <= 0 )
+ reiserfs_panic(tb->tb_sb, "PAP-12085: balance_leaf: "
+ "there is nothing to insert into S[0]: ih_item_len=%d",
+ (int)ih->ih_item_len);
+#endif
+ } else {
+ /* new item in whole falls into L[0] */
+ /* Shift lnum[0]-1 items to L[0] */
+ ret_val = leaf_shift_left(tb, tb->lnum[0]-1, tb->lbytes);
+#if 0/*preserve list*/
+ if (tb->lnum[0] > 1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Insert new item into L[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[0];
+ bi.bi_parent = tb->FL[0];
+ bi.bi_position = get_left_neighbor_position (tb, 0);
+ leaf_insert_into_buf (&bi, n + item_pos - ret_val, ih, body, zeros_num);
+#if 0/*preserve list*/
+ if (tb->preserve_mode == PRESERVE_INDIRECT_TO_DIRECT){
+ mark_suspected_recipient (tb->tb_sb, bi.bi_bh);
+ }
+#endif
+ tb->insert_size[0] = 0;
+ zeros_num = 0;
+ }
+ break;
+
+ case M_PASTE: /* append item in L[0] */
+
+ if ( item_pos == tb->lnum[0] - 1 && tb->lbytes != -1 ) {
+ /* we must shift the part of the appended item */
+ if ( is_direntry_le_ih (B_N_PITEM_HEAD (tbS0, item_pos))) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( zeros_num )
+ reiserfs_panic(tb->tb_sb, "PAP-12090: balance_leaf: illegal parameter in case of a directory");
+#endif
+
+ /* directory item */
+ if ( tb->lbytes > pos_in_item ) {
+ /* new directory entry falls into L[0] */
+ struct item_head * pasted;
+ int l_pos_in_item = pos_in_item;
+
+ /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
+ ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ if ( ret_val && ! item_pos ) {
+ pasted = B_N_PITEM_HEAD(tb->L[0],B_NR_ITEMS(tb->L[0])-1);
+ l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes-1);
+ }
+
+ /* Append given directory entry to directory item */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[0];
+ bi.bi_parent = tb->FL[0];
+ bi.bi_position = get_left_neighbor_position (tb, 0);
+ leaf_paste_in_buffer (&bi, n + item_pos - ret_val, l_pos_in_item,
+ tb->insert_size[0], body, zeros_num);
+
+ /* previous string prepared space for pasting new entry, following string pastes this entry */
+
+ /* when we have merge directory item, pos_in_item has been changed too */
+
+ /* paste new directory entry. 1 is entry number */
+ leaf_paste_entries (bi.bi_bh, n + item_pos - ret_val, l_pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]
+ );
+ tb->insert_size[0] = 0;
+ } else {
+ /* new directory item doesn't fall into L[0] */
+ /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
+ leaf_shift_left (tb, tb->lnum[0], tb->lbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ /* Calculate new position to append in item body */
+ pos_in_item -= tb->lbytes;
+ }
+ else {
+ /* regular object */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->lbytes <= 0 )
+ reiserfs_panic(tb->tb_sb, "PAP-12095: balance_leaf: "
+ "there is nothing to shift to L[0]. lbytes=%d",
+ tb->lbytes);
+ if ( pos_in_item != B_N_PITEM_HEAD(tbS0, item_pos)->ih_item_len )
+ reiserfs_panic(tb->tb_sb, "PAP-12100: balance_leaf: "
+ "incorrect position to paste: item_len=%d, pos_in_item=%d",
+ B_N_PITEM_HEAD(tbS0,item_pos)->ih_item_len, pos_in_item);
+#endif
+
+ if ( tb->lbytes >= pos_in_item ) {
+ /* appended item will be in L[0] in whole */
+ int l_n;
+
+ /* this bytes number must be appended to the last item of L[h] */
+ l_n = tb->lbytes - pos_in_item;
+
+ /* Calculate new insert_size[0] */
+ tb->insert_size[0] -= l_n;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->insert_size[0] <= 0 )
+ reiserfs_panic(tb->tb_sb, "PAP-12105: balance_leaf: "
+ "there is nothing to paste into L[0]. insert_size=%d",
+ tb->insert_size[0]);
+#endif
+
+ ret_val = leaf_shift_left(tb,tb->lnum[0],
+ B_N_PITEM_HEAD(tbS0,item_pos)->ih_item_len);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ /* Append to body of item in L[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[0];
+ bi.bi_parent = tb->FL[0];
+ bi.bi_position = get_left_neighbor_position (tb, 0);
+ leaf_paste_in_buffer(
+ &bi,n + item_pos - ret_val,
+ B_N_PITEM_HEAD(tb->L[0],n+item_pos-ret_val)->ih_item_len,
+ l_n,body, zeros_num > l_n ? l_n : zeros_num
+ );
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (l_n && is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0],
+ n + item_pos - ret_val)))
+ reiserfs_panic(tb->tb_sb, "PAP-12110: balance_leaf: "
+ "pasting more than 1 unformatted node pointer into indirect item");
+#endif
+
+ /* 0-th item in S0 can be only of DIRECT type when l_n != 0*/
+ {
+ int version;
+
+ version = le16_to_cpu (B_N_PITEM_HEAD (tbS0, 0)->ih_version);
+ set_le_key_k_offset (version, B_N_PKEY (tbS0, 0),
+ le_key_k_offset (version, B_N_PKEY (tbS0, 0)) + l_n);
+ set_le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0]),
+ le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0])) + l_n);
+ }
+#if 0
+ set_le_key_k_offset (B_PRIGHT_DELIM_KEY(tb->L[0]), le_key_k_offset (B_PRIGHT_DELIM_KEY(tb->L[0])) + l_n);
+#endif
+ /* k_offset (B_N_PKEY (tbS0, 0)) += l_n;
+ k_offset (B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0])) += l_n;
+ k_offset (B_PRIGHT_DELIM_KEY(tb->L[0])) += l_n;*/
+
+#ifdef NO_CONFIG_REISERFS_CHECK /* journal victim */
+ if (!buffer_dirty (tbS0) || !buffer_dirty (tb->CFL[0]) || !buffer_dirty (tb->L[0]))
+ reiserfs_panic(tb->tb_sb, "PAP-12115: balance_leaf: L, CLF and S must be dirty already");
+#endif
+
+ /* Calculate new body, position in item and insert_size[0] */
+ if ( l_n > zeros_num ) {
+ body += (l_n - zeros_num);
+ zeros_num = 0;
+ }
+ else
+ zeros_num -= l_n;
+ pos_in_item = 0;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (comp_short_le_keys (B_N_PKEY(tbS0,0),
+ B_N_PKEY(tb->L[0],B_NR_ITEMS(tb->L[0])-1)) ||
+ !op_is_left_mergeable (B_N_PKEY (tbS0, 0), tbS0->b_size) ||
+ !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0],tb->lkey[0]), tbS0->b_size))
+ reiserfs_panic (tb->tb_sb, "PAP-12120: balance_leaf: "
+ "item must be merge-able with left neighboring item");
+#endif
+
+ }
+ else /* only part of the appended item will be in L[0] */
+ {
+ /* Calculate position in item for append in S[0] */
+ pos_in_item -= tb->lbytes;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( pos_in_item <= 0 )
+ reiserfs_panic(tb->tb_sb, "PAP-12125: balance_leaf: "
+ "no place for paste. pos_in_item=%d", pos_in_item);
+#endif
+
+ /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
+ leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ }
+ }
+ else /* appended item will be in L[0] in whole */
+ {
+ struct item_head * pasted;
+
+#ifdef REISERFS_FSCK
+ if ( ! item_pos && is_left_mergeable (tb->tb_sb, tb->tb_path) == 1 )
+#else
+ if ( ! item_pos && op_is_left_mergeable (B_N_PKEY (tbS0, 0), tbS0->b_size) )
+#endif
+ { /* if we paste into first item of S[0] and it is left mergable */
+ /* then increment pos_in_item by the size of the last item in L[0] */
+ pasted = B_N_PITEM_HEAD(tb->L[0],n-1);
+ if ( is_direntry_le_ih (pasted) )
+ pos_in_item += le16_to_cpu (pasted->u.ih_entry_count);
+ else
+ pos_in_item += le16_to_cpu (pasted->ih_item_len);
+ }
+
+ /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
+ ret_val = leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ /* Append to body of item in L[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[0];
+ bi.bi_parent = tb->FL[0];
+ bi.bi_position = get_left_neighbor_position (tb, 0);
+ leaf_paste_in_buffer (&bi, n + item_pos - ret_val, pos_in_item, tb->insert_size[0],
+ body, zeros_num);
+
+ /* if appended item is directory, paste entry */
+ pasted = B_N_PITEM_HEAD (tb->L[0], n + item_pos - ret_val);
+ if (is_direntry_le_ih (pasted))
+ leaf_paste_entries (
+ bi.bi_bh, n + item_pos - ret_val, pos_in_item, 1,
+ (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+ );
+ /* if appended item is indirect item, put unformatted node into un list */
+ if (is_indirect_le_ih (pasted))
+ set_ih_free_space (pasted, ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ tb->insert_size[0] = 0;
+ zeros_num = 0;
+ }
+ break;
+ default: /* cases d and t */
+ reiserfs_panic (tb->tb_sb, "PAP-12130: balance_leaf: lnum > 0: unexpectable mode: %s(%d)",
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+ }
+ } else {
+ /* new item doesn't fall into L[0] */
+ leaf_shift_left(tb,tb->lnum[0],tb->lbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->L[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ } /* tb->lnum[0] > 0 */
+
+ /* Calculate new item position */
+ item_pos -= ( tb->lnum[0] - (( tb->lbytes != -1 ) ? 1 : 0));
+
+ if ( tb->rnum[0] > 0 ) {
+ /* shift rnum[0] items from S[0] to the right neighbor R[0] */
+ n = B_NR_ITEMS(tbS0);
+ switch ( flag ) {
+
+ case M_INSERT: /* insert item */
+ if ( n - tb->rnum[0] < item_pos )
+ { /* new item or its part falls to R[0] */
+ if ( item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1 )
+ { /* part of new item falls into R[0] */
+ int old_key_comp, old_len, r_zeros_number;
+ const char * r_body;
+ int version;
+ loff_t offset;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( !is_direct_le_ih (ih) )
+ reiserfs_panic(tb->tb_sb, "PAP-12135: balance_leaf: "
+ "only direct item can be split. (%h)", ih);
+#endif
+
+ leaf_shift_right(tb,tb->rnum[0]-1,-1);
+#if 0/*preserve list*/
+ if (tb->rnum[0]>1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+
+ version = le16_to_cpu (ih->ih_version);
+ /* Remember key component and item length */
+ old_key_comp = le_key_k_offset (version, &(ih->ih_key));
+ old_len = le16_to_cpu (ih->ih_item_len);
+
+ /* Calculate key component and item length to insert into R[0] */
+ offset = le_key_k_offset (version, &(ih->ih_key)) + (old_len - tb->rbytes);
+ set_le_key_k_offset (version, &(ih->ih_key), offset);
+ ih->ih_item_len = cpu_to_le16 (tb->rbytes);
+ /* Insert part of the item into R[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->R[0];
+ bi.bi_parent = tb->FR[0];
+ bi.bi_position = get_right_neighbor_position (tb, 0);
+ if ( offset - old_key_comp > zeros_num ) {
+ r_zeros_number = 0;
+ r_body = body + offset - old_key_comp - zeros_num;
+ }
+ else {
+ r_body = body;
+ r_zeros_number = zeros_num - (offset - old_key_comp);
+ zeros_num -= r_zeros_number;
+ }
+
+ leaf_insert_into_buf (&bi, 0, ih, r_body, r_zeros_number);
+
+ /* Replace right delimiting key by first key in R[0] */
+ replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+ /* Calculate key component and item length to insert into S[0] */
+ set_le_key_k_offset (version, &(ih->ih_key), old_key_comp);
+ ih->ih_item_len = cpu_to_le16 (old_len - tb->rbytes);
+
+ tb->insert_size[0] -= tb->rbytes;
+
+ }
+ else /* whole new item falls into R[0] */
+ {
+ /* Shift rnum[0]-1 items to R[0] */
+ ret_val = leaf_shift_right(tb,tb->rnum[0]-1,tb->rbytes);
+#if 0/*preserve list*/
+ if (tb->rnum[0]>1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Insert new item into R[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->R[0];
+ bi.bi_parent = tb->FR[0];
+ bi.bi_position = get_right_neighbor_position (tb, 0);
+ leaf_insert_into_buf (&bi, item_pos - n + tb->rnum[0] - 1, ih, body, zeros_num);
+#if 0/*preserve list*/
+ if (tb->preserve_mode == PRESERVE_INDIRECT_TO_DIRECT){
+ mark_suspected_recipient (tb->tb_sb, bi.bi_bh);
+ }
+#endif
+
+ /* If we insert new item in the begin of R[0] change the right delimiting key */
+ if ( item_pos - n + tb->rnum[0] - 1 == 0 ) {
+ replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+
+#if 0
+ /* update right delimiting key */
+ copy_key(B_PRIGHT_DELIM_KEY(tbS0), &(ih->ih_key));
+ reiserfs_mark_buffer_dirty (tbS0, 0);
+#endif
+ }
+ zeros_num = tb->insert_size[0] = 0;
+ }
+ }
+ else /* new item or part of it doesn't fall into R[0] */
+ {
+ leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ break;
+
+ case M_PASTE: /* append item */
+
+ if ( n - tb->rnum[0] <= item_pos ) /* pasted item or part of it falls to R[0] */
+ {
+ if ( item_pos == n - tb->rnum[0] && tb->rbytes != -1 )
+ { /* we must shift the part of the appended item */
+ if ( is_direntry_le_ih (B_N_PITEM_HEAD(tbS0, item_pos)))
+ { /* we append to directory item */
+ int entry_count;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( zeros_num )
+ reiserfs_panic(tb->tb_sb, "PAP-12145: balance_leaf: illegal parametr in case of a directory");
+#endif
+
+ entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD(tbS0, item_pos));
+ if ( entry_count - tb->rbytes < pos_in_item )
+ /* new directory entry falls into R[0] */
+ {
+ int paste_entry_position;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rbytes - 1 >= entry_count || ! tb->insert_size[0] )
+ reiserfs_panic(tb->tb_sb, "PAP-12150: balance_leaf: "
+ "no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
+ tb->rbytes, entry_count);
+#endif
+
+ /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
+ leaf_shift_right(tb,tb->rnum[0],tb->rbytes - 1);
+#if 0/*preserve list*/
+ /* if we are shifting more than just the new entry */
+ if (tb->rbytes > 1 || tb->rnum[0] > 1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Paste given directory entry to directory item */
+ paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
+ bi.tb = tb;
+ bi.bi_bh = tb->R[0];
+ bi.bi_parent = tb->FR[0];
+ bi.bi_position = get_right_neighbor_position (tb, 0);
+ leaf_paste_in_buffer (&bi, 0, paste_entry_position,
+ tb->insert_size[0],body,zeros_num);
+ /* paste entry */
+ leaf_paste_entries (
+ bi.bi_bh, 0, paste_entry_position, 1, (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]
+ );
+
+ if ( paste_entry_position == 0 ) {
+ /* change delimiting keys */
+ replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+#if 0
+ copy_key(B_PRIGHT_DELIM_KEY(tbS0), B_N_PKEY(tb->R[0], 0));
+ reiserfs_mark_buffer_dirty (tbS0, 0);
+#endif
+ }
+
+ tb->insert_size[0] = 0;
+ pos_in_item++;
+ }
+ else /* new directory entry doesn't fall into R[0] */
+ {
+ leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ }
+ else /* regular object */
+ {
+ int n_shift, n_rem, r_zeros_number;
+ const char * r_body;
+
+ /* Calculate number of bytes which must be shifted from appended item */
+ if ( (n_shift = tb->rbytes - tb->insert_size[0]) < 0 )
+ n_shift = 0;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (pos_in_item != B_N_PITEM_HEAD (tbS0, item_pos)->ih_item_len)
+ reiserfs_panic(tb->tb_sb,"PAP-12155: balance_leaf: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
+ pos_in_item, B_N_PITEM_HEAD(tbS0,item_pos)->ih_item_len);
+#endif
+
+ leaf_shift_right(tb,tb->rnum[0],n_shift);
+#if 0/*preserve list*/
+ /* if we are shifting an old part from the appended item or more than the appended item is going into R */
+ if (n_shift || tb->rnum[0] > 1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Calculate number of bytes which must remain in body after appending to R[0] */
+ if ( (n_rem = tb->insert_size[0] - tb->rbytes) < 0 )
+ n_rem = 0;
+
+ {
+ int version;
+
+ version = ih_version (B_N_PITEM_HEAD (tb->R[0],0));
+ set_le_key_k_offset (version, B_N_PKEY(tb->R[0],0),
+ le_key_k_offset (version, B_N_PKEY(tb->R[0],0)) + n_rem);
+ set_le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0]),
+ le_key_k_offset (version, B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) + n_rem);
+ }
+/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
+ k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
+ do_balance_mark_internal_dirty (tb, tb->CFR[0], 0);
+
+#if 0
+ set_le_key_k_offset (B_PRIGHT_DELIM_KEY(tbS0), le_key_k_offset (B_PRIGHT_DELIM_KEY(tbS0)) + n_rem);
+/* k_offset (B_PRIGHT_DELIM_KEY(tbS0)) += n_rem;*/
+ reiserfs_mark_buffer_dirty (tbS0, 0);
+#endif
+ /* Append part of body into R[0] */
+ bi.tb = tb;
+ bi.bi_bh = tb->R[0];
+ bi.bi_parent = tb->FR[0];
+ bi.bi_position = get_right_neighbor_position (tb, 0);
+ if ( n_rem > zeros_num ) {
+ r_zeros_number = 0;
+ r_body = body + n_rem - zeros_num;
+ }
+ else {
+ r_body = body;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
+ }
+
+ leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem, r_body, r_zeros_number);
+
+ if (is_indirect_le_ih (B_N_PITEM_HEAD(tb->R[0],0))) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (n_rem)
+ reiserfs_panic(tb->tb_sb, "PAP-12160: balance_leaf: paste more than one unformatted node pointer");
+#endif
+
+ set_ih_free_space (B_N_PITEM_HEAD(tb->R[0],0), ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ }
+
+ tb->insert_size[0] = n_rem;
+ if ( ! n_rem )
+ pos_in_item ++;
+ }
+ }
+ else /* pasted item in whole falls into R[0] */
+ {
+ struct item_head * pasted;
+
+ ret_val = leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ /* append item in R[0] */
+ if ( pos_in_item >= 0 ) {
+ bi.tb = tb;
+ bi.bi_bh = tb->R[0];
+ bi.bi_parent = tb->FR[0];
+ bi.bi_position = get_right_neighbor_position (tb, 0);
+ leaf_paste_in_buffer(&bi,item_pos - n + tb->rnum[0], pos_in_item,
+ tb->insert_size[0],body, zeros_num);
+ }
+
+ /* paste new entry, if item is directory item */
+ pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
+ if (is_direntry_le_ih (pasted) && pos_in_item >= 0 ) {
+ leaf_paste_entries (
+ bi.bi_bh, item_pos - n + tb->rnum[0], pos_in_item, 1,
+ (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+ );
+ if ( ! pos_in_item ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( item_pos - n + tb->rnum[0] )
+ reiserfs_panic (tb->tb_sb, "PAP-12165: balance_leaf: "
+ "directory item must be first item of node when pasting is in 0th position");
+#endif
+
+ /* update delimiting keys */
+ replace_key(tb, tb->CFR[0],tb->rkey[0],tb->R[0],0);
+#if 0
+ copy_key(B_PRIGHT_DELIM_KEY(tbS0),B_N_PKEY(tb->R[0], 0));
+ reiserfs_mark_buffer_dirty (tbS0, 0);
+#endif
+ }
+ }
+
+ if (is_indirect_le_ih (pasted))
+ set_ih_free_space (pasted, ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ zeros_num = tb->insert_size[0] = 0;
+ }
+ }
+ else /* new item doesn't fall into R[0] */
+ {
+ leaf_shift_right(tb,tb->rnum[0],tb->rbytes);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, tb->R[0]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ break;
+ default: /* cases d and t */
+ reiserfs_panic (tb->tb_sb, "PAP-12175: balance_leaf: rnum > 0: unexpectable mode: %s(%d)",
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+ }
+
+ } /* tb->rnum[0] > 0 */
+
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->blknum[0] > 3 )
+ reiserfs_panic (tb->tb_sb, "PAP-12180: balance_leaf: blknum can not be %d. It must be <= 3", tb->blknum[0]);
+
+ if ( tb->blknum[0] < 0 )
+ reiserfs_panic (tb->tb_sb, "PAP-12185: balance_leaf: blknum can not be %d. It must be >= 0", tb->blknum[0]);
+#endif
+
+ /* if while adding to a node we discover that it is possible to split
+ it in two, and merge the left part into the left neighbor and the
+ right part into the right neighbor, eliminating the node */
+ if ( tb->blknum[0] == 0 ) { /* node S[0] is empty now */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! tb->lnum[0] || ! tb->rnum[0] )
+ reiserfs_panic(tb->tb_sb, "PAP-12190: balance_leaf: lnum and rnum must not be zero");
+#if 0
+ if (COMP_KEYS (B_N_PKEY(tb->R[0], 0), B_PRIGHT_DELIM_KEY(tbS0)))
+ reiserfs_panic (tb->tb_sb, "vs-12192: balance_leaf: S[0] is being removed from the tree, it has incorrect right delimiting key");
+#endif
+#endif
+
+#if 0
+ /* if insertion was done before 0-th position in R[0], right
+ delimiting key of the tb->L[0]'s and left delimiting key are
+ not set correctly */
+ if (tb->L[0]) {
+ copy_key(B_PRIGHT_DELIM_KEY(tb->L[0]), B_PRIGHT_DELIM_KEY(tbS0));
+ reiserfs_mark_buffer_dirty (tb->L[0], 0);
+ }
+
+ if (tb->CFL[0]) {
+ copy_key (B_N_PDELIM_KEY (tb->CFL[0], tb->lkey[0]), B_PRIGHT_DELIM_KEY(tbS0));
+ reiserfs_mark_buffer_dirty (tb->CFL[0], 0);
+ }
+#endif
+
+ /* if insertion was done before 0-th position in R[0], right
+ delimiting key of the tb->L[0]'s and left delimiting key are
+ not set correctly */
+ if (tb->CFL[0]) {
+ if (!tb->CFR[0])
+ reiserfs_panic (tb->tb_sb, "vs-12195: balance_leaf: CFR not initialized");
+ copy_key (B_N_PDELIM_KEY (tb->CFL[0], tb->lkey[0]), B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0]));
+ do_balance_mark_internal_dirty (tb, tb->CFL[0], 0);
+ }
+
+ reiserfs_invalidate_buffer(tb,tbS0);
+ return 0;
+ }
+
+
+ /* Fill new nodes that appear in place of S[0] */
+
+ /* I am told that this copying is because we need an array to enable
+ the looping code. -Hans */
+ snum[0] = tb->s1num,
+ snum[1] = tb->s2num;
+ sbytes[0] = tb->s1bytes;
+ sbytes[1] = tb->s2bytes;
+ for( i = tb->blknum[0] - 2; i >= 0; i-- ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!snum[i])
+ reiserfs_panic(tb->tb_sb,"PAP-12200: balance_leaf: snum[%d] == %d. Must be > 0", i, snum[i]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* here we shift from S to S_new nodes */
+
+ S_new[i] = get_FEB(tb);
+
+ /* initialized block type and tree level */
+ B_BLK_HEAD(S_new[i])->blk_level = cpu_to_le16 (DISK_LEAF_NODE_LEVEL);
+
+
+ n = B_NR_ITEMS(tbS0);
+
+ switch (flag) {
+ case M_INSERT: /* insert item */
+
+ if ( n - snum[i] < item_pos )
+ { /* new item or it's part falls to first new node S_new[i]*/
+ if ( item_pos == n - snum[i] + 1 && sbytes[i] != -1 )
+ { /* part of new item falls into S_new[i] */
+ int old_key_comp, old_len, r_zeros_number;
+ const char * r_body;
+ int version;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( !is_direct_le_ih(ih) )
+ /* The items which can be inserted are:
+ Stat_data item, direct item, indirect item and directory item which consist of only two entries "." and "..".
+ These items must not be broken except for a direct one. */
+ reiserfs_panic(tb->tb_sb, "PAP-12205: balance_leaf: "
+ "non-direct item can not be broken when inserting");
+#endif
+
+ /* Move snum[i]-1 items from S[0] to S_new[i] */
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i] - 1, -1, S_new[i]);
+#if 0/*preserve list*/
+ if (snum[i] > 1 ) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, S_new[i]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Remember key component and item length */
+ version = ih_version (ih);
+ old_key_comp = le_key_k_offset (version, &(ih->ih_key));
+ old_len = le16_to_cpu (ih->ih_item_len);
+
+ /* Calculate key component and item length to insert into S_new[i] */
+ set_le_key_k_offset (version, &(ih->ih_key),
+ le_key_k_offset (version, &(ih->ih_key)) + (old_len - sbytes[i]));
+
+ ih->ih_item_len = cpu_to_le16 (sbytes[i]);
+
+ /* Insert part of the item into S_new[i] before 0-th item */
+ bi.tb = tb;
+ bi.bi_bh = S_new[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+
+ if ( le_key_k_offset (version, &(ih->ih_key)) - old_key_comp > zeros_num ) {
+ r_zeros_number = 0;
+ r_body = body + (le_key_k_offset (version, &(ih->ih_key)) - old_key_comp) - zeros_num;
+ }
+ else {
+ r_body = body;
+ r_zeros_number = zeros_num - (le_key_k_offset (version, &(ih->ih_key)) - old_key_comp);
+ zeros_num -= r_zeros_number;
+ }
+
+ leaf_insert_into_buf (&bi, 0, ih, r_body, r_zeros_number);
+
+ /* Calculate key component and item length to insert into S[i] */
+ set_le_key_k_offset (version, &(ih->ih_key), old_key_comp);
+ ih->ih_item_len = cpu_to_le16 (old_len - sbytes[i]);
+ tb->insert_size[0] -= sbytes[i];
+ }
+ else /* whole new item falls into S_new[i] */
+ {
+ /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i] - 1, sbytes[i], S_new[i]);
+
+ /* Insert new item into S_new[i] */
+ bi.tb = tb;
+ bi.bi_bh = S_new[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+ leaf_insert_into_buf (&bi, item_pos - n + snum[i] - 1, ih, body, zeros_num);
+#if 0/*preserve list*/
+ if (tb->preserve_mode == PRESERVE_INDIRECT_TO_DIRECT){
+ mark_suspected_recipient (tb->tb_sb, bi.bi_bh);
+ }
+#endif
+
+ zeros_num = tb->insert_size[0] = 0;
+ }
+ }
+
+ else /* new item or it part don't falls into S_new[i] */
+ {
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, S_new[i]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ break;
+
+ case M_PASTE: /* append item */
+
+ if ( n - snum[i] <= item_pos ) /* pasted item or part if it falls to S_new[i] */
+ {
+ if ( item_pos == n - snum[i] && sbytes[i] != -1 )
+ { /* we must shift part of the appended item */
+ struct item_head * aux_ih;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ih )
+ reiserfs_panic (tb->tb_sb, "PAP-12210: balance_leaf: ih must be 0");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if ( is_direntry_le_ih (aux_ih = B_N_PITEM_HEAD(tbS0,item_pos))) {
+ /* we append to directory item */
+
+ int entry_count;
+
+ entry_count = le16_to_cpu (aux_ih->u.ih_entry_count);
+
+ if ( entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count ) {
+ /* new directory entry falls into S_new[i] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! tb->insert_size[0] )
+ reiserfs_panic (tb->tb_sb, "PAP-12215: balance_leaif: insert_size is already 0");
+ if ( sbytes[i] - 1 >= entry_count )
+ reiserfs_panic (tb->tb_sb, "PAP-12220: balance_leaf: "
+ "there are no so much entries (%d), only %d",
+ sbytes[i] - 1, entry_count);
+#endif
+
+ /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i]-1, S_new[i]);
+#if 0/*preserve list*/
+ /* if more than the affected item is shifted, or if more than
+ one entry (from the affected item) is shifted */
+ if (snum[i] > 1 || sbytes[i] > 1) {
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, S_new[i]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+ }
+#endif
+ /* Paste given directory entry to directory item */
+ bi.tb = tb;
+ bi.bi_bh = S_new[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+ leaf_paste_in_buffer (&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
+ tb->insert_size[0], body,zeros_num);
+ /* paste new directory entry */
+ leaf_paste_entries (
+ bi.bi_bh, 0, pos_in_item - entry_count + sbytes[i] - 1,
+ 1, (struct reiserfs_de_head *)body, body + DEH_SIZE,
+ tb->insert_size[0]
+ );
+ tb->insert_size[0] = 0;
+ pos_in_item++;
+ } else { /* new directory entry doesn't fall into S_new[i] */
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+ }
+ }
+ else /* regular object */
+ {
+ int n_shift, n_rem, r_zeros_number;
+ const char * r_body;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( pos_in_item != B_N_PITEM_HEAD(tbS0,item_pos)->ih_item_len ||
+ tb->insert_size[0] <= 0 )
+ reiserfs_panic (tb->tb_sb, "PAP-12225: balance_leaf: item too short or insert_size <= 0");
+#endif
+
+ /* Calculate number of bytes which must be shifted from appended item */
+ n_shift = sbytes[i] - tb->insert_size[0];
+ if ( n_shift < 0 )
+ n_shift = 0;
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
+
+ /* Calculate number of bytes which must remain in body after append to S_new[i] */
+ n_rem = tb->insert_size[0] - sbytes[i];
+ if ( n_rem < 0 )
+ n_rem = 0;
+ /* Append part of body into S_new[0] */
+ bi.tb = tb;
+ bi.bi_bh = S_new[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+
+ if ( n_rem > zeros_num ) {
+ r_zeros_number = 0;
+ r_body = body + n_rem - zeros_num;
+ }
+ else {
+ r_body = body;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
+ }
+
+ leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0]-n_rem, r_body,r_zeros_number);
+ {
+ struct item_head * tmp;
+
+ tmp = B_N_PITEM_HEAD(S_new[i],0);
+ if (is_indirect_le_ih (tmp)) {
+ if (n_rem)
+ reiserfs_panic (tb->tb_sb, "PAP-12230: balance_leaf: invalid action with indirect item");
+ set_ih_free_space (tmp, ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ }
+ set_le_key_k_offset (ih_version (tmp), &tmp->ih_key,
+ le_key_k_offset (ih_version (tmp), &tmp->ih_key) + n_rem);
+ }
+
+ tb->insert_size[0] = n_rem;
+ if ( ! n_rem )
+ pos_in_item++;
+ }
+ }
+ else
+ /* item falls wholly into S_new[i] */
+ {
+ int ret_val;
+ struct item_head * pasted;
+
+#ifdef CONFIG_REISERFS_CHECK
+ struct item_head * ih = B_N_PITEM_HEAD(tbS0,item_pos);
+
+ if ( ! is_direntry_le_ih(ih) && (pos_in_item != ih->ih_item_len ||
+ tb->insert_size[0] <= 0) )
+ reiserfs_panic (tb->tb_sb, "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ ret_val = leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+#if 0/*preserve list*/
+ /* we must preserve that which we are pasting onto the end of and shifting */
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, S_new[i]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ret_val )
+ reiserfs_panic (tb->tb_sb, "PAP-12240: balance_leaf: "
+ "unexpected value returned by leaf_move_items (%d)",
+ ret_val);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* paste into item */
+ bi.tb = tb;
+ bi.bi_bh = S_new[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+ leaf_paste_in_buffer(&bi, item_pos - n + snum[i], pos_in_item, tb->insert_size[0], body, zeros_num);
+
+ pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
+ if (is_direntry_le_ih (pasted))
+ {
+ leaf_paste_entries (
+ bi.bi_bh, item_pos - n + snum[i], pos_in_item, 1,
+ (struct reiserfs_de_head *)body, body + DEH_SIZE, tb->insert_size[0]
+ );
+ }
+
+ /* if we paste to indirect item update ih_free_space */
+ if (is_indirect_le_ih (pasted))
+ set_ih_free_space (pasted, ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ zeros_num = tb->insert_size[0] = 0;
+ }
+ }
+
+ else /* pasted item doesn't fall into S_new[i] */
+ {
+ leaf_move_items (LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
+#if 0/*preserve list*/
+ preserve_shifted(tb, &(PATH_PLAST_BUFFER (tb->tb_path)), tbF0, S0_b_item_order, S_new[i]);
+ tbS0 = PATH_PLAST_BUFFER (tb->tb_path);
+#endif
+ }
+ break;
+ default: /* cases d and t */
+ reiserfs_panic (tb->tb_sb, "PAP-12245: balance_leaf: blknum > 2: unexpectable mode: %s(%d)",
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+ }
+
+ memcpy (insert_key + i,B_N_PKEY(S_new[i],0),KEY_SIZE);
+ insert_ptr[i] = S_new[i];
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (atomic_read (&(S_new[i]->b_count)) != 1) {
+ if (atomic_read(&(S_new[i]->b_count)) != 2 ||
+ !(buffer_journaled(S_new[i]) || buffer_journal_dirty(S_new[i]))) {
+ reiserfs_panic (tb->tb_sb, "PAP-12247: balance_leaf: S_new[%d] : (%b)\n", i, S_new[i]);
+ }
+ }
+#endif
+
+#if 0
+ /* update right_delimiting_key fields */
+ copy_key (B_PRIGHT_DELIM_KEY (S_new[i]), B_PRIGHT_DELIM_KEY (tbS0));
+ copy_key (B_PRIGHT_DELIM_KEY (tbS0), B_N_PKEY (S_new[i], 0));
+ reiserfs_mark_buffer_dirty (tbS0, 0);
+#endif
+
+ }
+
+ /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the
+ affected item which remains in S */
+ if ( 0 <= item_pos && item_pos < tb->s0num )
+ { /* if we must insert or append into buffer S[0] */
+
+ switch (flag)
+ {
+ case M_INSERT: /* insert item into S[0] */
+ bi.tb = tb;
+ bi.bi_bh = tbS0;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+ leaf_insert_into_buf (&bi, item_pos, ih, body, zeros_num);
+#if 0/*preserve list*/
+ if (tb->preserve_mode == PRESERVE_INDIRECT_TO_DIRECT){
+ mark_suspected_recipient (tb->tb_sb, bi.bi_bh);
+ }
+#endif
+
+ /* If we insert the first key change the delimiting key */
+ if( item_pos == 0 ) {
+ if (tb->CFL[0]) /* can be 0 in reiserfsck */
+ replace_key(tb, tb->CFL[0], tb->lkey[0],tbS0,0);
+
+#if 0 /* right delim key support */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! tb->CFL[0] || ! tb->L[0] || (B_NR_ITEMS (tbS0) > 1 &&
+ COMP_KEYS(B_PRIGHT_DELIM_KEY(tb->L[0]), B_N_PKEY(tbS0, 1))) )
+ reiserfs_panic(tb->tb_sb, "PAP-12250: balance_leaf: invalid right delimiting key");
+ if (!buffer_dirty (tb->L[0]) && !(buffer_journaled(tb->L[0]) ||
+ buffer_journal_dirty(tb->L[0])))
+ reiserfs_panic (tb->tb_sb, "PAP-12255: balance_leaf: tb->L[0] must be dirty");
+#endif
+ if (tb->L[0]) /* can be 0 in reiserfsck */
+ copy_key (B_PRIGHT_DELIM_KEY (tb->L[0]), &(ih->ih_key));
+#endif /* right delim key support */
+ }
+ break;
+
+ case M_PASTE: { /* append item in S[0] */
+ struct item_head * pasted;
+
+ pasted = B_N_PITEM_HEAD (tbS0, item_pos);
+ /* when directory, may be new entry already pasted */
+ if (is_direntry_le_ih (pasted)) {
+ if ( pos_in_item >= 0 && pos_in_item <= le16_to_cpu (pasted->u.ih_entry_count) ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! tb->insert_size[0] )
+ reiserfs_panic (tb->tb_sb, "PAP-12260: balance_leaf: insert_size is 0 already");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* prepare space */
+ bi.tb = tb;
+ bi.bi_bh = tbS0;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+ leaf_paste_in_buffer(&bi, item_pos, pos_in_item, tb->insert_size[0], body, zeros_num);
+
+
+#ifdef CONFIG_REISERFS_CHECK
+#if 0
+ if ( ! item_pos && ! pos_in_item && (! tb->L[0] || COMP_KEYS(B_PRIGHT_DELIM_KEY(tb->L[0]),
+ B_N_PKEY(tbS0, 0))) )
+ reiserfs_panic(tb->tb_sb, "PAP-12265: balance_leaf: invalid right delimiting key");
+#endif
+#endif
+
+ /* paste entry */
+ leaf_paste_entries (
+ bi.bi_bh, item_pos, pos_in_item, 1, (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]
+ );
+ if ( ! item_pos && ! pos_in_item ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!tb->CFL[0] || !tb->L[0])
+ reiserfs_panic (tb->tb_sb, "PAP-12270: balance_leaf: CFL[0]/L[0] must be specified");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if (tb->CFL[0]) {
+ replace_key(tb, tb->CFL[0], tb->lkey[0],tbS0,0);
+
+#if 0
+ /* update right delimiting key */
+ copy_key (B_PRIGHT_DELIM_KEY (tb->L[0]), B_N_PKEY(tbS0, 0));
+ /* probably not needed as something has been shifted to tb->L[0] already */
+ reiserfs_mark_buffer_dirty (tb->L[0], 0);
+#endif
+ }
+ }
+ tb->insert_size[0] = 0;
+ }
+ } else { /* regular object */
+ if ( pos_in_item == pasted->ih_item_len ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->insert_size[0] <= 0 )
+ reiserfs_panic (tb->tb_sb,
+ "PAP-12275: balance_leaf: insert size must not be %d", tb->insert_size[0]);
+#endif /* CONFIG_REISERFS_CHECK */
+ bi.tb = tb;
+ bi.bi_bh = tbS0;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, 1);
+ leaf_paste_in_buffer (&bi, item_pos, pos_in_item, tb->insert_size[0], body, zeros_num);
+
+ if (is_indirect_le_ih (pasted)) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->insert_size[0] != UNFM_P_SIZE )
+ reiserfs_panic (tb->tb_sb,
+ "PAP-12280: balance_leaf: insert_size for indirect item must be %d, not %d",
+ UNFM_P_SIZE, tb->insert_size[0]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ set_ih_free_space (pasted, ((struct unfm_nodeinfo*)body)->unfm_freespace);
+ }
+ tb->insert_size[0] = 0;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ else {
+ if ( tb->insert_size[0] ) {
+ print_cur_tb ("12285");
+ reiserfs_panic (tb->tb_sb, "PAP-12285: balance_leaf: insert_size must be 0 (%d)", tb->insert_size[0]);
+ }
+ }
+#endif /* CONFIG_REISERFS_CHECK */
+
+ }
+ } /* case M_PASTE: */
+ }
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( flag == M_PASTE && tb->insert_size[0] ) {
+ print_cur_tb ("12290");
+ reiserfs_panic (tb->tb_sb, "PAP-12290: balance_leaf: insert_size is still not 0 (%d)", tb->insert_size[0]);
+ }
+#endif /* CONFIG_REISERFS_CHECK */
+
+ return 0;
+} /* Leaf level of the tree is balanced (end of balance_leaf) */
+
+
+
+/* Make empty node: reset the block header of bi->bi_bh so the node
+   holds zero items and its free space equals the full payload size;
+   if a parent buffer is given, also zero the size recorded in the
+   parent's disk_child entry for this node. */
+void make_empty_node (struct buffer_info * bi)
+{
+ struct block_head * blkh;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (bi->bi_bh == NULL)
+ reiserfs_panic (0, "PAP-12295: make_empty_node: pointer to the buffer is NULL");
+#endif
+
+ /* nr_item = 0, free space = whole usable area of the block */
+ (blkh = B_BLK_HEAD(bi->bi_bh))->blk_nr_item = cpu_to_le16 (0);
+ blkh->blk_free_space = cpu_to_le16 (MAX_CHILD_SIZE(bi->bi_bh));
+
+ if (bi->bi_parent)
+ B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size = 0;
+}
+
+
+/* Get first empty buffer: take the first buffer available in
+   tb->FEB[], format it as an empty node (no parent yet), mark it
+   uptodate, move it from FEB[] to tb->used[], and return it.
+   Panics if the free-empty-buffer list is exhausted. */
+struct buffer_head * get_FEB (struct tree_balance * tb)
+{
+ int i;
+ struct buffer_head * first_b;
+ struct buffer_info bi;
+
+ /* find the first occupied slot in the FEB list */
+ for (i = 0; i < MAX_FEB_SIZE; i ++)
+ if (tb->FEB[i] != 0)
+ break;
+
+ if (i == MAX_FEB_SIZE)
+ reiserfs_panic(tb->tb_sb, "vs-12300: get_FEB: FEB list is empty");
+
+ /* parent/position are 0: the node is not linked into the tree yet */
+ bi.tb = tb;
+ bi.bi_bh = first_b = tb->FEB[i];
+ bi.bi_parent = 0;
+ bi.bi_position = 0;
+ make_empty_node (&bi);
+ set_bit(BH_Uptodate, &first_b->b_state);
+ tb->FEB[i] = 0;
+ tb->used[i] = first_b;
+
+#ifdef REISERFS_FSCK
+ mark_block_formatted (first_b->b_blocknr);
+#endif
+
+ return(first_b);
+}
+
+
+/* This is now used because reiserfs_free_block has to be able to
+** schedule.
+*/
+/* Stash bh in the first free slot of tb->thrown[] so that its disk
+   block can be freed later by free_thrown(), once scheduling is
+   allowed again.  Takes an extra b_count reference on bh.  If all
+   slots are occupied the buffer is only warned about (its block will
+   not be freed). */
+static void store_thrown (struct tree_balance * tb, struct buffer_head * bh)
+{
+ int i;
+
+ if (buffer_dirty (bh))
+ printk ("store_thrown deals with dirty buffer\n");
+ for (i = 0; i < sizeof (tb->thrown)/sizeof (tb->thrown[0]); i ++)
+ if (!tb->thrown[i]) {
+ tb->thrown[i] = bh;
+ atomic_inc(&bh->b_count) ; /* decremented in free_thrown */
+ return;
+ }
+ reiserfs_warning ("store_thrown: too many thrown buffers\n");
+}
+
+/* Release every buffer queued by store_thrown() and free its disk
+   block via reiserfs_free_block.  Called from do_balance_completed()
+   after the balance, when it is safe to schedule. */
+static void free_thrown(struct tree_balance *tb) {
+ int i ;
+ unsigned long blocknr ;
+ for (i = 0; i < sizeof (tb->thrown)/sizeof (tb->thrown[0]); i++) {
+ if (tb->thrown[i]) {
+ blocknr = tb->thrown[i]->b_blocknr ;
+ if (buffer_dirty (tb->thrown[i]))
+ printk ("free_thrown deals with dirty buffer %ld\n", blocknr);
+ brelse(tb->thrown[i]) ; /* incremented in store_thrown */
+ reiserfs_free_block (tb->transaction_handle, blocknr);
+ }
+ }
+}
+
+/* Take the node in bh out of the tree: stamp its block header with
+   FREE_LEVEL and a zero item count, clean the buffer, and queue the
+   block for freeing via store_thrown (freeing directly here is not
+   schedule-safe, see comment below). */
+void reiserfs_invalidate_buffer (struct tree_balance * tb, struct buffer_head * bh)
+{
+ B_BLK_HEAD (bh)->blk_level = cpu_to_le16 (FREE_LEVEL)/*0*/;
+ B_BLK_HEAD (bh)->blk_nr_item = cpu_to_le16 (0);
+ mark_buffer_clean (bh);
+ /* reiserfs_free_block is no longer schedule safe
+ reiserfs_free_block (tb->transaction_handle, tb->tb_sb, bh->b_blocknr);
+ */
+
+ store_thrown (tb, bh);
+#if 0
+#ifdef REISERFS_FSCK
+ {
+ struct buffer_head * to_be_forgotten;
+
+ to_be_forgotten = find_buffer (bh->b_dev, bh->b_blocknr, bh->b_size);
+ if (to_be_forgotten) {
+ to_be_forgotten->b_count ++;
+ bforget (to_be_forgotten);
+ }
+ unmark_block_formatted (bh->b_blocknr);
+ }
+#endif
+#endif
+}
+
+/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.
+   dest must be an internal (keys-level) node; src may be either a
+   leaf (the key is copied out of the item head) or an internal node
+   (the delimiting key is copied).  dest is marked dirty afterwards.
+   NOTE(review): the vs-12305 message misspells "source" and the
+   vs-12310 message says "dest must be leaf" although the check
+   requires a keys-level node — messages left as-is to keep the patch
+   intact. */
+void replace_key (struct tree_balance * tb, struct buffer_head * dest, int n_dest,
+ struct buffer_head * src, int n_src)
+{
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (dest == NULL || src == NULL)
+ reiserfs_panic (0, "vs-12305: replace_key: sourse or destination buffer is 0 (src=%p, dest=%p)", src, dest);
+
+ if ( ! B_IS_KEYS_LEVEL (dest) )
+ reiserfs_panic (0, "vs-12310: replace_key: invalid level (%z) for destination buffer. dest must be leaf",
+ dest);
+
+ if (n_dest < 0 || n_src < 0)
+ reiserfs_panic (0, "vs-12315: replace_key: src(%d) or dest(%d) key number less than 0", n_src, n_dest);
+
+ if (n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src))
+ reiserfs_panic (0, "vs-12320: replace_key: src(%d(%d)) or dest(%d(%d)) key number is too big",
+ n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if (B_IS_ITEMS_LEVEL (src))
+ /* source buffer contains leaf node */
+ memcpy (B_N_PDELIM_KEY(dest,n_dest), B_N_PITEM_HEAD(src,n_src), KEY_SIZE);
+ else
+ memcpy (B_N_PDELIM_KEY(dest,n_dest), B_N_PDELIM_KEY(src,n_src), KEY_SIZE);
+
+ do_balance_mark_internal_dirty (tb, dest, 0);
+}
+
+
+/* Return the position, within the common parent, of the left
+   neighbor of S[h]: one less than S[h]'s own position, or the last
+   child of FL[h] when S[h] is its parent's first child. */
+int get_left_neighbor_position (
+ struct tree_balance * tb,
+ int h
+ )
+{
+ int Sh_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (PATH_H_PPARENT (tb->tb_path, h) == 0 || tb->FL[h] == 0)
+ reiserfs_panic (tb->tb_sb, "vs-12325: get_left_neighbor_position: FL[%d](%p) or F[%d](%p) does not exist",
+ h, tb->FL[h], h, PATH_H_PPARENT (tb->tb_path, h));
+#endif
+
+ if (Sh_position == 0)
+ return B_NR_ITEMS (tb->FL[h]);
+ else
+ return Sh_position - 1;
+}
+
+
+/* Return the position, within the common parent, of the right
+   neighbor of S[h]: one more than S[h]'s own position, or 0 (first
+   child of FR[h]) when S[h] is its parent's last child. */
+int get_right_neighbor_position (struct tree_balance * tb, int h)
+{
+ int Sh_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (PATH_H_PPARENT (tb->tb_path, h) == 0 || tb->FR[h] == 0)
+ reiserfs_panic (tb->tb_sb, "vs-12330: get_right_neighbor_position: F[%d](%p) or FR[%d](%p) does not exist",
+ h, PATH_H_PPARENT (tb->tb_path, h), h, tb->FR[h]);
+#endif
+
+ if (Sh_position == B_NR_ITEMS (PATH_H_PPARENT (tb->tb_path, h)))
+ return 0;
+ else
+ return Sh_position + 1;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+int is_reusable (struct super_block * s, unsigned long block, int bit_value);
+/* Debug-only (CONFIG_REISERFS_CHECK) sanity check of an internal
+   node: the buffer must exist, be dirty or journaled, and every
+   child pointer must reference a reusable block; otherwise panic,
+   printing the current tree_balance under label 'mes'. */
+static void check_internal_node (struct super_block * s, struct buffer_head * bh, char * mes)
+{
+ struct disk_child * dc;
+ int i;
+
+ if (!bh)
+ reiserfs_panic (s, "PAP-12336: check_internal_node: bh == 0");
+
+ /* nodes no longer in the tree are skipped silently */
+ if (!bh || !B_IS_IN_TREE (bh))
+ return;
+
+ if (!buffer_dirty (bh) &&
+ !(buffer_journaled(bh) || buffer_journal_dirty(bh))) {
+ reiserfs_panic (s, "PAP-12337: check_internal_node: buffer (%b) must be dirty", bh);
+ }
+
+ dc = B_N_CHILD (bh, 0);
+
+ /* an internal node with n keys has n+1 children, hence <= */
+ for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) {
+ if (!is_reusable (s, dc->dc_block_number, 1) ) {
+ print_cur_tb (mes);
+ reiserfs_panic (s, "PAP-12338: check_internal_node: invalid child pointer %y in %b", dc, bh);
+ }
+ }
+}
+
+
+/* Return 1 (after warning) if bh is locked or no longer part of the
+   tree — either means it is unsafe to balance with — else 0.
+   'which' names the buffer (e.g. "L[0]") for the warning text. */
+static int locked_or_not_in_tree (struct buffer_head * bh, char * which)
+{
+ if ( buffer_locked (bh) || !B_IS_IN_TREE (bh) ) {
+ reiserfs_warning ("vs-12339: locked_or_not_in_tree: %s (%b)\n", which, bh);
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Debug-only pre-balance check: verifies no other balance is in
+   flight (cur_tb must be NULL) and that every buffer do_balance will
+   touch (S[0], and the left/right neighbor triples when shifts are
+   planned) is unlocked and still in the tree.  Returns non-zero if
+   any buffer fails the check. */
+static int check_before_balancing (struct tree_balance * tb)
+{
+ int retval = 0;
+
+ if ( cur_tb ) {
+ reiserfs_panic (tb->tb_sb, "vs-12335: check_before_balancing: "
+ "suspect that schedule occurred based on cur_tb not being null at this point in code. "
+ "do_balance cannot properly handle schedule occuring while it runs.");
+ }
+
+ /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
+ prepped all of these for us). */
+ if ( tb->lnum[0] ) {
+ retval |= locked_or_not_in_tree (tb->L[0], "L[0]");
+ retval |= locked_or_not_in_tree (tb->FL[0], "FL[0]");
+ retval |= locked_or_not_in_tree (tb->CFL[0], "CFL[0]");
+ check_leaf (tb->L[0]);
+ }
+ if ( tb->rnum[0] ) {
+ retval |= locked_or_not_in_tree (tb->R[0], "R[0]");
+ retval |= locked_or_not_in_tree (tb->FR[0], "FR[0]");
+ retval |= locked_or_not_in_tree (tb->CFR[0], "CFR[0]");
+ check_leaf (tb->R[0]);
+ }
+ retval |= locked_or_not_in_tree (PATH_PLAST_BUFFER (tb->tb_path), "S[0]");
+ check_leaf (PATH_PLAST_BUFFER (tb->tb_path));
+
+ return retval;
+}
+
+
+/* Debug-only post-balance consistency check at the leaf level: for
+   each of L[0], R[0] and S[0] that took part, the free space recorded
+   in the node must agree with the child size recorded in its parent
+   (free = MAX_CHILD_SIZE - dc_size); panic otherwise. */
+void check_after_balance_leaf (struct tree_balance * tb)
+{
+ if (tb->lnum[0]) {
+ if (B_FREE_SPACE (tb->L[0]) !=
+ MAX_CHILD_SIZE (tb->L[0]) - B_N_CHILD (tb->FL[0], get_left_neighbor_position (tb, 0))->dc_size) {
+ print_cur_tb ("12221");
+ reiserfs_panic (tb->tb_sb, "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
+ }
+ }
+ if (tb->rnum[0]) {
+ if (B_FREE_SPACE (tb->R[0]) !=
+ MAX_CHILD_SIZE (tb->R[0]) - B_N_CHILD (tb->FR[0], get_right_neighbor_position (tb, 0))->dc_size) {
+ print_cur_tb ("12222");
+ reiserfs_panic (tb->tb_sb, "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
+ }
+ }
+ if (PATH_H_PBUFFER(tb->tb_path,1) && (B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) !=
+ (MAX_CHILD_SIZE (PATH_H_PBUFFER(tb->tb_path,0)) -
+ B_N_CHILD (PATH_H_PBUFFER(tb->tb_path,1),
+ PATH_H_POSITION (tb->tb_path, 1))->dc_size))) {
+ print_cur_tb ("12223");
+ reiserfs_panic (tb->tb_sb, "PAP-12365: check_after_balance_leaf: S is incorrect");
+ }
+}
+
+
+/* Debug-only: run check_leaf on the three leaf-level buffers involved
+   in the balance — left neighbor, right neighbor and S[0]. */
+void check_leaf_level (struct tree_balance * tb)
+{
+ check_leaf (tb->L[0]);
+ check_leaf (tb->R[0]);
+ check_leaf (PATH_PLAST_BUFFER (tb->tb_path));
+}
+
+/* Debug-only: walk every internal level that took part in balancing
+   (insert_size[h] != 0) and sanity-check the path buffer plus any
+   left/right neighbor that was shifted into. */
+void check_internal_levels (struct tree_balance * tb)
+{
+ int h;
+
+ /* check all internal nodes */
+ for (h = 1; tb->insert_size[h]; h ++) {
+ check_internal_node (tb->tb_sb, PATH_H_PBUFFER (tb->tb_path, h), "BAD BUFFER ON PATH");
+ if (tb->lnum[h])
+ check_internal_node (tb->tb_sb, tb->L[h], "BAD L");
+ if (tb->rnum[h])
+ check_internal_node (tb->tb_sb, tb->R[h], "BAD R");
+ }
+
+}
+
+#endif
+
+
+
+
+
+
+/* Now we have all of the buffers that must be used in balancing of
+ the tree. We rely on the assumption that schedule() will not occur
+ while do_balance works. ( Only interrupt handlers are acceptable.)
+ We balance the tree according to the analysis made before this,
+ using buffers already obtained. For SMP support it will someday be
+ necessary to add ordered locking of tb. */
+
+/* Some interesting rules of balancing:
+
+ we delete a maximum of two nodes per level per balancing: we never
+ delete R, when we delete two of three nodes L, S, R then we move
+ them into R.
+
+ we only delete L if we are deleting two nodes, if we delete only
+ one node we delete S
+
+ if we shift leaves then we shift as much as we can: this is a
+ deliberate policy of extremism in node packing which results in
+ higher average utilization after repeated random balance operations
+ at the cost of more memory copies and more balancing as a result of
+ small insertions to full nodes.
+
+ if we shift internal nodes we try to evenly balance the node
+ utilization, with consequent less balancing at the cost of lower
+ utilization.
+
+ one could argue that the policy for directories in leaves should be
+ that of internal nodes, but we will wait until another day to
+ evaluate this.... It would be nice to someday measure and prove
+ these assumptions as to what is optimal....
+
+*/
+
+/* Pre-balance hook.  Under CONFIG_REISERFS_CHECK it verifies all
+   involved buffers are usable (check_before_balancing) and records
+   the running balance in the global cur_tb so a stray schedule can
+   be detected; otherwise it is a no-op. */
+static inline void do_balance_starts (struct tree_balance *tb)
+{
+ /* use print_cur_tb() to see initial state of struct
+ tree_balance */
+
+ /* store_print_tb (tb); */
+
+#ifdef CONFIG_REISERFS_CHECK
+
+ /* do not delete, just comment it out */
+/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
+ "check");*/
+
+ if (check_before_balancing (tb))
+ reiserfs_panic (tb->tb_sb, "PAP-12340: do_balance: locked buffers in TB");
+
+#ifndef __KERNEL__
+ /* user-space (fsck) build only: no one else may hold references */
+ if ( atomic_read(&(PATH_PLAST_BUFFER(tb->tb_path)->b_count)) > 1 || (tb->L[0] && atomic_read(&(tb->L[0]->b_count)) > 1) ||
+ (tb->R[0] && atomic_read(&(tb->R[0]->b_count)) > 1) ) {
+ print_cur_tb ("first three parameters are invalid");
+ reiserfs_panic (tb->tb_sb, "PAP-12345: do_balance: counter too big");
+ }
+#endif /* !__KERNEL__ */
+ cur_tb = tb;
+
+#endif /* CONFIG_REISERFS_CHECK */
+}
+
+
+/* Post-balance hook: run the debug checks and clear cur_tb, bump the
+   per-superblock balance counter, release the buffers pinned for the
+   balance (unfix_nodes), then free the blocks of all nodes discarded
+   during the balance (free_thrown — deferred because
+   reiserfs_free_block may schedule). */
+static inline void do_balance_completed (struct tree_balance * tb)
+{
+
+#ifdef CONFIG_REISERFS_CHECK
+ check_leaf_level (tb);
+ check_internal_levels (tb);
+ cur_tb = NULL;
+#endif
+
+ /* reiserfs_free_block is no longer schedule safe. So, we need to
+ ** put the buffers we want freed on the thrown list during do_balance,
+ ** and then free them now
+ */
+
+ tb->tb_sb->u.reiserfs_sb.s_do_balance ++;
+
+
+ /* release all nodes hold to perform the balancing */
+ unfix_nodes(tb);
+
+ free_thrown(tb) ;
+}
+
+
+
+
+
+/* Top-level balancing entry point.  Applies the operation described
+   by 'flag' (insert/delete/cut/paste, plus M_INTERNAL in fsck builds)
+   at the leaf level via balance_leaf, then propagates any required
+   key/pointer insertions up the internal levels via balance_internal.
+   Panics if the fs generation changed since fix_nodes planned this
+   balance; if insert_size[0] is 0 there is nothing to do and the
+   pinned nodes are simply released. */
+void do_balance (struct tree_balance * tb, /* tree_balance structure */
+ struct item_head * ih, /* item header of inserted item */
+ const char * body, /* body of inserted item or bytes to paste */
+ int flag) /* i - insert, d - delete
+ c - cut, p - paste
+
+ Cut means delete part of an item
+ (includes removing an entry from a
+ directory).
+
+ Delete means delete whole item.
+
+ Insert means add a new item into the
+ tree.
+
+ Paste means to append to the end of an
+ existing file or to insert a directory
+ entry. */
+{
+ int child_pos, /* position of a child node in its parent */
+ h; /* level of the tree being processed */
+ struct item_head insert_key[2]; /* in our processing of one level
+ we sometimes determine what
+ must be inserted into the next
+ higher level. This insertion
+ consists of a key or two keys
+ and their corresponding
+ pointers */
+ struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next
+ level */
+
+ tb->tb_mode = flag;
+ tb->need_balance_dirty = 0;
+
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+ reiserfs_panic(tb->tb_sb, "clm-6000: do_balance, fs generation has changed\n") ;
+ }
+ /* if we have no real work to do */
+ if ( ! tb->insert_size[0] ) {
+ reiserfs_warning ("PAP-12350: do_balance: insert_size == 0, mode == %c",
+ flag);
+ unfix_nodes(tb);
+ return;
+ }
+
+ /* invalidate concurrently planned balances */
+ atomic_inc (&(fs_generation (tb->tb_sb)));
+ do_balance_starts (tb);
+
+#ifdef REISERFS_FSCK
+ if (flag == M_INTERNAL) {
+ insert_ptr[0] = (struct buffer_head *)body;
+ /* we must prepare insert_key */
+
+ if (PATH_H_B_ITEM_ORDER (tb->tb_path, 0)/*LAST_POSITION (tb->tb_path)*//*item_pos*/ == -1) {
+ /* get delimiting key from buffer in tree */
+ copy_key (&insert_key[0].ih_key, B_N_PKEY (PATH_PLAST_BUFFER (tb->tb_path), 0));
+ /*insert_ptr[0]->b_item_order = 0;*/
+ } else {
+ /* get delimiting key from new buffer */
+ copy_key (&insert_key[0].ih_key, B_N_PKEY((struct buffer_head *)body,0));
+ /*insert_ptr[0]->b_item_order = item_pos;*/
+ }
+
+ /* and insert_ptr instead of balance_leaf */
+ child_pos = PATH_H_B_ITEM_ORDER (tb->tb_path, 0)/*item_pos*/;
+ } else
+#endif
+
+ /* balance leaf returns 0 except if combining L R and S into
+ one node. see balance_internal() for explanation of this
+ line of code.*/
+ child_pos = PATH_H_B_ITEM_ORDER (tb->tb_path, 0) +
+ balance_leaf (tb, ih, body, flag, insert_key, insert_ptr);
+
+#ifdef CONFIG_REISERFS_CHECK
+ check_after_balance_leaf (tb);
+#endif
+
+ /* Balance internal level of the tree. */
+ for ( h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++ )
+ child_pos = balance_internal (tb, h, child_pos, insert_key, insert_ptr);
+
+
+ do_balance_completed (tb);
+
+}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
new file mode 100644
index 000000000..c7a62c0e7
--- /dev/null
+++ b/fs/reiserfs/file.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+/*
+** We pack the tails of files on file close, not at the time they are written.
+** This implies an unnecessary copy of the tail and an unnecessary indirect item
+** insertion/balancing, for files that are written in one write.
+** It avoids unnecessary tail packings (balances) for files that are written in
+** multiple writes and are small enough to have tails.
+**
+** file_release is called by the VFS layer when the file is closed. If
+** this is the last open file descriptor, and the file
+** small enough to have a tail, and the tail is currently in an
+** unformatted node, the tail is converted back into a direct item.
+**
+** We use reiserfs_truncate_file to pack the tail, since it already has
+** all the conditions coded.
+*/
+/* VFS release hook for regular files (see the block comment above):
+   on last close, discard preallocated blocks and, when the file has a
+   tail living in an unformatted node, convert it back to a direct
+   item by truncating to the current size.  Returns 0. */
+static int reiserfs_file_release (struct inode * inode, struct file * filp)
+{
+
+ struct reiserfs_transaction_handle th ;
+ int windex ;
+
+ if (!S_ISREG (inode->i_mode))
+ BUG ();
+
+ /* fast out for when nothing needs to be done */
+ if ((atomic_read(&inode->i_count) > 1 ||
+ !inode->u.reiserfs_i.i_pack_on_close ||
+ !tail_has_to_be_packed(inode)) &&
+ inode->u.reiserfs_i.i_prealloc_count <= 0) {
+ return 0;
+ }
+
+ lock_kernel() ;
+ down (&inode->i_sem);
+ journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
+
+#ifdef REISERFS_PREALLOCATE
+ reiserfs_discard_prealloc (&th, inode);
+#endif
+ journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
+
+ if (atomic_read(&inode->i_count) <= 1 &&
+ inode->u.reiserfs_i.i_pack_on_close &&
+ tail_has_to_be_packed (inode)) {
+ /* if regular file is released by last holder and it has been
+ appended (we append by unformatted node only) or its direct
+ item(s) had to be converted, then it may have to be
+ indirect2direct converted */
+ windex = push_journal_writer("file_release") ;
+ /* truncate to i_size (arg 0 = do not update timestamps), which
+ packs the tail as a side effect */
+ reiserfs_truncate_file(inode, 0) ;
+ pop_journal_writer(windex) ;
+ }
+ up (&inode->i_sem);
+ unlock_kernel() ;
+ return 0;
+}
+
+/* VFS ->truncate adapter: delegate to reiserfs_truncate_file with
+   update_timestamps = 1. */
+static void reiserfs_vfs_truncate_file(struct inode *inode) {
+ reiserfs_truncate_file(inode, 1) ;
+}
+
+/* Sync a reiserfs file: flush the inode's dirty buffers, update the
+   stat data inside a journal transaction, and commit that transaction
+   synchronously so all metadata reaches disk.  Returns -EIO if the
+   buffer flush failed, 0 otherwise.  'datasync' is accepted but not
+   used — metadata is always committed. */
+static int reiserfs_sync_file(
+ struct file * p_s_filp,
+ struct dentry * p_s_dentry,
+ int datasync
+ ) {
+ struct inode * p_s_inode = p_s_dentry->d_inode;
+ struct reiserfs_transaction_handle th ;
+ int n_err = 0;
+ int windex ;
+ int jbegin_count = 1 ;
+
+ lock_kernel() ;
+
+ if (!S_ISREG(p_s_inode->i_mode))
+ BUG ();
+
+ n_err = fsync_inode_buffers(p_s_inode) ;
+ /* commit the current transaction to flush any metadata
+ ** changes. sys_fsync takes care of flushing the dirty pages for us
+ */
+ journal_begin(&th, p_s_inode->i_sb, jbegin_count) ;
+ windex = push_journal_writer("sync_file") ;
+ reiserfs_update_sd(&th, p_s_inode);
+ pop_journal_writer(windex) ;
+ journal_end_sync(&th, p_s_inode->i_sb,jbegin_count) ;
+ unlock_kernel() ;
+ return ( n_err < 0 ) ? -EIO : 0;
+}
+
+
+/* VFS file operations for regular reiserfs files: generic page-cache
+   read/write/mmap plus the reiserfs-specific ioctl, tail-packing
+   release and journaled fsync defined above. */
+struct file_operations reiserfs_file_operations = {
+ read: generic_file_read,
+ write: generic_file_write,
+ ioctl: reiserfs_ioctl,
+ mmap: generic_file_mmap,
+ release: reiserfs_file_release,
+ fsync: reiserfs_sync_file,
+};
+
+
+/* Inode operations for regular files: only truncate is overridden. */
+struct inode_operations reiserfs_file_inode_operations = {
+ truncate: reiserfs_vfs_truncate_file,
+};
+
+
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
new file mode 100644
index 000000000..daa30b588
--- /dev/null
+++ b/fs/reiserfs/fix_node.c
@@ -0,0 +1,2908 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/**
+ ** old_item_num
+ ** old_entry_num
+ ** set_entry_sizes
+ ** create_virtual_node
+ ** check_left
+ ** check_right
+ ** directory_part_size
+ ** get_num_ver
+ ** set_parameters
+ ** is_leaf_removable
+ ** are_leaves_removable
+ ** get_empty_nodes
+ ** get_lfree
+ ** get_rfree
+ ** is_left_neighbor_in_cache
+ ** decrement_key
+ ** get_far_parent
+ ** get_parents
+ ** can_node_be_removed
+ ** ip_check_balance
+ ** dc_check_balance_internal
+ ** dc_check_balance_leaf
+ ** dc_check_balance
+ ** check_balance
+ ** get_direct_parent
+ ** get_neighbors
+ ** fix_nodes
+ **
+ **
+ **/
+
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/locks.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+
+/* To make any changes in the tree we find a node, that contains item
+ to be changed/deleted or position in the node we insert a new item
+ to. We call this node S. To do balancing we need to decide what we
+ will shift to left/right neighbor, or to a new node, where new item
+ will be etc. To make this analysis simpler we build virtual
+ node. Virtual node is an array of items, that will replace items of
+ node S. (For instance if we are going to delete an item, virtual
+ node does not contain it). Virtual node keeps information about
+ item sizes and types, mergeability of first and last items, sizes
+ of all entries in directory item. We use this array of items when
+ calculating what we can shift to neighbors and how many nodes we
+ have to have if we do not any shiftings, if we shift to left/right
+ neighbor or to both. */
+
+
+/* taking item number in virtual node, returns number of item, that it has in source buffer */
+/* For M_PASTE/M_CUT (and items before the affected one) positions
+   match; for M_INSERT the virtual node has one extra item, so source
+   items after the insertion point shift back by one; for M_DELETE it
+   has one fewer, so they shift forward by one. */
+static inline int old_item_num (int new_num, int affected_item_num, int mode)
+{
+ if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
+ return new_num;
+
+ if (mode == M_INSERT) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (new_num == 0)
+ reiserfs_panic (0,"vs-8005: old_item_num: for INSERT mode and item number of inserted item");
+#endif
+
+ return new_num - 1;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (mode != M_DELETE)
+ reiserfs_panic (0, "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'", mode);
+#endif
+
+ /* delete mode */
+ return new_num + 1;
+}
+
+/* Build the virtual node for S[h] (see the file-header comment):
+   an in-memory array of virtual items describing what S[h] will
+   contain after the pending operation (vn_mode), with left/right
+   mergeability flags set from the neighbors' delimiting keys.  For
+   internal levels only the item count is computed.  All scratch data
+   is carved out of tb->tb_vn / vn_free_ptr; overrunning that buffer
+   panics. */
+static void create_virtual_node (struct tree_balance * tb, int h)
+{
+ struct item_head * ih;
+ struct virtual_node * vn = tb->tb_vn;
+ int new_num;
+ struct buffer_head * Sh; /* this comes from tb->S[h] */
+
+ Sh = PATH_H_PBUFFER (tb->tb_path, h);
+
+ /* size of changed node */
+ vn->vn_size = MAX_CHILD_SIZE (Sh) - B_FREE_SPACE (Sh) + tb->insert_size[h];
+
+ /* for internal nodes array if virtual items is not created */
+ if (h) {
+ vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
+ return;
+ }
+
+ /* number of items in virtual node */
+ vn->vn_nr_item = B_NR_ITEMS (Sh) + ((vn->vn_mode == M_INSERT)? 1 : 0) - ((vn->vn_mode == M_DELETE)? 1 : 0);
+
+ /* first virtual item */
+ vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
+ memset (vn->vn_vi, 0, vn->vn_nr_item * sizeof (struct virtual_item));
+ vn->vn_free_ptr += vn->vn_nr_item * sizeof (struct virtual_item);
+
+
+ /* first item in the node */
+ ih = B_N_PITEM_HEAD (Sh, 0);
+
+ /* define the mergeability for 0-th item (if it is not being deleted) */
+#ifdef REISERFS_FSCK
+ if (is_left_mergeable (tb->tb_sb, tb->tb_path) == 1 && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
+#else
+ if (op_is_left_mergeable (&(ih->ih_key), Sh->b_size) && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
+#endif
+ vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
+
+ /* go through all items those remain in the virtual node (except for the new (inserted) one) */
+ for (new_num = 0; new_num < vn->vn_nr_item; new_num ++) {
+ int j;
+ struct virtual_item * vi = vn->vn_vi + new_num;
+ int is_affected = ((new_num != vn->vn_affected_item_num) ? 0 : 1);
+
+
+ /* the inserted item has no source yet; it is filled in below */
+ if (is_affected && vn->vn_mode == M_INSERT)
+ continue;
+
+ /* get item number in source node */
+ j = old_item_num (new_num, vn->vn_affected_item_num, vn->vn_mode);
+
+ vi->vi_item_len += ih[j].ih_item_len + IH_SIZE;
+ vi->vi_ih = ih + j;
+ vi->vi_item = B_I_PITEM (Sh, ih + j);
+ vi->vi_uarea = vn->vn_free_ptr;
+
+ // FIXME: there is no check, that item operation did not
+ // consume too much memory
+ vn->vn_free_ptr += op_create_vi (vn, vi, is_affected, tb->insert_size [0]);
+ if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
+ reiserfs_panic (tb->tb_sb, "vs-8030: create_virtual_node: "
+ "virtual node space consumed");
+
+ if (!is_affected)
+ /* this is not being changed */
+ continue;
+
+ if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
+ vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
+ vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted
+ }
+ }
+
+
+ /* virtual inserted item is not defined yet */
+ if (vn->vn_mode == M_INSERT) {
+ struct virtual_item * vi = vn->vn_vi + vn->vn_affected_item_num;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (vn->vn_ins_ih == 0)
+ reiserfs_panic (0, "vs-8040: create_virtual_node: item header of inserted item is not specified");
+#endif
+
+ vi->vi_item_len = tb->insert_size[0];
+ vi->vi_ih = vn->vn_ins_ih;
+ vi->vi_item = vn->vn_data;
+ vi->vi_uarea = vn->vn_free_ptr;
+
+ op_create_vi (vn, vi, 0/*not pasted or cut*/, tb->insert_size [0]);
+#if 0
+ switch (type/*le_key_k_type (ih_version (vn->vn_ins_ih), &(vn->vn_ins_ih->ih_key))*/) {
+ case TYPE_STAT_DATA:
+ vn->vn_vi[vn->vn_affected_item_num].vi_type |= VI_TYPE_STAT_DATA;
+ break;
+ case TYPE_DIRECT:
+ vn->vn_vi[vn->vn_affected_item_num].vi_type |= VI_TYPE_DIRECT;
+ break;
+ case TYPE_INDIRECT:
+ vn->vn_vi[vn->vn_affected_item_num].vi_type |= VI_TYPE_INDIRECT;
+ break;
+ default:
+ /* inseted item is directory (it must be item with "." and "..") */
+ vn->vn_vi[vn->vn_affected_item_num].vi_type |=
+ (VI_TYPE_DIRECTORY | VI_TYPE_FIRST_DIRECTORY_ITEM | VI_TYPE_INSERTED_DIRECTORY_ITEM);
+
+ /* this directory item can not be split, so do not set sizes of entries */
+ break;
+ }
+#endif
+ }
+
+ /* set right merge flag we take right delimiting key and check whether it is a mergeable item */
+ if (tb->CFR[0]) {
+ struct key * key;
+
+ key = B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0]);
+#ifdef REISERFS_FSCK
+ if (is_right_mergeable (tb->tb_sb, tb->tb_path) == 1 && (vn->vn_mode != M_DELETE ||
+ vn->vn_affected_item_num != B_NR_ITEMS (Sh) - 1))
+#else
+ if (op_is_left_mergeable (key, Sh->b_size) && (vn->vn_mode != M_DELETE ||
+ vn->vn_affected_item_num != B_NR_ITEMS (Sh) - 1))
+#endif
+ vn->vn_vi[vn->vn_nr_item-1].vi_type |= VI_TYPE_RIGHT_MERGEABLE;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (op_is_left_mergeable (key, Sh->b_size) &&
+ !(vn->vn_mode != M_DELETE || vn->vn_affected_item_num != B_NR_ITEMS (Sh) - 1) ) {
+ /* we delete last item and it could be merged with right neighbor's first item */
+ if (!(B_NR_ITEMS (Sh) == 1 && is_direntry_le_ih (B_N_PITEM_HEAD (Sh, 0)) &&
+ I_ENTRY_COUNT (B_N_PITEM_HEAD (Sh, 0)) == 1)) {
+ /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
+ print_block (Sh, 0, -1, -1);
+ reiserfs_panic (tb->tb_sb, "vs-8045: create_virtual_node: rdkey %k, affected item==%d (mode==%c) Must be %c",
+ key, vn->vn_affected_item_num, vn->vn_mode, M_DELETE);
+ } else
+ /* we can delete directory item, that has only one directory entry in it */
+ ;
+ }
+#endif
+
+ }
+}
+
+
+/* using virtual node check, how many items can be shifted to left
+ neighbor */
+/* Result goes into tb->lnum[h] (whole items) and, at the leaf level,
+   tb->lbytes (bytes/entries of the one partially shifted item, or -1
+   if no partial shift is possible).  cur_free is the free space
+   available in the left neighbor. */
+static void check_left (struct tree_balance * tb, int h, int cur_free)
+{
+ int i;
+ struct virtual_node * vn = tb->tb_vn;
+ struct virtual_item * vi;
+ int d_size, ih_size;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (cur_free < 0)
+ reiserfs_panic (0, "vs-8050: check_left: cur_free (%d) < 0", cur_free);
+#endif
+
+ /* internal level */
+ if (h > 0) {
+ tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
+ return;
+ }
+
+ /* leaf level */
+
+ if (!cur_free || !vn->vn_nr_item) {
+ /* no free space or nothing to move */
+ tb->lnum[h] = 0;
+ tb->lbytes = -1;
+ return;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!PATH_H_PPARENT (tb->tb_path, 0))
+ reiserfs_panic (0, "vs-8055: check_left: parent does not exist or invalid");
+#endif
+
+ vi = vn->vn_vi;
+ /* merging with the left neighbor's last item saves one item head */
+ if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
+ /* all contents of S[0] fits into L[0] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE)
+ reiserfs_panic (0, "vs-8055: check_left: invalid mode or balance condition failed");
+#endif
+
+ tb->lnum[0] = vn->vn_nr_item;
+ tb->lbytes = -1;
+ return;
+ }
+
+
+ d_size = 0, ih_size = IH_SIZE;
+
+ /* first item may be merge with last item in left neighbor */
+ if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
+ d_size = -((int)IH_SIZE), ih_size = 0;
+
+ tb->lnum[0] = 0;
+ for (i = 0; i < vn->vn_nr_item; i ++, ih_size = IH_SIZE, d_size = 0, vi ++) {
+ d_size += vi->vi_item_len;
+ if (cur_free >= d_size) {
+ /* the item can be shifted entirely */
+ cur_free -= d_size;
+ tb->lnum[0] ++;
+ continue;
+ }
+
+ /* the item cannot be shifted entirely, try to split it */
+ /* check whether L[0] can hold ih and at least one byte of the item body */
+ if (cur_free <= ih_size) {
+ /* cannot shift even a part of the current item */
+ tb->lbytes = -1;
+ return;
+ }
+ cur_free -= ih_size;
+
+ tb->lbytes = op_check_left (vi, cur_free, 0, 0);
+ if (tb->lbytes != -1)
+ /* count partially shifted item */
+ tb->lnum[0] ++;
+
+ break;
+ }
+
+ return;
+}
+
+
+/* using virtual node check, how many items can be shifted to right
+ neighbor */
+/* Mirror of check_left: walks the virtual items from the last one
+   backwards.  Result goes into tb->rnum[h] and, at the leaf level,
+   tb->rbytes (bytes/entries of the partially shifted item, or -1). */
+static void check_right (struct tree_balance * tb, int h, int cur_free)
+{
+ int i;
+ struct virtual_node * vn = tb->tb_vn;
+ struct virtual_item * vi;
+ int d_size, ih_size;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (cur_free < 0)
+ reiserfs_panic (tb->tb_sb, "vs-8070: check_right: cur_free < 0");
+#endif
+
+ /* internal level */
+ if (h > 0) {
+ tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
+ return;
+ }
+
+ /* leaf level */
+
+ if (!cur_free || !vn->vn_nr_item) {
+ /* no free space */
+ tb->rnum[h] = 0;
+ tb->rbytes = -1;
+ return;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!PATH_H_PPARENT (tb->tb_path, 0))
+ reiserfs_panic (tb->tb_sb, "vs-8075: check_right: parent does not exist or invalid");
+#endif
+
+ vi = vn->vn_vi + vn->vn_nr_item - 1;
+ /* merging with the right neighbor's first item saves one item head */
+ if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
+ /* all contents of S[0] fits into R[0] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE)
+ reiserfs_panic (tb->tb_sb, "vs-8080: check_right: invalid mode or balance condition failed");
+#endif
+
+ tb->rnum[h] = vn->vn_nr_item;
+ tb->rbytes = -1;
+ return;
+ }
+
+ d_size = 0, ih_size = IH_SIZE;
+
+ /* last item may be merge with first item in right neighbor */
+ if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
+ d_size = -(int)IH_SIZE, ih_size = 0;
+
+ tb->rnum[0] = 0;
+ for (i = vn->vn_nr_item - 1; i >= 0; i --, d_size = 0, ih_size = IH_SIZE, vi --) {
+ d_size += vi->vi_item_len;
+ if (cur_free >= d_size) {
+ /* the item can be shifted entirely */
+ cur_free -= d_size;
+ tb->rnum[0] ++;
+ continue;
+ }
+
+ /* check whether R[0] can hold ih and at least one byte of the item body */
+ if ( cur_free <= ih_size ) { /* cannot shift even a part of the current item */
+ tb->rbytes = -1;
+ return;
+ }
+
+ /* R[0] can hold the header of the item and at least one byte of its body */
+ cur_free -= ih_size; /* cur_free is still > 0 */
+
+ tb->rbytes = op_check_right (vi, cur_free);
+ if (tb->rbytes != -1)
+ /* count partially shifted item */
+ tb->rnum[0] ++;
+
+ break;
+ }
+
+ return;
+}
+
+
+/*
+ * from - number of items, which are shifted to left neighbor entirely
+ * to - number of item, which are shifted to right neighbor entirely
+ * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
+ * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
+static int get_num_ver (int mode, struct tree_balance * tb, int h,
+ int from, int from_bytes,
+ int to, int to_bytes,
+ short * snum012, int flow
+ )
+{
+ int i;
+ int cur_free;
+ // int bytes;
+ int units;
+ struct virtual_node * vn = tb->tb_vn;
+ // struct virtual_item * vi;
+
+ int total_node_size, max_node_size, current_item_size;
+ int needed_nodes;
+ int start_item, /* position of item we start filling node from */
+ end_item, /* position of item we finish filling node by */
+ start_bytes,/* number of first bytes (entries for directory) of start_item-th item
+ we do not include into node that is being filled */
+ end_bytes; /* number of last bytes (entries for directory) of end_item-th item
+ we do node include into node that is being filled */
+ int split_item_positions[2]; /* these are positions in virtual item of
+ items, that are split between S[0] and
+ S1new and S1new and S2new */
+
+ split_item_positions[0] = -1;
+ split_item_positions[1] = -1;
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* We only create additional nodes if we are in insert or paste mode
+ or we are in replace mode at the internal level. If h is 0 and
+ the mode is M_REPLACE then in fix_nodes we change the mode to
+ paste or insert before we get here in the code. */
+ if ( tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE))
+ reiserfs_panic (0, "vs-8100: get_num_ver: insert_size < 0 in overflow");
+#endif
+
+ max_node_size = MAX_CHILD_SIZE (PATH_H_PBUFFER (tb->tb_path, h));
+
+ /* snum012 [0-2] - number of items, that lay
+ to S[0], first new node and second new node */
+ snum012[3] = -1; /* s1bytes */
+ snum012[4] = -1; /* s2bytes */
+
+ /* internal level */
+ if (h > 0) {
+ i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
+ if (i == max_node_size)
+ return 1;
+ return (i / max_node_size + 1);
+ }
+
+ /* leaf level */
+ needed_nodes = 1;
+ total_node_size = 0;
+ cur_free = max_node_size;
+
+ // start from 'from'-th item
+ start_item = from;
+ // skip its first 'start_bytes' units
+ start_bytes = ((from_bytes != -1) ? from_bytes : 0);
+
+ // last included item is the 'end_item'-th one
+ end_item = vn->vn_nr_item - to - 1;
+ // do not count last 'end_bytes' units of 'end_item'-th item
+ end_bytes = (to_bytes != -1) ? to_bytes : 0;
+
+ /* go through all item begining from the start_item-th item and ending by
+ the end_item-th item. Do not count first 'start_bytes' units of
+ 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
+
+ for (i = start_item; i <= end_item; i ++) {
+ struct virtual_item * vi = vn->vn_vi + i;
+ int skip_from_end = ((i == end_item) ? end_bytes : 0);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (needed_nodes > 3) {
+ reiserfs_panic (tb->tb_sb, "vs-8105: get_num_ver: too many nodes are needed");
+ }
+#endif
+
+ /* get size of current item */
+ current_item_size = vi->vi_item_len;
+
+ /* do not take in calculation head part (from_bytes) of from-th item */
+ current_item_size -= op_part_size (vi, 0/*from start*/, start_bytes);
+
+ /* do not take in calculation tail part of last item */
+ current_item_size -= op_part_size (vi, 1/*from end*/, skip_from_end);
+
+ /* if item fits into current node entierly */
+ if (total_node_size + current_item_size <= max_node_size) {
+ snum012[needed_nodes - 1] ++;
+ total_node_size += current_item_size;
+ start_bytes = 0;
+ continue;
+ }
+
+ if (current_item_size > max_node_size) {
+ /* virtual item length is longer, than max size of item in
+ a node. It is impossible for direct item */
+#ifdef CONFIG_REISERFS_CHECK
+ if (is_direct_le_ih (vi->vi_ih))
+ reiserfs_panic (tb->tb_sb, "vs-8110: get_num_ver: "
+ "direct item length is %d. It can not be longer than %d",
+ current_item_size, max_node_size);
+#endif
+ /* we will try to split it */
+ flow = 1;
+ }
+
+ if (!flow) {
+ /* as we do not split items, take new node and continue */
+ needed_nodes ++; i --; total_node_size = 0;
+ continue;
+ }
+
+ // calculate number of item units which fit into node being
+ // filled
+ {
+ int free_space;
+
+ free_space = max_node_size - total_node_size - IH_SIZE;
+ units = op_check_left (vi, free_space, start_bytes, skip_from_end);
+ if (units == -1) {
+ /* nothing fits into current node, take new node and continue */
+ needed_nodes ++, i--, total_node_size = 0;
+ continue;
+ }
+ }
+
+ /* something fits into the current node */
+ //if (snum012[3] != -1 || needed_nodes != 1)
+ // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
+ //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
+ start_bytes += units;
+ snum012[needed_nodes - 1 + 3] = units;
+
+ if (needed_nodes > 2)
+ reiserfs_warning ("vs-8111: get_num_ver: split_item_position is out of boundary\n");
+ snum012[needed_nodes - 1] ++;
+ split_item_positions[needed_nodes - 1] = i;
+ needed_nodes ++;
+ /* continue from the same item with start_bytes != -1 */
+ start_item = i;
+ i --;
+ total_node_size = 0;
+ }
+
+ // sum012[4] (if it is not -1) contains number of units of which
+ // are to be in S1new, snum012[3] - to be in S0. They are supposed
+ // to be S1bytes and S2bytes correspondingly, so recalculate
+ if (snum012[4] > 0) {
+ int split_item_num;
+ int bytes_to_r, bytes_to_l;
+ int bytes_to_S1new;
+
+ split_item_num = split_item_positions[1];
+ bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0);
+ bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0);
+ bytes_to_S1new = ((split_item_positions[0] == split_item_positions[1]) ? snum012[3] : 0);
+
+ // s2bytes
+ snum012[4] = op_unit_num (&vn->vn_vi[split_item_num]) - snum012[4] - bytes_to_r - bytes_to_l - bytes_to_S1new;
+
+ if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY)
+ reiserfs_warning ("vs-8115: get_num_ver: not directory item\n");
+ }
+
+ /* now we know S2bytes, calculate S1bytes */
+ if (snum012[3] > 0) {
+ int split_item_num;
+ int bytes_to_r, bytes_to_l;
+ int bytes_to_S2new;
+
+ split_item_num = split_item_positions[0];
+ bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0);
+ bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0);
+ bytes_to_S2new = ((split_item_positions[0] == split_item_positions[1] && snum012[4] != -1) ? snum012[4] : 0);
+
+ // s1bytes
+ snum012[3] = op_unit_num (&vn->vn_vi[split_item_num]) - snum012[3] - bytes_to_r - bytes_to_l - bytes_to_S2new;
+ }
+
+ return needed_nodes;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+
+/* Set parameters for balancing.
+ * Performs write of results of analysis of balancing into structure tb,
+ * where it will later be used by the functions that actually do the balancing.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+ * lnum number of items from S[h] that must be shifted to L[h];
+ * rnum number of items from S[h] that must be shifted to R[h];
+ * blk_num number of blocks that S[h] will be splitted into;
+ * s012 number of items that fall into splitted nodes.
+ * lbytes number of bytes which flow to the left neighbor from the item that is not
+ * not shifted entirely
+ * rbytes number of bytes which flow to the right neighbor from the item that is not
+ * not shifted entirely
+ * s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array)
+ */
+
+static void set_parameters (struct tree_balance * tb, int h, int lnum,
+ int rnum, int blk_num, short * s012, int lb, int rb)
+{
+
+ tb->lnum[h] = lnum;
+ tb->rnum[h] = rnum;
+ tb->blknum[h] = blk_num;
+
+ if (h == 0)
+ { /* only for leaf level */
+ if (s012 != NULL)
+ {
+ tb->s0num = * s012 ++,
+ tb->s1num = * s012 ++,
+ tb->s2num = * s012 ++;
+ tb->s1bytes = * s012 ++;
+ tb->s2bytes = * s012;
+ }
+ tb->lbytes = lb;
+ tb->rbytes = rb;
+ }
+}
+
+
+
+/* check, does node disappear if we shift tb->lnum[0] items to left
+ neighbor and tb->rnum[0] to the right one. */
+static int is_leaf_removable (struct tree_balance * tb)
+{
+ struct virtual_node * vn = tb->tb_vn;
+ int to_left, to_right;
+ int size;
+ int remain_items;
+
+ /* number of items, that will be shifted to left (right) neighbor
+ entirely */
+ to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
+ to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
+ remain_items = vn->vn_nr_item;
+
+ /* how many items remain in S[0] after shiftings to neighbors */
+ remain_items -= (to_left + to_right);
+
+ if (remain_items < 1) {
+ /* all content of node can be shifted to neighbors */
+ set_parameters (tb, 0, to_left, vn->vn_nr_item - to_left, 0, NULL, -1, -1);
+ return 1;
+ }
+
+ if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
+ /* S[0] is not removable */
+ return 0;
+
+ /* check, whether we can divide 1 remaining item between neighbors */
+
+ /* get size of remaining item (in item units) */
+ size = op_unit_num (&(vn->vn_vi[to_left]));
+
+ if (tb->lbytes + tb->rbytes >= size) {
+ set_parameters (tb, 0, to_left + 1, to_right + 1, 0, NULL, tb->lbytes, -1);
+ return 1;
+ }
+
+ return 0;
+}
+
+
/* check whether L, S, R can be joined in one node.
   lfree/rfree are the free byte counts of the left/right neighbors.
   Returns 1 (and records the decision via set_parameters) when the three
   leaves fit into the two neighbors, 0 otherwise. */
static int are_leaves_removable (struct tree_balance * tb, int lfree, int rfree)
{
    struct virtual_node * vn = tb->tb_vn;
    int ih_size;	/* header bytes saved by merging boundary items */
    struct buffer_head *S0;

    S0 = PATH_H_PBUFFER (tb->tb_path, 0);

    ih_size = 0;
    if (vn->vn_nr_item) {
	/* a mergeable boundary item loses its item header when joined
	   with the neighbor, which saves IH_SIZE bytes per side */
	if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
	    ih_size += IH_SIZE;

	if (vn->vn_vi[vn->vn_nr_item-1].vi_type & VI_TYPE_RIGHT_MERGEABLE)
	    ih_size += IH_SIZE;
    } else {
	/* there was only one item and it will be deleted */
	struct item_head * ih;

#ifdef CONFIG_REISERFS_CHECK
	if (B_NR_ITEMS (S0) != 1)
	    reiserfs_panic (0, "vs-8125: are_leaves_removable: item number must be 1: it is %d", B_NR_ITEMS(S0));
#endif

	ih = B_N_PITEM_HEAD (S0, 0);
	/* only relevant when the deleted item and the right delimiting key
	   belong to the same object (same directory) */
	if (tb->CFR[0] && !comp_short_le_keys (&(ih->ih_key), B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0])))
	    if (is_direntry_le_ih (ih)) {
#ifndef REISERFS_FSCK

		/* Directory must be in correct state here: that is
		   somewhere at the left side should exist first directory
		   item. But the item being deleted can not be that first
		   one because its right neighbor is item of the same
		   directory. (But first item always gets deleted in last
		   turn). So, neighbors of deleted item can be merged, so
		   we can save ih_size */
		ih_size = IH_SIZE;

#ifdef CONFIG_REISERFS_CHECK
		/* we might check that left neighbor exists and is of the
		   same directory */
		if (le_key_k_offset (ih_version (ih), &(ih->ih_key)) == DOT_OFFSET)
		    reiserfs_panic (tb->tb_sb, "vs-8130: are_leaves_removable: "
				    "first directory item can not be removed until directory is not empty");
#endif


#else /* REISERFS_FSCK */

		/* we can delete any directory item in fsck (if it is unreachable) */
		if (ih->ih_key.k_offset != DOT_OFFSET) {
		    /* must get left neighbor here to make sure, that left
		       neighbor is of the same directory */
		    struct buffer_head * left;

		    left = get_left_neighbor (tb->tb_sb, tb->tb_path);
		    if (left) {
			struct item_head * last;

			if (B_NR_ITEMS (left) == 0)
			    reiserfs_panic (tb->tb_sb, "vs-8135: are_leaves_removable: "
					    "empty node in the tree");
			last = B_N_PITEM_HEAD (left, B_NR_ITEMS (left) - 1);
			if (!comp_short_keys (&last->ih_key, &ih->ih_key))
			    ih_size = IH_SIZE;
			brelse (left);
		    }
		}
#endif
	    }

    }

    /* everything currently in S0 plus the pending size change must fit
       into the free space of L and R (allowing for merged headers) */
    if (MAX_CHILD_SIZE (S0) + vn->vn_size <= rfree + lfree + ih_size) {
	set_parameters (tb, 0, -1, -1, -1, NULL, -1, -1);
	return 1;
    }
    return 0;

}
+
+
+
/* when we do not split item, lnum and rnum are numbers of entire items */
/* SET_PAR_SHIFT_LEFT: record the decision to shift lpar items to the left
   neighbor.  At internal levels (h != 0) the shift count is rebalanced so
   the two nodes end up with roughly equal item counts; at the leaf level
   the LEFT_SHIFT_FLOW case also passes tb->lbytes for the split item,
   otherwise the partially shifted item is excluded from the count.
   Expects h, Sh, lpar, vn, lnver, lset, snum012 and tb in scope. */
#define SET_PAR_SHIFT_LEFT \
if (h)\
{\
   int to_l;\
   \
   to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
	      (MAX_NR_KEY(Sh) + 1 - lpar);\
	      \
	      set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
}\
else \
{\
   if (lset==LEFT_SHIFT_FLOW)\
     set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
		     tb->lbytes, -1);\
   else\
     set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
		     -1, -1);\
}
+
+
/* SET_PAR_SHIFT_RIGHT: mirror of SET_PAR_SHIFT_LEFT for shifts of rpar
   items to the right neighbor; at the leaf level the RIGHT_SHIFT_FLOW
   case passes tb->rbytes for the split item.  Expects h, Sh, rpar, vn,
   rnver, rset, snum012 and tb in scope. */
#define SET_PAR_SHIFT_RIGHT \
if (h)\
{\
   int to_r;\
   \
   to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
   \
   set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
}\
else \
{\
   if (rset==RIGHT_SHIFT_FLOW)\
     set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
		  -1, tb->rbytes);\
   else\
     set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
		  -1, -1);\
}
+
+
+void free_buffers_in_tb (
+ struct tree_balance * p_s_tb
+ ) {
+ int n_counter;
+
+ decrement_counters_in_path(p_s_tb->tb_path);
+
+ for ( n_counter = 0; n_counter < MAX_HEIGHT; n_counter++ ) {
+ decrement_bcount(p_s_tb->L[n_counter]);
+ p_s_tb->L[n_counter] = NULL;
+ decrement_bcount(p_s_tb->R[n_counter]);
+ p_s_tb->R[n_counter] = NULL;
+ decrement_bcount(p_s_tb->FL[n_counter]);
+ p_s_tb->FL[n_counter] = NULL;
+ decrement_bcount(p_s_tb->FR[n_counter]);
+ p_s_tb->FR[n_counter] = NULL;
+ decrement_bcount(p_s_tb->CFL[n_counter]);
+ p_s_tb->CFL[n_counter] = NULL;
+ decrement_bcount(p_s_tb->CFR[n_counter]);
+ p_s_tb->CFR[n_counter] = NULL;
+ }
+}
+
+
/* Get new buffers for storing new nodes that are created while balancing.
 * Returns:	SCHEDULE_OCCURED - schedule occured while the function worked;
 *	        CARRY_ON - schedule didn't occur while the function worked;
 *	        NO_DISK_SPACE - no disk space.
 * (REPEAT_SEARCH may also be returned when a newly allocated buffer is
 * still busy or the filesystem changed underneath us.)
 */
/* The function is NOT SCHEDULE-SAFE! */
static int  get_empty_nodes(
              struct tree_balance * p_s_tb,
              int n_h
            ) {
  struct buffer_head  * p_s_new_bh,
    		      *	p_s_Sh = PATH_H_PBUFFER (p_s_tb->tb_path, n_h);
  unsigned long	      *	p_n_blocknr,
    			a_n_blocknrs[MAX_AMOUNT_NEEDED] = {0, };
  int       		n_counter,
   	 		n_number_of_freeblk,
                	n_amount_needed,/* number of needed empty blocks */
			n_retval = CARRY_ON;
  struct super_block *	p_s_sb = p_s_tb->tb_sb;


#ifdef REISERFS_FSCK
  /* fsck's "pointer insert" pseudo-mode needs no new blocks */
  if (n_h == 0 && p_s_tb->insert_size[n_h] == 0x7fff)
    return CARRY_ON;
#endif

  /* number_of_freeblk is the number of empty blocks which have been
     acquired for use by the balancing algorithm minus the number of
     empty blocks used in the previous levels of the analysis,
     number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
     after empty blocks are acquired, and the balancing analysis is
     then restarted, amount_needed is the number needed by this level
     (n_h) of the balancing analysis.

     Note that for systems with many processes writing, it would be
     more layout optimal to calculate the total number needed by all
     levels and then to run reiserfs_new_blocks to get all of them at once. */

  /* Initiate number_of_freeblk to the amount acquired prior to the restart of
     the analysis or 0 if not restarted, then subtract the amount needed
     by all of the levels of the tree below n_h. */
  /* blknum includes S[n_h], so we subtract 1 in this calculation */
  for ( n_counter = 0, n_number_of_freeblk = p_s_tb->cur_blknum; n_counter < n_h; n_counter++ )
    n_number_of_freeblk -= ( p_s_tb->blknum[n_counter] ) ? (p_s_tb->blknum[n_counter] - 1) : 0;

  /* Allocate missing empty blocks. */
  /* if p_s_Sh == 0  then we are getting a new root */
  n_amount_needed = ( p_s_Sh ) ? (p_s_tb->blknum[n_h] - 1) : 1;
  /*  Amount_needed = the amount that we need more than the amount that we have. */
  if ( n_amount_needed > n_number_of_freeblk )
    n_amount_needed -= n_number_of_freeblk;
  else /* If we have enough already then there is nothing to do. */
    return CARRY_ON;

  /* allocate the new block numbers near the last buffer of the path
     for locality */
  if ( reiserfs_new_blocknrs (p_s_tb->transaction_handle, a_n_blocknrs,
                              PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_blocknr, n_amount_needed) == NO_DISK_SPACE )
    return NO_DISK_SPACE;

  /* for each blocknumber we just got, get a buffer and stick it on FEB */
  for ( p_n_blocknr = a_n_blocknrs, n_counter = 0; n_counter < n_amount_needed;
	p_n_blocknr++, n_counter++ ) {

#ifdef CONFIG_REISERFS_CHECK
    if ( ! *p_n_blocknr )
      reiserfs_panic(p_s_sb, "PAP-8135: get_empty_nodes: reiserfs_new_blocknrs failed when got new blocks");
#endif

    p_s_new_bh = reiserfs_getblk(p_s_sb->s_dev, *p_n_blocknr, p_s_sb->s_blocksize);
    /* b_count > 1 means somebody else still holds this buffer */
    if (atomic_read (&(p_s_new_bh->b_count)) > 1) {
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
      reiserfs_warning ("waiting for buffer %b, iput inode pid = %d, this pid %d, mode %c, %h\n",
			p_s_new_bh, put_inode_pid, current->pid, p_s_tb->tb_vn->vn_mode, p_s_tb->tb_vn->vn_ins_ih);
      print_tb (0, 0, 0, p_s_tb, "tb");
*/
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
      /* a journaled buffer with exactly two references is tolerated;
	 anything else forces us to release everything and retry */
      if (atomic_read(&(p_s_new_bh->b_count)) > 2 || 
          !(buffer_journaled(p_s_new_bh) || buffer_journal_dirty(p_s_new_bh))) {
	n_retval = REPEAT_SEARCH ;
	free_buffers_in_tb (p_s_tb);
	wait_buffer_until_released (p_s_new_bh);
      }
    }
#ifdef CONFIG_REISERFS_CHECK
    if (atomic_read (&(p_s_new_bh->b_count)) != 1 || buffer_dirty (p_s_new_bh)) {
      if (atomic_read(&(p_s_new_bh->b_count)) > 2 || 
          !(buffer_journaled(p_s_new_bh) || buffer_journal_dirty(p_s_new_bh))) {
      reiserfs_panic(p_s_sb,"PAP-8140: get_empty_nodes: not free or dirty buffer %b for the new block", 
		     p_s_new_bh);
      }
    }
#endif
    
    /* Put empty buffers into the array. */
    if (p_s_tb->FEB[p_s_tb->cur_blknum])
      BUG();

    p_s_tb->FEB[p_s_tb->cur_blknum++] = p_s_new_bh;
  }

  /* the world may have changed while we blocked above; caller must retry */
  if ( n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB (p_s_tb) )
    n_retval = REPEAT_SEARCH ;

  return n_retval;
}
+
+
+/* Get free space of the left neighbor, which is stored in the parent
+ * node of the left neighbor. */
+static int get_lfree (struct tree_balance * tb, int h)
+{
+ struct buffer_head * l, * f;
+ int order;
+
+ if ((f = PATH_H_PPARENT (tb->tb_path, h)) == 0 || (l = tb->FL[h]) == 0)
+ return 0;
+
+ if (f == l)
+ order = PATH_H_B_ITEM_ORDER (tb->tb_path, h) - 1;
+ else {
+ order = B_NR_ITEMS (l);
+ f = l;
+ }
+
+ return (MAX_CHILD_SIZE(f) - le16_to_cpu (B_N_CHILD(f,order)->dc_size));
+}
+
+
+/* Get free space of the right neighbor,
+ * which is stored in the parent node of the right neighbor.
+ */
+static int get_rfree (struct tree_balance * tb, int h)
+{
+ struct buffer_head * r, * f;
+ int order;
+
+ if ((f = PATH_H_PPARENT (tb->tb_path, h)) == 0 || (r = tb->FR[h]) == 0)
+ return 0;
+
+ if (f == r)
+ order = PATH_H_B_ITEM_ORDER (tb->tb_path, h) + 1;
+ else {
+ order = 0;
+ f = r;
+ }
+
+ return (MAX_CHILD_SIZE(f) - B_N_CHILD(f,order)->dc_size);
+
+}
+
+
+/* Check whether left neighbor is in memory. */
+static int is_left_neighbor_in_cache(
+ struct tree_balance * p_s_tb,
+ int n_h
+ ) {
+ struct buffer_head * p_s_father, * left;
+ struct super_block * p_s_sb = p_s_tb->tb_sb;
+ unsigned long n_left_neighbor_blocknr;
+ int n_left_neighbor_position;
+
+ if ( ! p_s_tb->FL[n_h] ) /* Father of the left neighbor does not exist. */
+ return 0;
+
+ /* Calculate father of the node to be balanced. */
+ p_s_father = PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! p_s_father || ! B_IS_IN_TREE (p_s_father) || ! B_IS_IN_TREE (p_s_tb->FL[n_h]) ||
+ ! buffer_uptodate (p_s_father) || ! buffer_uptodate (p_s_tb->FL[n_h]) ) {
+ reiserfs_panic (p_s_sb, "vs-8165: is_left_neighbor_in_cache: F[h] (%b) or FL[h] (%b) is invalid",
+ p_s_father, p_s_tb->FL[n_h]);
+ }
+#endif
+
+
+ /* Get position of the pointer to the left neighbor into the left father. */
+ n_left_neighbor_position = ( p_s_father == p_s_tb->FL[n_h] ) ?
+ p_s_tb->lkey[n_h] : B_NR_ITEMS (p_s_tb->FL[n_h]);
+ /* Get left neighbor block number. */
+ n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
+ /* Look for the left neighbor in the cache. */
+ if ( (left = get_hash_table(p_s_sb->s_dev, n_left_neighbor_blocknr, p_s_sb->s_blocksize)) ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( buffer_uptodate (left) && ! B_IS_IN_TREE(left) ) {
+ reiserfs_panic(p_s_sb, "vs-8170: is_left_neighbor_in_cache: left neighbor (%b %z) is not in the tree",
+ left, left);
+ }
+#endif
+ atomic_dec (&(left->b_count));
+ return 1;
+ }
+
+ return 0;
+}
+
+
+#define LEFT_PARENTS 'l'
+#define RIGHT_PARENTS 'r'
+
+
/* Decrement the given key to the key of the preceding object, using the
   item-type specific decrement_key operation. */
static void decrement_key (struct cpu_key * p_s_key)
{
    // call item specific function for this key
    item_ops[cpu_key_k_type (p_s_key)]->decrement_key (p_s_key);


#if 0 /* this works wrong when key is key of second part of tail: it
	 sets key to be of indirect type. It looks like it makes no
	 harm but it is unclear */

    /* old generic implementation: borrow 1 from the least significant
       non-zero key field, scanning from the end */
    unsigned long * p_n_key_field = (unsigned long *)p_s_key + REISERFS_FULL_KEY_LEN - 1;
    int		  n_counter;

    for( n_counter = 0; n_counter < REISERFS_FULL_KEY_LEN; n_counter++, p_n_key_field-- ) {
	if ( *p_n_key_field ) {
	    (*p_n_key_field)--;
	    break;
	}
    }
#ifdef CONFIG_REISERFS_CHECK
    if ( n_counter == REISERFS_FULL_KEY_LEN )
	reiserfs_panic(NULL, "PAP-8175: decrement_key: zero key");
#endif

#endif /*0*/

}
+
+
+
+
/* Calculate far left/right parent of the left/right neighbor of the current node, that
 * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
 * Calculate left/right common parent of the current node and L[h]/R[h].
 * Calculate left/right delimiting key position.
 * Returns:	PATH_INCORRECT    - path in the tree is not correct;
 		SCHEDULE_OCCURRED - schedule occured while the function worked;
 *	        CARRY_ON          - schedule didn't occur while the function worked;
 * (in this version REPEAT_SEARCH and IO_ERROR are also returned; see the
 *  return statements below)
 */
static int  get_far_parent (struct tree_balance *   p_s_tb,
			    int                     n_h,
			    struct buffer_head  **  pp_s_father,
			    struct buffer_head  **  pp_s_com_father,
			    char                    c_lr_par) 
{
  struct buffer_head  * p_s_parent;
  INITIALIZE_PATH (s_path_to_neighbor_father);
  struct path * p_s_path = p_s_tb->tb_path;
  struct cpu_key	s_lr_father_key;
  int                   n_counter,
                        n_position = MAX_INT,
                        n_first_last_position = 0,
                        n_path_offset = PATH_H_PATH_OFFSET(p_s_path, n_h);

  /* Starting from F[n_h] go upwards in the tree, and look for the common
     ancestor of F[n_h], and its neighbor l/r, that should be obtained. */

  n_counter = n_path_offset;

#ifdef CONFIG_REISERFS_CHECK
  if ( n_counter < FIRST_PATH_ELEMENT_OFFSET )
    reiserfs_panic(p_s_tb->tb_sb, "PAP-8180: get_far_parent: invalid path length");
#endif

  
  for ( ; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--  )  {
    /* Check whether parent of the current buffer in the path is really parent in the tree. */
    if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_path, n_counter - 1)) )
      return REPEAT_SEARCH;
    /* Check whether position in the parent is correct. */
    if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_counter - 1)) > B_NR_ITEMS(p_s_parent) )
      return REPEAT_SEARCH;
    /* Check whether parent at the path really points to the child. */
    if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
	 PATH_OFFSET_PBUFFER(p_s_path, n_counter)->b_blocknr )
      return REPEAT_SEARCH;
    /* Return delimiting key if position in the parent is not equal to first/last one. */
    if ( c_lr_par == RIGHT_PARENTS )
      n_first_last_position = B_NR_ITEMS (p_s_parent);
    if ( n_position != n_first_last_position ) {
      /* found the common ancestor; pin it */
      *pp_s_com_father = p_s_parent;
      atomic_inc (&((*pp_s_com_father)->b_count));
      /*(*pp_s_com_father = p_s_parent)->b_count++;*/
      break;
    }
  }

  /* if we are in the root of the tree, then there is no common father */
  if ( n_counter == FIRST_PATH_ELEMENT_OFFSET ) {
    /* Check whether first buffer in the path is the root of the tree. */
    if ( PATH_OFFSET_PBUFFER(p_s_tb->tb_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
	 SB_ROOT_BLOCK (p_s_tb->tb_sb) ) {
      /* no neighbor on this side at all */
      *pp_s_father = *pp_s_com_father = NULL;
      return CARRY_ON;
    }
    return REPEAT_SEARCH;
  }

#ifdef CONFIG_REISERFS_CHECK
  if ( B_LEVEL (*pp_s_com_father) <= DISK_LEAF_NODE_LEVEL ) {
    reiserfs_panic(p_s_tb->tb_sb, "PAP-8185: get_far_parent: (%b %z) level too small", *pp_s_com_father, *pp_s_com_father);
  }
#endif

  /* Check whether the common parent is locked. */

  if ( buffer_locked (*pp_s_com_father) ) {
    __wait_on_buffer(*pp_s_com_father);
    if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
      decrement_bcount(*pp_s_com_father);
      return REPEAT_SEARCH;
    }
  }

  /* So, we got common parent of the current node and its left/right neighbor.
     Now we are geting the parent of the left/right neighbor. */

  /* Form key to get parent of the left/right neighbor. */
  /* Note: this also records the delimiting key position into
     lkey[n_h - 1] / rkey[n_h - 1] as a side effect */
  le_key2cpu_key (&s_lr_father_key, B_N_PDELIM_KEY(*pp_s_com_father, ( c_lr_par == LEFT_PARENTS ) ?
						   (p_s_tb->lkey[n_h - 1] = n_position - 1) : (p_s_tb->rkey[n_h - 1] = n_position)));


  if ( c_lr_par == LEFT_PARENTS )
    decrement_key(&s_lr_father_key);

  if (search_by_key(p_s_tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, n_h + 1) == IO_ERROR)
    // path is released
    return IO_ERROR;

  if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
    decrement_counters_in_path(&s_path_to_neighbor_father);
    decrement_bcount(*pp_s_com_father);
    return REPEAT_SEARCH;
  }

  *pp_s_father = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);

#ifdef CONFIG_REISERFS_CHECK
  if ( B_LEVEL (*pp_s_father) != n_h + 1 ) {
    reiserfs_panic(p_s_tb->tb_sb, "PAP-8190: get_far_parent: (%b %z) level too small", *pp_s_father, *pp_s_father);
  }
  
  if ( s_path_to_neighbor_father.path_length < FIRST_PATH_ELEMENT_OFFSET )
    reiserfs_panic(0, "PAP-8192: get_far_parent: path length is too small");

#endif

  /* keep the reference to the last buffer (*pp_s_father) but release the
     rest of the auxiliary path */
  s_path_to_neighbor_father.path_length--;
  decrement_counters_in_path(&s_path_to_neighbor_father);
  return CARRY_ON;
}
+
+
/* Get parents of neighbors of node in the path(S[n_path_offset]) and common parents of
 * S[n_path_offset] and L[n_path_offset]/R[n_path_offset]: F[n_path_offset], FL[n_path_offset],
 * FR[n_path_offset], CFL[n_path_offset], CFR[n_path_offset].
 * Calculate numbers of left and right delimiting keys position: lkey[n_path_offset], rkey[n_path_offset].
 * Returns:	SCHEDULE_OCCURRED - schedule occured while the function worked;
 *	        CARRY_ON - schedule didn't occur while the function worked;
 */
static int  get_parents (struct tree_balance * p_s_tb, int n_h)
{
  struct path         * p_s_path = p_s_tb->tb_path;
  int                   n_position,
    			n_ret_value,
    			n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
  struct buffer_head  *	p_s_curf,
    		      *	p_s_curcf;

  /* Current node is the root of the tree or will be root of the tree */
  if ( n_path_offset <= FIRST_PATH_ELEMENT_OFFSET ) {
    /* The root can not have parents.
       Release nodes which previously were obtained as parents of the current node neighbors. */
    decrement_bcount(p_s_tb->FL[n_h]);
    decrement_bcount(p_s_tb->CFL[n_h]);
    decrement_bcount(p_s_tb->FR[n_h]);
    decrement_bcount(p_s_tb->CFR[n_h]);
    p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] = p_s_tb->CFR[n_h] = NULL;
    return CARRY_ON;
  }
  
  /* Get parent FL[n_path_offset] of L[n_path_offset]. */
  if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1)) )  {
    /* Current node is not the first child of its parent.
       The direct parent serves as both FL and CFL; take two references. */
    /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2;*/
    p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
    atomic_inc (&(p_s_curf->b_count));
    atomic_inc (&(p_s_curf->b_count));
    p_s_tb->lkey[n_h] = n_position - 1;
  }
  else  {
    /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node.
       Calculate current common parent of L[n_path_offset] and the current node. Note that
       CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset].
       Calculate lkey[n_path_offset]. */
    if ( (n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf,
				       &p_s_curcf, LEFT_PARENTS)) != CARRY_ON )
      return n_ret_value;
  }

  /* swap in the new FL/CFL, dropping references to the old ones */
  decrement_bcount(p_s_tb->FL[n_h]);
  p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
  decrement_bcount(p_s_tb->CFL[n_h]);
  p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */

#ifdef CONFIG_REISERFS_CHECK
  if ((p_s_curf && !B_IS_IN_TREE (p_s_curf)) || (p_s_curcf && !B_IS_IN_TREE (p_s_curcf))) {
    reiserfs_panic (p_s_tb->tb_sb, "PAP-8195: get_parents: FL (%b) or CFL (%b) is invalid", p_s_curf, p_s_curcf);
  }
#endif

/* Get parent FR[n_h] of R[n_h]. */

/* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */
  if ( n_position == B_NR_ITEMS (PATH_H_PBUFFER(p_s_path, n_h + 1)) ) {
/* Calculate current parent of R[n_h], which is the right neighbor of F[n_h].
   Calculate current common parent of R[n_h] and current node. Note that CFR[n_h]
   not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */
    if ( (n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf,  &p_s_curcf, RIGHT_PARENTS)) != CARRY_ON )
      return n_ret_value;
  }
  else {
/* Current node is not the last child of its parent F[n_h].
   The direct parent serves as both FR and CFR; take two references. */
    /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2;*/
    p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
    atomic_inc (&(p_s_curf->b_count));
    atomic_inc (&(p_s_curf->b_count));
    p_s_tb->rkey[n_h] = n_position;
  }	

  /* swap in the new FR/CFR, dropping references to the old ones */
  decrement_bcount(p_s_tb->FR[n_h]);
  p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. */
  
  decrement_bcount(p_s_tb->CFR[n_h]);
  p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */

#ifdef CONFIG_REISERFS_CHECK
#if 0
  if (n_h == 0 && p_s_tb->CFR[n_h] && COMP_KEYS (B_PRIGHT_DELIM_KEY (PATH_H_PBUFFER(p_s_path, n_h)), 
						 B_N_PDELIM_KEY (p_s_tb->CFR[n_h], p_s_tb->rkey[n_h]))) {
    reiserfs_panic (p_s_tb->tb_sb, "PAP-8200: get_parents: rdkey in S0 %k, rdkey in CFR0 %k do not match",
		    B_PRIGHT_DELIM_KEY (PATH_H_PBUFFER(p_s_path, n_h)), B_N_PDELIM_KEY (p_s_tb->CFR[n_h], p_s_tb->rkey[n_h]));
  }
#endif
  if ((p_s_curf && !B_IS_IN_TREE (p_s_curf)) || (p_s_curcf && !B_IS_IN_TREE (p_s_curcf))) {
    reiserfs_panic (p_s_tb->tb_sb, "PAP-8205: get_parents: FR (%b) or CFR (%b) is invalid", p_s_curf, p_s_curcf);
  }
#endif

  return CARRY_ON;
}
+
+
+/* it is possible to remove node as result of shiftings to
+ neighbors even when we insert or paste item. */
+static inline int can_node_be_removed (int mode, int lfree, int sfree, int rfree, struct tree_balance * tb, int h)
+{
+ struct buffer_head * Sh = PATH_H_PBUFFER (tb->tb_path, h);
+ int levbytes = tb->insert_size[h];
+ struct item_head * ih;
+ struct key * r_key = NULL;
+
+ ih = B_N_PITEM_HEAD (Sh, 0);
+ if ( tb->CFR[h] )
+ r_key = B_N_PDELIM_KEY(tb->CFR[h],tb->rkey[h]);
+
+ if (
+ lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
+ /* shifting may merge items which might save space */
+#ifdef REISERFS_FSCK
+ - (( ! h && is_left_mergeable (tb->tb_sb, tb->tb_path) == 1 ) ? IH_SIZE : 0)
+ - (( ! h && r_ih && is_right_mergeable (tb->tb_sb, tb->tb_path) == 1 ) ? IH_SIZE : 0)
+#else
+ - (( ! h && op_is_left_mergeable (&(ih->ih_key), Sh->b_size) ) ? IH_SIZE : 0)
+ - (( ! h && r_key && op_is_left_mergeable (r_key, Sh->b_size) ) ? IH_SIZE : 0)
+#endif
+ + (( h ) ? KEY_SIZE : 0))
+ {
+ /* node can not be removed */
+ if (sfree >= levbytes ) { /* new item fits into node S[h] without any shifting */
+ if ( ! h )
+ tb->s0num = B_NR_ITEMS(Sh) + ((mode == M_INSERT ) ? 1 : 0);
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED;
+ }
+ }
+ return !NO_BALANCING_NEEDED;
+}
+
+
+
+/* Check whether current node S[h] is balanced when increasing its size by
+ * Inserting or Pasting.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste;
+ * Returns: 1 - schedule occured;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+ */
+/* ip means Inserting or Pasting: check whether node S[h] remains balanced
+ when its size is Increased, and calculate the balancing parameters for
+ level h (stored into *tb). See the header comment above for the meaning
+ of the return codes. */
+static int ip_check_balance (struct tree_balance * tb, int h)
+{
+ struct virtual_node * vn = tb->tb_vn;
+ int levbytes, /* Number of bytes that must be inserted into (value
+ is negative if bytes are deleted) buffer which
+ contains node being balanced. The mnemonic is
+ that the attempted change in node space used level
+ is levbytes bytes. */
+ n_ret_value; /* status code returned by get_empty_nodes/get_parents */
+
+ int lfree, sfree, rfree /* free space in L, S and R */;
+
+ /* nver is short for number of vertices, and lnver is the number if
+ we shift to the left, rnver is the number if we shift to the
+ right, and lrnver is the number if we shift in both directions.
+ The goal is to minimize first the number of vertices, and second,
+ the number of vertices whose contents are changed by shifting,
+ and third the number of uncached vertices whose contents are
+ changed by shifting and must be read from disk. */
+ int nver, lnver, rnver, lrnver;
+
+ /* used at leaf level only, S0 = S[0] is the node being balanced,
+ sInum [ I = 0,1,2 ] is the number of items that will
+ remain in node SI after balancing. S1 and S2 are new
+ nodes that might be created. */
+
+ /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
+ where 4th parameter is s1bytes and 5th - s2bytes
+ */
+ short snum012[40] = {0,}; /* s0num, s1num, s2num for 8 cases
+ 0,1 - do not shift and do not shift but bottle
+ 2 - shift only whole item to left
+ 3 - shift to left and bottle as much as possible
+ 4,5 - shift to right (whole items and as much as possible
+ 6,7 - shift to both directions (whole items and as much as possible)
+ */
+
+ /* Sh is the node whose balance is currently being checked */
+ struct buffer_head * Sh;
+
+#ifdef REISERFS_FSCK
+ /* special mode for insert pointer to the most low internal node */
+ if (h == 0 && vn->vn_mode == M_INTERNAL) {
+ /* blk_num == 2 is to get pointer inserted to the next level */
+ set_parameters (tb, h, 0, 0, 2, NULL, -1, -1);
+ return 0;
+ }
+#endif
+
+ Sh = PATH_H_PBUFFER (tb->tb_path, h);
+ levbytes = tb->insert_size[h];
+
+ /* Calculate balance parameters for creating new root. */
+ if ( ! Sh ) {
+ if ( ! h )
+ reiserfs_panic (tb->tb_sb, "vs-8210: ip_check_balance: S[0] can not be 0");
+ switch ( n_ret_value = get_empty_nodes (tb, h) ) {
+ case CARRY_ON:
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+
+ case NO_DISK_SPACE:
+ case REPEAT_SEARCH:
+ return n_ret_value;
+ /* NOTE(review): assumes reiserfs_panic never returns; otherwise we
+ would fall out of the switch with Sh == NULL — confirm. */
+ default:
+ reiserfs_panic(tb->tb_sb, "vs-8215: ip_check_balance: incorrect return value of get_empty_nodes");
+ }
+ }
+
+ if ( (n_ret_value = get_parents (tb, h)) != CARRY_ON ) /* get parents of S[h] neighbors. */
+ return n_ret_value;
+
+ sfree = B_FREE_SPACE (Sh);
+
+ /* get free space of neighbors */
+ rfree = get_rfree (tb, h);
+ lfree = get_lfree (tb, h);
+
+ if (can_node_be_removed (vn->vn_mode, lfree, sfree, rfree, tb, h) == NO_BALANCING_NEEDED)
+ /* and new item fits into node S[h] without any shifting */
+ return NO_BALANCING_NEEDED;
+
+ create_virtual_node (tb, h);
+
+ /*
+ determine maximal number of items we can shift to the left neighbor (in tb structure)
+ and the maximal number of bytes that can flow to the left neighbor
+ from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
+ */
+ check_left (tb, h, lfree);
+
+ /*
+ determine maximal number of items we can shift to the right neighbor (in tb structure)
+ and the maximal number of bytes that can flow to the right neighbor
+ from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
+ */
+ check_right (tb, h, rfree);
+
+
+ /* all contents of internal node S[h] can be moved into its
+ neighbors, S[h] will be removed after balancing */
+ if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
+ int to_r;
+
+ /* Since we are working on internal nodes, and our internal
+ nodes have fixed size entries, then we can balance by the
+ number of items rather than the space they consume. In this
+ routine we set the left node equal to the right node,
+ allowing a difference of less than or equal to 1 child
+ pointer. */
+ to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 -
+ (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
+ set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* this checks balance condition, that any two neighboring nodes can not fit in one node */
+ if ( h && ( tb->lnum[h] >= vn->vn_nr_item + 1 || tb->rnum[h] >= vn->vn_nr_item + 1) )
+ reiserfs_panic (tb->tb_sb, "vs-8220: ip_check_balance: tree is not balanced on internal level");
+
+ if ( ! h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
+ (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1)) ))
+ reiserfs_panic(tb->tb_sb, "vs-8225: ip_check_balance: tree is not balanced on leaf level");
+#endif
+
+ /* all contents of S[0] can be moved into its neighbors
+ S[0] will be removed after balancing. */
+ if (!h && is_leaf_removable (tb))
+ return CARRY_ON;
+
+
+ /* why do we perform this check here rather than earlier??
+ Answer: we can win 1 node in some cases above. Moreover we
+ checked it above, when we checked, that S[0] is not removable
+ in principle */
+ if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
+ if ( ! h )
+ tb->s0num = vn->vn_nr_item;
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED;
+ }
+
+
+ {
+ int lpar, rpar, nset, lset, rset, lrset;
+ /*
+ * regular overflowing of the node
+ */
+
+ /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
+ lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
+ nset, lset, rset, lrset - shows, whether flowing items give better packing
+ */
+#define FLOW 1
+#define NO_FLOW 0 /* do not any splitting */
+
+ /* we choose one the following */
+#define NOTHING_SHIFT_NO_FLOW 0
+#define NOTHING_SHIFT_FLOW 5
+#define LEFT_SHIFT_NO_FLOW 10
+#define LEFT_SHIFT_FLOW 15
+#define RIGHT_SHIFT_NO_FLOW 20
+#define RIGHT_SHIFT_FLOW 25
+#define LR_SHIFT_NO_FLOW 30
+#define LR_SHIFT_FLOW 35
+
+
+ lpar = tb->lnum[h];
+ rpar = tb->rnum[h];
+
+
+ /* calculate number of blocks S[h] must be split into when
+ nothing is shifted to the neighbors,
+ as well as number of items in each part of the split node (s012 numbers),
+ and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
+ nset = NOTHING_SHIFT_NO_FLOW;
+ nver = get_num_ver (vn->vn_mode, tb, h,
+ 0, -1, h?vn->vn_nr_item:0, -1,
+ snum012, NO_FLOW);
+
+ if (!h)
+ {
+ int nver1;
+
+ /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
+ nver1 = get_num_ver (vn->vn_mode, tb, h,
+ 0, -1, 0, -1,
+ snum012 + NOTHING_SHIFT_FLOW, FLOW);
+ if (nver > nver1)
+ nset = NOTHING_SHIFT_FLOW, nver = nver1;
+ }
+
+
+ /* calculate number of blocks S[h] must be split into when
+ l_shift_num first items and l_shift_bytes of the right most
+ liquid item to be shifted are shifted to the left neighbor,
+ as well as number of items in each part of the splitted node (s012 numbers),
+ and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ */
+ lset = LEFT_SHIFT_NO_FLOW;
+ lnver = get_num_ver (vn->vn_mode, tb, h,
+ lpar - (( h || tb->lbytes == -1 ) ? 0 : 1), -1, h ? vn->vn_nr_item:0, -1,
+ snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
+ if (!h)
+ {
+ int lnver1;
+
+ lnver1 = get_num_ver (vn->vn_mode, tb, h,
+ lpar - ((tb->lbytes != -1) ? 1 : 0), tb->lbytes, 0, -1,
+ snum012 + LEFT_SHIFT_FLOW, FLOW);
+ if (lnver > lnver1)
+ lset = LEFT_SHIFT_FLOW, lnver = lnver1;
+ }
+
+
+ /* calculate number of blocks S[h] must be split into when
+ r_shift_num first items and r_shift_bytes of the left most
+ liquid item to be shifted are shifted to the right neighbor,
+ as well as number of items in each part of the splitted node (s012 numbers),
+ and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ */
+ rset = RIGHT_SHIFT_NO_FLOW;
+ rnver = get_num_ver (vn->vn_mode, tb, h,
+ 0, -1, h ? (vn->vn_nr_item-rpar) : (rpar - (( tb->rbytes != -1 ) ? 1 : 0)), -1,
+ snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
+ if (!h)
+ {
+ int rnver1;
+
+ rnver1 = get_num_ver (vn->vn_mode, tb, h,
+ 0, -1, (rpar - ((tb->rbytes != -1) ? 1 : 0)), tb->rbytes,
+ snum012 + RIGHT_SHIFT_FLOW, FLOW);
+
+ if (rnver > rnver1)
+ rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
+ }
+
+
+ /* calculate number of blocks S[h] must be split into when
+ items are shifted in both directions,
+ as well as number of items in each part of the splitted node (s012 numbers),
+ and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ */
+ lrset = LR_SHIFT_NO_FLOW;
+ lrnver = get_num_ver (vn->vn_mode, tb, h,
+ lpar - ((h || tb->lbytes == -1) ? 0 : 1), -1, h ? (vn->vn_nr_item-rpar):(rpar - ((tb->rbytes != -1) ? 1 : 0)), -1,
+ snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
+ if (!h)
+ {
+ int lrnver1;
+
+ lrnver1 = get_num_ver (vn->vn_mode, tb, h,
+ lpar - ((tb->lbytes != -1) ? 1 : 0), tb->lbytes, (rpar - ((tb->rbytes != -1) ? 1 : 0)), tb->rbytes,
+ snum012 + LR_SHIFT_FLOW, FLOW);
+ if (lrnver > lrnver1)
+ lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
+ }
+
+
+
+ /* Our general shifting strategy is:
+ 1) to minimize number of new nodes;
+ 2) to minimize number of neighbors involved in shifting;
+ 3) to minimize number of disk reads; */
+
+ /* we can win TWO or ONE nodes by shifting in both directions */
+ if (lrnver < lnver && lrnver < rnver)
+ {
+#ifdef CONFIG_REISERFS_CHECK
+ if (h && (tb->lnum[h] != 1 || tb->rnum[h] != 1 || lrnver != 1 || rnver != 2 || lnver != 2 || h != 1))
+ reiserfs_panic (0, "vs-8230: check_balance: bad h");
+#endif
+ if (lrset == LR_SHIFT_FLOW)
+ set_parameters (tb, h, tb->lnum[h], tb->rnum[h], lrnver, snum012 + lrset,
+ tb->lbytes, tb->rbytes);
+ else
+ set_parameters (tb, h, tb->lnum[h] - ((tb->lbytes == -1) ? 0 : 1),
+ tb->rnum[h] - ((tb->rbytes == -1) ? 0 : 1), lrnver, snum012 + lrset, -1, -1);
+
+ return CARRY_ON;
+ }
+
+ /* if shifting doesn't lead to better packing then don't shift */
+ if (nver == lrnver)
+ {
+ set_parameters (tb, h, 0, 0, nver, snum012 + nset, -1, -1);
+ return CARRY_ON;
+ }
+
+
+ /* now we know that for better packing shifting in only one
+ direction either to the left or to the right is required */
+
+ /* if shifting to the left is better than shifting to the right */
+ if (lnver < rnver)
+ {
+ SET_PAR_SHIFT_LEFT;
+ return CARRY_ON;
+ }
+
+ /* if shifting to the right is better than shifting to the left */
+ if (lnver > rnver)
+ {
+ SET_PAR_SHIFT_RIGHT;
+ return CARRY_ON;
+ }
+
+
+ /* now shifting in either direction gives the same number
+ of nodes and we can make use of the cached neighbors */
+ if (is_left_neighbor_in_cache (tb,h))
+ {
+ SET_PAR_SHIFT_LEFT;
+ return CARRY_ON;
+ }
+
+ /* shift to the right independently on whether the right neighbor in cache or not */
+ SET_PAR_SHIFT_RIGHT;
+ return CARRY_ON;
+ }
+}
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Cutting for INTERNAL node of S+tree.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode d - delete, c - cut;
+ * Returns: 1 - schedule occured;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+ *
+ * Note: Items of internal nodes have fixed size, so the balance condition for
+ * the internal part of S+tree is as for the B-trees.
+ */
+static int dc_check_balance_internal (struct tree_balance * tb, int h)
+{
+ struct virtual_node * vn = tb->tb_vn;
+
+ /* Sh is the node whose balance is currently being checked,
+ and Fh is its father. */
+ struct buffer_head * Sh, * Fh;
+ int n_ret_value; /* status code from get_parents() */
+ int lfree, rfree /* free space in L and R */;
+
+ /* Note: a local 'maxsize = MAX_CHILD_SIZE(Sh)' used to be computed
+ here but was never read; it has been removed (MAX_CHILD_SIZE is a
+ pure size computation with no side effects). */
+ Sh = PATH_H_PBUFFER (tb->tb_path, h);
+ Fh = PATH_H_PPARENT (tb->tb_path, h);
+
+/* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
+/* new_nr_item = number of items node would have if operation is */
+/* performed without balancing (new_nr_item); */
+ create_virtual_node (tb, h);
+
+ if ( ! Fh )
+ { /* S[h] is the root. */
+ if ( vn->vn_nr_item > 0 )
+ {
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+ }
+ /* new_nr_item == 0.
+ * Current root will be deleted resulting in
+ * decrementing the tree height. */
+ set_parameters (tb, h, 0, 0, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ if ( (n_ret_value = get_parents(tb,h)) != CARRY_ON )
+ return n_ret_value;
+
+
+ /* get free space of neighbors */
+ rfree = get_rfree (tb, h);
+ lfree = get_lfree (tb, h);
+
+ /* determine maximal number of items we can fit into neighbors */
+ check_left (tb, h, lfree);
+ check_right (tb, h, rfree);
+
+
+ if ( vn->vn_nr_item >= MIN_NR_KEY(Sh) )
+ { /* Balance condition for the internal node is valid.
+ * In this case we balance only if it leads to better packing. */
+ if ( vn->vn_nr_item == MIN_NR_KEY(Sh) )
+ { /* Here we join S[h] with one of its neighbors,
+ * which is impossible with greater values of new_nr_item. */
+ if ( tb->lnum[h] >= vn->vn_nr_item + 1 )
+ {
+ /* All contents of S[h] can be moved to L[h]. */
+ int n;
+ int order_L;
+
+ order_L = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
+ n = B_N_CHILD(tb->FL[h],order_L)->dc_size / (DC_SIZE + KEY_SIZE);
+ set_parameters (tb, h, -n-1, 0, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ if ( tb->rnum[h] >= vn->vn_nr_item + 1 )
+ {
+ /* All contents of S[h] can be moved to R[h]. */
+ int n;
+ int order_R;
+
+ order_R = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==B_NR_ITEMS(Fh)) ? 0 : n + 1;
+ n = B_N_CHILD(tb->FR[h],order_R)->dc_size / (DC_SIZE + KEY_SIZE);
+ set_parameters (tb, h, 0, -n-1, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+ }
+
+ if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)
+ {
+ /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
+ int to_r;
+
+ to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 -
+ (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
+ set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* Balancing does not lead to better packing. */
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED;
+ }
+
+ /* Current node contain insufficient number of items. Balancing is required. */
+ /* Check whether we can merge S[h] with left neighbor. */
+ if (tb->lnum[h] >= vn->vn_nr_item + 1)
+ if (is_left_neighbor_in_cache (tb,h) || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h])
+ {
+ int n;
+ int order_L;
+
+ order_L = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
+ n = B_N_CHILD(tb->FL[h],order_L)->dc_size / (DC_SIZE + KEY_SIZE);
+ set_parameters (tb, h, -n-1, 0, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* Check whether we can merge S[h] with right neighbor. */
+ if (tb->rnum[h] >= vn->vn_nr_item + 1)
+ {
+ int n;
+ int order_R;
+
+ order_R = ((n=PATH_H_B_ITEM_ORDER(tb->tb_path, h))==B_NR_ITEMS(Fh)) ? 0 : (n + 1);
+ n = B_N_CHILD(tb->FR[h],order_R)->dc_size / (DC_SIZE + KEY_SIZE);
+ set_parameters (tb, h, 0, -n-1, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
+ if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)
+ {
+ int to_r;
+
+ to_r = ((MAX_NR_KEY(Sh)<<1)+2-tb->lnum[h]-tb->rnum[h]+vn->vn_nr_item+1)/2 -
+ (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
+ set_parameters (tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* For internal nodes try to borrow item from a neighbor */
+#ifdef CONFIG_REISERFS_CHECK
+ if (!tb->FL[h] && !tb->FR[h])
+ reiserfs_panic (0, "vs-8235: dc_check_balance_internal: trying to borrow for root");
+#endif
+
+ /* Borrow one or two items from caching neighbor */
+ if (is_left_neighbor_in_cache (tb,h) || !tb->FR[h])
+ {
+ int from_l;
+
+ from_l = (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item + 1) / 2 - (vn->vn_nr_item + 1);
+ set_parameters (tb, h, -from_l, 0, 1, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ set_parameters (tb, h, 0, -((MAX_NR_KEY(Sh)+1-tb->rnum[h]+vn->vn_nr_item+1)/2-(vn->vn_nr_item+1)), 1,
+ NULL, -1, -1);
+ return CARRY_ON;
+}
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Truncating for LEAF node of S+tree.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode d - delete, c - cut;
+ * Returns: 1 - schedule occured;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+ */
+/* Leaf-level counterpart of dc_check_balance_internal(): see the header
+ comment above. Caller dc_check_balance() only invokes this with h == 0. */
+static int dc_check_balance_leaf (struct tree_balance * tb, int h)
+{
+ struct virtual_node * vn = tb->tb_vn;
+
+ /* Number of bytes that must be deleted from
+ (value is negative if bytes are deleted) buffer which
+ contains node being balanced. The mnemonic is that the
+ attempted change in node space used level is levbytes bytes. */
+ int levbytes;
+ /* the maximal item size */
+ int maxsize,
+ n_ret_value; /* status code from get_parents() */
+ /* S0 is the node whose balance is currently being checked,
+ and F0 is its father. */
+ struct buffer_head * S0, * F0;
+ int lfree, rfree /* free space in L and R */;
+
+ S0 = PATH_H_PBUFFER (tb->tb_path, 0);
+ F0 = PATH_H_PPARENT (tb->tb_path, 0);
+
+ /* negative here: we are deleting/cutting */
+ levbytes = tb->insert_size[h];
+
+ maxsize = MAX_CHILD_SIZE(S0); /* maximal possible size of an item */
+
+ if ( ! F0 )
+ { /* S[0] is the root now. */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( -levbytes >= maxsize - B_FREE_SPACE (S0) )
+ reiserfs_panic (tb->tb_sb, "vs-8240: dc_check_balance_leaf: attempt to create empty buffer tree");
+#endif
+
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED;
+ }
+
+ if ( (n_ret_value = get_parents(tb,h)) != CARRY_ON )
+ return n_ret_value;
+
+ /* get free space of neighbors */
+ rfree = get_rfree (tb, h);
+ lfree = get_lfree (tb, h);
+
+ create_virtual_node (tb, h);
+
+ /* if 3 leaves can be merge to one, set parameters and return */
+ if (are_leaves_removable (tb, lfree, rfree))
+ return CARRY_ON;
+
+ /* determine maximal number of items we can shift to the left/right neighbor
+ and the maximal number of bytes that can flow to the left/right neighbor
+ from the left/right most liquid item that cannot be shifted from S[0] entirely
+ */
+ check_left (tb, h, lfree);
+ check_right (tb, h, rfree);
+
+ /* check whether we can merge S with left neighbor. */
+ if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
+ /* prefer the left merge when L is cached, when S cannot be merged
+ with R, or when there is no right neighbor at all */
+ if (is_left_neighbor_in_cache (tb,h) ||
+ ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
+ !tb->FR[h]) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!tb->FL[h])
+ reiserfs_panic (0, "vs-8245: dc_check_balance_leaf: FL[h] must exist");
+#endif
+
+ /* set parameter to merge S[0] with its left neighbor */
+ set_parameters (tb, h, -1, 0, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* check whether we can merge S[0] with right neighbor. */
+ if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
+ set_parameters (tb, h, 0, -1, 0, NULL, -1, -1);
+ return CARRY_ON;
+ }
+
+ /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
+ if (is_leaf_removable (tb))
+ return CARRY_ON;
+
+ /* Balancing is not required. */
+ tb->s0num = vn->vn_nr_item;
+ set_parameters (tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED;
+}
+
+
+
+/* Check whether current node S[h] is balanced when Decreasing its size by
+ * Deleting or Cutting.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode d - delete, c - cut.
+ * Returns: 1 - schedule occured;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+ */
+static int dc_check_balance (struct tree_balance * tb, int h)
+{
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! (PATH_H_PBUFFER (tb->tb_path, h)) )
+ reiserfs_panic(tb->tb_sb, "vs-8250: dc_check_balance: S is not initialized");
+#endif
+
+ /* The leaf level and the internal levels obey different balance
+ conditions, so dispatch on the level being checked. */
+ if ( h == 0 )
+ return dc_check_balance_leaf (tb, h);
+ return dc_check_balance_internal (tb, h);
+}
+
+
+
+/* Check whether current node S[h] is balanced.
+ * Calculate parameters for balancing for current level h.
+ * Parameters:
+ *
+ * tb tree_balance structure:
+ *
+ * tb is a large structure that must be read about in the header file
+ * at the same time as this procedure if the reader is to successfully
+ * understand this procedure
+ *
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste, d - delete, c - cut.
+ * Returns: 1 - schedule occured;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+ */
+static int check_balance (int mode,
+ struct tree_balance * tb,
+ int h,
+ int inum,
+ int pos_in_item,
+ struct item_head * ins_ih,
+ const void * data
+ )
+{
+ struct virtual_node * vn;
+
+ /* The virtual node is laid out at the start of the preallocated
+ vn_buf; the free area for virtual items begins right behind it. */
+ vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
+ vn->vn_free_ptr = (char *)(tb->tb_vn + 1);
+
+ /* Record what operation is being balanced for and where it applies. */
+ vn->vn_mode = mode;
+ vn->vn_affected_item_num = inum;
+ vn->vn_pos_in_item = pos_in_item;
+ vn->vn_ins_ih = ins_ih;
+ vn->vn_data = data;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (mode == M_INSERT && !vn->vn_ins_ih)
+ reiserfs_panic (0, "vs-8255: check_balance: ins_ih can not be 0 in insert mode");
+#endif
+
+ /* A growing node (insert/paste) and a shrinking node (delete/cut)
+ are handled by separate balance checkers. */
+ if ( tb->insert_size[h] > 0 )
+ return ip_check_balance (tb, h);
+ return dc_check_balance (tb, h);
+}
+
+
+
+/* Check whether parent at the path is the really parent of the current node.*/
+/* Check whether parent at the path is the really parent of the current node.
+ Returns CARRY_ON when the cached parent is valid and unlocked, and
+ REPEAT_SEARCH when the tree changed and the path must be re-searched. */
+static int get_direct_parent(
+ struct tree_balance * p_s_tb,
+ int n_h
+ ) {
+ struct buffer_head * p_s_bh;
+ struct path * p_s_path = p_s_tb->tb_path;
+ int n_position,
+ n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
+
+ /* We are in the root or in the new root. */
+ if ( n_path_offset <= FIRST_PATH_ELEMENT_OFFSET ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1 )
+ reiserfs_panic(p_s_tb->tb_sb, "PAP-8260: get_direct_parent: illegal offset in the path");
+#endif
+
+ if ( PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+ SB_ROOT_BLOCK (p_s_tb->tb_sb) ) {
+ /* Root is not changed. */
+ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL;
+ PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0;
+ return CARRY_ON;
+ }
+ return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
+ }
+
+ if ( ! B_IS_IN_TREE(p_s_bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1)) )
+ return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+
+ /* position must be a valid child index within the parent */
+ if ( (n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1)) > B_NR_ITEMS(p_s_bh) )
+ return REPEAT_SEARCH;
+
+ if ( B_N_CHILD_NUM(p_s_bh, n_position) != PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr )
+ /* Parent in the path is not parent of the current node in the tree. */
+ return REPEAT_SEARCH;
+
+ /* Waiting may sleep, so the tree may change underneath us. */
+ if ( buffer_locked(p_s_bh) ) {
+ __wait_on_buffer(p_s_bh);
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+ return REPEAT_SEARCH;
+ }
+
+ return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */
+}
+
+
+/* Using lnum[n_h] and rnum[n_h] we should determine what neighbors
+ * of S[n_h] we
+ * need in order to balance S[n_h], and get them if necessary.
+ * Returns: SCHEDULE_OCCURRED - schedule occured while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+/* Read into cache (tb->L[n_h] / tb->R[n_h]) whichever neighbors of S[n_h]
+ the already-computed lnum/rnum say are needed for balancing. May also
+ return IO_ERROR when a neighbor cannot be read, or REPEAT_SEARCH when
+ the filesystem changed while reading. */
+static int get_neighbors(
+ struct tree_balance * p_s_tb,
+ int n_h
+ ) {
+ int n_child_position,
+ n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1);
+ unsigned long n_son_number;
+ struct super_block * p_s_sb = p_s_tb->tb_sb;
+ struct buffer_head * p_s_bh;
+
+
+ if ( p_s_tb->lnum[n_h] ) {
+ /* We need left neighbor to balance S[n_h]. */
+ p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( p_s_bh == p_s_tb->FL[n_h] && ! PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset) )
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8270: get_neighbors: invalid position in the parent");
+#endif
+
+ /* left neighbor is the child of FL just before S (or FL's last child
+ when S's parent differs from FL) */
+ n_child_position = ( p_s_bh == p_s_tb->FL[n_h] ) ? p_s_tb->lkey[n_h] : B_NR_ITEMS (p_s_tb->FL[n_h]);
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position);
+ p_s_bh = reiserfs_bread(p_s_sb->s_dev, n_son_number, p_s_sb->s_blocksize);
+ if (!p_s_bh)
+ return IO_ERROR;
+ /* bread may sleep; revalidate */
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+ decrement_bcount(p_s_bh);
+ return REPEAT_SEARCH;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! B_IS_IN_TREE(p_s_tb->FL[n_h]) || n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) ||
+ B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) != p_s_bh->b_blocknr )
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8275: get_neighbors: invalid parent");
+ if ( ! B_IS_IN_TREE(p_s_bh) )
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8280: get_neighbors: invalid child");
+
+ if (! n_h && B_FREE_SPACE (p_s_bh) != MAX_CHILD_SIZE (p_s_bh) - B_N_CHILD (p_s_tb->FL[0],n_child_position)->dc_size)
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8290: get_neighbors: invalid child size of left neighbor");
+#endif
+
+ /* drop any previously cached left neighbor and install the new one */
+ decrement_bcount(p_s_tb->L[n_h]);
+ p_s_tb->L[n_h] = p_s_bh;
+ }
+
+
+ if ( p_s_tb->rnum[n_h] ) { /* We need right neighbor to balance S[n_path_offset]. */
+ p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( p_s_bh == p_s_tb->FR[n_h] && PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset) >= B_NR_ITEMS(p_s_bh) )
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8295: get_neighbors: invalid position in the parent");
+#endif
+
+ n_child_position = ( p_s_bh == p_s_tb->FR[n_h] ) ? p_s_tb->rkey[n_h] + 1 : 0;
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position);
+ p_s_bh = reiserfs_bread(p_s_sb->s_dev, n_son_number, p_s_sb->s_blocksize);
+ if (!p_s_bh)
+ return IO_ERROR;
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+ decrement_bcount(p_s_bh);
+ return REPEAT_SEARCH;
+ }
+ decrement_bcount(p_s_tb->R[n_h]);
+ p_s_tb->R[n_h] = p_s_bh;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (! n_h && B_FREE_SPACE (p_s_bh) != MAX_CHILD_SIZE (p_s_bh) - B_N_CHILD (p_s_tb->FR[0],n_child_position)->dc_size) {
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8300: get_neighbors: invalid child size of right neighbor (%d != %d - %d)",
+ B_FREE_SPACE (p_s_bh), MAX_CHILD_SIZE (p_s_bh), B_N_CHILD (p_s_tb->FR[0],n_child_position)->dc_size);
+ }
+#endif
+
+ }
+ return CARRY_ON;
+}
+
+
+/* kmalloc wrapper that accounts the allocation against the superblock
+ and periodically warns as the total grows. Returns NULL on failure. */
+void * reiserfs_kmalloc (size_t size, int flags, struct super_block * s)
+{
+ /* High-water mark of s_kmallocs at which we last warned; warn again
+ roughly every 200000 additional bytes. */
+ static size_t malloced;
+ void * vp = kmalloc (size, flags);
+
+ if (!vp)
+ return vp;
+
+ s->u.reiserfs_sb.s_kmallocs += size;
+ if (s->u.reiserfs_sb.s_kmallocs > malloced + 200000) {
+ reiserfs_warning ("vs-8301: reiserfs_kmalloc: allocated memory %d\n", s->u.reiserfs_sb.s_kmallocs);
+ malloced = s->u.reiserfs_sb.s_kmallocs;
+ }
+ return vp;
+}
+
+/* kfree wrapper that keeps the per-superblock allocation accounting in
+ sync with reiserfs_kmalloc(); a negative total indicates a size
+ mismatch between an allocation and its release. */
+void reiserfs_kfree (const void * vp, size_t size, struct super_block * s)
+{
+ kfree (vp);
+ s->u.reiserfs_sb.s_kmallocs -= size;
+
+ if (s->u.reiserfs_sb.s_kmallocs < 0)
+ reiserfs_warning ("vs-8302: reiserfs_kfree: allocated memory %d\n", s->u.reiserfs_sb.s_kmallocs);
+}
+
+
+/* Upper bound on the memory create_virtual_node() may need for node 'bh'.
+ One blocksize is enough for _ALL_ currently possible cases: in a 4k
+ block one may put < 170 empty items, each virtual item eats 12 bytes,
+ and the biggest direntry item may have < 256 entries, each entry eating
+ 2 bytes of virtual node space. The exact per-item computation that
+ used to follow the return (inside "#if 0") was unreachable dead code
+ and has been removed; 'bh' is kept for interface compatibility. */
+static int get_virtual_node_size (struct super_block * sb, struct buffer_head * bh)
+{
+ return sb->s_blocksize;
+}
+
+
+
+/* maybe we should fail balancing we are going to perform when kmalloc
+ fails several times. But now it will loop until kmalloc gets
+ required memory */
+/* maybe we should fail balancing we are going to perform when kmalloc
+ fails several times. But now it will loop until kmalloc gets
+ required memory.
+ Ensure tb->vn_buf is large enough for the virtual node of the current
+ leaf; returns CARRY_ON, or REPEAT_SEARCH when the caller must re-search
+ the path (buffer was reallocated or the filesystem changed). */
+static int get_mem_for_virtual_node (struct tree_balance * tb)
+{
+ int check_fs = 0; /* set when we freed the old buffer and must revalidate */
+ int size;
+ char * buf;
+
+ size = get_virtual_node_size (tb->tb_sb, PATH_PLAST_BUFFER (tb->tb_path));
+
+ if (size > tb->vn_buf_size) {
+ /* we have to allocate more memory for virtual node */
+ if (tb->vn_buf) {
+ /* free memory allocated before */
+ reiserfs_kfree (tb->vn_buf, tb->vn_buf_size, tb->tb_sb);
+ /* this is not needed if kfree is atomic */
+ check_fs = 1;
+ }
+
+ /* virtual node requires now more memory */
+ tb->vn_buf_size = size;
+
+ /* get memory for virtual item */
+ buf = reiserfs_kmalloc(size, GFP_ATOMIC, tb->tb_sb);
+ if ( ! buf ) {
+ /* getting memory with GFP_KERNEL priority may involve
+ balancing now (due to indirect_to_direct conversion on
+ dcache shrinking). So, release path and collected
+ resourses here */
+ free_buffers_in_tb (tb);
+ buf = reiserfs_kmalloc(size, GFP_BUFFER, tb->tb_sb);
+ if ( !buf ) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning ("vs-8345: get_mem_for_virtual_node: "
+ "kmalloc failed. reiserfs kmalloced %d bytes\n",
+ tb->tb_sb->u.reiserfs_sb.s_kmallocs);
+#endif
+ tb->vn_buf_size = 0;
+ }
+ tb->vn_buf = buf;
+ schedule() ;
+ /* NOTE(review): deliberately REPEAT_SEARCH even when the
+ GFP_BUFFER retry succeeded — free_buffers_in_tb() above
+ released the collected resources, so the search must be
+ redone either way. */
+ return REPEAT_SEARCH;
+ }
+
+ tb->vn_buf = buf;
+ }
+
+ /* freeing above may have slept; make sure the tree did not change */
+ if ( check_fs && FILESYSTEM_CHANGED_TB (tb) )
+ return REPEAT_SEARCH;
+
+ return CARRY_ON;
+}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+/* Panic if the buffer 'descr[level]' violates any tree-balance invariant:
+ non-positive refcount, stale contents, not hashed into the tree, or a
+ device/size/blocknr that does not match the superblock. A NULL buffer
+ is accepted silently. */
+static void tb_buffer_sanity_check (struct super_block * p_s_sb,
+ struct buffer_head * p_s_bh,
+ const char *descr, int level) {
+ if (!p_s_bh)
+ return;
+
+ if (atomic_read (&(p_s_bh->b_count)) <= 0)
+ reiserfs_panic (p_s_sb, "tb_buffer_sanity_check(): negative or zero reference counter for buffer %s[%d] (%b)\n", descr, level, p_s_bh);
+
+ if ( ! buffer_uptodate (p_s_bh) )
+ reiserfs_panic (p_s_sb, "tb_buffer_sanity_check(): buffer is not up to date %s[%d] (%b)\n", descr, level, p_s_bh);
+
+ if ( ! B_IS_IN_TREE (p_s_bh) )
+ reiserfs_panic (p_s_sb, "tb_buffer_sanity_check(): buffer is not in tree %s[%d] (%b)\n", descr, level, p_s_bh);
+
+ if (p_s_bh->b_dev != p_s_sb->s_dev ||
+ p_s_bh->b_size != p_s_sb->s_blocksize ||
+ p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb))
+ reiserfs_panic (p_s_sb, "tb_buffer_sanity_check(): check failed for buffer %s[%d] (%b)\n", descr, level, p_s_bh);
+}
+#endif
+
+/* NOTE(review): despite the name, this only calls
+ reiserfs_prepare_for_journal(); presumably that clears/handles the
+ buffer's dirty state as part of journal preparation — confirm in
+ journal.c. */
+static void clear_all_dirty_bits(struct super_block *s,
+ struct buffer_head *bh) {
+ reiserfs_prepare_for_journal(s, bh, 0) ;
+}
+
+/* Wait until every buffer collected for the balance (path buffers, the
+ L/FL/CFL and R/FR/CFR neighbor sets for each affected level, and the
+ FEB preallocated nodes) is unlocked, preparing each for the journal on
+ the way. Returns CARRY_ON when all are unlocked, or REPEAT_SEARCH when
+ the filesystem changed while we slept. */
+static int wait_tb_buffers_until_unlocked (struct tree_balance * p_s_tb)
+{
+ struct buffer_head * locked; /* first locked buffer found this pass */
+#ifdef CONFIG_REISERFS_CHECK
+ int repeat_counter = 0;
+#endif
+ int i;
+
+ do {
+
+ locked = NULL;
+
+ /* pass 1: buffers on the search path */
+ for ( i = p_s_tb->tb_path->path_length; !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i-- ) {
+ if ( PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i) ) {
+ /* if I understand correctly, we can only be sure the last buffer
+ ** in the path is in the tree --clm
+ */
+#ifdef CONFIG_REISERFS_CHECK
+ if (PATH_PLAST_BUFFER(p_s_tb->tb_path) ==
+ PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) {
+ tb_buffer_sanity_check (p_s_tb->tb_sb,
+ PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i),
+ "S",
+ p_s_tb->tb_path->path_length - i);
+ }
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb,
+ PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i)) ;
+
+ if ( buffer_locked (PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i)) )
+ locked = PATH_OFFSET_PBUFFER (p_s_tb->tb_path, i);
+ }
+ }
+
+ /* pass 2: neighbor buffers for each level the balance touches */
+ for ( i = 0; !locked && i < MAX_HEIGHT && p_s_tb->insert_size[i]; i++ ) {
+
+ if (p_s_tb->lnum[i] ) {
+
+ if ( p_s_tb->L[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->L[i], "L", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->L[i]) ;
+ if ( buffer_locked (p_s_tb->L[i]) )
+ locked = p_s_tb->L[i];
+ }
+
+ if ( !locked && p_s_tb->FL[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->FL[i], "FL", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FL[i]) ;
+ if ( buffer_locked (p_s_tb->FL[i]) )
+ locked = p_s_tb->FL[i];
+ }
+
+ if ( !locked && p_s_tb->CFL[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->CFL[i], "CFL", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->CFL[i]) ;
+ if ( buffer_locked (p_s_tb->CFL[i]) )
+ locked = p_s_tb->CFL[i];
+ }
+
+ }
+
+ if ( !locked && (p_s_tb->rnum[i]) ) {
+
+ if ( p_s_tb->R[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->R[i], "R", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->R[i]) ;
+ if ( buffer_locked (p_s_tb->R[i]) )
+ locked = p_s_tb->R[i];
+ }
+
+
+ if ( !locked && p_s_tb->FR[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->FR[i], "FR", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FR[i]) ;
+ if ( buffer_locked (p_s_tb->FR[i]) )
+ locked = p_s_tb->FR[i];
+ }
+
+ if ( !locked && p_s_tb->CFR[i] ) {
+#ifdef CONFIG_REISERFS_CHECK
+ tb_buffer_sanity_check (p_s_tb->tb_sb, p_s_tb->CFR[i], "CFR", i);
+#endif
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->CFR[i]) ;
+ if ( buffer_locked (p_s_tb->CFR[i]) )
+ locked = p_s_tb->CFR[i];
+ }
+ }
+ }
+ /* as far as I can tell, this is not required. The FEB list seems
+ ** to be full of newly allocated nodes, which will never be locked,
+ ** dirty, or anything else.
+ ** To be safe, I'm putting in the checks and waits in. For the moment,
+ ** they are needed to keep the code in journal.c from complaining
+ ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well.
+ ** --clm
+ */
+ for ( i = 0; !locked && i < MAX_FEB_SIZE; i++ ) {
+ if ( p_s_tb->FEB[i] ) {
+ clear_all_dirty_bits(p_s_tb->tb_sb, p_s_tb->FEB[i]) ;
+ if (buffer_locked(p_s_tb->FEB[i])) {
+ locked = p_s_tb->FEB[i] ;
+ }
+ }
+ }
+
+ if (locked) {
+#ifdef CONFIG_REISERFS_CHECK
+ repeat_counter++;
+ if ( (repeat_counter % 10000) == 0) {
+ /* message fixed to name this function, not the nonexistent
+ "wait_tb_buffers_until_released" */
+ reiserfs_warning ("wait_tb_buffers_until_unlocked(): too many iterations waiting for buffer to unlock (%b)\n", locked);
+
+ /* Don't loop forever. Try to recover from possible error. */
+
+ return ( FILESYSTEM_CHANGED_TB (p_s_tb) ) ? REPEAT_SEARCH : CARRY_ON;
+ }
+#endif
+ __wait_on_buffer (locked);
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) ) {
+ return REPEAT_SEARCH;
+ }
+ }
+
+ } while (locked);
+
+ return CARRY_ON;
+}
+
+
+/* Prepare for balancing, that is
+ * get all necessary parents, and neighbors;
+ * analyze what and where should be moved;
+ * get sufficient number of new nodes;
+ * Balancing will start only after all resources will be collected at a time.
+ *
+ * When ported to SMP kernels, only at the last moment after all needed nodes
+ * are collected in cache, will the resources be locked using the usual
+ * textbook ordered lock acquisition algorithms. Note that ensuring that
+ * this code neither write locks what it does not need to write lock nor locks out of order
+ * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
+ *
+ * fix is meant in the sense of render unchanging
+ *
+ * Latency might be improved by first gathering a list of what buffers are needed
+ * and then getting as many of them in parallel as possible? -Hans
+ *
+ * Parameters:
+ * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append)
+ * tb tree_balance structure;
+ * inum item number in S[h];
+ * pos_in_item - comment this if you can
+ * ins_ih & ins_sd are used when inserting
+ * Returns: 1 - schedule occurred while the function worked;
+ * 0 - schedule didn't occur while the function worked;
+ * -1 - if no_disk_space
+ */
+
+
+int fix_nodes (int n_op_mode,
+ struct tree_balance * p_s_tb,
+ struct item_head * p_s_ins_ih, // item head of item being inserted
+ const void * data // inserted item or data to be pasted
+ ) {
+ int n_ret_value,
+ n_h,
+ n_item_num = PATH_LAST_POSITION(p_s_tb->tb_path);
+ int n_pos_in_item;
+
+ /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
+ ** during wait_tb_buffers_run
+ */
+ int wait_tb_buffers_run = 0 ;
+ int windex ;
+ struct buffer_head * p_s_tbS0 = PATH_PLAST_BUFFER(p_s_tb->tb_path);
+
+ n_pos_in_item = p_s_tb->tb_path->pos_in_item;
+
+
+ p_s_tb->fs_gen = get_generation (p_s_tb->tb_sb);
+
+ /* we prepare and log the super here so it will already be in the
+ ** transaction when do_balance needs to change it.
+ ** This way do_balance won't have to schedule when trying to prepare
+ ** the super for logging
+ */
+ reiserfs_prepare_for_journal(p_s_tb->tb_sb,
+ SB_BUFFER_WITH_SB(p_s_tb->tb_sb), 1) ;
+ journal_mark_dirty(p_s_tb->transaction_handle, p_s_tb->tb_sb,
+ SB_BUFFER_WITH_SB(p_s_tb->tb_sb)) ;
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+ return REPEAT_SEARCH;
+
+ /* if it possible in indirect_to_direct conversion */
+ if (buffer_locked (p_s_tbS0)) {
+ __wait_on_buffer (p_s_tbS0);
+ if ( FILESYSTEM_CHANGED_TB (p_s_tb) )
+ return REPEAT_SEARCH;
+ }
+
+#ifndef __KERNEL__
+ if ( atomic_read (&(p_s_tbS0->b_count)) > 1 ||
+ (p_s_tb->L[0] && atomic_read (&(p_s_tb->L[0]->b_count)) > 1) ||
+ (p_s_tb->R[0] && atomic_read (&(p_s_tb->R[0]->b_count)) > 1) ) {
+ printk ("mode=%c, insert_size=%d\n", n_op_mode, p_s_tb->insert_size[0]);
+ print_cur_tb ("first three parameters are invalid");
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8310: fix_nodes: all buffers must be hold once in one thread processing");
+ }
+#endif
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( cur_tb ) {
+ print_cur_tb ("fix_nodes");
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8305: fix_nodes: there is pending do_balance");
+ }
+
+ if (!buffer_uptodate (p_s_tbS0) || !B_IS_IN_TREE (p_s_tbS0)) {
+ reiserfs_panic (p_s_tb->tb_sb, "PAP-8320: fix_nodes: S[0] (%b %z) is not uptodate "
+ "at the beginning of fix_nodes or not in tree (mode %c)", p_s_tbS0, p_s_tbS0, n_op_mode);
+ }
+
+ // FIXME: new items have to be of 8 byte multiples. Including new
+ // directory items those look like old ones
+ /*
+ if (p_s_tb->insert_size[0] % 8)
+ reiserfs_panic (p_s_tb->tb_sb, "vs-: fix_nodes: incorrect insert_size %d, "
+ "mode %c",
+ p_s_tb->insert_size[0], n_op_mode);
+ */
+
+ /* Check parameters. */
+ switch (n_op_mode) {
+#ifdef REISERFS_FSCK
+ case M_INTERNAL:
+ break;
+ case M_INSERT:
+ if ( n_item_num < 0 || n_item_num > B_NR_ITEMS(p_s_tbS0) )
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8325: fix_nodes: Incorrect item number %d (in S0 - %d) in case of insert",
+ n_item_num, B_NR_ITEMS(p_s_tbS0));
+#else
+ case M_INSERT:
+ if ( n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0) )
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8330: fix_nodes: Incorrect item number %d (in S0 - %d) in case of insert",
+ n_item_num, B_NR_ITEMS(p_s_tbS0));
+#endif
+ break;
+ case M_PASTE:
+ case M_DELETE:
+ case M_CUT:
+ if ( n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0) ) {
+ print_block (p_s_tbS0, 0, -1, -1);
+ printk("mode = %c insert_size = %d\n", n_op_mode, p_s_tb->insert_size[0]);
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8335: fix_nodes: Incorrect item number(%d)", n_item_num);
+ }
+ break;
+ default:
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8340: fix_nodes: Incorrect mode of operation");
+ }
+#endif
+
+ if (get_mem_for_virtual_node (p_s_tb) == REPEAT_SEARCH)
+ // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+ return REPEAT_SEARCH;
+
+
+ /* Starting from the leaf level; for all levels n_h of the tree. */
+ for ( n_h = 0; n_h < MAX_HEIGHT && p_s_tb->insert_size[n_h]; n_h++ ) {
+ if ( (n_ret_value = get_direct_parent(p_s_tb, n_h)) != CARRY_ON ) {
+ goto repeat;
+ return n_ret_value;
+ }
+
+ if ( (n_ret_value = check_balance (n_op_mode, p_s_tb, n_h, n_item_num,
+ n_pos_in_item, p_s_ins_ih, data)) != CARRY_ON ) {
+ if ( n_ret_value == NO_BALANCING_NEEDED ) {
+ /* No balancing for higher levels needed. */
+ if ( (n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON ) {
+ goto repeat;
+ return n_ret_value;
+ }
+ if ( n_h != MAX_HEIGHT - 1 )
+ p_s_tb->insert_size[n_h + 1] = 0;
+ /* ok, analysis and resource gathering are complete */
+ break;
+ }
+ goto repeat;
+ return n_ret_value;
+ }
+
+ if ( (n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON ) {
+ goto repeat;
+ return n_ret_value;
+ }
+
+ if ( (n_ret_value = get_empty_nodes(p_s_tb, n_h)) != CARRY_ON ) {
+ goto repeat;
+ return n_ret_value; /* No disk space, or schedule occurred and
+ analysis may be invalid and needs to be redone. */
+ }
+
+ if ( ! PATH_H_PBUFFER(p_s_tb->tb_path, n_h) ) {
+ /* We have a positive insert size but no nodes exist on this
+ level, this means that we are creating a new root. */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( p_s_tb->blknum[n_h] != 1 )
+ reiserfs_panic(p_s_tb->tb_sb,"PAP-8350: fix_nodes: creating new empty root");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if ( n_h < MAX_HEIGHT - 1 )
+ p_s_tb->insert_size[n_h + 1] = 0;
+ }
+ else
+ if ( ! PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1) ) {
+ if ( p_s_tb->blknum[n_h] > 1 ) {
+ /* The tree needs to be grown, so this node S[n_h]
+ which is the root node is split into two nodes,
+ and a new node (S[n_h+1]) will be created to
+ become the root node. */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( n_h == MAX_HEIGHT - 1 )
+ reiserfs_panic(p_s_tb->tb_sb, "PAP-8355: fix_nodes: attempt to create too high of a tree");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ p_s_tb->insert_size[n_h + 1] = (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1) + DC_SIZE;
+ }
+ else
+ if ( n_h < MAX_HEIGHT - 1 )
+ p_s_tb->insert_size[n_h + 1] = 0;
+ }
+ else
+ p_s_tb->insert_size[n_h + 1] = (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1);
+ }
+
+
+ windex = push_journal_writer("fix_nodes") ;
+ if ((n_ret_value = wait_tb_buffers_until_unlocked (p_s_tb)) == CARRY_ON) {
+ pop_journal_writer(windex) ;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+ wait_tb_buffers_run = 1 ;
+ n_ret_value = REPEAT_SEARCH ;
+ goto repeat;
+ } else {
+ return CARRY_ON;
+ }
+ } else {
+ wait_tb_buffers_run = 1 ;
+ pop_journal_writer(windex) ;
+ goto repeat;
+ }
+
+ repeat:
+ // fix_nodes was unable to perform its calculation due to
+ // filesystem got changed under us, lack of free disk space or i/o
+ // failure. If the first is the case - the search will be
+ // repeated. For now - free all resources acquired so far except
+ // for the new allocated nodes
+ {
+ int i;
+
+ /* Release path buffers. */
+ if (wait_tb_buffers_run) {
+ pathrelse_and_restore(p_s_tb->tb_sb, p_s_tb->tb_path) ;
+ } else {
+ pathrelse (p_s_tb->tb_path);
+ }
+ /* brelse all resources collected for balancing */
+ for ( i = 0; i < MAX_HEIGHT; i++ ) {
+ if (wait_tb_buffers_run) {
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->L[i]);
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->R[i]);
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->FL[i]);
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->FR[i]);
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->CFL[i]);
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb, p_s_tb->CFR[i]);
+ }
+
+ brelse (p_s_tb->L[i]);p_s_tb->L[i] = 0;
+ brelse (p_s_tb->R[i]);p_s_tb->R[i] = 0;
+ brelse (p_s_tb->FL[i]);p_s_tb->FL[i] = 0;
+ brelse (p_s_tb->FR[i]);p_s_tb->FR[i] = 0;
+ brelse (p_s_tb->CFL[i]);p_s_tb->CFL[i] = 0;
+ brelse (p_s_tb->CFR[i]);p_s_tb->CFR[i] = 0;
+ }
+
+ if (wait_tb_buffers_run) {
+ for ( i = 0; i < MAX_FEB_SIZE; i++ ) {
+ if ( p_s_tb->FEB[i] ) {
+ reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+ p_s_tb->FEB[i]) ;
+ }
+ }
+ }
+ return n_ret_value;
+ }
+
+}
+
+
+/* Anatoly will probably forgive me renaming p_s_tb to tb. I just
+ wanted to make lines shorter */
/* Anatoly will probably forgive me renaming p_s_tb to tb. I just
   wanted to make lines shorter */
/* Release every resource fix_nodes gathered into the tree_balance:
 * the search path, all neighbor/parent buffers, the preallocated empty
 * nodes (freeing the blocks that balancing did not use), and the virtual
 * node buffer.  Buffers are un-prepared (journal state restored) before
 * they are released -- that ordering matters.
 */
void unfix_nodes (struct tree_balance * tb)
{
    int i;

#ifdef CONFIG_REISERFS_CHECK
    if ( ! tb->vn_buf )
	reiserfs_panic (tb->tb_sb,
			"PAP-16050: unfix_nodes: pointer to the virtual node is NULL");
#endif

    /* Release path buffers. */
    pathrelse_and_restore (tb->tb_sb, tb->tb_path);

    /* brelse all resources collected for balancing */
    for ( i = 0; i < MAX_HEIGHT; i++ ) {
	/* restore_prepared must run before brelse drops our reference */
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->L[i]);
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->R[i]);
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->FL[i]);
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->FR[i]);
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->CFL[i]);
	reiserfs_restore_prepared_buffer (tb->tb_sb, tb->CFR[i]);

	brelse (tb->L[i]);
	brelse (tb->R[i]);
	brelse (tb->FL[i]);
	brelse (tb->FR[i]);
	brelse (tb->CFL[i]);
	brelse (tb->CFR[i]);
    }

    /* deal with list of allocated (used and unused) nodes */
    for ( i = 0; i < MAX_FEB_SIZE; i++ ) {
	if ( tb->FEB[i] ) {
	    unsigned long blocknr  = tb->FEB[i]->b_blocknr ;
	    /* de-allocated block which was not used by balancing and
	       bforget about buffer for it */
	    brelse (tb->FEB[i]);
	    reiserfs_free_block (tb->transaction_handle, blocknr);
	}
	if (tb->used[i]) {
	    /* release used as new nodes including a new root */
	    brelse (tb->used[i]);
	}
    }

#if 0 /* shouldn't this be in CONFIG_REISERFS_CHECK??? */
    /* make sure, that all we have released got really freed */
    for (i = 0; i < sizeof (tb->thrown) / sizeof (tb->thrown[0]); i ++)
	if (tb->thrown[i]) {
	    if (atomic_read (&(tb->thrown[i]->b_count))) {
		/* the log will have the count at one and the buffers marked */
		if (atomic_read(&(tb->thrown[i]->b_count)) > 1 ||
		    !(buffer_journaled(tb->thrown[i]) ||
		      buffer_journal_dirty(tb->thrown[i]))) {
		    foo_print (tb->thrown[i], tb->tb_sb);
		    printk ("unfix_nodes: Waiting...(block %lu, count %d)\n",
			    tb->thrown[i]->b_blocknr,
			    atomic_read (&(tb->thrown[i]->b_count)));
		    wait_buffer_until_released (tb->thrown[i]);
		    printk ("unfix_nodes: Done (block %lu, count %d)\n",
			    tb->thrown[i]->b_blocknr,
			    atomic_read (&(tb->thrown[i]->b_count)));
		}
	    }
	}
#endif /* 0 */
    /* finally drop the virtual-node scratch buffer fix_nodes allocated */
    reiserfs_kfree (tb->vn_buf, tb->vn_buf_size, tb->tb_sb);

}
+
+
+
+#ifndef REISERFS_FSCK
+
+// is_left_mergeable is now one of the item methods
+
+#else
+
+// this works only in fsck
+
+int are_items_mergeable (struct item_head * left, struct item_head * right, int bsize)
+{
+ if (comp_keys (&left->ih_key, &right->ih_key) != -1) {
+ reiserfs_panic (0, "vs-16070: are_items_mergeable: left %k, right %k", &(left->ih_key), &(right->ih_key));
+ }
+
+ if (comp_short_keys (&left->ih_key, &right->ih_key))
+ return 0;
+
+ if (I_IS_DIRECTORY_ITEM (left)) {
+ return 1;
+ }
+
+ if ((I_IS_DIRECT_ITEM (left) && I_IS_DIRECT_ITEM (right)) ||
+ (I_IS_INDIRECT_ITEM (left) && I_IS_INDIRECT_ITEM (right)))
+ return (left->ih_key.k_offset + I_BYTES_NUMBER (left, bsize) == right->ih_key.k_offset) ? 1 : 0;
+
+ return 0;
+}
+
+/* get left neighbor of the leaf node */
/* get left neighbor of the leaf node */
/* Look up the leaf immediately to the left of the leaf the path points at.
 * Returns a referenced buffer_head, or 0 when there is no left neighbor
 * (the path's leaf holds the smallest key).  fsck-only code. */
static struct buffer_head * get_left_neighbor (struct super_block * s, struct path * path)
{
    struct key key;
    INITIALIZE_PATH (path_to_left_neighbor);
    struct buffer_head * bh;

    /* key just below the first key of our leaf lands in the left neighbor */
    copy_key (&key, B_N_PKEY (PATH_PLAST_BUFFER (path), 0));
    decrement_key (&key);

/*    init_path (&path_to_left_neighbor);*/
    search_by_key (s, &key, &path_to_left_neighbor, DISK_LEAF_NODE_LEVEL, READ_BLOCKS);
    // FIXME: fsck is to handle I/O failures somehow as well
    if (PATH_LAST_POSITION (&path_to_left_neighbor) == 0) {
	/* search stopped at position 0: there is nothing to our left */
	pathrelse (&path_to_left_neighbor);
	return 0;
    }
    bh = PATH_PLAST_BUFFER (&path_to_left_neighbor);
    /* take our own reference before the path releases its one
       (direct b_count manipulation -- fsck build only; NOTE(review): in a
       kernel build b_count is an atomic_t and this would not be valid) */
    bh->b_count ++;
    pathrelse (&path_to_left_neighbor);
    return bh;
}
+
extern struct key MIN_KEY;
/* Look up the leaf immediately to the right of the leaf the path points at.
 * Returns a referenced buffer_head, or 0 when there is no right neighbor.
 * fsck-only code. */
static struct buffer_head * get_right_neighbor (struct super_block * s, struct path * path)
{
    struct key key;
    struct key * rkey;
    INITIALIZE_PATH (path_to_right_neighbor);
    struct buffer_head * bh;

    /* the right delimiting key of our leaf is the first key of the
       right neighbor */
    rkey = get_rkey (path, s);
    if (comp_keys (rkey, &MIN_KEY) == 0)
	reiserfs_panic (s, "vs-16080: get_right_neighbor: get_rkey returned min key (path has changed)");
    copy_key (&key, rkey);


    /*init_path (&path_to_right_neighbor);*/
    search_by_key (s, &key, &path_to_right_neighbor, DISK_LEAF_NODE_LEVEL, READ_BLOCKS);
    if (PATH_PLAST_BUFFER (&path_to_right_neighbor) == PATH_PLAST_BUFFER (path)) {
	/* search landed back in our own leaf: we are the rightmost one */
	pathrelse (&path_to_right_neighbor);
	return 0;
    }
    bh = PATH_PLAST_BUFFER (&path_to_right_neighbor);
    /* take our own reference before releasing the path's one
       (direct b_count manipulation -- fsck build only) */
    bh->b_count ++;
    pathrelse (&path_to_right_neighbor);
    return bh;
}
+
+
+int is_left_mergeable (struct super_block * s, struct path * path)
+{
+ struct item_head * right;
+ struct buffer_head * bh;
+ int retval;
+
+ right = B_N_PITEM_HEAD (PATH_PLAST_BUFFER (path), 0);
+
+ bh = get_left_neighbor (s, path);
+ if (bh == 0) {
+ return 0;
+ }
+ retval = are_items_mergeable (B_N_PITEM_HEAD (bh, B_NR_ITEMS (bh) - 1), right, bh->b_size);
+ brelse (bh);
+ return retval;
+}
+
+
+int is_right_mergeable (struct super_block * s, struct path * path)
+{
+ struct item_head * left;
+ struct buffer_head * bh;
+ int retval;
+
+ left = B_N_PITEM_HEAD (PATH_PLAST_BUFFER (path), B_NR_ITEMS (PATH_PLAST_BUFFER (path)) - 1);
+
+ bh = get_right_neighbor (s, path);
+ if (bh == 0) {
+ return 0;
+ }
+ retval = are_items_mergeable (left, B_N_PITEM_HEAD (bh, 0), bh->b_size);
+ brelse (bh);
+ return retval;
+}
+
+#endif /* REISERFS_FSCK */
+
+
+
+
+
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
new file mode 100644
index 000000000..b707aec4b
--- /dev/null
+++ b/fs/reiserfs/hashes.c
@@ -0,0 +1,226 @@
+
+/*
+ * Keyed 32-bit hash function using TEA in a Davis-Meyer function
+ * H0 = Key
+ * Hi = E Mi(Hi-1) + Hi-1
+ *
+ * (see Applied Cryptography, 2nd edition, p448).
+ *
+ * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
+ *
+ * Jeremy has agreed to the contents of reiserfs/README. -Hans
+ * Yura's function is added (04/07/2000)
+ */
+
+//
+// keyed_hash
+// yura_hash
+// r5_hash
+//
+
/* Kernel builds get u32 via <asm/types.h>; the userspace (fsck/utils)
 * build previously relied on the Linux-only __u32 type -- use the
 * portable C99 <stdint.h> uint32_t there instead. */
#ifdef __KERNEL__
#include <asm/types.h>
#else
#include <stdint.h>
typedef uint32_t u32;
#endif


#define DELTA 0x9E3779B9
#define FULLROUNDS 10		/* 32 is overkill, 16 is strong crypto */
#define PARTROUNDS 6		/* 6 gets complete mixing */

/* a, b, c, d - data; h0, h1 - accumulated hash */
#define TEACORE(rounds)							\
	do {								\
		u32 sum = 0;						\
		int n = rounds;						\
		u32 b0, b1;						\
									\
		b0 = h0;						\
		b1 = h1;						\
									\
		do							\
		{							\
			sum += DELTA;					\
			b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);	\
			b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);	\
		} while(--n);						\
									\
		h0 += b0;						\
		h1 += b1;						\
	} while(0)


/* Hash `len' bytes of `msg' with TEA in a Davies-Meyer construction
 * under a fixed key; full 16-byte blocks get PARTROUNDS, the padded
 * tail block gets FULLROUNDS.
 *
 * NOTE: bytes are widened via (u32)msg[i]; where `char' is signed,
 * bytes >= 0x80 are sign-extended before mixing.  The on-disk directory
 * hash depends on the resulting values -- do not "fix" this.
 *
 * The unreachable `*(int *)0 = 0;' statements that guarded the tail
 * branches (hand-rolled asserts on 0 <= len < 16) have been removed:
 * each condition was provably false inside its else-if chain.
 */
u32 keyed_hash(const char *msg, int len)
{
	u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3};

	u32 h0 = k[0], h1 = k[1];
	u32 a, b, c, d;
	u32 pad;
	int i;

	// assert(len >= 0 && len < 256);

	/* pad holds the length replicated into all four bytes; it fills
	   the lanes not covered by message bytes in the tail block */
	pad = (u32)len | ((u32)len << 8);
	pad |= pad << 16;

	while(len >= 16)
	{
		a = (u32)msg[ 0] |
		    (u32)msg[ 1] << 8 |
		    (u32)msg[ 2] << 16|
		    (u32)msg[ 3] << 24;
		b = (u32)msg[ 4] |
		    (u32)msg[ 5] << 8 |
		    (u32)msg[ 6] << 16|
		    (u32)msg[ 7] << 24;
		c = (u32)msg[ 8] |
		    (u32)msg[ 9] << 8 |
		    (u32)msg[10] << 16|
		    (u32)msg[11] << 24;
		d = (u32)msg[12] |
		    (u32)msg[13] << 8 |
		    (u32)msg[14] << 16|
		    (u32)msg[15] << 24;

		TEACORE(PARTROUNDS);

		len -= 16;
		msg += 16;
	}

	/* tail block: 0 <= len < 16 bytes remain */
	if (len >= 12)
	{
		a = (u32)msg[ 0] |
		    (u32)msg[ 1] << 8 |
		    (u32)msg[ 2] << 16|
		    (u32)msg[ 3] << 24;
		b = (u32)msg[ 4] |
		    (u32)msg[ 5] << 8 |
		    (u32)msg[ 6] << 16|
		    (u32)msg[ 7] << 24;
		c = (u32)msg[ 8] |
		    (u32)msg[ 9] << 8 |
		    (u32)msg[10] << 16|
		    (u32)msg[11] << 24;

		d = pad;
		for(i = 12; i < len; i++)
		{
			d <<= 8;
			d |= msg[i];
		}
	}
	else if (len >= 8)
	{
		a = (u32)msg[ 0] |
		    (u32)msg[ 1] << 8 |
		    (u32)msg[ 2] << 16|
		    (u32)msg[ 3] << 24;
		b = (u32)msg[ 4] |
		    (u32)msg[ 5] << 8 |
		    (u32)msg[ 6] << 16|
		    (u32)msg[ 7] << 24;

		c = d = pad;
		for(i = 8; i < len; i++)
		{
			c <<= 8;
			c |= msg[i];
		}
	}
	else if (len >= 4)
	{
		a = (u32)msg[ 0] |
		    (u32)msg[ 1] << 8 |
		    (u32)msg[ 2] << 16|
		    (u32)msg[ 3] << 24;

		b = c = d = pad;
		for(i = 4; i < len; i++)
		{
			b <<= 8;
			b |= msg[i];
		}
	}
	else
	{
		a = b = c = d = pad;
		for(i = 0; i < len; i++)
		{
			a <<= 8;
			a |= msg[i];
		}
	}

	TEACORE(FULLROUNDS);

/*	return 0;*/
	return h0^h1;
}
+
+/* What follows in this file is copyright 2000 by Hans Reiser, and the
+ * licensing of what follows is governed by reiserfs/README */
+
/* Yura Rupasov's hash: interprets the name roughly as a decimal number
 * (each byte minus 48 weighted by a power of ten), then pads.
 *
 * NOTE(review): in the two trailing loops i >= len, so the inner
 * `for (pow=1,j=i; j < len-1; j++)' never iterates and pow stays 1;
 * the '0'-48 loop therefore contributes 0 and the last loop adds a
 * constant-ish sum of the indices i.  Odd, but this is the on-disk
 * directory hash -- the output must not be changed.
 */
u32 yura_hash (const char *msg, int len)
{
    int j, pow;
    u32 a, c;
    int i;

    /* pow = 10^(len-1), the weight of the first character */
    for (pow=1,i=1; i < len; i++) pow = pow * 10;

    if (len == 1)
	a = msg[0]-48;
    else
	a = (msg[0] - 48) * pow;

    /* remaining characters with decreasing powers of ten */
    for (i=1; i < len; i++) {
	c = msg[i] - 48;
	for (pow=1,j=i; j < len-1; j++) pow = pow * 10;
	a = a + c * pow;
    }

    /* pad short names up to 40 positions (c is 0 here, see NOTE above) */
    for (; i < 40; i++) {
	c = '0' - 48;
	for (pow=1,j=i; j < len-1; j++) pow = pow * 10;
	a = a + c * pow;
    }

    /* and mix in the remaining position indices up to 256 */
    for (; i < 256; i++) {
	c = i;
	for (pow=1,j=i; j < len-1; j++) pow = pow * 10;
	a = a + c * pow;
    }

    a = a << 7;
    return a;
}
+
+u32 r5_hash (const char *msg, int len)
+{
+ u32 a=0;
+ while(*msg) {
+ a += *msg << 4;
+ a += *msg >> 4;
+ a *= 11;
+ msg++;
+ }
+ return a;
+}
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
new file mode 100644
index 000000000..6007d5158
--- /dev/null
+++ b/fs/reiserfs/ibalance.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+/* this is one and only function that is used outside (do_balance.c) */
+int balance_internal (
+ struct tree_balance * ,
+ int,
+ int,
+ struct item_head * ,
+ struct buffer_head **
+ );
+
+/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */
+#define INTERNAL_SHIFT_FROM_S_TO_L 0
+#define INTERNAL_SHIFT_FROM_R_TO_S 1
+#define INTERNAL_SHIFT_FROM_L_TO_S 2
+#define INTERNAL_SHIFT_FROM_S_TO_R 3
+#define INTERNAL_INSERT_TO_S 4
+#define INTERNAL_INSERT_TO_L 5
+#define INTERNAL_INSERT_TO_R 6
+
/* Fill in source and destination buffer_info (buffer, parent buffer and
 * child position within the parent) for the requested shift/insert mode
 * at tree level h.  For the four shift modes it also reports the slot of
 * the delimiting key (*d_key gets tb->lkey[h] or tb->rkey[h]) and the
 * common father buffer holding it (*cf gets tb->CFL[h] or tb->CFR[h]);
 * the three insert modes set only dest_bi. */
static void internal_define_dest_src_infos (
    int shift_mode,
    struct tree_balance * tb,
    int h,
    struct buffer_info * dest_bi,
    struct buffer_info * src_bi,
    int * d_key,
    struct buffer_head ** cf
    )
{
#ifdef CONFIG_REISERFS_CHECK
    /* poison so a missed field is visible in the debugger */
    memset (dest_bi, 0, sizeof (struct buffer_info));
    memset (src_bi, 0, sizeof (struct buffer_info));
#endif
    /* define dest, src, dest parent, dest position */
    switch (shift_mode) {
    case INTERNAL_SHIFT_FROM_S_TO_L:	/* used in internal_shift_left */
	src_bi->tb = tb;
	src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
	src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
	dest_bi->tb = tb;
	dest_bi->bi_bh = tb->L[h];
	dest_bi->bi_parent = tb->FL[h];
	dest_bi->bi_position = get_left_neighbor_position (tb, h);
	*d_key = tb->lkey[h];
	*cf = tb->CFL[h];
	break;
    case INTERNAL_SHIFT_FROM_L_TO_S:
	src_bi->tb = tb;
	src_bi->bi_bh = tb->L[h];
	src_bi->bi_parent = tb->FL[h];
	src_bi->bi_position = get_left_neighbor_position (tb, h);
	dest_bi->tb = tb;
	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); /* dest position is analog of dest->b_item_order */
	*d_key = tb->lkey[h];
	*cf = tb->CFL[h];
	break;

    case INTERNAL_SHIFT_FROM_R_TO_S:	/* used in internal_shift_left */
	src_bi->tb = tb;
	src_bi->bi_bh = tb->R[h];
	src_bi->bi_parent = tb->FR[h];
	src_bi->bi_position = get_right_neighbor_position (tb, h);
	dest_bi->tb = tb;
	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
	*d_key = tb->rkey[h];
	*cf = tb->CFR[h];
	break;

    case INTERNAL_SHIFT_FROM_S_TO_R:
	src_bi->tb = tb;
	src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
	src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
	src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
	dest_bi->tb = tb;
	dest_bi->bi_bh = tb->R[h];
	dest_bi->bi_parent = tb->FR[h];
	dest_bi->bi_position = get_right_neighbor_position (tb, h);
	*d_key = tb->rkey[h];
	*cf = tb->CFR[h];
	break;

    case INTERNAL_INSERT_TO_L:
	dest_bi->tb = tb;
	dest_bi->bi_bh = tb->L[h];
	dest_bi->bi_parent = tb->FL[h];
	dest_bi->bi_position = get_left_neighbor_position (tb, h);
	break;

    case INTERNAL_INSERT_TO_S:
	dest_bi->tb = tb;
	dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h);
	dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h);
	dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
	break;

    case INTERNAL_INSERT_TO_R:
	dest_bi->tb = tb;
	dest_bi->bi_bh = tb->R[h];
	dest_bi->bi_parent = tb->FR[h];
	dest_bi->bi_position = get_right_neighbor_position (tb, h);
	break;

    default:
	reiserfs_panic (tb->tb_sb, "internal_define_dest_src_infos", "shift type is unknown (%d)", shift_mode);
    }
}
+
+
+
+/* Insert count node pointers into buffer cur before position to + 1.
+ * Insert count items into buffer cur before position to.
+ * Items and node pointers are specified by inserted and bh respectively.
+ */
/* Insert count node pointers into buffer cur before position to + 1.
 * Insert count items into buffer cur before position to.
 * Items and node pointers are specified by inserted and bh respectively.
 */
static void internal_insert_childs (struct buffer_info * cur_bi,
				    int to, int count,
				    struct item_head * inserted,
				    struct buffer_head ** bh
    )
{
    struct buffer_head * cur = cur_bi->bi_bh;
    struct block_head * blkh;
    int nr;
    struct key * ih;
    struct disk_child new_dc[2];
    struct disk_child * dc;
    int i;

    if (count <= 0)
	return;

    nr = le16_to_cpu ((blkh = B_BLK_HEAD(cur))->blk_nr_item);

#ifdef CONFIG_REISERFS_CHECK
    /* at most 2 children can be inserted at once (new_dc[] is sized 2) */
    if (count > 2)
	reiserfs_panic (0, "internal_insert_childs", "too many children (%d) are to be inserted", count);
    if (B_FREE_SPACE (cur) < count * (KEY_SIZE + DC_SIZE))
	reiserfs_panic (0, "internal_insert_childs", "no enough free space (%d), needed %d bytes",
			B_FREE_SPACE (cur), count * (KEY_SIZE + DC_SIZE));
#endif /* CONFIG_REISERFS_CHECK */

    /* prepare space for count disk_child */
    dc = B_N_CHILD(cur,to+1);

    memmove (dc + count, dc, (nr+1-(to+1)) * DC_SIZE);

    /* copy to_be_insert disk children: each disk_child records the child
       block number and how many bytes of the child are in use */
    for (i = 0; i < count; i ++) {
	new_dc[i].dc_size =
	    cpu_to_le16 (MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE (bh[i]));
	new_dc[i].dc_block_number = cpu_to_le32 (bh[i]->b_blocknr);
    }
    memcpy (dc, new_dc, DC_SIZE * count);


    /* prepare space for count items (delimiting keys); the move length
       also carries the disk_child array that follows the key area */
    ih = B_N_PDELIM_KEY (cur, ((to == -1) ? 0 : to));

    memmove (ih + count, ih, (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);

    /* copy item headers (keys) */
    memcpy (ih, inserted, KEY_SIZE);
    if ( count > 1 )
	memcpy (ih + 1, inserted + 1, KEY_SIZE);

    /* sizes, item number */
    blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) + count);
    blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - count * (DC_SIZE + KEY_SIZE));

    do_balance_mark_internal_dirty (cur_bi->tb, cur,0);

    /*&&&&&&&&&&&&&&&&&&&&&&&&*/
    check_internal (cur);
    /*&&&&&&&&&&&&&&&&&&&&&&&&*/

    if (cur_bi->bi_parent) {
	/* grow the used-size recorded for cur in its parent's disk_child */
	B_N_CHILD (cur_bi->bi_parent,cur_bi->bi_position)->dc_size += count * (DC_SIZE + KEY_SIZE);
	do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0);

	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
	check_internal (cur_bi->bi_parent);
	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
    }

}
+
+
+/* Delete del_num items and node pointers from buffer cur starting from *
+ * the first_i'th item and first_p'th pointers respectively. */
/* Delete del_num items and node pointers from buffer cur starting from *
 * the first_i'th item and first_p'th pointers respectively. */
static void internal_delete_pointers_items (
    struct buffer_info * cur_bi,
    int first_p,
    int first_i,
    int del_num
    )
{
    struct buffer_head * cur = cur_bi->bi_bh;
    int nr;
    struct block_head * blkh;
    struct key * key;
    struct disk_child * dc;

#ifdef CONFIG_REISERFS_CHECK
    if (cur == NULL)
	reiserfs_panic (0, "internal_delete_pointers_items1: buffer is 0");

    if (del_num < 0)
	reiserfs_panic (0, "internal_delete_pointers_items2",
			"negative number of items (%d) can not be deleted", del_num);

    if (first_p < 0 || first_p + del_num > B_NR_ITEMS (cur) + 1 || first_i < 0)
	reiserfs_panic (0, "internal_delete_pointers_items3",
			"first pointer order (%d) < 0 or "
			"no so many pointers (%d), only (%d) or "
			"first key order %d < 0", first_p,
			first_p + del_num, B_NR_ITEMS (cur) + 1, first_i);
#endif /* CONFIG_REISERFS_CHECK */
    if ( del_num == 0 )
	return;

    nr = le16_to_cpu ((blkh = B_BLK_HEAD(cur))->blk_nr_item);

    /* deleting every pointer (an internal node with nr items has nr+1
       pointers) empties the node completely */
    if ( first_p == 0 && del_num == nr + 1 ) {
#ifdef CONFIG_REISERFS_CHECK
	if ( first_i != 0 )
	    reiserfs_panic (0, "internal_delete_pointers_items5",
			    "first deleted key must have order 0, not %d", first_i);
#endif /* CONFIG_REISERFS_CHECK */
	make_empty_node (cur_bi);
	return;
    }

#ifdef CONFIG_REISERFS_CHECK
    if (first_i + del_num > B_NR_ITEMS (cur)) {
	printk("first_i = %d del_num = %d\n",first_i,del_num);
	reiserfs_panic (0, "internal_delete_pointers_items4: :"
			"no so many keys (%d) in the node (%b)(%z)", first_i + del_num, cur, cur);
    }
#endif /* CONFIG_REISERFS_CHECK */


    /* deleting: close the gap in the pointer array, then in the key
       array (the key move also carries the trailing disk_child area) */
    dc = B_N_CHILD (cur, first_p);

    memmove (dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
    key = B_N_PDELIM_KEY (cur, first_i);
    memmove (key, key + del_num, (nr - first_i - del_num) * KEY_SIZE + (nr + 1 - del_num) * DC_SIZE);


    /* sizes, item number */
    blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) - del_num);
    blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) + del_num * (KEY_SIZE + DC_SIZE));

    do_balance_mark_internal_dirty (cur_bi->tb, cur, 0);
    /*&&&&&&&&&&&&&&&&&&&&&&&*/
    check_internal (cur);
    /*&&&&&&&&&&&&&&&&&&&&&&&*/

    if (cur_bi->bi_parent) {
	/* shrink the used-size recorded for cur in its parent's disk_child */
	B_N_CHILD (cur_bi->bi_parent, cur_bi->bi_position)->dc_size -= del_num * (KEY_SIZE + DC_SIZE);
	do_balance_mark_internal_dirty (cur_bi->tb, cur_bi->bi_parent,0);
	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
	check_internal (cur_bi->bi_parent);
	/*&&&&&&&&&&&&&&&&&&&&&&&&*/
    }
}
+
+
+/* delete n node pointers and items starting from given position */
/* Delete n node pointers (and the n matching delimiting keys) from
 * buffer CUR starting at pointer position `from'. */
static void internal_delete_childs (struct buffer_info * cur_bi,
				    int from, int n)
{
    int key_from;

    /* the keys that pair with the deleted pointers start one slot
       earlier, except at the left edge of the node */
    if (from == 0)
	key_from = 0;
    else
	key_from = from - 1;

    internal_delete_pointers_items (cur_bi, from, key_from, n);
}
+
+
+/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
+* last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
+ * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+ */
+static void internal_copy_pointers_items (
+ struct buffer_info * dest_bi,
+ struct buffer_head * src,
+ int last_first, int cpy_num
+ )
+{
+ /* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST *
+ * as delimiting key have already inserted to buffer dest.*/
+ struct buffer_head * dest = dest_bi->bi_bh;
+ int nr_dest, nr_src;
+ int dest_order, src_order;
+ struct block_head * blkh;
+ struct key * key;
+ struct disk_child * dc;
+
+ nr_src = B_NR_ITEMS (src);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( dest == NULL || src == NULL )
+ reiserfs_panic (0, "internal_copy_pointers_items", "src (%p) or dest (%p) buffer is 0", src, dest);
+
+ if (last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST)
+ reiserfs_panic (0, "internal_copy_pointers_items",
+ "invalid last_first parameter (%d)", last_first);
+
+ if ( nr_src < cpy_num - 1 )
+ reiserfs_panic (0, "internal_copy_pointers_items", "no so many items (%d) in src (%d)", cpy_num, nr_src);
+
+ if ( cpy_num < 0 )
+ reiserfs_panic (0, "internal_copy_pointers_items", "cpy_num less than 0 (%d)", cpy_num);
+
+ if (cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest))
+ reiserfs_panic (0, "internal_copy_pointers_items",
+ "cpy_num (%d) + item number in dest (%d) can not be more than MAX_NR_KEY(%d)",
+ cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest));
+#endif
+
+ if ( cpy_num == 0 )
+ return;
+
+ /* coping */
+ nr_dest = le16_to_cpu ((blkh = B_BLK_HEAD(dest))->blk_nr_item);
+
+ /*dest_order = (last_first == LAST_TO_FIRST) ? 0 : nr_dest;*/
+ /*src_order = (last_first == LAST_TO_FIRST) ? (nr_src - cpy_num + 1) : 0;*/
+ (last_first == LAST_TO_FIRST) ? (dest_order = 0, src_order = nr_src - cpy_num + 1) :
+ (dest_order = nr_dest, src_order = 0);
+
+ /* prepare space for cpy_num pointers */
+ dc = B_N_CHILD (dest, dest_order);
+
+ memmove (dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE);
+
+ /* insert pointers */
+ memcpy (dc, B_N_CHILD (src, src_order), DC_SIZE * cpy_num);
+
+
+ /* prepare space for cpy_num - 1 item headers */
+ key = B_N_PDELIM_KEY(dest, dest_order);
+ memmove (key + cpy_num - 1, key,
+ KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest + cpy_num));
+
+
+ /* insert headers */
+ memcpy (key, B_N_PDELIM_KEY (src, src_order), KEY_SIZE * (cpy_num - 1));
+
+ /* sizes, item number */
+ blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) + (cpy_num - 1));
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num));
+
+ do_balance_mark_internal_dirty (dest_bi->tb, dest, 0);
+
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+ check_internal (dest);
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+ if (dest_bi->bi_parent) {
+ B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position)->dc_size +=
+ KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num;
+
+ do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0);
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+ check_internal (dest_bi->bi_parent);
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+ }
+
+}
+
+
+/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest.
+ * Delete cpy_num - del_par items and node pointers from buffer src.
+ * last_first == FIRST_TO_LAST means, that we copy/delete first items from src.
+ * last_first == LAST_TO_FIRST means, that we copy/delete last items from src.
+ *
+ * NOTE(review): del_par == 1 appears to mean "leave one extra key/pointer
+ * pair behind in src" -- it is passed as 1 only by the shift1 variants;
+ * confirm against those callers.
+ */
+static void internal_move_pointers_items (struct buffer_info * dest_bi,
+ struct buffer_info * src_bi,
+ int last_first, int cpy_num, int del_par)
+{
+ int first_pointer;
+ int first_item;
+
+ /* copy phase: dest receives cpy_num pointers and cpy_num - 1 keys */
+ internal_copy_pointers_items (dest_bi, src_bi->bi_bh, last_first, cpy_num);
+
+ if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
+ first_pointer = 0;
+ first_item = 0;
+ /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
+ for key - with first_item */
+ internal_delete_pointers_items (src_bi, first_pointer, first_item, cpy_num - del_par);
+ } else { /* shift_right occurs */
+ int i, j;
+
+ /* j = current item count of src; i = first key to delete: 0 when
+ the whole node is being emptied, otherwise the node's tail */
+ i = ( cpy_num - del_par == ( j = B_NR_ITEMS(src_bi->bi_bh)) + 1 ) ? 0 : j - cpy_num + del_par;
+
+ internal_delete_pointers_items (src_bi, j + 1 - cpy_num + del_par, i, cpy_num - del_par);
+ }
+}
+
+/* Insert n_src'th key of buffer src before n_dest'th key of buffer dest.
+ * Adjusts dest's block head (item count, free space), marks dest dirty,
+ * and propagates the size growth into the parent's disk_child record. */
+static void internal_insert_key (struct buffer_info * dest_bi,
+ int dest_position_before, /* insert key before key with n_dest number */
+ struct buffer_head * src,
+ int src_position)
+{
+ struct buffer_head * dest = dest_bi->bi_bh;
+ int nr;
+ struct block_head * blkh;
+ struct key * key;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (dest == NULL || src == NULL)
+ reiserfs_panic (0, "internal_insert_key", "sourse(%p) or dest(%p) buffer is 0", src, dest);
+
+ if (dest_position_before < 0 || src_position < 0)
+ reiserfs_panic (0, "internal_insert_key", "source(%d) or dest(%d) key number less than 0",
+ src_position, dest_position_before);
+
+ if (dest_position_before > B_NR_ITEMS (dest) || src_position >= B_NR_ITEMS(src))
+ reiserfs_panic (0, "internal_insert_key",
+ "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))",
+ dest_position_before, B_NR_ITEMS (dest), src_position, B_NR_ITEMS(src));
+
+ if (B_FREE_SPACE (dest) < KEY_SIZE)
+ reiserfs_panic (0, "internal_insert_key",
+ "no enough free space (%d) in dest buffer", B_FREE_SPACE (dest));
+#endif
+
+ nr = le16_to_cpu ((blkh=B_BLK_HEAD(dest))->blk_nr_item);
+
+ /* prepare space for inserting key: shift the keys at and after the
+ insertion point, plus the whole following pointer array
+ ((nr + 1) disk_child entries), one KEY_SIZE to the right */
+ key = B_N_PDELIM_KEY (dest, dest_position_before);
+ memmove (key + 1, key, (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE);
+
+ /* insert key */
+ memcpy (key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE);
+
+ /* Change dirt, free space, item number fields. */
+ blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) + 1);
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - KEY_SIZE);
+
+ do_balance_mark_internal_dirty (dest_bi->tb, dest, 0);
+
+ /* dest grew by one key; account for it in the parent's record of
+ this child's size as well */
+ if (dest_bi->bi_parent) {
+ B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position)->dc_size += KEY_SIZE;
+ do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0);
+ }
+}
+
+
+
+/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+ * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
+ * Replace d_key'th key in buffer cfl.
+ * Delete pointer_amount items and node pointers from buffer src.
+ */
+/* this can be invoked both to shift from S to L and from R to S */
+static void internal_shift_left (
+ int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */
+ struct tree_balance * tb,
+ int h,
+ int pointer_amount
+ )
+{
+ struct buffer_info dest_bi, src_bi;
+ struct buffer_head * cf;
+ int d_key_position;
+
+ /* resolve (mode, h) into concrete dest/src buffers and the common
+ father cf that holds the delimiting key at d_key_position */
+ internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+ /*printk("pointer_amount = %d\n",pointer_amount);*/
+
+ if (pointer_amount) {
+ /* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */
+ internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position);
+
+ /* pointer_amount == nr + 1 means src gives up all of its pointers */
+ if (B_NR_ITEMS(src_bi.bi_bh) == pointer_amount - 1) {
+ if (src_bi.bi_position/*src->b_item_order*/ == 0)
+ replace_key (tb, cf, d_key_position, src_bi.bi_parent/*src->b_parent*/, 0);
+ } else
+ /* new delimiting key is the first key left behind in src */
+ replace_key (tb, cf, d_key_position, src_bi.bi_bh, pointer_amount - 1);
+ }
+ /* last parameter is del_parameter */
+ internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 0);
+
+}
+
+/* Insert delimiting key to L[h].
+ * Copy n node pointers and n - 1 items from buffer S[h] to L[h].
+ * Delete n - 1 items and node pointers from buffer S[h].
+ */
+/* it always shifts from S[h] to L[h] */
+static void internal_shift1_left (
+ struct tree_balance * tb,
+ int h,
+ int pointer_amount
+ )
+{
+ struct buffer_info dest_bi, src_bi;
+ struct buffer_head * cf;
+ int d_key_position;
+
+ internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+ if ( pointer_amount > 0 ) /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */
+ internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position);
+ /* internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]);*/
+
+ /* last parameter is del_parameter: del_par == 1, so one key/pointer
+ pair fewer is deleted from S[h] than was copied */
+ internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 1);
+ /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1);*/
+}
+
+
+/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+ * Copy n node pointers and n - 1 items from buffer src to buffer dest.
+ * Replace d_key'th key in buffer cfr.
+ * Delete n items and node pointers from buffer src.
+ */
+static void internal_shift_right (
+ int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */
+ struct tree_balance * tb,
+ int h,
+ int pointer_amount
+ )
+{
+ struct buffer_info dest_bi, src_bi;
+ struct buffer_head * cf;
+ int d_key_position;
+ int nr;
+
+
+ /* resolve (mode, h) into concrete dest/src buffers and the common
+ father cf that holds the delimiting key at d_key_position */
+ internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+ nr = B_NR_ITEMS (src_bi.bi_bh);
+
+ if (pointer_amount > 0) {
+ /* insert delimiting key from common father of dest and src to dest node into position 0 */
+ internal_insert_key (&dest_bi, 0, cf, d_key_position);
+ /* pointer_amount == nr + 1 means src gives up all of its pointers */
+ if (nr == pointer_amount - 1) {
+#ifdef CONFIG_REISERFS_CHECK
+ if ( src_bi.bi_bh != PATH_H_PBUFFER (tb->tb_path, h)/*tb->S[h]*/ || dest_bi.bi_bh != tb->R[h])
+ reiserfs_panic (tb->tb_sb, "internal_shift_right", "src (%p) must be == tb->S[h](%p) when it disappears",
+ src_bi.bi_bh, PATH_H_PBUFFER (tb->tb_path, h));
+#endif
+ /* when S[h] disappears replace left delimiting key as well */
+ if (tb->CFL[h])
+ replace_key (tb, cf, d_key_position, tb->CFL[h], tb->lkey[h]);
+ } else
+ /* new delimiting key is the last key left behind in src */
+ replace_key (tb, cf, d_key_position, src_bi.bi_bh, nr - pointer_amount);
+ }
+
+ /* last parameter is del_parameter */
+ internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 0);
+}
+
+/* Insert delimiting key to R[h].
+ * Copy n node pointers and n - 1 items from buffer S[h] to R[h].
+ * Delete n - 1 items and node pointers from buffer S[h].
+ */
+/* it always shift from S[h] to R[h] */
+static void internal_shift1_right (
+ struct tree_balance * tb,
+ int h,
+ int pointer_amount
+ )
+{
+ struct buffer_info dest_bi, src_bi;
+ struct buffer_head * cf;
+ int d_key_position;
+
+ internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, &dest_bi, &src_bi, &d_key_position, &cf);
+
+ if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */
+ internal_insert_key (&dest_bi, 0, cf, d_key_position);
+ /* internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]);*/
+
+ /* last parameter is del_parameter: del_par == 1, so one key/pointer
+ pair fewer is deleted from S[h] than was copied */
+ internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 1);
+ /* internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1);*/
+}
+
+
+/* Delete insert_num node pointers together with their left items
+ * and balance current node.
+ *
+ * Cases handled, in order: S[h] becomes the empty root (pick a new root),
+ * S[h] joins L[h] or R[h] entirely, S[h] borrows from a neighbor, or
+ * S[h] is split between both neighbors. */
+static void balance_internal_when_delete (struct tree_balance * tb,
+ int h, int child_pos)
+{
+ int insert_num;
+ int n;
+ struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+ struct buffer_info bi;
+
+ /* insert_size[h] < 0 here (see caller balance_internal), so
+ insert_num is the negated count of key/pointer pairs to remove */
+ insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));
+
+ /* delete child-node-pointer(s) together with their left item(s) */
+ bi.tb = tb;
+ bi.bi_bh = tbSh;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+ internal_delete_childs (&bi, child_pos, -insert_num);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->blknum[h] > 1 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "tb->blknum[%d]=%d when insert_size < 0",
+ h, tb->blknum[h]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ n = B_NR_ITEMS(tbSh);
+
+ if ( tb->lnum[h] == 0 && tb->rnum[h] == 0 ) {
+ if ( tb->blknum[h] == 0 ) {
+ /* node S[h] (root of the tree) is empty now */
+ struct buffer_head *new_root;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (n || B_FREE_SPACE (tbSh) != MAX_CHILD_SIZE(tbSh) - DC_SIZE)
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "buffer must have only 0 keys (%d)",
+ n);
+
+ if (bi.bi_parent)
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "root has parent (%p)", bi.bi_parent);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* choose a new root: the surviving child at level h-1 */
+ if ( ! tb->L[h-1] || ! B_NR_ITEMS(tb->L[h-1]) )
+ new_root = tb->R[h-1];
+ else
+ new_root = tb->L[h-1];
+ /* switch super block's tree root block number to the new value */
+ tb->tb_sb->u.reiserfs_sb.s_rs->s_root_block = cpu_to_le32 (new_root->b_blocknr);
+ //tb->tb_sb->u.reiserfs_sb.s_rs->s_tree_height --;
+ tb->tb_sb->u.reiserfs_sb.s_rs->s_tree_height = cpu_to_le16 (SB_TREE_HEIGHT (tb->tb_sb) - 1);
+
+ do_balance_mark_sb_dirty (tb, tb->tb_sb->u.reiserfs_sb.s_sbh, 1);
+ /*&&&&&&&&&&&&&&&&&&&&&&*/
+ if (h > 1)
+ /* use check_internal if new root is an internal node */
+ check_internal (new_root);
+ /*&&&&&&&&&&&&&&&&&&&&&&*/
+ tb->tb_sb->s_dirt = 1;
+
+ /* do what is needed for buffer thrown from tree */
+ reiserfs_invalidate_buffer(tb, tbSh);
+ return;
+ }
+ /* no shifting planned and S[h] survives: nothing more to do */
+ return;
+ }
+
+ if ( tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1 ) { /* join S[h] with L[h] */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rnum[h] != 0 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
+ h, tb->rnum[h]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* move everything (all n + 1 pointers) from S[h] into L[h] and
+ drop S[h] from the tree */
+ internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
+ reiserfs_invalidate_buffer(tb, tbSh);
+
+ return;
+ }
+
+ if ( tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1 ) { /* join S[h] with R[h] */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->lnum[h] != 0 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
+ h, tb->lnum[h]);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* move everything from S[h] into R[h] and drop S[h] */
+ internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
+
+ reiserfs_invalidate_buffer(tb,tbSh);
+ return;
+ }
+
+ if ( tb->lnum[h] < 0 ) { /* borrow from left neighbor L[h] */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rnum[h] != 0 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "invalid tb->rnum[%d]==%d when borrow from L[h]",
+ h, tb->rnum[h]);
+#endif /* CONFIG_REISERFS_CHECK */
+ /*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]);*/
+ internal_shift_right (INTERNAL_SHIFT_FROM_L_TO_S, tb, h, -tb->lnum[h]);
+ return;
+ }
+
+ if ( tb->rnum[h] < 0 ) { /* borrow from right neighbor R[h] */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->lnum[h] != 0 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "invalid tb->lnum[%d]==%d when borrow from R[h]",
+ h, tb->lnum[h]);
+#endif /* CONFIG_REISERFS_CHECK */
+ internal_shift_left (INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]);*/
+ return;
+ }
+
+ if ( tb->lnum[h] > 0 ) { /* split S[h] into two parts and put them into neighbors */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1 )
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete",
+ "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
+ h, tb->lnum[h], h, tb->rnum[h], n);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]);*/
+ internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
+
+ /* S[h] is fully distributed between its neighbors */
+ reiserfs_invalidate_buffer (tb, tbSh);
+
+ return;
+ }
+ reiserfs_panic (tb->tb_sb, "balance_internal_when_delete", "unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
+ h, tb->lnum[h], h, tb->rnum[h]);
+}
+
+
+/* Replace delimiting key of buffers L[h] and S[h] by the given key.
+ * The delimiting key physically lives in CFL[h] at position lkey[h]. */
+void replace_lkey (
+ struct tree_balance * tb,
+ int h,
+ struct item_head * key
+ )
+{
+ struct buffer_head * cfl;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (tb->L[h] == NULL || tb->CFL[h] == NULL)
+ reiserfs_panic (tb->tb_sb, "replace_lkey: 12255: "
+ "L[h](%p) and CFL[h](%p) must exist in replace_lkey", tb->L[h], tb->CFL[h]);
+#endif
+
+ /* nothing to delimit when S[h] holds no items */
+ if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
+ return;
+
+ cfl = tb->CFL[h];
+ memcpy (B_N_PDELIM_KEY(cfl, tb->lkey[h]), key, KEY_SIZE);
+
+ do_balance_mark_internal_dirty (tb, cfl, 0);
+}
+
+
+/* Replace delimiting key of buffers S[h] and R[h] by the given key.
+ * The delimiting key physically lives in CFR[h] at position rkey[h]. */
+void replace_rkey (
+ struct tree_balance * tb,
+ int h,
+ struct item_head * key
+ )
+{
+ struct buffer_head * cfr;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (tb->R[h] == NULL || tb->CFR[h] == NULL)
+ reiserfs_panic (tb->tb_sb, "replace_rkey: 12260: "
+ "R[h](%p) and CFR[h](%p) must exist in replace_rkey", tb->R[h], tb->CFR[h]);
+
+ if (B_NR_ITEMS(tb->R[h]) == 0)
+ reiserfs_panic (tb->tb_sb, "replace_rkey: 12265: "
+ "R[h] can not be empty if it exists (item number=%d)", B_NR_ITEMS(tb->R[h]));
+#endif
+
+ cfr = tb->CFR[h];
+ memcpy (B_N_PDELIM_KEY(cfr, tb->rkey[h]), key, KEY_SIZE);
+
+ do_balance_mark_internal_dirty (tb, cfr, 0);
+}
+
+
+/* Balance internal node S[h]: distribute insert_num new key/pointer pairs
+ * between S[h], its neighbors L[h]/R[h], and (when blknum[h] == 2) a newly
+ * allocated node S_new; may also create a new root when S[h] is absent.
+ * Returns `order` -- S[h]'s position in its parent (0 when there is no S[h]). */
+int balance_internal (struct tree_balance * tb, /* tree_balance structure */
+ int h, /* level of the tree */
+ int child_pos,
+ struct item_head * insert_key, /* key for insertion on higher level */
+ struct buffer_head ** insert_ptr /* node for insertion on higher level*/
+ )
+ /* if inserting/pasting
+ {
+ child_pos is the position of the node-pointer in S[h] that *
+ pointed to S[h-1] before balancing of the h-1 level; *
+ this means that new pointers and items must be inserted AFTER *
+ child_pos
+ }
+ else
+ {
+ it is the position of the leftmost pointer that must be deleted (together with
+ its corresponding key to the left of the pointer)
+ as a result of the previous level's balancing.
+ }
+*/
+{
+ struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+ struct buffer_info bi;
+ int order; /* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
+ int insert_num, n, k;
+ struct buffer_head * S_new;
+ struct item_head new_insert_key;
+ struct buffer_head * new_insert_ptr = NULL;
+ struct item_head * new_insert_key_addr = insert_key;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( h < 1 )
+ reiserfs_panic (tb->tb_sb, "balance_internal", "h (%d) can not be < 1 on internal level", h);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ order = ( tbSh ) ? PATH_H_POSITION (tb->tb_path, h + 1)/*tb->S[h]->b_item_order*/ : 0;
+
+ /* Using insert_size[h] calculate the number insert_num of items
+ that must be inserted to or deleted from S[h]. */
+ insert_num = tb->insert_size[h]/((int)(KEY_SIZE + DC_SIZE));
+
+ /* Check whether insert_num is proper **/
+#ifdef CONFIG_REISERFS_CHECK
+ if ( insert_num < -2 || insert_num > 2 )
+ reiserfs_panic (tb->tb_sb, "balance_internal",
+ "incorrect number of items inserted to the internal node (%d)", insert_num);
+
+ if ( h > 1 && (insert_num > 1 || insert_num < -1) )
+ reiserfs_panic (tb->tb_sb, "balance_internal",
+ "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level",
+ insert_num, h);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* Make balance in case insert_num < 0 */
+ if ( insert_num < 0 ) {
+ balance_internal_when_delete (tb, h, child_pos);
+ return order;
+ }
+
+ /* k counts the new items that were placed into L[h] / R[h] */
+ k = 0;
+ if ( tb->lnum[h] > 0 ) {
+ /* shift lnum[h] items from S[h] to the left neighbor L[h].
+ check how many of new items fall into L[h] or CFL[h] after
+ shifting */
+ n = B_NR_ITEMS (tb->L[h]); /* number of items in L[h] */
+ if ( tb->lnum[h] <= child_pos ) {
+ /* new items don't fall into L[h] or CFL[h] */
+ internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);
+ /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]);*/
+ child_pos -= tb->lnum[h];
+ } else if ( tb->lnum[h] > child_pos + insert_num ) {
+ /* all new items fall into L[h] */
+ internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h] - insert_num);
+ /* internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
+ tb->lnum[h]-insert_num);
+ */
+ /* insert insert_num keys and node-pointers into L[h] */
+ bi.tb = tb;
+ bi.bi_bh = tb->L[h];
+ bi.bi_parent = tb->FL[h];
+ bi.bi_position = get_left_neighbor_position (tb, h);
+ internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next*/ n + child_pos + 1,
+ insert_num,insert_key,insert_ptr);
+
+ insert_num = 0;
+ } else {
+ struct disk_child * dc;
+
+ /* some items fall into L[h] or CFL[h], but some don't fall */
+ internal_shift1_left(tb,h,child_pos+1);
+ /* calculate number of new items that fall into L[h] */
+ k = tb->lnum[h] - child_pos - 1;
+ bi.tb = tb;
+ bi.bi_bh = tb->L[h];
+ bi.bi_parent = tb->FL[h];
+ bi.bi_position = get_left_neighbor_position (tb, h);
+ internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next,*/ n + child_pos + 1,k,
+ insert_key,insert_ptr);
+
+ /* insert_key[k] becomes the new delimiting key in CFL[h] */
+ replace_lkey(tb,h,insert_key + k);
+
+ /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
+ dc = B_N_CHILD(tbSh, 0);
+ dc->dc_size = cpu_to_le16 (MAX_CHILD_SIZE(insert_ptr[k]) - B_FREE_SPACE (insert_ptr[k]));
+ dc->dc_block_number = cpu_to_le32 (insert_ptr[k]->b_blocknr);
+
+ do_balance_mark_internal_dirty (tb, tbSh, 0);
+
+ /* k + 1 of the new items are consumed; the rest go into S[h]
+ starting at its new leftmost position */
+ k++;
+ insert_key += k;
+ insert_ptr += k;
+ insert_num -= k;
+ child_pos = 0;
+ }
+ } /* tb->lnum[h] > 0 */
+
+ if ( tb->rnum[h] > 0 ) {
+ /*shift rnum[h] items from S[h] to the right neighbor R[h]*/
+ /* check how many of new items fall into R or CFR after shifting */
+ n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
+ if ( n - tb->rnum[h] >= child_pos )
+ /* new items fall into S[h] */
+ /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]);*/
+ internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
+ else
+ if ( n + insert_num - tb->rnum[h] < child_pos )
+ {
+ /* all new items fall into R[h] */
+ /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
+ tb->rnum[h] - insert_num);*/
+ internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h] - insert_num);
+
+ /* insert insert_num keys and node-pointers into R[h] */
+ bi.tb = tb;
+ bi.bi_bh = tb->R[h];
+ bi.bi_parent = tb->FR[h];
+ bi.bi_position = get_right_neighbor_position (tb, h);
+ internal_insert_childs (&bi, /*tb->R[h],tb->S[h-1]->b_next*/ child_pos - n - insert_num + tb->rnum[h] - 1,
+ insert_num,insert_key,insert_ptr);
+ insert_num = 0;
+ }
+ else
+ {
+ struct disk_child * dc;
+
+ /* one of the items falls into CFR[h] */
+ internal_shift1_right(tb,h,n - child_pos + 1);
+ /* calculate number of new items that fall into R[h] */
+ k = tb->rnum[h] - n + child_pos - 1;
+ bi.tb = tb;
+ bi.bi_bh = tb->R[h];
+ bi.bi_parent = tb->FR[h];
+ bi.bi_position = get_right_neighbor_position (tb, h);
+ internal_insert_childs (&bi, /*tb->R[h], tb->R[h]->b_child,*/ 0, k, insert_key + 1, insert_ptr + 1);
+
+ /* the item before the k that went right becomes the new
+ delimiting key in CFR[h] */
+ replace_rkey(tb,h,insert_key + insert_num - k - 1);
+
+ /* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1]*/
+ dc = B_N_CHILD(tb->R[h], 0);
+ dc->dc_size =
+ cpu_to_le16 (MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) -
+ B_FREE_SPACE (insert_ptr[insert_num-k-1]));
+ dc->dc_block_number = cpu_to_le32 (insert_ptr[insert_num-k-1]->b_blocknr);
+
+ do_balance_mark_internal_dirty (tb, tb->R[h],0);
+
+ insert_num -= (k + 1);
+ }
+ }
+
+ /** Fill new node that appears instead of S[h] **/
+#ifdef CONFIG_REISERFS_CHECK
+ if ( tb->blknum[h] > 2 )
+ reiserfs_panic(0, "balance_internal", "blknum can not be > 2 for internal level");
+ if ( tb->blknum[h] < 0 )
+ reiserfs_panic(0, "balance_internal", "blknum can not be < 0");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if ( ! tb->blknum[h] )
+ { /* node S[h] is empty now */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! tbSh )
+ reiserfs_panic(0,"balance_internal", "S[h] is equal NULL");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* do what is needed for buffer thrown from tree */
+ reiserfs_invalidate_buffer(tb,tbSh);
+ return order;
+ }
+
+ if ( ! tbSh ) {
+ /* create new root */
+ struct disk_child * dc;
+ struct buffer_head * tbSh_1 = PATH_H_PBUFFER (tb->tb_path, h - 1);
+
+
+ if ( tb->blknum[h] != 1 )
+ reiserfs_panic(0, "balance_internal", "One new node required for creating the new root");
+ /* S[h] = empty buffer from the list FEB. */
+ tbSh = get_FEB (tb);
+ B_BLK_HEAD(tbSh)->blk_level = cpu_to_le16 (h + 1);
+
+ /* Put the unique node-pointer to S[h] that points to S[h-1]. */
+
+ dc = B_N_CHILD(tbSh, 0);
+ dc->dc_block_number = cpu_to_le32 (tbSh_1->b_blocknr);
+ dc->dc_size = cpu_to_le16 (MAX_CHILD_SIZE (tbSh_1) - B_FREE_SPACE (tbSh_1));
+
+ tb->insert_size[h] -= DC_SIZE;
+ B_BLK_HEAD(tbSh)->blk_free_space = cpu_to_le16 (B_FREE_SPACE (tbSh) - DC_SIZE);
+
+ do_balance_mark_internal_dirty (tb, tbSh, 0);
+
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+ check_internal (tbSh);
+ /*&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+ /* put new root into path structure */
+ PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) = tbSh;
+
+ /* Change root in structure super block. */
+ tb->tb_sb->u.reiserfs_sb.s_rs->s_root_block = cpu_to_le32 (tbSh->b_blocknr);
+ tb->tb_sb->u.reiserfs_sb.s_rs->s_tree_height = cpu_to_le16 (SB_TREE_HEIGHT (tb->tb_sb) + 1);
+ do_balance_mark_sb_dirty (tb, tb->tb_sb->u.reiserfs_sb.s_sbh, 1);
+ tb->tb_sb->s_dirt = 1;
+ }
+
+ if ( tb->blknum[h] == 2 ) {
+ int snum;
+ struct buffer_info dest_bi, src_bi;
+
+
+ /* S_new = free buffer from list FEB */
+ S_new = get_FEB(tb);
+
+ B_BLK_HEAD(S_new)->blk_level = cpu_to_le16 (h + 1);
+
+ /* S_new has no parent yet -- it is linked in on the level above */
+ dest_bi.tb = tb;
+ dest_bi.bi_bh = S_new;
+ dest_bi.bi_parent = 0;
+ dest_bi.bi_position = 0;
+ src_bi.tb = tb;
+ src_bi.bi_bh = tbSh;
+ src_bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+ src_bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+
+ n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
+ snum = (insert_num + n + 1)/2;
+ if ( n - snum >= child_pos ) {
+ /* new items don't fall into S_new */
+ /* store the delimiting key for the next level */
+ /* new_insert_key = (n - snum)'th key in S[h] */
+ memcpy (&new_insert_key,B_N_PDELIM_KEY(tbSh,n - snum),
+ KEY_SIZE);
+ /* last parameter is del_par */
+ internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum, 0);
+ /* internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0);*/
+ } else if ( n + insert_num - snum < child_pos ) {
+ /* all new items fall into S_new */
+ /* store the delimiting key for the next level */
+ /* new_insert_key = (n + insert_item - snum)'th key in S[h] */
+ memcpy(&new_insert_key,B_N_PDELIM_KEY(tbSh,n + insert_num - snum),
+ KEY_SIZE);
+ /* last parameter is del_par */
+ internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum - insert_num, 0);
+ /* internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0);*/
+
+ /* insert insert_num keys and node-pointers into S_new */
+ internal_insert_childs (&dest_bi, /*S_new,tb->S[h-1]->b_next,*/child_pos - n - insert_num + snum - 1,
+ insert_num,insert_key,insert_ptr);
+
+ insert_num = 0;
+ } else {
+ struct disk_child * dc;
+
+ /* some items fall into S_new, but some don't fall */
+ /* last parameter is del_par */
+ internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, n - child_pos + 1, 1);
+ /* internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1);*/
+ /* calculate number of new items that fall into S_new */
+ k = snum - n + child_pos - 1;
+
+ internal_insert_childs (&dest_bi, /*S_new,*/ 0, k, insert_key + 1, insert_ptr+1);
+
+ /* new_insert_key = insert_key[insert_num - k - 1] */
+ memcpy(&new_insert_key,insert_key + insert_num - k - 1,
+ KEY_SIZE);
+ /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
+
+ dc = B_N_CHILD(S_new,0);
+ dc->dc_size = cpu_to_le16 (MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) -
+ B_FREE_SPACE(insert_ptr[insert_num-k-1]));
+ dc->dc_block_number = cpu_to_le32 (insert_ptr[insert_num-k-1]->b_blocknr);
+
+ do_balance_mark_internal_dirty (tb, S_new,0);
+
+ insert_num -= (k + 1);
+ }
+ /* new_insert_ptr = node_pointer to S_new */
+ new_insert_ptr = S_new;
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* b_count == 2 is tolerated only when the buffer is held by the
+ journal (journaled or journal-dirty) */
+ if ( buffer_locked(S_new) || atomic_read (&(S_new->b_count)) != 1)
+ if (buffer_locked(S_new) || atomic_read(&(S_new->b_count)) > 2 ||
+ !(buffer_journaled(S_new) || buffer_journal_dirty(S_new))) {
+ reiserfs_panic (tb->tb_sb, "cm-00001: balance_internal: bad S_new (%b)", S_new);
+ }
+#endif /* CONFIG_REISERFS_CHECK */
+
+ // S_new is released in unfix_nodes
+ }
+
+ n = B_NR_ITEMS (tbSh); /*number of items in S[h] */
+
+#ifdef REISERFS_FSCK
+ if ( -1 <= child_pos && child_pos <= n && insert_num > 0 ) {
+#else
+ if ( 0 <= child_pos && child_pos <= n && insert_num > 0 ) {
+#endif
+ bi.tb = tb;
+ bi.bi_bh = tbSh;
+ bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h);
+ bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
+#ifdef REISERFS_FSCK
+ if (child_pos == -1) {
+ /* this is a little different from original do_balance:
+ here we insert the minimal keys in the tree, that has never happened when file system works */
+ if (tb->CFL[h-1] || insert_num != 1 || h != 1)
+ die ("balance_internal: invalid child_pos");
+/* insert_child (tb->S[h], tb->S[h-1], child_pos, insert_num, B_N_ITEM_HEAD(tb->S[0],0), insert_ptr);*/
+ internal_insert_childs (&bi, child_pos, insert_num, B_N_PITEM_HEAD (PATH_PLAST_BUFFER (tb->tb_path), 0), insert_ptr);
+ } else
+#endif
+ internal_insert_childs (
+ &bi,/*tbSh,*/
+ /* ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next : tb->S[h]->b_child->b_next,*/
+ child_pos,insert_num,insert_key,insert_ptr
+ );
+ }
+
+
+ /* NOTE(review): new_insert_key is only assigned on the blknum[h] == 2
+ paths above, yet it is copied back unconditionally here; callers
+ appear to consume it only when a new node was produced
+ (insert_ptr[0] != NULL) -- confirm against do_balance. */
+ memcpy (new_insert_key_addr,&new_insert_key,KEY_SIZE);
+ insert_ptr[0] = new_insert_ptr;
+
+ return order;
+ }
+
+
+
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
new file mode 100644
index 000000000..12ab13894
--- /dev/null
+++ b/fs/reiserfs/inode.c
@@ -0,0 +1,1881 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/locks.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+/* args for the create parameter of reiserfs_get_block */
+#define GET_BLOCK_NO_CREATE 0 /* don't create new blocks or convert tails */
+#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
+#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
+#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
+
+//
+// initially this function was derived from minix or ext2's analog and
+// evolved as the prototype did
+//
+void reiserfs_delete_inode (struct inode * inode)
+{
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+
+
+ lock_kernel() ;
+
+ /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
+ if (INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
+ down (&inode->i_sem);
+
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+ windex = push_journal_writer("delete_inode") ;
+
+ reiserfs_delete_object (&th, inode);
+ reiserfs_remove_page_from_flush_list(&th, inode) ;
+ pop_journal_writer(windex) ;
+ reiserfs_release_objectid (&th, inode->i_ino);
+
+ journal_end(&th, inode->i_sb, jbegin_count) ;
+
+ up (&inode->i_sem);
+ } else {
+ /* no object items are in the tree */
+ ;
+ }
+ clear_inode (inode); /* note this must go after the journal_end to prevent deadlock */
+ unlock_kernel() ;
+}
+
+static void _make_cpu_key (struct cpu_key * key, int version, __u32 dirid, __u32 objectid,
+ loff_t offset, int type, int length)
+{
+ key->version = version;
+
+ key->on_disk_key.k_dir_id = dirid;
+ key->on_disk_key.k_objectid = objectid;
+ set_cpu_key_k_offset (key, offset);
+ set_cpu_key_k_type (key, type);
+ key->key_length = length;
+}
+
+
+/* take base of inode_key (it comes from inode always) (dirid, objectid) and version from an inode, set
+ offset and type of key */
+void make_cpu_key (struct cpu_key * key, const struct inode * inode, loff_t offset,
+ int type, int length)
+{
+ _make_cpu_key (key, inode_items_version (inode), le32_to_cpu (INODE_PKEY (inode)->k_dir_id),
+ le32_to_cpu (INODE_PKEY (inode)->k_objectid),
+ offset, type, length);
+}
+
+
+//
+// when key is 0, do not set version and short key
+//
+inline void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version,
+ loff_t offset, int type, int length, int entry_count/*or ih_free_space*/)
+{
+ if (key) {
+ ih->ih_key.k_dir_id = cpu_to_le32 (key->on_disk_key.k_dir_id);
+ ih->ih_key.k_objectid = cpu_to_le32 (key->on_disk_key.k_objectid);
+ }
+ ih->ih_version = cpu_to_le16 (version);
+ set_le_ih_k_offset (ih, offset);
+ set_le_ih_k_type (ih, type);
+ ih->ih_item_len = cpu_to_le16 (length);
+ /* set_ih_free_space (ih, 0);*/
+ // for directory items it is entry count, for directs and stat
+ // datas - 0xffff, for indirects - 0
+ ih->u.ih_entry_count = cpu_to_le16 (entry_count);
+}
+
+
+//
+// FIXME: we might cache recently accessed indirect item (or at least
+// first 15 pointers just like ext2 does)
+
+// Ugh. Not too eager for that....
+// I cut the code until such time as I see a convincing argument (benchmark).
+// I don't want a bloated inode struct..., and I don't like code complexity....
+
+/* cutting the code is fine, since it really isn't in use yet and is easy
+** to add back in. But, Vladimir has a really good idea here. Think
+** about what happens for reading a file. For each page,
+** The VFS layer calls reiserfs_readpage, who searches the tree to find
+** an indirect item. This indirect item has X number of pointers, where
+** X is a big number if we've done the block allocation right. But,
+** we only use one or two of these pointers during each call to readpage,
+** needlessly researching again later on.
+**
+** The size of the cache could be dynamic based on the size of the file.
+**
+** I'd also like to see us cache the location the stat data item, since
+** we are needlessly researching for that frequently.
+**
+** --chris
+*/
+
+/* people who call journal_begin with a page locked must call this
+** BEFORE calling journal_begin
+*/
+static int prevent_flush_page_lock(struct page *page,
+ struct inode *inode) {
+ struct reiserfs_page_list *pl ;
+ struct super_block *s = inode->i_sb ;
+ /* we don't care if the inode has a stale pointer from an old
+ ** transaction
+ */
+ if(!page || inode->u.reiserfs_i.i_conversion_trans_id != SB_JOURNAL(s)->j_trans_id) {
+ return 0 ;
+ }
+ pl = inode->u.reiserfs_i.i_converted_page ;
+ if (pl && pl->page == page) {
+ pl->do_not_lock = 1 ;
+ }
+ /* this last part is really important. The address space operations have
+ ** the page locked before they call the journal functions. So it is possible
+ ** for one process to be waiting in flush_pages_before_commit for a
+ ** page, then for the process with the page locked to call journal_begin.
+ **
+ ** We'll deadlock because the process flushing pages will never notice
+ ** the process with the page locked has called prevent_flush_page_lock.
+ ** So, we wake up the page waiters, even though the page is still locked.
+ ** The process waiting in flush_pages_before_commit must check the
+ ** pl->do_not_lock flag, and stop trying to lock the page.
+ */
+ wake_up(&page->wait) ;
+ return 0 ;
+
+}
+/* people who call journal_end with a page locked must call this
+** AFTER calling journal_end
+*/
+static int allow_flush_page_lock(struct page *page,
+ struct inode *inode) {
+
+ struct reiserfs_page_list *pl ;
+ struct super_block *s = inode->i_sb ;
+ /* we don't care if the inode has a stale pointer from an old
+ ** transaction
+ */
+ if(!page || inode->u.reiserfs_i.i_conversion_trans_id != SB_JOURNAL(s)->j_trans_id) {
+ return 0 ;
+ }
+ pl = inode->u.reiserfs_i.i_converted_page ;
+ if (pl && pl->page == page) {
+ pl->do_not_lock = 0 ;
+ }
+ return 0 ;
+
+}
+
+/* If this page has a file tail in it, and
+** it was read in by get_block_create_0, the page data is valid,
+** but tail is still sitting in a direct item, and we can't write to
+** it. So, look through this page, and check all the mapped buffers
+** to make sure they have valid block numbers. Any that don't need
+** to be unmapped, so that block_prepare_write will correctly call
+** reiserfs_get_block to convert the tail into an unformatted node
+*/
+static inline void fix_tail_page_for_writing(struct page *page) {
+ struct buffer_head *head, *next, *bh ;
+
+ if (page && page->buffers) {
+ head = page->buffers ;
+ bh = head ;
+ do {
+ next = bh->b_this_page ;
+ if (buffer_mapped(bh) && bh->b_blocknr == 0) {
+ reiserfs_unmap_buffer(bh) ;
+ }
+ bh = next ;
+ } while (bh != head) ;
+ }
+}
+
+
+
+
+/* we need to allocate a block for new unformatted node. Try to figure out
+ what point in bitmap reiserfs_new_blocknrs should start from. */
+static b_blocknr_t find_tag (struct buffer_head * bh, struct item_head * ih,
+ __u32 * item, int pos_in_item)
+{
+ if (!is_indirect_le_ih (ih))
+ /* something more complicated could be here */
+ return bh->b_blocknr;
+
+ /* for indirect item: go to left and look for the first non-hole entry in
+ the indirect item */
+ if (pos_in_item == I_UNFM_NUM (ih))
+ pos_in_item --;
+ while (pos_in_item >= 0) {
+ if (item [pos_in_item])
+ return item [pos_in_item];
+ pos_in_item --;
+ }
+ return bh->b_blocknr;
+}
+
+
+/* reiserfs_get_block does not need to allocate a block only if it has been
+ done already or non-hole position has been found in the indirect item */
+static inline int allocation_needed (int retval, b_blocknr_t allocated,
+ struct item_head * ih,
+ __u32 * item, int pos_in_item)
+{
+ if (allocated)
+ return 0;
+ if (retval == POSITION_FOUND && is_indirect_le_ih (ih) && item[pos_in_item])
+ return 0;
+ return 1;
+}
+
+static inline int indirect_item_found (int retval, struct item_head * ih)
+{
+ return (retval == POSITION_FOUND) && is_indirect_le_ih (ih);
+}
+
+
+static inline void set_block_dev_mapped (struct buffer_head * bh,
+ b_blocknr_t block, struct inode * inode)
+{
+ bh->b_dev = inode->i_dev;
+ bh->b_blocknr = block;
+ bh->b_state |= (1UL << BH_Mapped);
+}
+
+
+//
+// files which were created in the earlier version can not be longer,
+// than 2 gb
+//
+int file_capable (struct inode * inode, long block)
+{
+ if (inode_items_version (inode) != ITEM_VERSION_1 || // it is new file.
+ block < (1 << (31 - inode->i_sb->s_blocksize_bits))) // old file, but 'block' is inside of 2gb
+ return 1;
+
+ return 0;
+}
+
+/*static*/ void restart_transaction(struct reiserfs_transaction_handle *th,
+ struct inode *inode, struct path *path) {
+ struct super_block *s = th->t_super ;
+ int len = th->t_blocks_allocated ;
+
+ pathrelse(path) ;
+ reiserfs_update_sd(th, inode) ;
+ journal_end(th, s, len) ;
+ journal_begin(th, s, len) ;
+}
+
+// Called by get_block when create == 0. Returns the block number of the
+// 'block'-th logical block of the file. When it hits a direct item it
+// either returns 0 (when called from bmap) or reads the direct item into
+// a piece of the page (bh_result)
+
+// Please improve the english/clarity in the comment above, as it is
+// hard to understand.
+
+static int _get_block_create_0 (struct inode * inode, long block,
+ struct buffer_head * bh_result,
+ int args)
+{
+ INITIALIZE_PATH (path);
+ struct cpu_key key;
+ struct buffer_head * bh;
+ struct item_head * ih, tmp_ih;
+ int fs_gen ;
+ int blocknr;
+ char * p = NULL;
+ int chars;
+ int ret ;
+ unsigned long offset ;
+
+ // prepare the key to look for the 'block'-th block of file
+ make_cpu_key (&key, inode,
+ (loff_t)block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 3);
+
+research:
+ if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND) {
+ pathrelse (&path);
+ if (p)
+ kunmap(bh_result->b_page) ;
+ if ((args & GET_BLOCK_NO_HOLE)) {
+ return -ENOENT ;
+ }
+ return 0 ;
+ }
+
+ //
+ bh = get_bh (&path);
+ ih = get_ih (&path);
+ if (is_indirect_le_ih (ih)) {
+ __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+
+ /* FIXME: here we could cache indirect item or part of it in
+ the inode to avoid search_by_key in case of subsequent
+ access to file */
+ blocknr = le32_to_cpu (ind_item [path.pos_in_item]);
+ ret = 0 ;
+ if (blocknr) {
+ bh_result->b_dev = inode->i_dev;
+ bh_result->b_blocknr = blocknr;
+ bh_result->b_state |= (1UL << BH_Mapped);
+ } else if ((args & GET_BLOCK_NO_HOLE)) {
+ ret = -ENOENT ;
+ }
+ pathrelse (&path);
+ if (p)
+ kunmap(bh_result->b_page) ;
+ return ret ;
+ }
+
+ // requested data are in direct item(s)
+ if (!(args & GET_BLOCK_READ_DIRECT)) {
+ // we are called by bmap. FIXME: we can not map block of file
+ // when it is stored in direct item(s)
+ pathrelse (&path);
+ if (p)
+ kunmap(bh_result->b_page) ;
+ return -ENOENT;
+ }
+
+ // read file tail into part of page
+ offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
+ fs_gen = get_generation(inode->i_sb) ;
+ copy_item_head (&tmp_ih, ih);
+
+ /* we only want to kmap if we are reading the tail into the page.
+ ** this is not the common case, so we don't kmap until we are
+ ** sure we need to. But, this means the item might move if
+ ** kmap schedules
+ */
+ p = (char *)kmap(bh_result->b_page) ;
+ if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+ goto research;
+ }
+ p += offset ;
+ memset (p, 0, inode->i_sb->s_blocksize);
+ do {
+ if (!is_direct_le_ih (ih)) {
+ BUG ();
+ }
+ chars = le16_to_cpu (ih->ih_item_len) - path.pos_in_item;
+ memcpy (p, B_I_PITEM (bh, ih) + path.pos_in_item, chars);
+ p += chars;
+
+ if (PATH_LAST_POSITION (&path) != (B_NR_ITEMS (bh) - 1))
+	    // we are done if the direct item just read is not the last item
+	    // of the node. FIXME: we could try to check the right delimiting key
+ // to see whether direct item continues in the right
+ // neighbor or rely on i_size
+ break;
+
+ // update key to look for the next piece
+ set_cpu_key_k_offset (&key, cpu_key_k_offset (&key) + chars);
+ if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND)
+ // we read something from tail, even if now we got IO_ERROR
+ break;
+ bh = get_bh (&path);
+ ih = get_ih (&path);
+ } while (1);
+
+ pathrelse (&path);
+
+ // FIXME: b_blocknr == 0 here. but b_data contains correct data
+ // from tail. ll_rw_block will skip uptodate buffers
+ bh_result->b_blocknr = 0 ;
+ bh_result->b_dev = inode->i_dev;
+ mark_buffer_uptodate (bh_result, 1);
+ bh_result->b_state |= (1UL << BH_Mapped);
+ kunmap(bh_result->b_page) ;
+
+ return 0;
+}
+
+
+// this is called to create file map. So, _get_block_create_0 will not
+// read direct item
+int reiserfs_bmap (struct inode * inode, long block,
+ struct buffer_head * bh_result, int create)
+{
+ if (!file_capable (inode, block))
+ return -EFBIG;
+
+ lock_kernel() ;
+ /* do not read the direct item */
+ _get_block_create_0 (inode, block, bh_result, 0) ;
+ unlock_kernel() ;
+ return 0;
+}
+
+/* special version of get_block that is only used by grab_tail_page right
+** now. It is sent to block_prepare_write, and when you try to get a
+** block past the end of the file (or a block from a hole) it returns
+** -ENOENT instead of a valid buffer. block_prepare_write expects to
+** be able to do i/o on the buffers returned, unless an error value
+** is also returned.
+**
+** So, this allows block_prepare_write to be used for reading a single block
+** in a page. Where it does not produce a valid page for holes, or past the
+** end of the file. This turns out to be exactly what we need for reading
+** tails for conversion.
+**
+** The point of the wrapper is forcing a certain value for create, even
+** though the VFS layer is calling this function with create==1. If you
+** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+** don't use this function.
+*/
+static int reiserfs_get_block_create_0 (struct inode * inode, long block,
+ struct buffer_head * bh_result, int create) {
+ return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE) ;
+}
+
+/*
+** helper function for when reiserfs_get_block is called for a hole
+** but the file tail is still in a direct item
+** bh_result is the buffer head for the hole
+** tail_offset is the offset of the start of the tail in the file
+**
+** This calls prepare_write, which will start a new transaction
+** you should not be in a transaction, or have any paths held when you
+** call this.
+*/
+static int convert_tail_for_hole(struct inode *inode,
+ struct buffer_head *bh_result,
+ loff_t tail_offset) {
+ unsigned long index ;
+ unsigned long tail_end ;
+ unsigned long tail_start ;
+ struct page * tail_page ;
+ struct page * hole_page = bh_result->b_page ;
+ int retval = 0 ;
+
+ if ((tail_offset & (bh_result->b_size - 1)) != 1)
+ return -EIO ;
+
+ /* always try to read until the end of the block */
+ tail_start = tail_offset & (PAGE_CACHE_SIZE - 1) ;
+ tail_end = (tail_start | (bh_result->b_size - 1)) + 1 ;
+
+ index = tail_offset >> PAGE_CACHE_SHIFT ;
+ if (index != hole_page->index) {
+ tail_page = grab_cache_page(inode->i_mapping, index) ;
+ retval = PTR_ERR(tail_page) ;
+ if (IS_ERR(tail_page)) {
+ goto out ;
+ }
+ } else {
+ tail_page = hole_page ;
+ }
+
+ /* we don't have to make sure the conversion did not happen while
+ ** we were locking the page because anyone that could convert
+ ** must first take i_sem.
+ **
+ ** We must fix the tail page for writing because it might have buffers
+ ** that are mapped, but have a block number of 0. This indicates tail
+ ** data that has been read directly into the page, and block_prepare_write
+ ** won't trigger a get_block in this case.
+ */
+ fix_tail_page_for_writing(tail_page) ;
+ retval = block_prepare_write(tail_page, tail_start, tail_end,
+ reiserfs_get_block) ;
+ if (retval)
+ goto unlock ;
+
+ /* tail conversion might change the data in the page */
+ flush_dcache_page(tail_page) ;
+
+ retval = generic_commit_write(NULL, tail_page, tail_start, tail_end) ;
+
+unlock:
+ if (tail_page != hole_page) {
+ UnlockPage(tail_page) ;
+ page_cache_release(tail_page) ;
+ }
+out:
+ return retval ;
+}
+
+//
+// initially this function was derived from ext2's analog and evolved
+// as the prototype did. You'll need to look at the ext2 version to
+// determine which parts are derivative, if any, understanding that
+// there are only so many ways to code to a given interface.
+//
+int reiserfs_get_block (struct inode * inode, long block,
+ struct buffer_head * bh_result, int create)
+{
+ int repeat, retval;
+ unsigned long tag;
+ b_blocknr_t allocated_block_nr = 0;// b_blocknr_t is unsigned long
+ INITIALIZE_PATH(path);
+ int pos_in_item;
+ struct cpu_key key;
+ struct buffer_head * bh, * unbh = 0;
+ struct item_head * ih, tmp_ih;
+ __u32 * item;
+ int done;
+ int fs_gen;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 ;
+ int version;
+ int transaction_started = 0 ;
+ loff_t new_offset = (block << inode->i_sb->s_blocksize_bits) + 1 ;
+
+ /* bad.... */
+ lock_kernel() ;
+ th.t_trans_id = 0 ;
+ version = inode_items_version (inode);
+
+ if (!file_capable (inode, block)) {
+ unlock_kernel() ;
+ return -EFBIG;
+ }
+
+ /* if !create, we aren't changing the FS, so we don't need to
+ ** log anything, so we don't need to start a transaction
+ */
+ if (!(create & GET_BLOCK_CREATE)) {
+ int ret ;
+ /* find number of block-th logical block of the file */
+ ret = _get_block_create_0 (inode, block, bh_result,
+ create | GET_BLOCK_READ_DIRECT) ;
+ unlock_kernel() ;
+ return ret;
+ }
+
+ if (block < 0) {
+ unlock_kernel();
+ return -EIO;
+ }
+
+ prevent_flush_page_lock(bh_result->b_page, inode) ;
+ inode->u.reiserfs_i.i_pack_on_close = 1 ;
+
+ windex = push_journal_writer("reiserfs_get_block") ;
+
+ /* set the key of the first byte in the 'block'-th block of file */
+ make_cpu_key (&key, inode,
+ (loff_t)block * inode->i_sb->s_blocksize + 1, // k_offset
+ TYPE_ANY, 3/*key length*/);
+ if ((new_offset + inode->i_sb->s_blocksize) >= inode->i_size) {
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+ transaction_started = 1 ;
+ }
+ research:
+
+ retval = search_for_position_by_key (inode->i_sb, &key, &path);
+ if (retval == IO_ERROR) {
+ retval = -EIO;
+ goto failure;
+ }
+
+ bh = get_bh (&path);
+ ih = get_ih (&path);
+ item = get_item (&path);
+ pos_in_item = path.pos_in_item;
+
+ fs_gen = get_generation (inode->i_sb);
+ copy_item_head (&tmp_ih, ih);
+
+ if (allocation_needed (retval, allocated_block_nr, ih, item, pos_in_item)) {
+ /* we have to allocate block for the unformatted node */
+ tag = find_tag (bh, ih, item, pos_in_item);
+ if (!transaction_started) {
+ pathrelse(&path) ;
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+ transaction_started = 1 ;
+ goto research ;
+ }
+
+#ifdef REISERFS_PREALLOCATE
+ repeat = reiserfs_new_unf_blocknrs2 (&th, inode, &allocated_block_nr, tag);
+#else
+ repeat = reiserfs_new_unf_blocknrs (&th, &allocated_block_nr, tag);
+#endif
+
+ if (repeat == NO_DISK_SPACE) {
+ /* restart the transaction to give the journal a chance to free
+ ** some blocks. releases the path, so we have to go back to
+ ** research if we succeed on the second try
+ */
+ restart_transaction(&th, inode, &path) ;
+#ifdef REISERFS_PREALLOCATE
+ repeat = reiserfs_new_unf_blocknrs2 (&th, inode, &allocated_block_nr, tag);
+#else
+ repeat = reiserfs_new_unf_blocknrs (&th, &allocated_block_nr, tag);
+#endif
+
+ if (repeat != NO_DISK_SPACE) {
+ goto research ;
+ }
+ retval = -ENOSPC;
+ goto failure;
+ }
+
+ if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+ goto research;
+ }
+ }
+
+ if (indirect_item_found (retval, ih)) {
+ /* 'block'-th block is in the file already (there is
+ corresponding cell in some indirect item). But it may be
+ zero unformatted node pointer (hole) */
+ if (!item[pos_in_item]) {
+ /* use allocated block to plug the hole */
+ reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+ if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+ reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+ goto research;
+ }
+ bh_result->b_state |= (1UL << BH_New);
+ item[pos_in_item] = cpu_to_le32 (allocated_block_nr);
+ journal_mark_dirty (&th, inode->i_sb, bh);
+ inode->i_blocks += (inode->i_sb->s_blocksize / 512) ;
+ reiserfs_update_sd(&th, inode) ;
+ }
+ set_block_dev_mapped(bh_result, le32_to_cpu (item[pos_in_item]), inode);
+ pathrelse (&path);
+#ifdef REISERFS_CHECK
+ pop_journal_writer(windex) ;
+#endif /* REISERFS_CHECK */
+ if (transaction_started)
+ journal_end(&th, inode->i_sb, jbegin_count) ;
+
+ allow_flush_page_lock(bh_result->b_page, inode) ;
+ unlock_kernel() ;
+
+ /* the item was found, so new blocks were not added to the file
+ ** there is no need to make sure the inode is updated with this
+ ** transaction
+ */
+ return 0;
+ }
+
+ if (!transaction_started) {
+ /* if we don't pathrelse, we could vs-3050 on the buffer if
+ ** someone is waiting for it (they can't finish until the buffer
+    ** is released, we can't start a new transaction until they finish)
+ */
+ pathrelse(&path) ;
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+ transaction_started = 1 ;
+ goto research;
+ }
+
+ /* desired position is not found or is in the direct item. We have
+ to append file with holes up to 'block'-th block converting
+ direct items to indirect one if necessary */
+ done = 0;
+ do {
+ if (is_statdata_le_ih (ih)) {
+ __u32 unp = 0;
+ struct cpu_key tmp_key;
+
+ /* indirect item has to be inserted */
+ make_le_item_head (&tmp_ih, &key, version, 1, TYPE_INDIRECT,
+ UNFM_P_SIZE, 0/* free_space */);
+
+ if (cpu_key_k_offset (&key) == 1) {
+ /* we are going to add 'block'-th block to the file. Use
+ allocated block for that */
+ unp = cpu_to_le32 (allocated_block_nr);
+ set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+ bh_result->b_state |= (1UL << BH_New);
+ done = 1;
+ }
+ tmp_key = key; // ;)
+ set_cpu_key_k_offset (&tmp_key, 1);
+ PATH_LAST_POSITION(&path) ++;
+
+ retval = reiserfs_insert_item (&th, &path, &tmp_key, &tmp_ih, (char *)&unp);
+ if (retval) {
+ reiserfs_free_block (&th, allocated_block_nr);
+
+#ifdef REISERFS_PREALLOCATE
+ reiserfs_discard_prealloc (&th, inode);
+#endif
+ goto failure; // retval == -ENOSPC or -EIO or -EEXIST
+ }
+ if (unp)
+ inode->i_blocks += inode->i_sb->s_blocksize / 512;
+ //mark_tail_converted (inode);
+ } else if (is_direct_le_ih (ih)) {
+ /* direct item has to be converted */
+ loff_t tail_offset;
+
+ tail_offset = ((le_ih_k_offset (ih) - 1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
+ if (tail_offset == cpu_key_k_offset (&key)) {
+ /* direct item we just found fits into block we have
+ to map. Convert it into unformatted node: use
+ bh_result for the conversion */
+ set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+ unbh = bh_result;
+ done = 1;
+ } else {
+	/* we have to pad the file tail stored in direct item(s)
+ up to block size and convert it to unformatted
+ node. FIXME: this should also get into page cache */
+
+ pathrelse(&path) ;
+ journal_end(&th, inode->i_sb, jbegin_count) ;
+ transaction_started = 0 ;
+
+ retval = convert_tail_for_hole(inode, bh_result, tail_offset) ;
+ if (retval) {
+ printk("clm-6004: convert tail failed inode %lu, error %d\n", inode->i_ino, retval) ;
+ if (allocated_block_nr)
+ reiserfs_free_block (&th, allocated_block_nr);
+ goto failure ;
+ }
+ goto research ;
+ }
+ retval = direct2indirect (&th, inode, &path, unbh, tail_offset);
+ /* it is important the mark_buffer_uptodate is done after
+ ** the direct2indirect. The buffer might contain valid
+ ** data newer than the data on disk (read by readpage, changed,
+ ** and then sent here by writepage). direct2indirect needs
+ ** to know if unbh was already up to date, so it can decide
+ ** if the data in unbh needs to be replaced with data from
+ ** the disk
+ */
+ mark_buffer_uptodate (unbh, 1);
+ if (retval) {
+ reiserfs_free_block (&th, allocated_block_nr);
+
+#ifdef REISERFS_PREALLOCATE
+ reiserfs_discard_prealloc (&th, inode);
+#endif
+ goto failure;
+ }
+ /* we've converted the tail, so we must
+ ** flush unbh before the transaction commits
+ */
+ reiserfs_add_page_to_flush_list(&th, inode, unbh) ;
+
+ //inode->i_blocks += inode->i_sb->s_blocksize / 512;
+ //mark_tail_converted (inode);
+ } else {
+ /* append indirect item with holes if needed, when appending
+ pointer to 'block'-th block use block, which is already
+ allocated */
+ struct cpu_key tmp_key;
+ struct unfm_nodeinfo un = {0, 0};
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (pos_in_item != le16_to_cpu (ih->ih_item_len) / UNFM_P_SIZE)
+ reiserfs_panic (inode->i_sb, "vs-: reiserfs_get_block: "
+ "invalid position for append");
+#endif
+ /* indirect item has to be appended, set up key of that position */
+ make_cpu_key (&tmp_key, inode,
+ le_key_k_offset (version, &(ih->ih_key)) + op_bytes_number (ih, inode->i_sb->s_blocksize),
+ //pos_in_item * inode->i_sb->s_blocksize,
+ TYPE_INDIRECT, 3);// key type is unimportant
+
+ if (cpu_key_k_offset (&tmp_key) == cpu_key_k_offset (&key)) {
+ /* we are going to add target block to the file. Use allocated
+ block for that */
+ un.unfm_nodenum = cpu_to_le32 (allocated_block_nr);
+ set_block_dev_mapped (bh_result, allocated_block_nr, inode);
+ bh_result->b_state |= (1UL << BH_New);
+ done = 1;
+ } else {
+ /* paste hole to the indirect item */
+ }
+ retval = reiserfs_paste_into_item (&th, &path, &tmp_key, (char *)&un, UNFM_P_SIZE);
+ if (retval) {
+ reiserfs_free_block (&th, allocated_block_nr);
+
+#ifdef REISERFS_PREALLOCATE
+ reiserfs_discard_prealloc (&th, inode);
+#endif
+ goto failure;
+ }
+ if (un.unfm_nodenum)
+ inode->i_blocks += inode->i_sb->s_blocksize / 512;
+ //mark_tail_converted (inode);
+ }
+
+ if (done == 1)
+ break;
+
+ /* this loop could log more blocks than we had originally asked
+ ** for. So, we have to allow the transaction to end if it is
+ ** too big or too full. Update the inode so things are
+ ** consistent if we crash before the function returns
+ **
+ ** release the path so that anybody waiting on the path before
+ ** ending their transaction will be able to continue.
+ */
+ if (journal_transaction_should_end(&th, th.t_blocks_allocated)) {
+ restart_transaction(&th, inode, &path) ;
+ }
+ /* inserting indirect pointers for a hole can take a
+ ** long time. reschedule if needed
+ */
+ if (current->need_resched)
+ schedule() ;
+
+ retval = search_for_position_by_key (inode->i_sb, &key, &path);
+ if (retval == IO_ERROR) {
+ retval = -EIO;
+ goto failure;
+ }
+ if (retval == POSITION_FOUND) {
+ reiserfs_warning ("vs-: reiserfs_get_block: "
+ "%k should not be found", &key);
+ retval = -EEXIST;
+ pathrelse(&path) ;
+ goto failure;
+ }
+ bh = get_bh (&path);
+ ih = get_ih (&path);
+ item = get_item (&path);
+ pos_in_item = path.pos_in_item;
+ } while (1);
+
+
+ retval = 0;
+ reiserfs_check_path(&path) ;
+
+ failure:
+ if (transaction_started) {
+ reiserfs_update_sd(&th, inode) ;
+ journal_end(&th, inode->i_sb, jbegin_count) ;
+ }
+ pop_journal_writer(windex) ;
+ allow_flush_page_lock(bh_result->b_page, inode) ;
+ unlock_kernel() ;
+ reiserfs_check_path(&path) ;
+ return retval;
+}
+
+
+//
+// BAD: new directories have stat data of new type and all other items
+// of old type. Version stored in the inode says about body items, so
+// in update_stat_data we can not rely on inode, but have to check
+// item version directly
+//
+
+// called by read_inode
+static void init_inode (struct inode * inode, struct path * path)
+{
+ struct buffer_head * bh;
+ struct item_head * ih;
+ __u32 rdev;
+ //int version = ITEM_VERSION_1;
+
+ bh = PATH_PLAST_BUFFER (path);
+ ih = PATH_PITEM_HEAD (path);
+
+
+ copy_key (INODE_PKEY (inode), &(ih->ih_key));
+ inode->i_generation = INODE_PKEY (inode)->k_dir_id;
+ inode->i_blksize = PAGE_SIZE;
+
+ if (stat_data_v1 (ih)) {
+ struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
+ unsigned long blocks;
+
+ inode_items_version (inode) = ITEM_VERSION_1;
+ inode->i_mode = le16_to_cpu (sd->sd_mode);
+ inode->i_nlink = le16_to_cpu (sd->sd_nlink);
+ inode->i_uid = le16_to_cpu (sd->sd_uid);
+ inode->i_gid = le16_to_cpu (sd->sd_gid);
+ inode->i_size = le32_to_cpu (sd->sd_size);
+ inode->i_atime = le32_to_cpu (sd->sd_atime);
+ inode->i_mtime = le32_to_cpu (sd->sd_mtime);
+ inode->i_ctime = le32_to_cpu (sd->sd_ctime);
+
+ inode->i_blocks = le32_to_cpu (sd->u.sd_blocks);
+ blocks = (inode->i_size + 511) >> 9;
+ blocks = _ROUND_UP (blocks, inode->i_blksize >> 9);
+ if (inode->i_blocks > blocks) {
+ // there was a bug in <=3.5.23 when i_blocks could take negative
+ // values. Starting from 3.5.17 this value could even be stored in
+ // stat data. For such files we set i_blocks based on file
+	// size. Just 2 notes: this can be wrong for sparse files. On-disk value will be
+ // only updated if file's inode will ever change
+ inode->i_blocks = blocks;
+ }
+
+ rdev = le32_to_cpu (sd->u.sd_rdev);
+ inode->u.reiserfs_i.i_first_direct_byte = le32_to_cpu (sd->sd_first_direct_byte);
+ } else {
+ // new stat data found, but object may have old items
+ // (directories and symlinks)
+ struct stat_data * sd = (struct stat_data *)B_I_PITEM (bh, ih);
+
+ /* both old and new directories have old keys */
+ //version = (S_ISDIR (sd->sd_mode) ? ITEM_VERSION_1 : ITEM_VERSION_2);
+ if (S_ISDIR (sd->sd_mode) || S_ISLNK (sd->sd_mode))
+ inode_items_version (inode) = ITEM_VERSION_1;
+ else
+ inode_items_version (inode) = ITEM_VERSION_2;
+ inode->i_mode = le16_to_cpu (sd->sd_mode);
+ inode->i_nlink = le32_to_cpu (sd->sd_nlink);
+ inode->i_uid = le32_to_cpu (sd->sd_uid);
+ inode->i_size = le64_to_cpu (sd->sd_size);
+ inode->i_gid = le32_to_cpu (sd->sd_gid);
+ inode->i_mtime = le32_to_cpu (sd->sd_mtime);
+ inode->i_atime = le32_to_cpu (sd->sd_atime);
+ inode->i_ctime = le32_to_cpu (sd->sd_ctime);
+ inode->i_blocks = le32_to_cpu (sd->sd_blocks);
+ rdev = le32_to_cpu (sd->u.sd_rdev);
+ }
+
+ /* nopack = 0, by default */
+ inode->u.reiserfs_i.nopack = 0;
+
+ pathrelse (path);
+ if (S_ISREG (inode->i_mode)) {
+ inode->i_op = &reiserfs_file_inode_operations;
+ inode->i_fop = &reiserfs_file_operations;
+ inode->i_mapping->a_ops = &reiserfs_address_space_operations ;
+ } else if (S_ISDIR (inode->i_mode)) {
+ inode->i_op = &reiserfs_dir_inode_operations;
+ inode->i_fop = &reiserfs_dir_operations;
+ } else if (S_ISLNK (inode->i_mode)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &reiserfs_address_space_operations;
+ } else {
+ inode->i_blocks = 0;
+ init_special_inode(inode, inode->i_mode, rdev) ;
+ }
+}
+
+
+// update new stat data with inode fields
+static void inode2sd (void * sd, struct inode * inode)
+{
+ struct stat_data * sd_v2 = (struct stat_data *)sd;
+
+ sd_v2->sd_mode = cpu_to_le16 (inode->i_mode);
+ sd_v2->sd_nlink = cpu_to_le16 (inode->i_nlink);
+ sd_v2->sd_uid = cpu_to_le32 (inode->i_uid);
+ sd_v2->sd_size = cpu_to_le64 (inode->i_size);
+ sd_v2->sd_gid = cpu_to_le32 (inode->i_gid);
+ sd_v2->sd_mtime = cpu_to_le32 (inode->i_mtime);
+ sd_v2->sd_atime = cpu_to_le32 (inode->i_atime);
+ sd_v2->sd_ctime = cpu_to_le32 (inode->i_ctime);
+ sd_v2->sd_blocks = cpu_to_le32 (inode->i_blocks);
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+ sd_v2->u.sd_rdev = cpu_to_le32 (inode->i_rdev);
+}
+
+
+// Fill an old-format (v1) on-disk stat data from the in-core inode.
+// The v1 union holds either the device number (for char/block devices)
+// or the block count (for everything else), so exactly one of the two
+// is written.  v1 also persists i_first_direct_byte, which v2 dropped.
+static void inode2sd_v1 (void * sd, struct inode * inode)
+{
+ struct stat_data_v1 * sd_v1 = (struct stat_data_v1 *)sd;
+
+ sd_v1->sd_mode = cpu_to_le16 (inode->i_mode);
+ sd_v1->sd_uid = cpu_to_le16 (inode->i_uid);
+ sd_v1->sd_gid = cpu_to_le16 (inode->i_gid);
+ sd_v1->sd_nlink = cpu_to_le16 (inode->i_nlink);
+ sd_v1->sd_size = cpu_to_le32 (inode->i_size);
+ sd_v1->sd_atime = cpu_to_le32 (inode->i_atime);
+ sd_v1->sd_ctime = cpu_to_le32 (inode->i_ctime);
+ sd_v1->sd_mtime = cpu_to_le32 (inode->i_mtime);
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+ sd_v1->u.sd_rdev = cpu_to_le32 (inode->i_rdev);
+ else
+ sd_v1->u.sd_blocks = cpu_to_le32 (inode->i_blocks);
+
+ // Sigh. i_first_direct_byte is back
+ sd_v1->sd_first_direct_byte = cpu_to_le32 (inode->u.reiserfs_i.i_first_direct_byte);
+}
+
+
+/* Copy the in-core inode fields into the stat data item that `path`
+** points at, dispatching on the on-disk stat data version (v1 or v2).
+** Panics if the path does not point at a stat data item.
+**
+** NOTE, you must prepare the buffer head before sending it here,
+** and then log it after the call
+*/
+static void update_stat_data (struct path * path, struct inode * inode)
+{
+ struct buffer_head * bh;
+ struct item_head * ih;
+
+ bh = PATH_PLAST_BUFFER (path);
+ ih = PATH_PITEM_HEAD (path);
+
+ if (!is_statdata_le_ih (ih))
+ reiserfs_panic (inode->i_sb, "vs-13065: update_stat_data: key %k, found item %h",
+ INODE_PKEY (inode), ih);
+
+ if (stat_data_v1 (ih)) {
+ // path points to old stat data
+ inode2sd_v1 (B_I_PITEM (bh, ih), inode);
+ } else {
+ inode2sd (B_I_PITEM (bh, ih), inode);
+ }
+
+ return;
+}
+
+
+/* Find the inode's stat data item in the tree and rewrite it from the
+** in-core inode, logging the change in the transaction `th`.  The search
+** and journal-prepare are retried in a loop: prepare_for_journal can
+** schedule, and if the tree changed underneath us the item may have
+** moved, so we search again.  I/O errors and a missing stat data for a
+** still-linked inode are reported via reiserfs_warning and swallowed.
+*/
+void reiserfs_update_sd (struct reiserfs_transaction_handle *th,
+ struct inode * inode)
+{
+ struct cpu_key key;
+ INITIALIZE_PATH(path);
+ struct buffer_head *bh ;
+ int fs_gen ;
+ struct item_head *ih, tmp_ih ;
+ int retval;
+
+ make_cpu_key (&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);//key type is unimportant
+
+ for(;;) {
+ int pos;
+ /* look for the object's stat data */
+ retval = search_item (inode->i_sb, &key, &path);
+ if (retval == IO_ERROR) {
+ reiserfs_warning ("vs-13050: reiserfs_update_sd: "
+ "i/o failure occurred trying to update %K stat data",
+ &key);
+ return;
+ }
+ if (retval == ITEM_NOT_FOUND) {
+ pos = PATH_LAST_POSITION (&path);
+ pathrelse(&path) ;
+ if (inode->i_nlink == 0) {
+ /* stat data already deleted along with the last link; nothing to do */
+ /*printk ("vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found\n");*/
+ return;
+ }
+ reiserfs_warning ("vs-13060: reiserfs_update_sd: "
+ "stat data of object %k (nlink == %d) not found (pos %d)\n",
+ INODE_PKEY (inode), inode->i_nlink, pos);
+ reiserfs_check_path(&path) ;
+ return;
+ }
+
+ /* sigh, prepare_for_journal might schedule. When it schedules the
+ ** FS might change. We have to detect that, and loop back to the
+ ** search if the stat data item has moved
+ */
+ bh = get_bh(&path) ;
+ ih = get_ih(&path) ;
+ copy_item_head (&tmp_ih, ih);
+ fs_gen = get_generation (inode->i_sb);
+ reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+ if (fs_changed (fs_gen, inode->i_sb) && item_moved(&tmp_ih, &path)) {
+ reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+ continue ; /* Stat_data item has been moved after scheduling. */
+ }
+ break;
+ }
+ update_stat_data (&path, inode);
+ journal_mark_dirty(th, th->t_super, bh) ;
+ pathrelse (&path);
+ return;
+}
+
+/* Plain VFS read_inode has no way to pass the parent directory id that
+** reiserfs needs to build the stat data key (see reiserfs_read_inode2,
+** which receives it via iget4 args), so any inode arriving here is
+** unconditionally marked bad. */
+void reiserfs_read_inode(struct inode *inode) {
+ make_bad_inode(inode) ;
+}
+
+
+//
+// initially this function was derived from minix or ext2's analog and
+// evolved as the prototype did
+//
+
+/* looks for stat data in the tree, and fills up the fields of in-core
+ inode stat data fields.  `p` is the iget4 opaque argument: a struct
+ reiserfs_iget4_args carrying the parent directory objectid, which is
+ needed to compose the stat data key.  On any failure (missing args,
+ I/O error, item not found) the inode is marked bad. */
+void reiserfs_read_inode2 (struct inode * inode, void *p)
+{
+ INITIALIZE_PATH (path_to_sd);
+ struct cpu_key key;
+ struct reiserfs_iget4_args *args = (struct reiserfs_iget4_args *)p ;
+ unsigned long dirino;
+ int retval;
+
+ if (!p) {
+ make_bad_inode(inode) ;
+ return;
+ }
+
+ dirino = args->objectid ;
+
+ /* set version 1, version 2 could be used too, because stat data
+ key is the same in both versions */
+ key.version = ITEM_VERSION_1;
+ key.on_disk_key.k_dir_id = dirino;
+ key.on_disk_key.k_objectid = inode->i_ino;
+ key.on_disk_key.u.k_offset_v1.k_offset = SD_OFFSET;
+ key.on_disk_key.u.k_offset_v1.k_uniqueness = SD_UNIQUENESS;
+
+ /* look for the object's stat data */
+ retval = search_item (inode->i_sb, &key, &path_to_sd);
+ if (retval == IO_ERROR) {
+ reiserfs_warning ("vs-13070: reiserfs_read_inode2: "
+ "i/o failure occurred trying to find stat data of %K\n",
+ &key);
+ make_bad_inode(inode) ;
+ return;
+ }
+ if (retval != ITEM_FOUND) {
+ reiserfs_warning ("vs-13042: reiserfs_read_inode2: %K not found\n", &key);
+ pathrelse (&path_to_sd);
+ make_bad_inode(inode) ;
+ return;
+ }
+
+ init_inode (inode, &path_to_sd);
+ reiserfs_check_path(&path_to_sd) ; /* init inode should be relsing */
+
+}
+
+
+/* Look up an in-core inode by its on-disk key via iget4 (which may call
+** reiserfs_read_inode2 to read the stat data).  Returns NULL when iget4
+** fails or when the stat data could not be found (the inode came back
+** marked bad, in which case the reference is dropped first). */
+struct inode * reiserfs_iget (struct super_block * s, struct cpu_key * key)
+{
+ struct inode * inode;
+ struct reiserfs_iget4_args args ;
+
+ args.objectid = key->on_disk_key.k_dir_id ;
+ inode = iget4 (s, key->on_disk_key.k_objectid, 0, (void *)(&args));
+ if (!inode)
+ return inode ;
+
+ // if (comp_short_keys (INODE_PKEY (inode), key)) {
+ if (is_bad_inode (inode)) {
+ reiserfs_warning ("vs-13048: reiserfs_iget: "
+ "bad_inode. Stat data of (%lu %lu) not found\n",
+ key->on_disk_key.k_dir_id, key->on_disk_key.k_objectid);
+ iput (inode);
+ inode = 0;
+ }
+ return inode;
+}
+
+
+//
+// initially this function was derived from minix or ext2's analog and
+// evolved as the prototype did
+//
+/* looks for stat data, then copies fields to it, marks the buffer
+ containing stat data as dirty */
+/* reiserfs inodes are never really dirty, since the dirty inode call
+** always logs them. This call allows the VFS inode marking routines
+** to properly mark inodes for datasync and such, but only actually
+** does something when called for a synchronous update.
+** On a read-only FS this only warns; asynchronous updates are a no-op
+** because reiserfs_dirty_inode has already logged the stat data.
+*/
+void reiserfs_write_inode (struct inode * inode, int do_sync) {
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = 1 ;
+
+ if (inode->i_sb->s_flags & MS_RDONLY) {
+ reiserfs_warning("clm-6005: writing inode %lu on readonly FS\n",
+ inode->i_ino) ;
+ return ;
+ }
+ if (do_sync) {
+ lock_kernel() ;
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+ reiserfs_update_sd (&th, inode);
+ /* end_sync forces the transaction to disk before returning */
+ journal_end_sync(&th, inode->i_sb, jbegin_count) ;
+ unlock_kernel() ;
+ }
+}
+
+/* VFS dirty_inode hook: log the inode's stat data in a small (1-block)
+** transaction under the big kernel lock.  Warns and bails on a
+** read-only filesystem. */
+void reiserfs_dirty_inode (struct inode * inode) {
+ struct reiserfs_transaction_handle th ;
+
+ if (inode->i_sb->s_flags & MS_RDONLY) {
+ reiserfs_warning("clm-6006: writing inode %lu on readonly FS\n",
+ inode->i_ino) ;
+ return ;
+ }
+ lock_kernel() ;
+ journal_begin(&th, inode->i_sb, 1) ;
+ reiserfs_update_sd (&th, inode);
+ journal_end(&th, inode->i_sb, 1) ;
+ unlock_kernel() ;
+}
+
+
+/* FIXME: no need any more. right? */
+/* Thin wrapper over reiserfs_update_sd within an existing transaction;
+** always returns 0 (errors inside reiserfs_update_sd are not propagated). */
+int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode)
+{
+ int err = 0;
+
+ reiserfs_update_sd (th, inode);
+ return err;
+}
+
+
+/* stat data of new object is inserted already, this inserts the item
+ containing "." and ".." entries.  Builds the empty-directory body in
+ old (v1) or new format depending on the superblock, finds the insert
+ position, and inserts the item.  Returns 0 on success, -EIO on search
+ failure, -EEXIST if an item with this key is already present, or the
+ result of reiserfs_insert_item. */
+static int reiserfs_new_directory (struct reiserfs_transaction_handle *th,
+ struct item_head * ih, struct path * path, const struct inode * dir)
+{
+ struct super_block * sb = th->t_super;
+ char empty_dir [EMPTY_DIR_SIZE];
+ char * body = empty_dir;
+ struct cpu_key key;
+ int retval;
+
+ _make_cpu_key (&key, ITEM_VERSION_1, le32_to_cpu (ih->ih_key.k_dir_id),
+ le32_to_cpu (ih->ih_key.k_objectid), DOT_OFFSET, TYPE_DIRENTRY, 3/*key length*/);
+
+ /* compose item head for new item. Directories consist of items of
+ old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
+ is done by reiserfs_new_inode */
+ if (old_format_only (sb)) {
+ make_le_item_head (ih, 0, ITEM_VERSION_1, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
+
+ make_empty_dir_item_v1 (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
+ le32_to_cpu (INODE_PKEY (dir)->k_dir_id),
+ le32_to_cpu (INODE_PKEY (dir)->k_objectid));
+ } else {
+ make_le_item_head (ih, 0, ITEM_VERSION_1, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
+
+ make_empty_dir_item (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
+ le32_to_cpu (INODE_PKEY (dir)->k_dir_id),
+ le32_to_cpu (INODE_PKEY (dir)->k_objectid));
+ }
+
+ /* look for place in the tree for new item */
+ retval = search_item (sb, &key, path);
+ if (retval == IO_ERROR) {
+ reiserfs_warning ("vs-13080: reiserfs_new_directory: "
+ "i/o failure occured creating new directory\n");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+ pathrelse (path);
+ reiserfs_warning ("vs-13070: reiserfs_new_directory: "
+ "object with this key exists (%k)", &(ih->ih_key));
+ return -EEXIST;
+ }
+
+ /* insert item, that is empty directory item */
+ return reiserfs_insert_item (th, path, &key, ih, body);
+}
+
+
+/* stat data of object has been inserted, this inserts the item
+ containing the body of symlink.  The body is a direct item at offset 1
+ (the first byte of the file).  Returns 0 on success, -EIO on search
+ failure, -EEXIST if the key already exists, or the insert result.
+ NOTE(review): the vs-13080 warning below spells "symlinik" — runtime
+ string left as-is. */
+static int reiserfs_new_symlink (struct reiserfs_transaction_handle *th,
+ struct item_head * ih,
+ struct path * path, const char * symname, int item_len)
+{
+ struct super_block * sb = th->t_super;
+ struct cpu_key key;
+ int retval;
+
+ _make_cpu_key (&key, ITEM_VERSION_1,
+ le32_to_cpu (ih->ih_key.k_dir_id),
+ le32_to_cpu (ih->ih_key.k_objectid),
+ 1, TYPE_DIRECT, 3/*key length*/);
+
+ make_le_item_head (ih, 0, ITEM_VERSION_1, 1, TYPE_DIRECT, item_len, 0/*free_space*/);
+
+ /* look for place in the tree for new item */
+ retval = search_item (sb, &key, path);
+ if (retval == IO_ERROR) {
+ reiserfs_warning ("vs-13080: reiserfs_new_symlinik: "
+ "i/o failure occured creating new symlink\n");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+ pathrelse (path);
+ reiserfs_warning ("vs-13080: reiserfs_new_symlink: "
+ "object with this key exists (%k)", &(ih->ih_key));
+ return -EEXIST;
+ }
+
+ /* insert item, that is body of symlink */
+ return reiserfs_insert_item (th, path, &key, ih, symname);
+}
+
+
+/* inserts the stat data into the tree, and then calls
+ reiserfs_new_directory (to insert ".", ".." item if new object is
+ directory) or reiserfs_new_symlink (to insert symlink body if new
+ object is symlink) or nothing (if new object is regular file).
+ On any failure the new inode is iput, *err carries the error code,
+ and NULL is returned; on success the fully initialized, hashed inode
+ is returned (not marked dirty -- disk already matches core). */
+struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
+ const struct inode * dir, int mode,
+ const char * symname,
+ int i_size, /* 0 for regular, EMTRY_DIR_SIZE for dirs,
+ strlen (symname) for symlinks)*/
+ struct dentry *dentry, struct inode *inode, int * err)
+{
+ struct super_block * sb;
+ INITIALIZE_PATH (path_to_key);
+ struct cpu_key key;
+ struct item_head ih;
+ struct stat_data sd;
+ int retval;
+
+ /* refuse to create under a deleted (nlink == 0) directory */
+ if (!dir || !dir->i_nlink) {
+ *err = -EPERM;
+ iput(inode) ;
+ return NULL;
+ }
+
+ sb = dir->i_sb;
+ inode->i_sb = sb;
+ inode->i_flags = 0;//inode->i_sb->s_flags;
+
+ /* item head of new item */
+ ih.ih_key.k_dir_id = INODE_PKEY (dir)->k_objectid;
+ ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th));
+ if (!ih.ih_key.k_objectid) {
+ /* object id map exhausted */
+ iput(inode) ;
+ *err = -ENOMEM;
+ return NULL;
+ }
+ if (old_format_only (sb))
+ make_le_item_head (&ih, 0, ITEM_VERSION_1, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
+ else
+ make_le_item_head (&ih, 0, ITEM_VERSION_2, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
+
+
+ /* key to search for correct place for new stat data */
+ _make_cpu_key (&key, ITEM_VERSION_2, le32_to_cpu (ih.ih_key.k_dir_id),
+ le32_to_cpu (ih.ih_key.k_objectid), SD_OFFSET, TYPE_STAT_DATA, 3/*key length*/);
+
+ /* find proper place for inserting of stat data */
+ retval = search_item (sb, &key, &path_to_key);
+ if (retval == IO_ERROR) {
+ iput (inode);
+ *err = -EIO;
+ return NULL;
+ }
+ if (retval == ITEM_FOUND) {
+ pathrelse (&path_to_key);
+ iput (inode);
+ *err = -EEXIST;
+ return NULL;
+ }
+
+ /* fill stat data */
+ inode->i_mode = mode;
+ inode->i_nlink = (S_ISDIR (mode) ? 2 : 1);
+ inode->i_uid = current->fsuid;
+ /* BSD-style group semantics: inherit gid from a setgid directory,
+ ** and propagate the setgid bit to subdirectories */
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ inode->i_mode |= S_ISGID;
+ } else
+ inode->i_gid = current->fsgid;
+
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_size = i_size;
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ inode->u.reiserfs_i.i_first_direct_byte = S_ISLNK(mode) ? 1 :
+ U32_MAX/*NO_BYTES_IN_DIRECT_ITEM*/;
+
+ if (old_format_only (sb))
+ inode2sd_v1 (&sd, inode);
+ else
+ inode2sd (&sd, inode);
+
+ // these do not go to on-disk stat data
+ inode->i_ino = le32_to_cpu (ih.ih_key.k_objectid);
+ inode->i_blksize = PAGE_SIZE;
+ inode->i_dev = sb->s_dev;
+
+ // store in in-core inode the key of stat data and version all
+ // object items will have (directory items will have old offset
+ // format, other new objects will consist of new items)
+ memcpy (INODE_PKEY (inode), &(ih.ih_key), KEY_SIZE);
+ if (old_format_only (sb) || S_ISDIR(mode) || S_ISLNK(mode))
+ inode_items_version (inode) = ITEM_VERSION_1;
+ else
+ inode_items_version (inode) = ITEM_VERSION_2;
+
+ /* insert the stat data into the tree */
+ retval = reiserfs_insert_item (th, &path_to_key, &key, &ih, (char *)(&sd));
+ if (retval) {
+ iput (inode);
+ *err = retval;
+ reiserfs_check_path(&path_to_key) ;
+ return NULL;
+ }
+
+ if (S_ISDIR(mode)) {
+ /* insert item with "." and ".." */
+ retval = reiserfs_new_directory (th, &ih, &path_to_key, dir);
+ }
+
+ if (S_ISLNK(mode)) {
+ /* insert body of symlink */
+ if (!old_format_only (sb))
+ i_size = ROUND_UP(i_size);
+ retval = reiserfs_new_symlink (th, &ih, &path_to_key, symname, i_size);
+ }
+ if (retval) {
+ /* drop the link we gave it above so iput actually deletes */
+ inode->i_nlink = 0;
+ iput (inode);
+ *err = retval;
+ reiserfs_check_path(&path_to_key) ;
+ return NULL;
+ }
+
+ /* not a perfect generation count, as object ids can be reused, but this
+ ** is as good as reiserfs can do right now
+ */
+ inode->i_generation = INODE_PKEY (inode)->k_dir_id;
+ insert_inode_hash (inode);
+ // we do not mark inode dirty: on disk content matches to the
+ // in-core one
+ reiserfs_check_path(&path_to_key) ;
+
+ return inode;
+}
+
+/*
+** finds the tail page in the page cache,
+** reads the last block in.
+**
+** On success, page_result is set to a locked, pinned page, and bh_result
+** is set to an up to date buffer for the last block in the file. returns 0.
+**
+** tail conversion is not done, so bh_result might not be valid for writing
+** check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
+** trying to write the block.
+**
+** on failure, nonzero is returned, page_result and bh_result are untouched.
+*/
+static int grab_tail_page(struct inode *p_s_inode,
+ struct page **page_result,
+ struct buffer_head **bh_result) {
+
+ /* we want the page with the last byte in the file,
+ ** not the page that will hold the next byte for appending
+ */
+ unsigned long index = (p_s_inode->i_size-1) >> PAGE_CACHE_SHIFT ;
+ unsigned long pos = 0 ;
+ unsigned long start = 0 ;
+ unsigned long blocksize = p_s_inode->i_sb->s_blocksize ;
+ unsigned long offset = (p_s_inode->i_size) & (PAGE_CACHE_SIZE - 1) ;
+ struct buffer_head *bh ;
+ struct buffer_head *head ;
+ struct page * page ;
+ int error ;
+
+ /* we know that we are only called with inode->i_size > 0.
+ ** we also know that a file tail can never be as big as a block
+ ** If i_size % blocksize == 0, our file is currently block aligned
+ ** and it won't need converting or zeroing after a truncate.
+ */
+ if ((offset & (blocksize - 1)) == 0) {
+ return -ENOENT ;
+ }
+ page = grab_cache_page(p_s_inode->i_mapping, index) ;
+ error = PTR_ERR(page) ;
+ if (IS_ERR(page)) {
+ goto out ;
+ }
+ /* start within the page of the last block in the file */
+ start = (offset / blocksize) * blocksize ;
+
+ error = block_prepare_write(page, start, offset,
+ reiserfs_get_block_create_0) ;
+ if (error)
+ goto unlock ;
+
+ kunmap(page) ; /* mapped by block_prepare_write */
+
+ /* walk the page's buffer ring to the buffer covering `start` */
+ head = page->buffers ;
+ bh = head;
+ do {
+ if (pos >= start) {
+ break ;
+ }
+ bh = bh->b_this_page ;
+ pos += blocksize ;
+ } while(bh != head) ;
+
+ if (!buffer_uptodate(bh)) {
+ /* note, this should never happen, prepare_write should
+ ** be taking care of this for us. If the buffer isn't up to date,
+ ** I've screwed up the code to find the buffer, or the code to
+ ** call prepare_write
+ */
+ reiserfs_warning("clm-6000: error reading block %lu on dev %s\n",
+ bh->b_blocknr, kdevname(bh->b_dev)) ;
+ error = -EIO ;
+ goto unlock ;
+ }
+ *bh_result = bh ;
+ *page_result = page ;
+
+out:
+ return error ;
+
+unlock:
+ UnlockPage(page) ;
+ page_cache_release(page) ;
+ return error ;
+}
+
+/*
+** vfs version of truncate file. Must NOT be called with
+** a transaction already started.
+**
+** Grabs the tail page (if any), runs the truncate inside its own
+** transaction, then zeroes the portion of the last block past the new
+** EOF when the tail is an unformatted node (mapped bh with a real
+** block number); direct items need no zeroing.
+**
+** some code taken from block_truncate_page
+*/
+void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
+ struct reiserfs_transaction_handle th ;
+ int windex ;
+
+ /* we want the offset for the first byte after the end of the file */
+ unsigned long offset = p_s_inode->i_size & (PAGE_CACHE_SIZE - 1) ;
+ unsigned blocksize = p_s_inode->i_sb->s_blocksize ;
+ unsigned length ;
+ struct page *page = NULL ;
+ int error ;
+ struct buffer_head *bh = NULL ;
+
+ if (p_s_inode->i_size > 0) {
+ if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
+ // -ENOENT means we truncated past the end of the file,
+ // and get_block_create_0 could not find a block to read in,
+ // which is ok.
+ if (error != -ENOENT)
+ reiserfs_warning("clm-6001: grab_tail_page failed %d\n", error);
+ page = NULL ;
+ bh = NULL ;
+ }
+ }
+
+ /* so, if page != NULL, we have a buffer head for the offset at
+ ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+ ** then we have an unformatted node. Otherwise, we have a direct item,
+ ** and no zeroing is required. We zero after the truncate, because the
+ ** truncate might pack the item anyway (it will unmap bh if it packs).
+ */
+ prevent_flush_page_lock(page, p_s_inode) ;
+ journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
+ windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
+ reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
+ pop_journal_writer(windex) ;
+ journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
+ allow_flush_page_lock(page, p_s_inode) ;
+
+ if (page && buffer_mapped(bh) && bh->b_blocknr != 0) {
+ length = offset & (blocksize - 1) ;
+ /* if we are not on a block boundary */
+ if (length) {
+ length = blocksize - length ;
+ memset((char *)kmap(page) + offset, 0, length) ;
+ flush_dcache_page(page) ;
+ kunmap(page) ;
+ mark_buffer_dirty(bh) ;
+ }
+ }
+
+ if (page) {
+ UnlockPage(page) ;
+ page_cache_release(page) ;
+ }
+ return ;
+}
+
+/* Map `block` of `inode` for writepage.  If the block lives in an
+** indirect item (unformatted node), bh_result is dev-mapped to it.
+** If it lives in a direct item (packed tail), the page contents are
+** copied straight into the item inside a transaction and bh_result is
+** left unmapped (blocknr 0).  Holes and not-found positions fall back
+** to reiserfs_get_block with create=1.  Returns 0 or -errno. */
+static int map_block_for_writepage(struct inode *inode,
+ struct buffer_head *bh_result,
+ unsigned long block) {
+ struct reiserfs_transaction_handle th ;
+ int fs_gen ;
+ struct item_head tmp_ih ;
+ struct item_head *ih ;
+ struct buffer_head *bh ;
+ __u32 *item ;
+ struct cpu_key key ;
+ INITIALIZE_PATH(path) ;
+ int pos_in_item ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT ;
+ loff_t byte_offset = (block << inode->i_sb->s_blocksize_bits) + 1 ;
+ int retval ;
+ int use_get_block = 0 ;
+ int bytes_copied = 0 ;
+ int copy_size ;
+
+start_over:
+ lock_kernel() ;
+ prevent_flush_page_lock(bh_result->b_page, inode) ;
+ journal_begin(&th, inode->i_sb, jbegin_count) ;
+
+ make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3) ;
+
+research:
+ retval = search_for_position_by_key(inode->i_sb, &key, &path) ;
+ if (retval != POSITION_FOUND) {
+ use_get_block = 1;
+ goto out ;
+ }
+
+ bh = get_bh(&path) ;
+ ih = get_ih(&path) ;
+ item = get_item(&path) ;
+ pos_in_item = path.pos_in_item ;
+
+ /* we've found an unformatted node */
+ if (indirect_item_found(retval, ih)) {
+ if (bytes_copied > 0) {
+ reiserfs_warning("clm-6002: bytes_copied %d\n", bytes_copied) ;
+ }
+ if (!item[pos_in_item]) {
+ /* crap, we are writing to a hole */
+ use_get_block = 1;
+ goto out ;
+ }
+ set_block_dev_mapped(bh_result, le32_to_cpu(item[pos_in_item]), inode);
+ } else if (is_direct_le_ih(ih)) {
+ /* tail packed in a direct item: copy page data into the item */
+ char *p ;
+ p = page_address(bh_result->b_page) ;
+ p += (byte_offset -1) & (PAGE_CACHE_SIZE - 1) ;
+ copy_size = le16_to_cpu(ih->ih_item_len) - pos_in_item ;
+
+ /* journal-prepare may schedule; redo the search if the item moved */
+ fs_gen = get_generation(inode->i_sb) ;
+ copy_item_head(&tmp_ih, ih) ;
+ reiserfs_prepare_for_journal(inode->i_sb, bh, 1) ;
+ if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
+ reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
+ goto research;
+ }
+
+ memcpy( B_I_PITEM(bh, ih) + pos_in_item, p + bytes_copied, copy_size) ;
+
+ journal_mark_dirty(&th, inode->i_sb, bh) ;
+ bytes_copied += copy_size ;
+ set_block_dev_mapped(bh_result, 0, inode);
+
+ /* are there still bytes left? */
+ if (bytes_copied < bh_result->b_size &&
+ (byte_offset + bytes_copied) < inode->i_size) {
+ set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + copy_size) ;
+ goto research ;
+ }
+ } else {
+ reiserfs_warning("clm-6003: bad item inode %lu, device %s\n", inode->i_ino, kdevname(inode->i_sb->s_dev)) ;
+ retval = -EIO ;
+ goto out ;
+ }
+ retval = 0 ;
+
+out:
+ pathrelse(&path) ;
+ journal_end(&th, inode->i_sb, jbegin_count) ;
+ allow_flush_page_lock(bh_result->b_page, inode) ;
+ unlock_kernel() ;
+
+ /* this is where we fill in holes in the file. */
+ if (use_get_block) {
+ kmap(bh_result->b_page) ;
+ retval = reiserfs_get_block(inode, block, bh_result, 1) ;
+ kunmap(bh_result->b_page) ;
+ if (!retval) {
+ if (!buffer_mapped(bh_result) || bh_result->b_blocknr == 0) {
+ /* get_block failed to find a mapped unformatted node. */
+ use_get_block = 0 ;
+ goto start_over ;
+ }
+ }
+ }
+ return retval ;
+}
+
+/* helper func to get a buffer head ready for writepage to send to
+** ll_rw_block.  Each buffer is locked, pinned (the async end_io handler
+** drops the extra b_count), flagged for async I/O, cleaned, marked
+** uptodate, and submitted for WRITE.
+*/
+static inline void submit_bh_for_writepage(struct buffer_head **bhp, int nr) {
+ struct buffer_head *bh ;
+ int i;
+ for(i = 0 ; i < nr ; i++) {
+ bh = bhp[i] ;
+ lock_buffer(bh) ;
+ atomic_inc(&bh->b_count) ; /* async end_io handler decs this */
+ set_buffer_async_io(bh) ;
+ /* submit_bh doesn't care if the buffer is dirty, but nobody
+ ** later on in the call chain will be cleaning it. So, we
+ ** clean the buffer here, it still gets written either way.
+ */
+ clear_bit(BH_Dirty, &bh->b_state) ;
+ set_bit(BH_Uptodate, &bh->b_state) ;
+ submit_bh(WRITE, bh) ;
+ }
+}
+
+/* Write one page of a file: collect the page's buffers that map to
+** unformatted nodes into `arr` (mapping them via map_block_for_writepage
+** when needed -- direct items are logged there instead of submitted),
+** zero the region past EOF on the last page, then submit the collected
+** buffers.  Unlocks the page itself when nothing is submitted. */
+static int reiserfs_write_full_page(struct page *page) {
+ struct inode *inode = page->mapping->host ;
+ unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT ;
+ unsigned last_offset = PAGE_CACHE_SIZE;
+ int error = 0;
+ unsigned long block ;
+ unsigned cur_offset = 0 ;
+ struct buffer_head *head, *bh ;
+ int partial = 0 ;
+ struct buffer_head *arr[PAGE_CACHE_SIZE/512] ;
+ int nr = 0 ;
+
+ if (!page->buffers) {
+ /* from==to==0: only allocates the buffer ring, writes nothing */
+ block_prepare_write(page, 0, 0, NULL) ;
+ kunmap(page) ;
+ }
+ /* last page in the file, zero out any contents past the
+ ** last byte in the file
+ */
+ if (page->index >= end_index) {
+ last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1) ;
+ /* no file contents in this page */
+ if (page->index >= end_index + 1 || !last_offset) {
+ error = -EIO ;
+ goto fail ;
+ }
+ memset((char *)kmap(page)+last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
+ flush_dcache_page(page) ;
+ kunmap(page) ;
+ }
+ head = page->buffers ;
+ bh = head ;
+ block = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits) ;
+ do {
+ /* if this offset in the page is outside the file */
+ if (cur_offset >= last_offset) {
+ if (!buffer_uptodate(bh))
+ partial = 1 ;
+ } else {
+ /* fast path, buffer mapped to an unformatted node */
+ if (buffer_mapped(bh) && bh->b_blocknr != 0) {
+ arr[nr++] = bh ;
+ } else {
+ /* buffer not mapped yet, or points to a direct item.
+ ** search and dirty or log
+ */
+ if ((error = map_block_for_writepage(inode, bh, block))) {
+ goto fail ;
+ }
+ /* map_block_for_writepage either found an unformatted node
+ ** and mapped it for us, or it found a direct item
+ ** and logged the changes.
+ */
+ if (buffer_mapped(bh) && bh->b_blocknr != 0) {
+ arr[nr++] = bh ;
+ }
+ }
+ }
+ /* NOTE(review): cur_offset is advanced by the size of the NEXT
+ ** buffer (bh already advanced); harmless only if all buffers on
+ ** the page share one size -- confirm against buffer ring setup */
+ bh = bh->b_this_page ;
+ cur_offset += bh->b_size ;
+ block++ ;
+ } while(bh != head) ;
+
+ /* if this page only had a direct item, it is very possible for
+ ** nr == 0 without there being any kind of error.
+ */
+ if (nr) {
+ submit_bh_for_writepage(arr, nr) ;
+ } else {
+ UnlockPage(page) ;
+ }
+ if (!partial)
+ SetPageUptodate(page) ;
+
+ return 0 ;
+
+fail:
+ if (nr) {
+ submit_bh_for_writepage(arr, nr) ;
+ } else {
+ UnlockPage(page) ;
+ }
+ ClearPageUptodate(page) ;
+ return error ;
+}
+
+//
+// this is exactly what 2.3.99-pre9's ext2_readpage is
+//
+/* address_space readpage: delegate to the generic block reader using
+** reiserfs_get_block for mapping */
+static int reiserfs_readpage (struct file *f, struct page * page)
+{
+ return block_read_full_page (page, reiserfs_get_block);
+}
+
+
+//
+// modified from ext2_writepage is
+//
+/* address_space writepage: wait out any journal write block, then do
+** the real work in reiserfs_write_full_page */
+static int reiserfs_writepage (struct page * page)
+{
+ struct inode *inode = page->mapping->host ;
+ reiserfs_wait_on_write_block(inode->i_sb) ;
+ return reiserfs_write_full_page(page) ;
+}
+
+
+//
+// from ext2_prepare_write, but modified
+//
+/* address_space prepare_write: wait out journal write block, fix up a
+** packed tail page if needed, then use the generic helper with
+** reiserfs_get_block */
+int reiserfs_prepare_write(struct file *f, struct page *page, unsigned from, unsigned to) {
+ struct inode *inode = page->mapping->host ;
+ reiserfs_wait_on_write_block(inode->i_sb) ;
+ fix_tail_page_for_writing(page) ;
+ return block_prepare_write(page, from, to, reiserfs_get_block) ;
+}
+
+
+//
+// this is exactly what 2.3.99-pre9's ext2_bmap is
+//
+/* address_space bmap: logical-to-physical block lookup via reiserfs_bmap */
+static int reiserfs_aop_bmap(struct address_space *as, long block) {
+ return generic_block_bmap(as, block, reiserfs_bmap) ;
+}
+
+
+/* address_space commit_write: run the generic commit under the BKL and
+** the flush-page lock, then for O_SYNC files force a synchronous
+** journal commit (logging the superblock buffer) so any packed-tail
+** changes reach disk before return. */
+static int reiserfs_commit_write(struct file *f, struct page *page,
+ unsigned from, unsigned to) {
+ struct inode *inode = page->mapping->host ;
+ int ret ;
+ struct reiserfs_transaction_handle th ;
+
+ reiserfs_wait_on_write_block(inode->i_sb) ;
+ lock_kernel();
+ prevent_flush_page_lock(page, inode) ;
+ ret = generic_commit_write(f, page, from, to) ;
+ /* we test for O_SYNC here so we can commit the transaction
+ ** for any packed tails the file might have had
+ */
+ if (f->f_flags & O_SYNC) {
+ journal_begin(&th, inode->i_sb, 1) ;
+ reiserfs_prepare_for_journal(inode->i_sb,
+ SB_BUFFER_WITH_SB(inode->i_sb), 1) ;
+ journal_mark_dirty(&th, inode->i_sb, SB_BUFFER_WITH_SB(inode->i_sb)) ;
+ journal_end_sync(&th, inode->i_sb, 1) ;
+ }
+ allow_flush_page_lock(page, inode) ;
+ unlock_kernel();
+ return ret ;
+}
+
+/* VFS address_space operation table for reiserfs data pages */
+struct address_space_operations reiserfs_address_space_operations = {
+ writepage: reiserfs_writepage,
+ readpage: reiserfs_readpage,
+ sync_page: block_sync_page,
+ prepare_write: reiserfs_prepare_write,
+ commit_write: reiserfs_commit_write,
+ bmap: reiserfs_aop_bmap
+} ;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
new file mode 100644
index 000000000..72054ce3a
--- /dev/null
+++ b/fs/reiserfs/ioctl.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/locks.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+/*
+** reiserfs_ioctl - handler for ioctl for inode
+** supported commands:
+** 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
+** and prevent packing file (argument arg has to be non-zero)
+** 2) That's all for a while ...
+** Returns -ENOTTY for unknown commands (and for UNPACK with arg == 0).
+*/
+int reiserfs_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case REISERFS_IOC_UNPACK:
+ if (arg)
+ return reiserfs_unpack (inode, filp);
+ /* arg == 0: fall through to the unknown-command result */
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+** reiserfs_unpack
+** Function try to convert tail from direct item into indirect.
+** It set up nopack attribute in the inode.u.reiserfs_i.nopack
+** Serializes against size changes via i_sem and runs under the BKL.
+** Returns 0 on success (or if already unpacked / block aligned),
+** -EINVAL for an empty file, or a prepare_write / page-cache error.
+*/
+int reiserfs_unpack (struct inode * inode, struct file * filp)
+{
+ int retval = 0;
+ int index ;
+ struct page *page ;
+ unsigned long write_from ;
+ unsigned long blocksize = inode->i_sb->s_blocksize ;
+
+ if (inode->i_size == 0) {
+ return -EINVAL ;
+ }
+ /* ioctl already done */
+ if (inode->u.reiserfs_i.nopack) {
+ return 0 ;
+ }
+ lock_kernel();
+
+ /* we need to make sure nobody is changing the file size beneath
+ ** us
+ */
+ down(&inode->i_sem) ;
+
+ write_from = inode->i_size & (blocksize - 1) ;
+ /* if we are on a block boundary, we are already unpacked. */
+ if ( write_from == 0) {
+ inode->u.reiserfs_i.nopack = 1;
+ goto out ;
+ }
+
+ /* we unpack by finding the page with the tail, and calling
+ ** reiserfs_prepare_write on that page. This will force a
+ ** reiserfs_get_block to unpack the tail for us.
+ */
+ index = inode->i_size >> PAGE_CACHE_SHIFT ;
+ page = grab_cache_page(inode->i_mapping, index) ;
+ retval = PTR_ERR(page) ;
+ if (IS_ERR(page)) {
+ goto out ;
+ }
+ retval = reiserfs_prepare_write(NULL, page, write_from, blocksize) ;
+ if (retval)
+ goto out_unlock ;
+
+ /* conversion can change page contents, must flush */
+ flush_dcache_page(page) ;
+ inode->u.reiserfs_i.nopack = 1;
+ kunmap(page) ; /* mapped by prepare_write */
+
+out_unlock:
+ UnlockPage(page) ;
+ page_cache_release(page) ;
+
+out:
+ up(&inode->i_sem) ;
+ unlock_kernel();
+ return retval;
+}
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
new file mode 100644
index 000000000..b3f962d05
--- /dev/null
+++ b/fs/reiserfs/item_ops.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+// this contains item handlers for old item types: sd, direct,
+// indirect, directory
+
+/* and where are the comments? how about saying where we can find an
+ explanation of each item handler method? -Hans */
+
+//////////////////////////////////////////////////////////////////////////////
+// stat data functions
+//
+/* a stat data item carries no file-body bytes */
+static int sd_bytes_number (struct item_head * ih, int block_size)
+{
+ return 0;
+}
+
+/* step a key back past a stat data item: previous objectid, any
+** type, maximal offset */
+static void sd_decrement_key (struct cpu_key * key)
+{
+ key->on_disk_key.k_objectid --;
+ set_cpu_key_k_type (key, TYPE_ANY);
+ set_cpu_key_k_offset(key, (loff_t)(-1));
+}
+
+/* stat data items never merge with their left neighbor */
+static int sd_is_left_mergeable (struct key * key, unsigned long bsize)
+{
+ return 0;
+}
+
+
+
+/* format a timestamp into a static buffer (NOT reentrant): readable
+** asctime form in user space builds, raw seconds in the kernel */
+static char * print_time (time_t t)
+{
+ static char timebuf[256];
+
+#ifndef __KERNEL__
+// struct tm *loctime;
+// loctime = localtime (&t);
+ sprintf (timebuf, "%s", asctime (localtime (&t)));
+ timebuf[strlen (timebuf) - 1] = 0;
+#else
+ sprintf (timebuf, "%ld", t);
+#endif
+ return timebuf;
+}
+
+
+/* debug dump of a stat data item, handling both the v1 and v2 on-disk
+** layouts.  NOTE(review): fields are printed without le*_to_cpu
+** conversion -- assumes little-endian host; confirm for big-endian. */
+static void sd_print_item (struct item_head * ih, char * item)
+{
+ printk ("\tmode | size | nlinks | first direct | mtime\n");
+ if (stat_data_v1 (ih)) {
+ struct stat_data_v1 * sd = (struct stat_data_v1 *)item;
+
+ printk ("\t0%-6o | %6u | %2u | %d | %s\n", sd->sd_mode, sd->sd_size,
+ sd->sd_nlink, sd->sd_first_direct_byte, print_time (sd->sd_mtime));
+ } else {
+ struct stat_data * sd = (struct stat_data *)item;
+
+ printk ("\t0%-6o | %6Lu | %2u | %d | %s\n", sd->sd_mode, (unsigned long long)(sd->sd_size),
+ sd->sd_nlink, sd->u.sd_rdev, print_time (sd->sd_mtime));
+ }
+}
+
+/* consistency-check stub for stat data items (not yet implemented) */
+static void sd_check_item (struct item_head * ih, char * item)
+{
+ // FIXME: type something here!
+}
+
+
+/* initialize a virtual item for a stat data item; no per-item user
+** area is needed, so no extra space is consumed */
+static int sd_create_vi (struct virtual_node * vn,
+ struct virtual_item * vi,
+ int is_affected,
+ int insert_size)
+{
+ vi->vi_index = TYPE_STAT_DATA;
+ //vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
+ return 0;
+}
+
+
+/* stat data can never be partially shifted to the left neighbor;
+** callers must not ask to skip entries of a stat data item */
+static int sd_check_left (struct virtual_item * vi, int free,
+ int start_skip, int end_skip)
+{
+ if (start_skip || end_skip)
+ BUG ();
+ return -1;
+}
+
+
+/* stat data can never be partially shifted to the right neighbor */
+static int sd_check_right (struct virtual_item * vi, int free)
+{
+ return -1;
+}
+
+/* a stat data item has no movable units, so any non-zero count is a bug */
+static int sd_part_size (struct virtual_item * vi, int first, int count)
+{
+ if (count)
+ BUG ();
+ return 0;
+}
+
+/* "unit count" of a stat data item: its body length (item minus header) */
+static int sd_unit_num (struct virtual_item * vi)
+{
+ return vi->vi_item_len - IH_SIZE;
+}
+
+
+/* debug dump of a stat data virtual item */
+static void sd_print_vi (struct virtual_item * vi)
+{
+ reiserfs_warning ("STATDATA, index %d, type 0x%x, %h\n",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+/* item handler table for stat data items; entry order must match
+** struct item_operations */
+struct item_operations stat_data_ops = {
+ sd_bytes_number,
+ sd_decrement_key,
+ sd_is_left_mergeable,
+ sd_print_item,
+ sd_check_item,
+
+ sd_create_vi,
+ sd_check_left,
+ sd_check_right,
+ sd_part_size,
+ sd_unit_num,
+ sd_print_vi
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// direct item functions
+//
+/* every byte of a direct item's body is file data */
+static int direct_bytes_number (struct item_head * ih, int block_size)
+{
+ return le16_to_cpu (ih->ih_item_len);
+}
+
+
+// FIXME: this should probably switch to indirect as well
+// FIXME: this should probably switch to indirect as well
+/* step the key offset back one byte; offset 0 means we have crossed
+** into the object's stat data */
+static void direct_decrement_key (struct cpu_key * key)
+{
+ cpu_key_k_offset_dec (key);
+ if (cpu_key_k_offset (key) == 0)
+ set_cpu_key_k_type (key, TYPE_STAT_DATA);
+}
+
+
+/* a direct item is mergeable with its left neighbor unless it starts
+** at the first byte of a block (offset within block == 1) */
+static int direct_is_left_mergeable (struct key * key, unsigned long bsize)
+{
+ int version = le_key_version (key);
+ return ((le_key_k_offset (version, key) & (bsize - 1)) != 1);
+}
+
+
+/* debug dump: print the direct item body as a quoted character string */
+static void direct_print_item (struct item_head * ih, char * item)
+{
+ int j = 0;
+
+// return;
+ printk ("\"");
+ while (j < ih->ih_item_len)
+ printk ("%c", item[j++]);
+ printk ("\"\n");
+}
+
+
+/* consistency-check stub for direct items (not yet implemented) */
+static void direct_check_item (struct item_head * ih, char * item)
+{
+ // FIXME: type something here!
+}
+
+
+/* initialize a virtual item for a direct item; no extra user area */
+static int direct_create_vi (struct virtual_node * vn,
+ struct virtual_item * vi,
+ int is_affected,
+ int insert_size)
+{
+ vi->vi_index = TYPE_DIRECT;
+ //vi->vi_type |= VI_TYPE_DIRECT;
+ return 0;
+}
+
+/* number of direct-item bytes that fit into 'free' space in the left
+** neighbor, rounded down to a multiple of 8 (presumably an alignment
+** requirement -- TODO confirm); -1 if nothing fits */
+static int direct_check_left (struct virtual_item * vi, int free,
+ int start_skip, int end_skip)
+{
+ int bytes;
+
+ bytes = free - free % 8;
+ return bytes ?: -1;
+}
+
+
+/* same fitting rule applies shifting right as shifting left */
+static int direct_check_right (struct virtual_item * vi, int free)
+{
+ return direct_check_left (vi, free, 0, 0);
+}
+
+/* the unit of a direct item is one byte, so 'count' units == count bytes */
+static int direct_part_size (struct virtual_item * vi, int first, int count)
+{
+ return count;
+}
+
+
+/* number of units (bytes) in the direct item's body */
+static int direct_unit_num (struct virtual_item * vi)
+{
+ return vi->vi_item_len - IH_SIZE;
+}
+
+
+/* debug dump of a direct virtual item */
+static void direct_print_vi (struct virtual_item * vi)
+{
+ reiserfs_warning ("DIRECT, index %d, type 0x%x, %h\n",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+/* item handler table for direct items; entry order must match
+** struct item_operations */
+struct item_operations direct_ops = {
+ direct_bytes_number,
+ direct_decrement_key,
+ direct_is_left_mergeable,
+ direct_print_item,
+ direct_check_item,
+
+ direct_create_vi,
+ direct_check_left,
+ direct_check_right,
+ direct_part_size,
+ direct_unit_num,
+ direct_print_vi
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// indirect item functions
+//
+
+/* file bytes addressed by an indirect item: one block per unformatted
+** node pointer */
+static int indirect_bytes_number (struct item_head * ih, int block_size)
+{
+ return le16_to_cpu (ih->ih_item_len) / UNFM_P_SIZE * block_size; //- get_ih_free_space (ih);
+}
+
+
+// decrease offset, if it becomes 0, change type to stat data
+// decrease offset, if it becomes 0, change type to stat data
+static void indirect_decrement_key (struct cpu_key * key)
+{
+ cpu_key_k_offset_dec (key);
+ if (cpu_key_k_offset (key) == 0)
+ set_cpu_key_k_type (key, TYPE_STAT_DATA);
+}
+
+
+// if it is not first item of the body, then it is mergeable
+// if it is not first item of the body, then it is mergeable
+static int indirect_is_left_mergeable (struct key * key, unsigned long bsize)
+{
+ int version = le_key_version (key);
+ return (le_key_k_offset (version, key) != 1);
+}
+
+
+// printing of indirect item
+/* begin a new run of block numbers for the run-length printout below */
+static void start_new_sequence (__u32 * start, int * len, __u32 new)
+{
+ *start = new;
+ *len = 1;
+}
+
+
+/* return 1 when 'new' does not extend the current run.  A run is
+** either consecutive block numbers or a stretch of zeros (holes);
+** start == INT_MAX is the "no run yet" sentinel and *len is not read */
+static int sequence_finished (__u32 start, int * len, __u32 new)
+{
+ if (start == INT_MAX)
+ return 1;
+
+ if (start == 0 && new == 0) {
+ (*len) ++;
+ return 0;
+ }
+ if (start != 0 && (start + *len) == new) {
+ (*len) ++;
+ return 0;
+ }
+ return 1;
+}
+
+/* print one run as "start" or "start(len)"; INT_MAX sentinel prints nothing */
+static void print_sequence (__u32 start, int len)
+{
+ if (start == INT_MAX)
+ return;
+
+ if (len == 1)
+ printk (" %d", start);
+ else
+ printk (" %d(%d)", start, len);
+}
+
+
+/* debug dump of an indirect item as run-length encoded block numbers.
+** prev starts at the INT_MAX sentinel, so 'num' is first written by
+** start_new_sequence before it is ever read. */
+static void indirect_print_item (struct item_head * ih, char * item)
+{
+ int j;
+ __u32 * unp, prev = INT_MAX;
+ int num;
+
+ unp = (__u32 *)item;
+
+ if (ih->ih_item_len % UNFM_P_SIZE)
+ printk ("indirect_print_item: invalid item len");
+
+ printk ("%d pointers\n[ ", (int)I_UNFM_NUM (ih));
+ for (j = 0; j < I_UNFM_NUM (ih); j ++) {
+ if (sequence_finished (prev, &num, unp[j])) {
+ print_sequence (prev, num);
+ start_new_sequence (&prev, &num, unp[j]);
+ }
+ }
+ print_sequence (prev, num);
+ printk ("]\n");
+}
+
+/* consistency-check stub for indirect items (not yet implemented) */
+static void indirect_check_item (struct item_head * ih, char * item)
+{
+ // FIXME: type something here!
+}
+
+
+/* initialize a virtual item for an indirect item; no extra user area */
+static int indirect_create_vi (struct virtual_node * vn,
+ struct virtual_item * vi,
+ int is_affected,
+ int insert_size)
+{
+ vi->vi_index = TYPE_INDIRECT;
+ //vi->vi_type |= VI_TYPE_INDIRECT;
+ return 0;
+}
+
+/* bytes of an indirect item that fit into 'free' space of the left
+** neighbor, rounded down to whole pointers; -1 if none fit */
+static int indirect_check_left (struct virtual_item * vi, int free,
+ int start_skip, int end_skip)
+{
+ int bytes;
+
+ bytes = free - free % UNFM_P_SIZE;
+ return bytes ?: -1;
+}
+
+
+/* same fitting rule applies shifting right as shifting left */
+static int indirect_check_right (struct virtual_item * vi, int free)
+{
+ return indirect_check_left (vi, free, 0, 0);
+}
+
+
+
+// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
+// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
+static int indirect_part_size (struct virtual_item * vi, int first, int units)
+{
+ // unit of indirect item is byte (yet)
+ return units;
+}
+
+/* number of units in the indirect item's body */
+static int indirect_unit_num (struct virtual_item * vi)
+{
+ // unit of indirect item is byte (yet)
+ return vi->vi_item_len - IH_SIZE;
+}
+
+/* debug dump of an indirect virtual item */
+static void indirect_print_vi (struct virtual_item * vi)
+{
+ reiserfs_warning ("INDIRECT, index %d, type 0x%x, %h\n",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+}
+
+/* item handler table for indirect items; entry order must match
+** struct item_operations */
+struct item_operations indirect_ops = {
+ indirect_bytes_number,
+ indirect_decrement_key,
+ indirect_is_left_mergeable,
+ indirect_print_item,
+ indirect_check_item,
+
+ indirect_create_vi,
+ indirect_check_left,
+ indirect_check_right,
+ indirect_part_size,
+ indirect_unit_num,
+ indirect_print_vi
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// direntry functions
+//
+
+
+/* directory items have no meaningful byte count; warn and return 0 */
+static int direntry_bytes_number (struct item_head * ih, int block_size)
+{
+ reiserfs_warning ("vs-16090: direntry_bytes_number: "
+ "bytes number is asked for direntry");
+ return 0;
+}
+
+/* step the key offset back one entry; offset 0 means we have crossed
+** into the object's stat data */
+static void direntry_decrement_key (struct cpu_key * key)
+{
+ cpu_key_k_offset_dec (key);
+ if (cpu_key_k_offset (key) == 0)
+ set_cpu_key_k_type (key, TYPE_STAT_DATA);
+}
+
+
+/* a directory item merges left unless it begins with "." (DOT_OFFSET),
+** i.e. unless it is the first item of the directory */
+static int direntry_is_left_mergeable (struct key * key, unsigned long bsize)
+{
+ if (le32_to_cpu (key->u.k_offset_v1.k_offset) == DOT_OFFSET)
+ return 0;
+ return 1;
+
+}
+
+
+/* debug dump of a directory item: one line per entry with its name
+** (quoted, truncated to the static namebuf), pointed-to key, hash,
+** generation number and hidden/visible status.  Not reentrant. */
+static void direntry_print_item (struct item_head * ih, char * item)
+{
+ int i;
+ int namelen;
+ struct reiserfs_de_head * deh;
+ char * name;
+ static char namebuf [80];
+
+
+ printk ("\n # %-15s%-30s%-15s%-15s%-15s\n", "Name", "Key of pointed object", "Hash", "Gen number", "Status");
+
+ deh = (struct reiserfs_de_head *)item;
+
+ for (i = 0; i < I_ENTRY_COUNT (ih); i ++, deh ++) {
+ /* entry name length is the gap to the previous entry's location
+ ** (entries are laid out back to front in the item body) */
+ namelen = (i ? ((deh - 1)->deh_location) : ih->ih_item_len) - deh->deh_location;
+ name = item + deh->deh_location;
+ if (name[namelen-1] == 0)
+ namelen = strlen (name);
+ namebuf[0] = '"';
+ if (namelen > sizeof (namebuf) - 3) {
+ /* too long: truncate; termination is done by hand since
+ ** strncpy does not guarantee it */
+ strncpy (namebuf + 1, name, sizeof (namebuf) - 3);
+ namebuf[sizeof (namebuf) - 2] = '"';
+ namebuf[sizeof (namebuf) - 1] = 0;
+ } else {
+ memcpy (namebuf + 1, name, namelen);
+ namebuf[namelen + 1] = '"';
+ namebuf[namelen + 2] = 0;
+ }
+
+ printk ("%d: %-15s%-15d%-15d%-15Ld%-15Ld(%s)\n",
+ i, namebuf,
+ deh->deh_dir_id, deh->deh_objectid,
+ GET_HASH_VALUE (deh_offset (deh)), GET_GENERATION_NUMBER ((deh_offset (deh))),
+ (de_hidden (deh)) ? "HIDDEN" : "VISIBLE");
+ }
+}
+
+
+/* consistency-check stub for directory items: walks the entry heads
+** but checks nothing yet */
+static void direntry_check_item (struct item_head * ih, char * item)
+{
+ int i;
+ struct reiserfs_de_head * deh;
+
+ // FIXME: type something here!
+ deh = (struct reiserfs_de_head *)item;
+ for (i = 0; i < I_ENTRY_COUNT (ih); i ++, deh ++) {
+ ;
+ }
+}
+
+
+
+/* flag: this virtual item is the first item of its directory (holds
+** "." and "..") */
+#define DIRENTRY_VI_FIRST_DIRENTRY_ITEM 1
+
+/* per-virtual-item scratch area for directory items: sizes of each
+** entry as they will exist after the pending balance operation */
+struct direntry_uarea {
+ int flags;
+ short entry_count;
+ short entry_sizes[1]; /* variable length, entry_count elements */
+};
+
+
+/*
+ * function returns old entry number in directory item in real node
+ * using new entry number in virtual item in virtual node */
+/*
+ * function returns old entry number in directory item in real node
+ * using new entry number in virtual item in virtual node.  For cut
+ * the virtual item has one entry less than the real one, for paste
+ * one more; entries before pos_in_item keep their numbers. */
+static inline int old_entry_num (int is_affected, int virtual_entry_num, int pos_in_item, int mode)
+{
+ if ( mode == M_INSERT || mode == M_DELETE)
+ return virtual_entry_num;
+
+ if (!is_affected)
+ /* cut or paste is applied to another item */
+ return virtual_entry_num;
+
+ if (virtual_entry_num < pos_in_item)
+ return virtual_entry_num;
+
+ if (mode == M_CUT)
+ return virtual_entry_num + 1;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (mode != M_PASTE || virtual_entry_num == 0)
+ reiserfs_panic (0, "vs-8015: old_entry_num: mode must be M_PASTE (mode = \'%c\'", mode);
+#endif
+
+ return virtual_entry_num - 1;
+}
+
+
+
+
+/* Create an array of sizes of directory entries for virtual
+ item. Return space used by an item. FIXME: no control over
+ consuming of space used by this item handler */
+/* Create an array of sizes of directory entries for virtual
+   item. Return space used by an item. FIXME: no control over
+   consuming of space used by this item handler */
+static int direntry_create_vi (struct virtual_node * vn,
+ struct virtual_item * vi,
+ int is_affected,
+ int insert_size)
+{
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+ int i, j;
+ int size = sizeof (struct direntry_uarea);
+ struct reiserfs_de_head * deh;
+
+ vi->vi_index = TYPE_DIRENTRY;
+
+ if (!(vi->vi_ih) || !vi->vi_item)
+ BUG ();
+
+
+ dir_u->flags = 0;
+ if (le_ih_k_offset (vi->vi_ih) == DOT_OFFSET)
+ dir_u->flags |= DIRENTRY_VI_FIRST_DIRENTRY_ITEM;
+
+ deh = (struct reiserfs_de_head *)(vi->vi_item);
+
+
+ /* virtual directory item have this amount of entry after */
+ dir_u->entry_count = ih_entry_count (vi->vi_ih) +
+ ((is_affected) ? ((vn->vn_mode == M_CUT) ? -1 :
+ (vn->vn_mode == M_PASTE ? 1 : 0)) : 0);
+
+ /* record each virtual entry's size, mapping back to the real entry
+ ** it comes from (entry bodies run back-to-front in the item) */
+ for (i = 0; i < dir_u->entry_count; i ++) {
+ j = old_entry_num (is_affected, i, vn->vn_pos_in_item, vn->vn_mode);
+ dir_u->entry_sizes[i] = (j ? le16_to_cpu (deh[j - 1].deh_location) : le16_to_cpu (vi->vi_ih->ih_item_len)) -
+ le16_to_cpu (deh[j].deh_location) + DEH_SIZE;
+ }
+
+ size += (dir_u->entry_count * sizeof (short));
+
+ /* set size of pasted entry */
+ if (is_affected && vn->vn_mode == M_PASTE)
+ dir_u->entry_sizes[vn->vn_pos_in_item] = insert_size;
+
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* compare total size of entries with item length */
+ {
+ int k, l;
+
+ l = 0;
+ for (k = 0; k < dir_u->entry_count; k ++)
+ l += dir_u->entry_sizes[k];
+
+ if (l + IH_SIZE != vi->vi_item_len +
+ ((is_affected && (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT)) ? insert_size : 0) ) {
+ reiserfs_panic (0, "vs-8025: set_entry_sizes: (mode==%c, insert_size==%d), invalid length of directory item",
+ vn->vn_mode, insert_size);
+ }
+ }
+#endif
+
+ return size;
+
+
+}
+
+
+//
+// return number of entries which may fit into specified amount of
+// free space, or -1 if free space is not enough even for 1 entry
+//
+/* return number of whole entries (counted from the start, after
+** skipping start_skip/end_skip) that fit into 'free' bytes of the
+** left neighbor, or -1 if not even one fits.  "." and ".." are never
+** split apart, and moving the entire item left is a bug. */
+static int direntry_check_left (struct virtual_item * vi, int free,
+ int start_skip, int end_skip)
+{
+ int i;
+ int entries = 0;
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+
+ for (i = start_skip; i < dir_u->entry_count - end_skip; i ++) {
+ if (dir_u->entry_sizes[i] > free)
+ /* i-th entry doesn't fit into the remaining free space */
+ break;
+
+ free -= dir_u->entry_sizes[i];
+ entries ++;
+ }
+
+ if (entries == dir_u->entry_count) {
+ /* fixed typo: was "free spze" */
+ printk ("free space %d, entry_count %d\n", free, dir_u->entry_count);
+ BUG ();
+ }
+
+ /* "." and ".." can not be separated from each other */
+ if (start_skip == 0 && (dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries < 2)
+ entries = 0;
+
+ return entries ?: -1;
+}
+
+
+/* return number of whole entries, counted from the item's tail, that
+** fit into 'free' bytes of the right neighbor, or -1 if none fit.
+** "." and ".." always stay in the first item. */
+static int direntry_check_right (struct virtual_item * vi, int free)
+{
+ int i;
+ int entries = 0;
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+
+ for (i = dir_u->entry_count - 1; i >= 0; i --) {
+ if (dir_u->entry_sizes[i] > free)
+ /* i-th entry doesn't fit into the remaining free space */
+ break;
+
+ free -= dir_u->entry_sizes[i];
+ entries ++;
+ }
+ if (entries == dir_u->entry_count)
+ BUG ();
+
+ /* "." and ".." can not be separated from each other */
+ if ((dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM) && entries > dir_u->entry_count - 2)
+ entries = dir_u->entry_count - 2;
+
+ return entries ?: -1;
+}
+
+
+/* sum of entry sizes between from-th and to-th entries including both edges */
+/* sum of entry sizes between from-th and to-th entries including both
+   edges: 'count' entries from the head when first == 0, from the tail
+   otherwise */
+static int direntry_part_size (struct virtual_item * vi, int first, int count)
+{
+ int i, retval;
+ int from, to;
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+
+ retval = 0;
+ if (first == 0)
+ from = 0;
+ else
+ from = dir_u->entry_count - count;
+ to = from + count - 1;
+
+ for (i = from; i <= to; i ++)
+ retval += dir_u->entry_sizes[i];
+
+ return retval;
+}
+
+/* the unit of a directory item is one entry */
+static int direntry_unit_num (struct virtual_item * vi)
+{
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+
+ return dir_u->entry_count;
+}
+
+
+
+/* debug dump of a directory virtual item and its per-entry sizes */
+static void direntry_print_vi (struct virtual_item * vi)
+{
+ int i;
+ struct direntry_uarea * dir_u = vi->vi_uarea;
+
+ reiserfs_warning ("DIRENTRY, index %d, type 0x%x, %h, flags 0x%x\n",
+ vi->vi_index, vi->vi_type, vi->vi_ih, dir_u->flags);
+ printk ("%d entries: ", dir_u->entry_count);
+ for (i = 0; i < dir_u->entry_count; i ++)
+ printk ("%d ", dir_u->entry_sizes[i]);
+ printk ("\n");
+}
+
+/* item handler table for directory items; entry order must match
+** struct item_operations */
+struct item_operations direntry_ops = {
+ direntry_bytes_number,
+ direntry_decrement_key,
+ direntry_is_left_mergeable,
+ direntry_print_item,
+ direntry_check_item,
+
+ direntry_create_vi,
+ direntry_check_left,
+ direntry_check_right,
+ direntry_part_size,
+ direntry_unit_num,
+ direntry_print_vi
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+//
+/* dispatch table indexed by item type; the compile guard pins the
+** numeric values of the TYPE_* constants to the array order below */
+#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
+ do not compile
+#endif
+
+struct item_operations * item_ops [4] = {
+ &stat_data_ops,
+ &indirect_ops,
+ &direct_ops,
+ &direntry_ops
+};
+
+
+
+
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
new file mode 100644
index 000000000..01a597156
--- /dev/null
+++ b/fs/reiserfs/journal.c
@@ -0,0 +1,3215 @@
+/*
+** Write ahead logging implementation copyright Chris Mason 2000
+**
+** The background commits make this code very interrelated, and
+** overly complex. I need to rethink things a bit....The major players:
+**
+** journal_begin -- call with the number of blocks you expect to log.
+** If the current transaction is too
+** old, it will block until the current transaction is
+** finished, and then start a new one.
+** Usually, your transaction will get joined in with
+** previous ones for speed.
+**
+** journal_join -- same as journal_begin, but won't block on the current
+** transaction regardless of age. Don't ever call
+** this. Ever. There are only two places it should be
+** called from, and they are both inside this file.
+**
+** journal_mark_dirty -- adds blocks into this transaction. clears any flags
+** that might make them get sent to disk
+** and then marks them BH_JDirty. Puts the buffer head
+** into the current transaction hash.
+**
+** journal_end -- if the current transaction is batchable, it does nothing
+** otherwise, it could do an async/synchronous commit, or
+** a full flush of all log and real blocks in the
+** transaction.
+**
+** flush_old_commits -- if the current transaction is too old, it is ended and
+** commit blocks are sent to disk. Forces commit blocks
+** to disk for all backgrounded commits that have been
+** around too long.
+** -- Note, if you call this as an immediate flush from
+** from within kupdate, it will ignore the immediate flag
+**
+** The commit thread -- a writer process for async commits. It allows
+** a process to request a log flush on a task queue.
+** the commit will happen once the commit thread wakes up.
+** The benefit here is the writer (with whatever
+** related locks it has) doesn't have to wait for the
+** log blocks to hit disk if it doesn't want to.
+*/
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/sched.h>
+#include <asm/semaphore.h>
+
+#include <linux/vmalloc.h>
+#include <linux/reiserfs_fs.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/locks.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+/* the number of mounted filesystems. This is used to decide when to
+** start and kill the commit thread
+*/
+static int reiserfs_mounted_fs_count = 0 ;
+
+/* wake this up when you add something to the commit thread task queue */
+DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_wait) ;
+
+/* wait on this if you need to be sure you task queue entries have been run */
+static DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_done) ;
+DECLARE_TASK_QUEUE(reiserfs_commit_thread_tq) ;
+
+#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit structs at 4k */
+
+/* cnode stat bits. Move these into reiserfs_fs.h */
+
+#define BLOCK_FREED 2 /* this block was freed, and can't be written. */
+#define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */
+
+#define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */
+
+/* flags for do_journal_end */
+#define FLUSH_ALL 1 /* flush commit and real blocks */
+#define COMMIT_NOW 2 /* end and commit this transaction */
+#define WAIT 4 /* wait for the log blocks to hit the disk*/
+
+/* state bits for the journal */
+#define WRITERS_BLOCKED 1 /* set when new writers not allowed */
+
+static int do_journal_end(struct reiserfs_transaction_handle *,struct super_block *,unsigned long nblocks,int flags) ;
+static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
+static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
+static int can_dirty(struct reiserfs_journal_cnode *cn) ;
+
+/* zero the current-transaction hash table of journal cnodes */
+static void init_journal_hash(struct super_block *p_s_sb) {
+ memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+}
+
+/*
+** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
+** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
+** more details.
+*/
+/*
+** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
+** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
+** more details.  NULL bh is tolerated and ignored.
+*/
+static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
+ if (bh) {
+ clear_bit(BH_Dirty, &bh->b_state) ;
+#if 0
+ if (bh->b_list != BUF_CLEAN) {
+ reiserfs_file_buffer(bh, BUF_CLEAN) ;
+ }
+#endif
+ }
+ return 0 ;
+}
+
+/* allocate one bitmap node (header plus a zeroed block-sized bitmap).
+** Returns NULL on allocation failure.  The static 'id' is a debug
+** serial number only (not protected against concurrent increment). */
+static struct reiserfs_bitmap_node *
+allocate_bitmap_node(struct super_block *p_s_sb) {
+ struct reiserfs_bitmap_node *bn ;
+ static int id = 0 ;
+
+ bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_BUFFER) ;
+ if (!bn) {
+ return NULL ;
+ }
+ bn->data = kmalloc(p_s_sb->s_blocksize, GFP_BUFFER) ;
+ if (!bn->data) {
+ kfree(bn) ;
+ return NULL ;
+ }
+ bn->id = id++ ;
+ memset(bn->data, 0, p_s_sb->s_blocksize) ;
+ INIT_LIST_HEAD(&bn->list) ;
+ return bn ;
+}
+
+/* take a bitmap node from the per-journal free list, or allocate a
+** fresh one.  On allocation failure it yields the CPU and retries
+** forever, so this cannot return NULL. */
+static struct reiserfs_bitmap_node *
+get_bitmap_node(struct super_block *p_s_sb) {
+ struct reiserfs_bitmap_node *bn = NULL;
+ struct list_head *entry = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
+
+ SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes++ ;
+repeat:
+
+ if(entry != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
+ bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
+ list_del(entry) ;
+ memset(bn->data, 0, p_s_sb->s_blocksize) ;
+ SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
+ return bn ;
+ }
+ bn = allocate_bitmap_node(p_s_sb) ;
+ if (!bn) {
+ /* out of memory: yield and retry until kmalloc succeeds */
+ current->policy = SCHED_YIELD ;
+ schedule() ;
+ goto repeat ;
+ }
+ return bn ;
+}
+/* return a bitmap node: free it outright if the free list is already
+** above REISERFS_MAX_BITMAP_NODES, otherwise cache it for reuse */
+static inline void free_bitmap_node(struct super_block *p_s_sb,
+ struct reiserfs_bitmap_node *bn) {
+ SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes-- ;
+ if (SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
+ kfree(bn->data) ;
+ kfree(bn) ;
+ } else {
+ list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
+ SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
+ }
+}
+
+/* pre-fill the free list with REISERFS_MIN_BITMAP_NODES nodes;
+** partial failure is fine, more are allocated on demand */
+static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
+ int i ;
+ struct reiserfs_bitmap_node *bn = NULL ;
+ for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
+ bn = allocate_bitmap_node(p_s_sb) ;
+ if (bn) {
+ list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
+ SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
+ } else {
+ break ; // this is ok, we'll try again when more are needed
+ }
+ }
+}
+
+/* mark 'block' in the list bitmap, allocating the covering bitmap
+** node lazily (one node covers blocksize*8 blocks) */
+static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
+ struct reiserfs_list_bitmap *jb) {
+ int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
+ int bit_nr = block % (p_s_sb->s_blocksize << 3) ;
+
+ if (!jb->bitmaps[bmap_nr]) {
+ jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
+ }
+ set_bit(bit_nr, jb->bitmaps[bmap_nr]->data) ;
+ return 0 ;
+}
+
+/* release every bitmap node a list bitmap holds (nodes go back to the
+** free list or are freed, see free_bitmap_node) */
+static void cleanup_bitmap_list(struct super_block *p_s_sb,
+ struct reiserfs_list_bitmap *jb) {
+ int i;
+ for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
+ if (jb->bitmaps[i]) {
+ free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
+ jb->bitmaps[i] = NULL ;
+ }
+ }
+}
+
+/*
+** only call this on FS unmount.
+*/
+/*
+** only call this on FS unmount: tears down all JOURNAL_NUM_BITMAPS
+** list bitmaps and the vmalloc'd pointer arrays behind them.
+*/
+static int free_list_bitmaps(struct super_block *p_s_sb,
+ struct reiserfs_list_bitmap *jb_array) {
+ int i ;
+ struct reiserfs_list_bitmap *jb ;
+ for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+ jb = jb_array + i ;
+ jb->journal_list = NULL ;
+ cleanup_bitmap_list(p_s_sb, jb) ;
+ vfree(jb->bitmaps) ;
+ jb->bitmaps = NULL ;
+ }
+ return 0;
+}
+
+/* drain and kfree every node on the journal's bitmap-node free list
+** (unmount path) */
+static int free_bitmap_nodes(struct super_block *p_s_sb) {
+ struct list_head *next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
+ struct reiserfs_bitmap_node *bn ;
+
+ while(next != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
+ bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
+ list_del(next) ;
+ kfree(bn->data) ;
+ kfree(bn) ;
+ next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
+ SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
+ }
+
+ return 0 ;
+}
+
+/*
+** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+** jb_array is the array to be filled in.
+*/
+/*
+** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+** jb_array is the array to be filled in.  Returns 0 on success,
+** -1 (after rolling back partial allocations) on failure.
+*/
+int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
+ struct reiserfs_list_bitmap *jb_array,
+ int bmap_nr) {
+ int i ;
+ int failed = 0 ;
+ struct reiserfs_list_bitmap *jb ;
+ int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;
+
+ for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+ jb = jb_array + i ;
+ jb->journal_list = NULL ;
+ jb->bitmaps = vmalloc( mem ) ;
+ if (!jb->bitmaps) {
+ reiserfs_warning("clm-2000, unable to allocate bitmaps for journal lists\n") ;
+ failed = 1;
+ break ;
+ }
+ memset(jb->bitmaps, 0, mem) ;
+ }
+ if (failed) {
+ free_list_bitmaps(p_s_sb, jb_array) ;
+ return -1 ;
+ }
+ return 0 ;
+}
+
+/*
+** find an available list bitmap. If you can't find one, flush a commit list
+** and try again
+*/
+/*
+** find an available list bitmap. If you can't find one, flush a commit list
+** and try again.  Round-robins through the bitmaps (up to 3 passes);
+** returns NULL only if the flush failed to release one.
+*/
+static struct reiserfs_list_bitmap *
+get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
+ int i,j ;
+ struct reiserfs_list_bitmap *jb = NULL ;
+
+ for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
+ i = SB_JOURNAL(p_s_sb)->j_list_bitmap_index ;
+ SB_JOURNAL(p_s_sb)->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
+ jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
+ if (SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
+ flush_commit_list(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list, 1) ;
+ if (!SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
+ break ;
+ }
+ } else {
+ break ;
+ }
+ }
+ if (jb->journal_list) { /* double check to make sure if flushed correctly */
+ return NULL ;
+ }
+ jb->journal_list = jl ;
+ return jb ;
+}
+
+/*
+** allocates a new chunk of X nodes, and links them all together as a list.
+** Uses the cnode->next and cnode->prev pointers
+** returns NULL on failure
+*/
+/*
+** allocates a new chunk of X nodes, and links them all together as a list.
+** Uses the cnode->next and cnode->prev pointers
+** returns NULL on failure
+*/
+static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
+ struct reiserfs_journal_cnode *head ;
+ int i ;
+ if (num_cnodes <= 0) {
+ return NULL ;
+ }
+ head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
+ if (!head) {
+ return NULL ;
+ }
+ memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
+ head[0].prev = NULL ;
+ head[0].next = head + 1 ;
+ for (i = 1 ; i < num_cnodes; i++) {
+ head[i].prev = head + (i - 1) ;
+ head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
+ }
+ head[num_cnodes -1].next = NULL ;
+ return head ;
+}
+
+/*
+** pulls a cnode off the free list, or returns NULL on failure
+*/
+/*
+** pulls a cnode off the free list, or returns NULL on failure.
+** Requires the BKL (checked via reiserfs_check_lock_depth).
+*/
+static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
+ struct reiserfs_journal_cnode *cn ;
+
+ reiserfs_check_lock_depth("get_cnode") ;
+
+ if (SB_JOURNAL(p_s_sb)->j_cnode_free <= 0) {
+ return NULL ;
+ }
+ SB_JOURNAL(p_s_sb)->j_cnode_used++ ;
+ SB_JOURNAL(p_s_sb)->j_cnode_free-- ;
+ cn = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
+ /* NOTE(review): counters were already adjusted above; if the free
+ ** list is unexpectedly empty here they are left skewed -- confirm
+ ** this inconsistency cannot occur */
+ if (!cn) {
+ return cn ;
+ }
+ if (cn->next) {
+ cn->next->prev = NULL ;
+ }
+ SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn->next ;
+ memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
+ return cn ;
+}
+
+/*
+** returns a cnode to the free list
+*/
+/*
+** returns a cnode to the head of the free list.  Requires the BKL.
+*/
+static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
+
+ reiserfs_check_lock_depth("free_cnode") ;
+
+ SB_JOURNAL(p_s_sb)->j_cnode_used-- ;
+ SB_JOURNAL(p_s_sb)->j_cnode_free++ ;
+ /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
+ cn->next = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
+ if (SB_JOURNAL(p_s_sb)->j_cnode_free_list) {
+ SB_JOURNAL(p_s_sb)->j_cnode_free_list->prev = cn ;
+ }
+ cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
+ SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn ;
+}
+
+/* drop the journal-prepared flag from a buffer */
+static int clear_prepared_bits(struct buffer_head *bh) {
+ clear_bit(BH_JPrepared, &bh->b_state) ;
+ return 0 ;
+}
+
+/* buffer is in current transaction (BH_JDirty); NULL bh reads as "no" */
+inline int buffer_journaled(struct buffer_head *bh) {
+ if (bh)
+ return test_bit(BH_JDirty, &bh->b_state) ;
+ else
+ return 0 ;
+}
+
+/* disk block was taken off free list before being in a finished transaction, or written to disk
+** journal_new blocks can be reused immediately, for any purpose
+*/
+inline int buffer_journal_new(struct buffer_head *bh) {
+ if (bh)
+ return test_bit(BH_JNew, &bh->b_state) ;
+ else
+ return 0 ;
+}
+
+/* tag a buffer as freshly allocated inside the current transaction */
+inline int mark_buffer_journal_new(struct buffer_head *bh) {
+ if (bh) {
+ set_bit(BH_JNew, &bh->b_state) ;
+ }
+ return 0 ;
+}
+
+/* remove a buffer from the current transaction (clear BH_JDirty) */
+inline int mark_buffer_not_journaled(struct buffer_head *bh) {
+ if (bh)
+ clear_bit(BH_JDirty, &bh->b_state) ;
+ return 0 ;
+}
+
+/* utility function to force a BUG if it is called without the big
+** kernel lock held. caller is the string printed just before calling BUG()
+*/
+/* utility function to force a BUG if it is called without the big
+** kernel lock held. caller is the string printed just before calling BUG().
+** No-op on uniprocessor builds where lock_depth is not meaningful.
+*/
+void reiserfs_check_lock_depth(char *caller) {
+#ifdef CONFIG_SMP
+ if (current->lock_depth < 0) {
+ printk("%s called without kernel lock held\n", caller) ;
+ show_reiserfs_locks() ;
+ BUG() ;
+ }
+#else
+ ;
+#endif
+}
+
+/* return a cnode with same dev, block number and size in table, or null if not found */
+/* return a cnode with same dev, block number and size in table, or null if not found.
+** (size is currently unused in the comparison) */
+static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct reiserfs_journal_cnode **table,
+ kdev_t dev,long bl,int size) {
+ struct reiserfs_journal_cnode *cn ;
+ cn = journal_hash(table, dev, bl) ;
+ while(cn) {
+ if ((cn->blocknr == bl) && (cn->dev == dev))
+ return cn ;
+ cn = cn->hnext ;
+ }
+ return (struct reiserfs_journal_cnode *)0 ;
+}
+
+/* returns a cnode with same size, block number and dev as bh in the current transaction hash. NULL if not found */
+/* returns a cnode with same size, block number and dev as bh in the current transaction hash. NULL if not found */
+static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
+ struct reiserfs_journal_cnode *cn ;
+ if (bh) {
+ cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_dev, bh->b_blocknr, bh->b_size) ;
+ }
+ else {
+ return (struct reiserfs_journal_cnode *)0 ;
+ }
+ return cn ;
+}
+
+/* once upon a time, the journal would deadlock. a lot. Now, when
+** CONFIG_REISERFS_CHECK is defined, anytime someone enters a
+** transaction, it pushes itself into this ugly static list, and pops
+** itself off before calling journal_end. I made a SysRq key to dump
+** the list, and tell me what the writers are when I'm deadlocked. */
+
+ /* are you depending on the compiler
+ to optimize this function away
+ everywhere it is called? It is not
+ obvious how this works, but I
+ suppose debugging code need not be
+ clear. -Hans */
+/* debug registry of active transaction writers (CONFIG_REISERFS_CHECK
+** only); push returns the slot index to pass to pop, or -1 if the
+** fixed 512-slot table is full.  Without the config option it is a
+** no-op returning 0. */
+static char *journal_writers[512] ;
+int push_journal_writer(char *s) {
+#ifdef CONFIG_REISERFS_CHECK
+ int i ;
+ for (i = 0 ; i < 512 ; i++) {
+ if (!journal_writers[i]) {
+ journal_writers[i] = s ;
+ return i ;
+ }
+ }
+ return -1 ;
+#else
+ return 0 ;
+#endif
+}
+/* release a writer slot obtained from push_journal_writer; negative
+** index (the push failure value) is ignored */
+int pop_journal_writer(int index) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (index >= 0) {
+ journal_writers[index] = NULL ;
+ }
+#endif
+ return 0 ;
+}
+
+/* print every registered journal writer (SysRq deadlock-debug aid) */
+int dump_journal_writers(void) {
+ int i ;
+ for (i = 0 ; i < 512 ; i++) {
+ if (journal_writers[i]) {
+ printk("%d: %s\n", i, journal_writers[i]) ;
+ }
+ }
+ return 0 ;
+}
+
+/*
+** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
+** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
+** being overwritten by a replay after crashing.
+**
+** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
+** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
+** sure you never write the block without logging it.
+**
+** next_zero_bit is a suggestion about the next block to try for find_forward.
+** when bl is rejected because it is set in a journal list bitmap, we search
+** for the next zero bit in the bitmap that rejected bl. Then, we return that
+** through next_zero_bit for find_forward to try.
+**
+** Just because we return something in next_zero_bit does not mean we won't
+** reject it on the next call to reiserfs_in_journal
+**
+*/
+int reiserfs_in_journal(struct super_block *p_s_sb, kdev_t dev,
+                        unsigned long bl, int size, int search_all,
+                        unsigned long *next_zero_bit) {
+  struct reiserfs_journal_cnode *cn ;
+  struct reiserfs_list_bitmap *jb ;
+  int i ;
+  /* s_blocksize << 3 == bits per bitmap block */
+  int bmap_nr = bl / (p_s_sb->s_blocksize << 3) ;
+  int bit_nr = bl % (p_s_sb->s_blocksize << 3) ;
+  int tmp_bit ;
+
+  *next_zero_bit = 0 ; /* always start this at zero. */
+
+  /* we aren't logging, all blocks are safe for reuse */
+  if (reiserfs_dont_log(p_s_sb)) {
+    return 0 ;
+  }
+
+  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
+  ** if we crash before the transaction that freed it commits, this transaction won't
+  ** have committed either, and the block will never be written
+  */
+  if (search_all) {
+    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+      jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
+      /* a set bit in a journal-list bitmap means a replay after a crash
+      ** could still overwrite this block: reject it, and hand the caller
+      ** the next clear bit of the same bitmap as a hint
+      */
+      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
+          test_bit(bit_nr, jb->bitmaps[bmap_nr]->data)) {
+        tmp_bit = find_next_zero_bit((unsigned long *)
+                                     (jb->bitmaps[bmap_nr]->data),
+                                     p_s_sb->s_blocksize << 3, bit_nr+1) ;
+        *next_zero_bit = bmap_nr * (p_s_sb->s_blocksize << 3) + tmp_bit ;
+        return 1 ;
+      }
+    }
+  }
+
+  /* is it in any old transactions? */
+  if (search_all && (cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, dev,bl,size))) {
+    return 1;
+  }
+
+  /* is it in the current transaction.  This should never happen */
+  if ((cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, dev,bl,size))) {
+    return 1;
+  }
+
+  /* safe for reuse */
+  return 0 ;
+}
+
+/* push cn onto the front of its hash bucket in table.
+** only the hnext/hprev links of the chain are touched.
+*/
+inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
+  struct reiserfs_journal_cnode *old_head ;
+
+  old_head = journal_hash(table, cn->dev, cn->blocknr) ;
+  cn->hprev = NULL ;
+  cn->hnext = old_head ;
+  if (old_head != NULL) {
+    old_head->hprev = cn ;
+  }
+  journal_hash(table, cn->dev, cn->blocknr) = cn ;
+}
+
+/* lock the current transaction: sleep until j_wlock drops to zero, then
+** take it.
+** NOTE(review): the read-test and the atomic_set are not one atomic
+** operation, and sleep_on() is racy on its own -- presumably callers are
+** serialized externally (big kernel lock, as was common in 2.4-era
+** filesystems); confirm before reusing this pattern.
+*/
+inline static void lock_journal(struct super_block *p_s_sb) {
+  while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)) > 0) {
+    sleep_on(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
+  }
+  atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 1) ;
+}
+
+/* unlock the current transaction and wake anyone sleeping in lock_journal */
+inline static void unlock_journal(struct super_block *p_s_sb) {
+  atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wlock)) ;
+  wake_up(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
+}
+
+/*
+** this used to be much more involved, and I'm keeping it just in case things get ugly again.
+** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
+** transaction.
+*/
+static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
+
+  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
+  if (jb) {
+    cleanup_bitmap_list(p_s_sb, jb) ;
+    /* clear the back-pointer through jb; the old code dereferenced
+    ** jl->j_list_bitmap unconditionally and would oops when it was NULL
+    */
+    jb->journal_list = NULL ;
+  }
+  jl->j_list_bitmap = NULL ;
+}
+
+/*
+** if this journal list still has commit blocks unflushed, send them to disk.
+**
+** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
+** Before the commit block can be written, every other log block must be safely on disk
+**
+*/
+static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
+  int i, count ;
+  int index = 0 ;
+  int bn ;
+  int retry_count = 0 ;
+  int orig_commit_left = 0 ;
+  struct buffer_head *tbh = NULL ;
+  struct reiserfs_journal_list *other_jl ;
+
+  reiserfs_check_lock_depth("flush_commit_list") ;
+
+  if (atomic_read(&jl->j_older_commits_done)) {
+    return 0 ;
+  }
+
+  /* before we can put our commit blocks on disk, we have to make sure everyone older than
+  ** us is on disk too
+  */
+  if (jl->j_len <= 0) {
+    return 0 ;
+  }
+  if (flushall) {
+    /* we _must_ make sure the transactions are committed in order.  Start with the
+    ** index after this one, wrap all the way around
+    */
+    index = (jl - SB_JOURNAL_LIST(s)) + 1 ;
+    for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
+      other_jl = SB_JOURNAL_LIST(s) + ( (index + i) % JOURNAL_LIST_COUNT) ;
+      if (other_jl && other_jl != jl && other_jl->j_len > 0 && other_jl->j_trans_id > 0 &&
+          other_jl->j_trans_id <= jl->j_trans_id && (atomic_read(&(jl->j_older_commits_done)) == 0)) {
+        flush_commit_list(s, other_jl, 0) ;
+      }
+    }
+  }
+
+  count = 0 ;
+  /* don't flush the commit list for the current transaction */
+  if (jl == ((SB_JOURNAL_LIST(s) + SB_JOURNAL_LIST_INDEX(s)))) {
+    return 0 ;
+  }
+
+  /* make sure nobody is trying to flush this one at the same time */
+  if (atomic_read(&(jl->j_commit_flushing))) {
+    sleep_on(&(jl->j_commit_wait)) ;
+    if (flushall) {
+      atomic_set(&(jl->j_older_commits_done), 1) ;
+    }
+    return 0 ;
+  }
+
+  /* this commit is done, exit */
+  if (atomic_read(&(jl->j_commit_left)) <= 0) {
+    if (flushall) {
+      atomic_set(&(jl->j_older_commits_done), 1) ;
+    }
+    return 0 ;
+  }
+  /* keeps others from flushing while we are flushing */
+  atomic_set(&(jl->j_commit_flushing), 1) ;
+
+
+  if (jl->j_len > JOURNAL_TRANS_MAX) {
+    /* the list number is a pointer difference, cast it for %d */
+    reiserfs_panic(s, "journal-512: flush_commit_list: length is %lu, list number %d\n", jl->j_len, (int)(jl - SB_JOURNAL_LIST(s))) ;
+    return 0 ;
+  }
+
+  orig_commit_left = atomic_read(&(jl->j_commit_left)) ;
+
+  /* start by checking all the commit blocks in this transaction.
+  ** Add anyone not on disk into tbh. Stop checking once commit_left <= 1, because that means we
+  ** only have the commit block left
+  */
+retry:
+  count = 0 ;
+  for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
+    bn = reiserfs_get_journal_block(s) + (jl->j_start+i) % JOURNAL_BLOCK_COUNT;
+    tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+
+    /* kill this sanity check */
+    if (count > (orig_commit_left + 2)) {
+      reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_left(%d)!\n", count, orig_commit_left) ;
+    }
+    if (tbh) {
+      if (buffer_locked(tbh)) { /* wait on it, redo it just to make sure */
+        wait_on_buffer(tbh) ;
+        if (!buffer_uptodate(tbh)) {
+          reiserfs_panic(s, "journal-584, buffer write failed\n") ;
+        }
+      }
+      if (buffer_dirty(tbh)) {
+        printk("journal-569: flush_commit_list, block already dirty!\n") ;
+      } else {
+        mark_buffer_dirty(tbh) ;
+      }
+      ll_rw_block(WRITE, 1, &tbh) ;
+      count++ ;
+      atomic_dec(&(tbh->b_count)) ; /* once for our get_hash */
+    }
+  }
+
+  /* wait on everyone in tbh before writing commit block*/
+  if (count > 0) {
+    for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 &&
+                 i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
+      bn = reiserfs_get_journal_block(s) + (jl->j_start + i) % JOURNAL_BLOCK_COUNT ;
+      tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+
+      wait_on_buffer(tbh) ;
+      if (!buffer_uptodate(tbh)) {
+        reiserfs_panic(s, "journal-601, buffer write failed\n") ;
+      }
+      atomic_dec(&(tbh->b_count)) ; /* once for our get_hash */
+      bforget(tbh) ; /* once due to original getblk in do_journal_end */
+      atomic_dec(&(jl->j_commit_left)) ;
+    }
+  }
+
+  if (atomic_read(&(jl->j_commit_left)) != 1) { /* just the commit_bh left, flush it without calling getblk for everyone */
+    if (retry_count < 2) {
+      printk("journal-582: flush_commit_list, not all log blocks on disk yet, trying again\n") ;
+      retry_count++ ;
+      goto retry;
+    }
+    /* atomic_read() returns an int, so %d (the old format string used %lu) */
+    reiserfs_panic(s, "journal-563: flush_commit_list: BAD, j_commit_left is %d, should be 1\n",
+                   atomic_read(&(jl->j_commit_left)));
+  }
+
+  mark_buffer_dirty(jl->j_commit_bh) ;
+  ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
+  wait_on_buffer(jl->j_commit_bh) ;
+  if (!buffer_uptodate(jl->j_commit_bh)) {
+    reiserfs_panic(s, "journal-615: buffer write failed\n") ;
+  }
+  atomic_dec(&(jl->j_commit_left)) ;
+  bforget(jl->j_commit_bh) ;
+
+  /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
+  cleanup_freed_for_journal_list(s, jl) ;
+
+  if (flushall) {
+    atomic_set(&(jl->j_older_commits_done), 1) ;
+  }
+  atomic_set(&(jl->j_commit_flushing), 0) ;
+  wake_up(&(jl->j_commit_wait)) ;
+  return 0 ;
+}
+
+/*
+** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
+** returns NULL if it can't find anything
+*/
+static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
+  kdev_t dev = cn->dev;
+  unsigned long blocknr = cn->blocknr ;
+  struct reiserfs_journal_cnode *cur ;
+
+  /* walk the hprev links away from cn, looking for the same block with
+  ** a journal list attached
+  */
+  for (cur = cn->hprev ; cur != NULL ; cur = cur->hprev) {
+    if (cur->dev == dev && cur->blocknr == blocknr && cur->jlist) {
+      return cur->jlist ;
+    }
+  }
+  return NULL ;
+}
+
+
+/*
+** once all the real blocks have been flushed, it is safe to remove them from the
+** journal list for this transaction.  Aside from freeing the cnode, this also allows the
+** block to be reallocated for data blocks if it had been deleted.
+*/
+static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
+  struct buffer_head fake_bh ;
+  struct reiserfs_journal_cnode *cn, *last ;
+  cn = jl->j_realblock ;
+
+  /* which is better, to lock once around the whole loop, or
+  ** to lock for each call to remove_from_journal_list?
+  */
+  while(cn) {
+    if (cn->blocknr != 0) {
+      if (debug) {
+        printk("block %lu, bh is %d, state %d\n", cn->blocknr, cn->bh ? 1: 0,
+               cn->state) ;
+      }
+      /* only b_blocknr and b_dev of the stack fake_bh are filled in --
+      ** presumably remove_from_journal_list keys off just those two
+      ** fields; verify before changing its signature
+      */
+      fake_bh.b_blocknr = cn->blocknr ;
+      fake_bh.b_dev = cn->dev ;
+      cn->state = 0 ;
+      remove_from_journal_list(p_s_sb, jl, &fake_bh, 1) ;
+    }
+    last = cn ;
+    cn = cn->next ;
+    free_cnode(p_s_sb, last) ;
+  }
+  jl->j_realblock = NULL ;
+}
+
+/*
+** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
+** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
+** releasing blocks in this transaction for reuse as data blocks.
+** called by flush_journal_list, before it calls remove_all_from_journal_list
+**
+*/
+static int update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
+  struct reiserfs_journal_header *jh ;
+  if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
+    /* a previous header write may still be in flight */
+    if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) {
+      wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+      if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
+        reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ;
+      }
+    }
+    /* update the in-core copy, then the on-disk header (little endian) */
+    SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
+    SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ;
+    jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
+    jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
+    jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
+    jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
+    /* write synchronously: per the comment above, blocks must not be
+    ** reused until the header is safely on disk
+    */
+    set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ;
+    ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+    wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+    if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
+      reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ;
+    }
+  }
+  return 0 ;
+}
+
+/*
+** flush any and all journal lists older than you are
+** can only be called from flush_journal_list
+*/
+static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, unsigned long trans_id) {
+  int i ;
+  int my_index = jl - SB_JOURNAL_LIST(p_s_sb) ;
+  struct reiserfs_journal_list *other_jl ;
+
+  for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
+    other_jl = SB_JOURNAL_LIST(p_s_sb) + ((my_index + i) % JOURNAL_LIST_COUNT) ;
+    if (!other_jl || other_jl == jl) {
+      continue ;
+    }
+    /* only lists holding a real transaction older than trans_id */
+    if (other_jl->j_len > 0 && other_jl->j_trans_id > 0 &&
+        other_jl->j_trans_id < trans_id) {
+      flush_journal_list(p_s_sb, other_jl, 0) ; /* do not flush all */
+    }
+  }
+  return 0 ;
+}
+
+/* end_io handler for buffers the journal submits itself: warn if a buffer
+** that is still journaled hit the disk, then do the usual
+** uptodate+unlock completion.
+*/
+static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
+  if (buffer_journaled(bh)) {
+    /* b_blocknr is an unsigned long; the old format string used %u */
+    reiserfs_warning("clm-2084: pinned buffer %lu:%s sent to disk\n",
+                     bh->b_blocknr, kdevname(bh->b_dev)) ;
+  }
+  mark_buffer_uptodate(bh, uptodate) ;
+  unlock_buffer(bh) ;
+}
+/* write one journaled buffer through our private end_io handler.
+** the buffer is locked here and unlocked by reiserfs_end_buffer_io_sync
+** on completion.  BH_Dirty is cleared before submit_bh -- presumably so
+** bdflush/kupdate won't also pick the buffer up; confirm against the
+** rest of the journal code.
+*/
+static void submit_logged_buffer(struct buffer_head *bh) {
+  lock_buffer(bh) ;
+  bh->b_end_io = reiserfs_end_buffer_io_sync ;
+  mark_buffer_notjournal_new(bh) ;
+  clear_bit(BH_Dirty, &bh->b_state) ;
+  submit_bh(WRITE, bh) ;
+}
+
+/* flush a journal list, both commit and real blocks
+**
+** always set flushall to 1, unless you are calling from inside
+** flush_journal_list
+**
+** IMPORTANT.  This can only be called while there are no journal writers,
+** and the journal is locked.  That means it can only be called from
+** do_journal_end, or by journal_release
+*/
+static int flush_journal_list(struct super_block *s,
+                              struct reiserfs_journal_list *jl, int flushall) {
+  struct reiserfs_journal_list *pjl ;
+  struct reiserfs_journal_cnode *cn, *last ;
+  int count ;
+  int was_jwait = 0 ;
+  int was_dirty = 0 ;
+  struct buffer_head *saved_bh ;
+  unsigned long j_len_saved = jl->j_len ;
+
+  if (j_len_saved <= 0) {
+    return 0 ;
+  }
+
+  if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) {
+    reiserfs_warning("clm-2048: flush_journal_list called with wcount %d\n",
+                     atomic_read(&SB_JOURNAL(s)->j_wcount)) ;
+  }
+  /* if someone is getting the commit list, we must wait for them */
+  while (atomic_read(&(jl->j_commit_flushing))) {
+    sleep_on(&(jl->j_commit_wait)) ;
+  }
+  /* if someone is flushing this list, we must wait for them */
+  while (atomic_read(&(jl->j_flushing))) {
+    sleep_on(&(jl->j_flush_wait)) ;
+  }
+
+  /* this list is now ours, we can change anything we want */
+  atomic_set(&(jl->j_flushing), 1) ;
+
+  count = 0 ;
+  if (j_len_saved > JOURNAL_TRANS_MAX) {
+    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ;
+    atomic_dec(&(jl->j_flushing)) ;
+    return 0 ;
+  }
+
+  /* if all the work is already done, get out of here */
+  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
+      atomic_read(&(jl->j_commit_left)) <= 0) {
+    goto flush_older_and_return ;
+  }
+
+  /* start by putting the commit list on disk.  This will also flush
+  ** the commit lists of any older transactions
+  */
+  flush_commit_list(s, jl, 1) ;
+
+  /* are we done now? */
+  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
+      atomic_read(&(jl->j_commit_left)) <= 0) {
+    goto flush_older_and_return ;
+  }
+
+  /* loop through each cnode, see if we need to write it,
+  ** or wait on a more recent transaction, or just ignore it
+  */
+  if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) {
+    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
+  }
+  cn = jl->j_realblock ;
+  while(cn) {
+    was_jwait = 0 ;
+    was_dirty = 0 ;
+    saved_bh = NULL ;
+    /* blocknr of 0 is no longer in the hash, ignore it */
+    if (cn->blocknr == 0) {
+      goto free_cnode ;
+    }
+    pjl = find_newer_jl_for_cn(cn) ;
+    /* the order is important here.  We check pjl to make sure we
+    ** don't clear BH_JDirty_wait if we aren't the one writing this
+    ** block to disk
+    */
+    if (!pjl && cn->bh) {
+      saved_bh = cn->bh ;
+
+      /* we do this to make sure nobody releases the buffer while
+      ** we are working with it
+      */
+      atomic_inc(&(saved_bh->b_count)) ;
+
+      if (buffer_journal_dirty(saved_bh)) {
+        was_jwait = 1 ;
+        mark_buffer_notjournal_dirty(saved_bh) ;
+        /* brelse the inc from journal_mark_dirty */
+        atomic_dec(&(saved_bh->b_count)) ;
+      }
+      if (can_dirty(cn)) {
+        was_dirty = 1 ;
+      }
+    }
+
+    /* if someone has this block in a newer transaction, just make
+    ** sure they are commited, and don't try writing it to disk
+    */
+    if (pjl) {
+      flush_commit_list(s, pjl, 1) ;
+      goto free_cnode ;
+    }
+
+    /* bh == NULL when the block got to disk on its own, OR,
+    ** the block got freed in a future transaction
+    */
+    if (saved_bh == NULL) {
+      goto free_cnode ;
+    }
+
+    /* this should never happen.  kupdate_one_transaction has this list
+    ** locked while it works, so we should never see a buffer here that
+    ** is not marked JDirty_wait
+    */
+    if ((!was_jwait) && !buffer_locked(saved_bh)) {
+      printk("journal-813: BAD! buffer %lu %cdirty %cjwait, not in a newer tranasction\n", saved_bh->b_blocknr,
+             was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
+    }
+    /* kupdate_one_transaction waits on the buffers it is writing, so we
+    ** should never see locked buffers here
+    */
+    if (buffer_locked(saved_bh)) {
+      printk("clm-2083: locked buffer %lu in flush_journal_list\n",
+             saved_bh->b_blocknr) ;
+      wait_on_buffer(saved_bh) ;
+      if (!buffer_uptodate(saved_bh)) {
+        reiserfs_panic(s, "journal-923: buffer write failed\n") ;
+      }
+    }
+    if (was_dirty) {
+      /* we inc again because saved_bh gets decremented at free_cnode */
+      atomic_inc(&(saved_bh->b_count)) ;
+      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
+      submit_logged_buffer(saved_bh) ;
+      count++ ;
+    } else {
+      printk("clm-2082: Unable to flush buffer %lu in flush_journal_list\n",
+             saved_bh->b_blocknr) ;
+    }
+free_cnode:
+    last = cn ;
+    cn = cn->next ;
+    if (saved_bh) {
+      /* we incremented this to keep others from taking the buffer head away */
+      atomic_dec(&(saved_bh->b_count));
+      if (atomic_read(&(saved_bh->b_count)) < 0) {
+        /* this printk used to lack its newline, garbling the next message */
+        printk("journal-945: saved_bh->b_count < 0\n") ;
+      }
+    }
+  }
+  if (count > 0) {
+    /* second pass: wait on everything submitted above */
+    cn = jl->j_realblock ;
+    while(cn) {
+      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
+        if (!cn->bh) {
+          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
+        }
+        wait_on_buffer(cn->bh) ;
+        if (!cn->bh) {
+          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
+        }
+        if (!buffer_uptodate(cn->bh)) {
+          reiserfs_panic(s, "journal-949: buffer write failed\n") ;
+        }
+        refile_buffer(cn->bh) ;
+        brelse(cn->bh) ;
+      }
+      cn = cn->next ;
+    }
+  }
+
+flush_older_and_return:
+  /* before we can update the journal header block, we _must_ flush all
+  ** real blocks from all older transactions to disk.  This is because
+  ** once the header block is updated, this transaction will not be
+  ** replayed after a crash
+  */
+  if (flushall) {
+    flush_older_journal_lists(s, jl, jl->j_trans_id) ;
+  }
+
+  /* before we can remove everything from the hash tables for this
+  ** transaction, we must make sure it can never be replayed
+  **
+  ** since we are only called from do_journal_end, we know for sure there
+  ** are no allocations going on while we are flushing journal lists.  So,
+  ** we only need to update the journal header block for the last list
+  ** being flushed
+  */
+  if (flushall) {
+    update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % JOURNAL_BLOCK_COUNT, jl->j_trans_id) ;
+  }
+  remove_all_from_journal_list(s, jl, 0) ;
+  jl->j_len = 0 ;
+  atomic_set(&(jl->j_nonzerolen), 0) ;
+  jl->j_start = 0 ;
+  jl->j_realblock = NULL ;
+  jl->j_commit_bh = NULL ;
+  jl->j_trans_id = 0 ;
+  atomic_dec(&(jl->j_flushing)) ;
+  wake_up(&(jl->j_flush_wait)) ;
+  return 0 ;
+}
+
+
+/* flush the dirty, flushable buffers of one journal list and account for
+** the ones that reached disk.  Two passes over j_realblock: run 0 submits
+** buffers, run 1 waits on them and does the accounting.  Returns non-zero
+** when some buffer could not be sent yet (someone else was logging it).
+*/
+static int kupdate_one_transaction(struct super_block *s,
+                                   struct reiserfs_journal_list *jl)
+{
+  struct reiserfs_journal_list *pjl ; /* previous list for this cn */
+  struct reiserfs_journal_cnode *cn, *walk_cn ;
+  unsigned long blocknr ;
+  int run = 0 ;
+  int orig_trans_id = jl->j_trans_id ;
+  struct buffer_head *saved_bh ;
+  int ret = 0 ;
+
+  /* if someone is getting the commit list, we must wait for them */
+  while (atomic_read(&(jl->j_commit_flushing))) {
+    sleep_on(&(jl->j_commit_wait)) ;
+  }
+  /* if someone is flushing this list, we must wait for them */
+  while (atomic_read(&(jl->j_flushing))) {
+    sleep_on(&(jl->j_flush_wait)) ;
+  }
+  /* was it flushed while we slept? */
+  if (jl->j_len <= 0 || jl->j_trans_id != orig_trans_id) {
+    return 0 ;
+  }
+
+  /* this list is now ours, we can change anything we want */
+  atomic_set(&(jl->j_flushing), 1) ;
+
+loop_start:
+  cn = jl->j_realblock ;
+  while(cn) {
+    saved_bh = NULL ;
+    /* if the blocknr == 0, this has been cleared from the hash,
+    ** skip it
+    */
+    if (cn->blocknr == 0) {
+      goto next ;
+    }
+    /* look for a more recent transaction that logged this
+    ** buffer.  Only the most recent transaction with a buffer in
+    ** it is allowed to send that buffer to disk
+    */
+    pjl = find_newer_jl_for_cn(cn) ;
+    if (run == 0 && !pjl && cn->bh && buffer_journal_dirty(cn->bh) &&
+        can_dirty(cn))
+    {
+      if (!test_bit(BH_JPrepared, &cn->bh->b_state)) {
+        set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
+        submit_logged_buffer(cn->bh) ;
+      } else {
+        /* someone else is using this buffer.  We can't
+        ** send it to disk right now because they might
+        ** be changing/logging it.
+        */
+        ret = 1 ;
+      }
+    } else if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
+      clear_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
+      if (!pjl && cn->bh) {
+        wait_on_buffer(cn->bh) ;
+      }
+      /* check again, someone could have logged while we scheduled */
+      pjl = find_newer_jl_for_cn(cn) ;
+
+      /* before the JDirty_wait bit is set, the
+      ** buffer is added to the hash list.  So, if we are
+      ** run in the middle of a do_journal_end, we will notice
+      ** if this buffer was logged and added from the latest
+      ** transaction.  In this case, we don't want to decrement
+      ** b_count
+      */
+      if (!pjl && cn->bh && buffer_journal_dirty(cn->bh)) {
+        blocknr = cn->blocknr ;
+        walk_cn = cn ;
+        saved_bh= cn->bh ;
+        /* update all older transactions to show this block
+        ** was flushed
+        */
+        mark_buffer_notjournal_dirty(cn->bh) ;
+        while(walk_cn) {
+          if (walk_cn->bh && walk_cn->blocknr == blocknr &&
+              walk_cn->dev == cn->dev) {
+            if (walk_cn->jlist) {
+              atomic_dec(&(walk_cn->jlist->j_nonzerolen)) ;
+            }
+            walk_cn->bh = NULL ;
+          }
+          walk_cn = walk_cn->hnext ;
+        }
+        if (atomic_read(&saved_bh->b_count) < 1) {
+          reiserfs_warning("clm-2081: bad count on %lu\n",
+                           saved_bh->b_blocknr) ;
+        }
+        brelse(saved_bh) ;
+      }
+    }
+    /*
+    ** if the more recent transaction is committed to the log,
+    ** this buffer can be considered flushed.  Decrement our
+    ** counters to reflect one less buffer that needs writing.
+    **
+    ** note, this relies on all of the above code being
+    ** schedule free once pjl comes back non-null.
+    */
+    if (pjl && cn->bh && atomic_read(&pjl->j_commit_left) == 0) {
+      atomic_dec(&cn->jlist->j_nonzerolen) ;
+      cn->bh = NULL ;
+    }
+next:
+    cn = cn->next ;
+  }
+  /* the first run through the loop sends all the dirty buffers to
+  ** ll_rw_block.
+  ** the second run through the loop does all the accounting
+  */
+  if (run++ == 0) {
+    goto loop_start ;
+  }
+
+  atomic_set(&(jl->j_flushing), 0) ;
+  wake_up(&(jl->j_flush_wait)) ;
+  return ret ;
+}
+/* since we never give dirty buffers to bdflush/kupdate, we have to
+** flush them ourselves.  This runs through the journal lists, finds
+** old metadata in need of flushing and sends it to disk.
+** this does not end transactions, commit anything, or free
+** cnodes.
+**
+** returns non-zero when some journal list still has unflushed buffers
+** (the old comment claimed a transaction id was returned; it never was)
+*/
+static unsigned long reiserfs_journal_kupdate(struct super_block *s) {
+  struct reiserfs_journal_list *jl ;
+  int i ;
+  int start ;
+  int ret = 0 ;
+
+  start = SB_JOURNAL_LIST_INDEX(s) ;
+
+  /* safety check to prevent flush attempts during a mount */
+  if (start < 0) {
+    return 0 ;
+  }
+  /* walk every list once, starting just past the current one */
+  i = (start + 1) % JOURNAL_LIST_COUNT ;
+  while(i != start) {
+    jl = SB_JOURNAL_LIST(s) + i ;
+    if (jl->j_len > 0 &&
+        atomic_read(&(jl->j_nonzerolen)) > 0 &&
+        atomic_read(&(jl->j_commit_left)) == 0) {
+
+      /* never touch the transaction that is still being built */
+      if (jl->j_trans_id == SB_JOURNAL(s)->j_trans_id) {
+        break ;
+      }
+      /* if ret was already 1, we want to preserve that */
+      ret |= kupdate_one_transaction(s, jl) ;
+    }
+    if (atomic_read(&(jl->j_nonzerolen)) > 0) {
+      ret |= 1 ;
+    }
+    i = (i + 1) % JOURNAL_LIST_COUNT ;
+  }
+  return ret ;
+}
+
+/*
+** removes any nodes in table with name block and dev as bh.
+** only touchs the hnext and hprev pointers.
+*/
+void remove_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_list *jl,struct buffer_head *bh,
+                         int remove_freed){
+  struct reiserfs_journal_cnode *cur ;
+  struct reiserfs_journal_cnode **head ;
+
+  if (!bh)
+    return ;
+
+  /* journal_hash yields an lvalue, so taking its address can never give
+  ** NULL; the old `if (!head) return' test was dead code and is gone
+  */
+  head= &(journal_hash(table, bh->b_dev, bh->b_blocknr)) ;
+  cur = *head ;
+  while(cur) {
+    /* match on block+dev, restricted to jl's entries when jl is given,
+    ** and skip BLOCK_FREED entries unless remove_freed is set
+    */
+    if (cur->blocknr == bh->b_blocknr && cur->dev == bh->b_dev && (jl == NULL || jl == cur->jlist) &&
+        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
+      /* unlink cur from the chain */
+      if (cur->hnext) {
+        cur->hnext->hprev = cur->hprev ;
+      }
+      if (cur->hprev) {
+        cur->hprev->hnext = cur->hnext ;
+      } else {
+        *head = cur->hnext ;
+      }
+      cur->blocknr = 0 ;
+      cur->dev = 0 ;
+      cur->state = 0 ;
+      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
+        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
+      cur->bh = NULL ;
+      cur->jlist = NULL ;
+    }
+    cur = cur->hnext ;
+  }
+}
+
+/* release all journal memory at unmount time.
+** ordering matters: bitmap nodes are freed only after the list bitmaps
+** that reference them.
+*/
+static void free_journal_ram(struct super_block *p_s_sb) {
+  vfree(SB_JOURNAL(p_s_sb)->j_cnode_free_orig) ;
+  free_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap) ;
+  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
+  if (SB_JOURNAL(p_s_sb)->j_header_bh) {
+    brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+  }
+  vfree(SB_JOURNAL(p_s_sb)) ;
+}
+
+/*
+** call on unmount.  Only set error to 1 if you haven't made your way out
+** of read_super() yet.  Any other caller must keep error at 0.
+*/
+static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
+  struct reiserfs_transaction_handle myth ;
+
+  /* we only want to flush out transactions if we were called with error == 0
+  */
+  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
+    /* end the current trans */
+    do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;
+
+    /* make sure something gets logged to force our way into the flush code */
+    journal_join(&myth, p_s_sb, 1) ;
+    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+    journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+    do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
+  }
+
+  /* we decrement before we wake up, because the commit thread dies off
+  ** when it has been woken up and the count is <= 0
+  */
+  reiserfs_mounted_fs_count-- ;
+  wake_up(&reiserfs_commit_thread_wait) ;
+  /* wait for the commit thread to acknowledge before freeing journal ram */
+  sleep_on(&reiserfs_commit_thread_done) ;
+
+  free_journal_ram(p_s_sb) ;
+
+  return 0 ;
+}
+
+/*
+** call on unmount.  flush all journal trans, release all alloc'd ram
+** (thin wrapper around do_journal_release with error == 0)
+*/
+int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
+  return do_journal_release(th, p_s_sb, 0) ;
+}
+/*
+** only call from an error condition inside reiserfs_read_super!
+** (error == 1 skips the transaction flush and just frees journal ram)
+*/
+int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
+  return do_journal_release(th, p_s_sb, 1) ;
+}
+
+/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
+static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
+                                       struct reiserfs_journal_commit *commit) {
+  /* the two halves of a transaction must agree on the transaction id */
+  if (le32_to_cpu(commit->j_trans_id) != le32_to_cpu(desc->j_trans_id)) {
+    return 1 ;
+  }
+  /* ...and on the length, which must also be sane */
+  if (le32_to_cpu(commit->j_len) != le32_to_cpu(desc->j_len)) {
+    return 1 ;
+  }
+  if (le32_to_cpu(commit->j_len) > JOURNAL_TRANS_MAX ||
+      le32_to_cpu(commit->j_len) <= 0) {
+    return 1 ;
+  }
+  return 0 ;
+}
+/* returns 0 if it did not find a description block
+** returns -1 if it found a corrupt commit block
+** returns 1 if both desc and commit were valid
+*/
+static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
+  struct reiserfs_journal_desc *desc ;
+  struct reiserfs_journal_commit *commit ;
+  struct buffer_head *c_bh ;
+  unsigned long offset ;
+
+  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+  /* a description block carries a non-zero length and the magic string */
+  if (le32_to_cpu(desc->j_len) > 0 && !memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8)) {
+    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && le32_to_cpu(desc->j_trans_id) > *oldest_invalid_trans_id) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
+                     "is valid returning because trans_id %d is greater than "
+                     "oldest_invalid %lu\n", le32_to_cpu(desc->j_trans_id),
+                     *oldest_invalid_trans_id);
+      return 0 ;
+    }
+    if (newest_mount_id && *newest_mount_id > le32_to_cpu(desc->j_mount_id)) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
+                     "is valid returning because mount_id %d is less than "
+                     "newest_mount_id %lu\n", desc->j_mount_id,
+                     *newest_mount_id) ;
+      return -1 ;
+    }
+    offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
+
+    /* ok, we have a journal description block, lets see if the transaction was valid */
+    c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT),
+                 p_s_sb->s_blocksize) ;
+    if (!c_bh)
+      return 0 ;
+    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
+                     "journal_transaction_is_valid, commit offset %ld had bad "
+                     "time %d or length %d\n",
+                     c_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+                     le32_to_cpu(commit->j_trans_id),
+                     le32_to_cpu(commit->j_len));
+      brelse(c_bh) ;
+      /* record this trans_id as invalid so later scans can short-circuit */
+      if (oldest_invalid_trans_id)
+        *oldest_invalid_trans_id = le32_to_cpu(desc->j_trans_id) ;
+      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
+                     "transaction_is_valid setting oldest invalid trans_id "
+                     "to %d\n", le32_to_cpu(desc->j_trans_id)) ;
+      return -1;
+    }
+    brelse(c_bh) ;
+    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
+                   "transaction start offset %lu, len %d id %d\n",
+                   d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+                   le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_trans_id)) ;
+    return 1 ;
+  } else {
+    return 0 ;
+  }
+}
+
+/* release num buffer heads from the heads array */
+static void brelse_array(struct buffer_head **heads, int num) {
+  struct buffer_head **end = heads + num ;
+  while (heads < end) {
+    brelse(*heads++) ;
+  }
+}
+
+/*
+** given the start, and values for the oldest acceptable transactions,
+** this either reads in and replays a transaction, or returns because the transaction
+** is invalid, or too old.
+*/
+static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
+ unsigned long oldest_trans_id, unsigned long newest_mount_id) {
+ struct reiserfs_journal_desc *desc ;
+ struct reiserfs_journal_commit *commit ;
+ unsigned long trans_id = 0 ;
+ struct buffer_head *c_bh ;
+ struct buffer_head *d_bh ;
+ struct buffer_head **log_blocks = NULL ;
+ struct buffer_head **real_blocks = NULL ;
+ unsigned long trans_offset ;
+ int i;
+
+ d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+ if (!d_bh)
+ return 1 ;
+ desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+ trans_offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
+ "journal_read_transaction, offset %lu, len %d mount_id %d\n",
+ d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_mount_id)) ;
+ if (le32_to_cpu(desc->j_trans_id) < oldest_trans_id) {
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
+ "journal_read_trans skipping because %lu is too old\n",
+ cur_dblock - reiserfs_get_journal_block(p_s_sb)) ;
+ brelse(d_bh) ;
+ return 1 ;
+ }
+ if (le32_to_cpu(desc->j_mount_id) != newest_mount_id) {
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
+ "journal_read_trans skipping because %d is != "
+ "newest_mount_id %lu\n", le32_to_cpu(desc->j_mount_id),
+ newest_mount_id) ;
+ brelse(d_bh) ;
+ return 1 ;
+ }
+ c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT),
+ p_s_sb->s_blocksize) ;
+ if (!c_bh) {
+ brelse(d_bh) ;
+ return 1 ;
+ }
+ commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+ if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
+ "commit offset %ld had bad time %d or length %d\n",
+ c_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ le32_to_cpu(commit->j_trans_id), le32_to_cpu(commit->j_len));
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ return 1;
+ }
+ trans_id = le32_to_cpu(desc->j_trans_id) ;
+ /* now we know we've got a good transaction, and it was inside the valid time ranges */
+ log_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
+ real_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
+ if (!log_blocks || !real_blocks) {
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ kfree(log_blocks) ;
+ kfree(real_blocks) ;
+ reiserfs_warning("journal-1169: kmalloc failed, unable to mount FS\n") ;
+ return -1 ;
+ }
+ /* get all the buffer heads */
+ for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
+ log_blocks[i] = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT, p_s_sb->s_blocksize);
+ if (i < JOURNAL_TRANS_HALF) {
+ real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(desc->j_realblock[i]), p_s_sb->s_blocksize) ;
+ } else {
+ real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF]), p_s_sb->s_blocksize) ;
+ }
+ if (real_blocks[i]->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
+ real_blocks[i]->b_blocknr < (reiserfs_get_journal_block(p_s_sb)+JOURNAL_BLOCK_COUNT)) {
+ reiserfs_warning("journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block\n") ;
+ brelse_array(log_blocks, i) ;
+ brelse_array(real_blocks, i) ;
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ kfree(log_blocks) ;
+ kfree(real_blocks) ;
+ return -1 ;
+ }
+ }
+ /* read in the log blocks, memcpy to the corresponding real block */
+ ll_rw_block(READ, le32_to_cpu(desc->j_len), log_blocks) ;
+ for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
+ wait_on_buffer(log_blocks[i]) ;
+ if (!buffer_uptodate(log_blocks[i])) {
+ reiserfs_warning("journal-1212: REPLAY FAILURE fsck required! buffer write failed\n") ;
+ brelse_array(log_blocks + i, le32_to_cpu(desc->j_len) - i) ;
+ brelse_array(real_blocks, le32_to_cpu(desc->j_len)) ;
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ kfree(log_blocks) ;
+ kfree(real_blocks) ;
+ return -1 ;
+ }
+ memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
+ mark_buffer_uptodate(real_blocks[i], 1) ;
+ brelse(log_blocks[i]) ;
+ }
+ /* flush out the real blocks */
+ for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
+ set_bit(BH_Dirty, &(real_blocks[i]->b_state)) ;
+ ll_rw_block(WRITE, 1, real_blocks + i) ;
+ }
+ for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
+ wait_on_buffer(real_blocks[i]) ;
+ if (!buffer_uptodate(real_blocks[i])) {
+ reiserfs_warning("journal-1226: REPLAY FAILURE, fsck required! buffer write failed\n") ;
+ brelse_array(real_blocks + i, le32_to_cpu(desc->j_len) - i) ;
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ kfree(log_blocks) ;
+ kfree(real_blocks) ;
+ return -1 ;
+ }
+ brelse(real_blocks[i]) ;
+ }
+ cur_dblock = reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 2) % JOURNAL_BLOCK_COUNT) ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
+ "start to offset %ld\n",
+ cur_dblock - reiserfs_get_journal_block(p_s_sb)) ;
+
+ /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
+ SB_JOURNAL(p_s_sb)->j_start = cur_dblock - reiserfs_get_journal_block(p_s_sb) ;
+ SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
+ SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
+ brelse(c_bh) ;
+ brelse(d_bh) ;
+ kfree(log_blocks) ;
+ kfree(real_blocks) ;
+ return 0 ;
+}
+
+/*
+** read and replay the log
+** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
+** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
+**
+** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
+**
+** On exit, it sets things up so the first transaction will work correctly.
+**
+** returns 0 on success, 1 if the header block can't be read, and a negative
+** value if replay fails or the device is read-only while replay is needed.
+*/
+static int journal_read(struct super_block *p_s_sb) {
+ struct reiserfs_journal_desc *desc ;
+ unsigned long last_flush_trans_id = 0 ;
+ unsigned long oldest_trans_id = 0;
+ unsigned long oldest_invalid_trans_id = 0 ;
+ time_t start ;
+ unsigned long last_flush_start = 0;
+ unsigned long oldest_start = 0;
+ unsigned long cur_dblock = 0 ;
+ unsigned long newest_mount_id = 9 ; /* NOTE(review): seed value, overwritten by the first valid desc block found during the scan — confirm why 9 rather than 0 */
+ struct buffer_head *d_bh ;
+ struct reiserfs_journal_header *jh ;
+ int valid_journal_header = 0 ;
+ int replay_count = 0 ;
+ int continue_replay = 1 ;
+ int ret ;
+
+ cur_dblock = reiserfs_get_journal_block(p_s_sb) ;
+ printk("reiserfs: checking transaction log (device %s) ...\n",
+ kdevname(p_s_sb->s_dev)) ;
+ start = CURRENT_TIME ;
+
+ /* step 1, read in the journal header block. Check the transaction it says
+ ** is the first unflushed, and if that transaction is not valid,
+ ** replay is done
+ */
+ SB_JOURNAL(p_s_sb)->j_header_bh = bread(p_s_sb->s_dev,
+ reiserfs_get_journal_block(p_s_sb) +
+ JOURNAL_BLOCK_COUNT,
+ p_s_sb->s_blocksize) ;
+ if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
+ return 1 ;
+ }
+ jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
+ if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
+ le32_to_cpu(jh->j_first_unflushed_offset) < JOURNAL_BLOCK_COUNT &&
+ le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
+ last_flush_start = reiserfs_get_journal_block(p_s_sb) +
+ le32_to_cpu(jh->j_first_unflushed_offset) ;
+ last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
+ "header: first_unflushed_offset %d, last_flushed_trans_id "
+ "%lu\n", le32_to_cpu(jh->j_first_unflushed_offset),
+ last_flush_trans_id) ;
+ valid_journal_header = 1 ;
+
+ /* now, we try to read the first unflushed offset. If it is not valid,
+ ** there is nothing more we can do, and it makes no sense to read
+ ** through the whole log.
+ */
+ d_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset), p_s_sb->s_blocksize) ;
+ ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
+ if (!ret) {
+ continue_replay = 0 ;
+ }
+ brelse(d_bh) ;
+ }
+
+ if (continue_replay && is_read_only(p_s_sb->s_dev)) {
+ printk("clm-2076: device is readonly, unable to replay log\n") ;
+ brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+ SB_JOURNAL(p_s_sb)->j_header_bh = NULL ;
+ return -1 ;
+ }
+ if (continue_replay && (p_s_sb->s_flags & MS_RDONLY)) {
+ printk("Warning, log replay starting on readonly filesystem\n") ;
+ }
+
+ /* ok, there are transactions that need to be replayed. start with the first log block, find
+ ** all the valid transactions, and pick out the oldest.
+ */
+ while(continue_replay && cur_dblock < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
+ d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+ ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
+ if (ret == 1) {
+ desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
+ if (oldest_start == 0) { /* init all oldest_ values */
+ oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
+ oldest_start = d_bh->b_blocknr ;
+ newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
+ "oldest_start to offset %lu, trans_id %lu\n",
+ oldest_start - reiserfs_get_journal_block(p_s_sb),
+ oldest_trans_id) ;
+ } else if (oldest_trans_id > le32_to_cpu(desc->j_trans_id)) {
+ /* one we just read was older */
+ oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
+ oldest_start = d_bh->b_blocknr ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
+ "oldest_start to offset %lu, trans_id %lu\n",
+ oldest_start - reiserfs_get_journal_block(p_s_sb),
+ oldest_trans_id) ;
+ }
+ if (newest_mount_id < le32_to_cpu(desc->j_mount_id)) {
+ newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
+ "newest_mount_id to %d\n", le32_to_cpu(desc->j_mount_id));
+ }
+ /* jump over the whole transaction (desc + data + commit) */
+ cur_dblock += le32_to_cpu(desc->j_len) + 2 ;
+ }
+ else {
+ cur_dblock++ ;
+ }
+ brelse(d_bh) ;
+ }
+ /* step three, starting at the oldest transaction, replay */
+ if (last_flush_start > 0) {
+ oldest_start = last_flush_start ;
+ oldest_trans_id = last_flush_trans_id ;
+ }
+ cur_dblock = oldest_start ;
+ if (oldest_trans_id) {
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
+ "from offset %lu, trans_id %lu\n",
+ cur_dblock - reiserfs_get_journal_block(p_s_sb),
+ oldest_trans_id) ;
+
+ }
+ replay_count = 0 ;
+ /* replay transactions in order until one is invalid or too old;
+ ** journal_read_transaction advances j_start for us
+ */
+ while(continue_replay && oldest_trans_id > 0) {
+ ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
+ if (ret < 0) {
+ return ret ;
+ } else if (ret != 0) {
+ break ;
+ }
+ cur_dblock = reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
+ replay_count++ ;
+ }
+
+ if (oldest_trans_id == 0) {
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
+ "transactions found\n") ;
+ }
+ /* j_start does not get set correctly if we don't replay any transactions.
+ ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
+ ** copy the trans_id from the header
+ */
+ if (valid_journal_header && replay_count == 0) {
+ SB_JOURNAL(p_s_sb)->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
+ SB_JOURNAL(p_s_sb)->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
+ SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
+ SB_JOURNAL(p_s_sb)->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
+ } else {
+ SB_JOURNAL(p_s_sb)->j_mount_id = newest_mount_id + 1 ;
+ }
+ reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
+ "newest_mount_id to %lu\n", SB_JOURNAL(p_s_sb)->j_mount_id) ;
+ SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = SB_JOURNAL(p_s_sb)->j_start ;
+ if (replay_count > 0) {
+ printk("reiserfs: replayed %d transactions in %lu seconds\n", replay_count,
+ CURRENT_TIME - start) ;
+ }
+ if (!is_read_only(p_s_sb->s_dev)) {
+ update_journal_header_block(p_s_sb, SB_JOURNAL(p_s_sb)->j_start,
+ SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) ;
+ }
+ return 0 ;
+}
+
+
+/* work item handed to the commit thread's task queue; one per async commit */
+struct reiserfs_journal_commit_task {
+ struct super_block *p_s_sb ;
+ int jindex ; /* index into SB_JOURNAL_LIST of the list to commit */
+ int wake_on_finish ; /* if this is one, we wake the task_done queue, if it
+ ** is zero, we free the whole struct on finish
+ */
+ struct reiserfs_journal_commit_task *self ; /* saved so the task func can free this struct */
+ struct wait_queue *task_done ;
+ struct tq_struct task ;
+} ;
+
+/* runs in the commit thread: flush the commit list for ct->jindex, then,
+** if the list is fully committed and still has dirty real blocks, push
+** them out via kupdate_one_transaction. Frees the task struct when done.
+*/
+static void reiserfs_journal_commit_task_func(struct reiserfs_journal_commit_task *ct) {
+
+ struct reiserfs_journal_list *jl ;
+ jl = SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex ;
+
+ flush_commit_list(ct->p_s_sb, SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex, 1) ;
+ if (jl->j_len > 0 && atomic_read(&(jl->j_nonzerolen)) > 0 &&
+ atomic_read(&(jl->j_commit_left)) == 0) {
+ kupdate_one_transaction(ct->p_s_sb, jl) ;
+ }
+ kfree(ct->self) ;
+}
+
+/* initialize a commit task struct so it can be queued on the commit
+** thread's task queue. Panics on a NULL argument.
+*/
+static void setup_commit_task_arg(struct reiserfs_journal_commit_task *ct,
+ struct super_block *p_s_sb,
+ int jindex) {
+ if (!ct) {
+ reiserfs_panic(NULL, "journal-1360: setup_commit_task_arg called with NULL struct\n") ;
+ }
+ ct->p_s_sb = p_s_sb ;
+ ct->jindex = jindex ;
+ ct->task_done = NULL ;
+ INIT_LIST_HEAD(&ct->task.list) ;
+ ct->task.sync = 0 ;
+ /* cast to the generic tq routine type; NOTE(review): the double
+ ** (void *)(void *) cast is redundant — a single cast would do
+ */
+ ct->task.routine = (void *)(void *)reiserfs_journal_commit_task_func ;
+ ct->self = ct ;
+ ct->task.data = (void *)ct ;
+}
+
+/* queue an asynchronous commit of journal list 'jindex' on the commit
+** thread; falls back to a synchronous flush_commit_list if the task
+** struct can't be allocated.
+*/
+static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
+ struct reiserfs_journal_commit_task *ct ;
+ /* using GFP_BUFFER, GFP_KERNEL could try to flush inodes, which will try
+ ** to start/join a transaction, which will deadlock
+ */
+ ct = kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_BUFFER) ;
+ if (ct) {
+ setup_commit_task_arg(ct, p_s_sb, jindex) ;
+ queue_task(&(ct->task), &reiserfs_commit_thread_tq);
+ wake_up(&reiserfs_commit_thread_wait) ;
+ } else {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning("journal-1540: kmalloc failed, doing sync commit\n") ;
+#endif
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
+ }
+}
+
+/*
+** this is the commit thread. It is started with kernel_thread on
+** FS mount, and journal_release() waits for it to exit.
+**
+** It could do a periodic commit, but there is a lot code for that
+** elsewhere right now, and I only wanted to implement this little
+** piece for starters.
+**
+** All we do here is sleep on the j_commit_thread_wait wait queue, and
+** then run the per filesystem commit task queue when we wakeup.
+*/
+static int reiserfs_journal_commit_thread(void *nullp) {
+ /* detach from the mounting process: drop files and mm */
+ exit_files(current);
+ exit_mm(current);
+
+ /* block all signals so the daemon can't be killed */
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ current->session = 1;
+ current->pgrp = 1;
+ sprintf(current->comm, "kreiserfsd") ;
+ lock_kernel() ;
+ while(1) {
+
+ /* drain all queued commit tasks */
+ while(TQ_ACTIVE(reiserfs_commit_thread_tq)) {
+ run_task_queue(&reiserfs_commit_thread_tq) ;
+ }
+
+ /* if there aren't any more filesystems left, break */
+ if (reiserfs_mounted_fs_count <= 0) {
+ run_task_queue(&reiserfs_commit_thread_tq) ;
+ break ;
+ }
+ wake_up(&reiserfs_commit_thread_done) ;
+ interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5) ;
+ }
+ unlock_kernel() ;
+ wake_up(&reiserfs_commit_thread_done) ;
+ return 0 ;
+}
+
+/* initialize the wait queues in every journal list slot; called once at mount */
+static void journal_list_init(struct super_block *p_s_sb) {
+ int i ;
+ for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
+ init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_commit_wait)) ;
+ init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_flush_wait)) ;
+ }
+}
+
+/*
+** must be called once on fs mount. calls journal_read for you
+**
+** returns 0 on success, 1 on any failure (bad struct sizes, journal too
+** big for this FS, allocation failure, or replay failure).
+*/
+int journal_init(struct super_block *p_s_sb) {
+ int num_cnodes = JOURNAL_BLOCK_COUNT * 2 ;
+
+ /* the on-disk desc and commit blocks must be exactly one 4k block */
+ if (sizeof(struct reiserfs_journal_commit) != 4096 ||
+ sizeof(struct reiserfs_journal_desc) != 4096
+ ) {
+ printk("journal-1249: commit or desc struct not 4096 %Zd %Zd\n", sizeof(struct reiserfs_journal_commit),
+ sizeof(struct reiserfs_journal_desc)) ;
+ return 1 ;
+ }
+ /* sanity check to make sure they don't overflow the journal */
+ if (JOURNAL_BLOCK_COUNT > reiserfs_get_journal_orig_size(p_s_sb)) {
+ printk("journal-1393: current JOURNAL_BLOCK_COUNT (%d) is too big. This FS was created with a journal size of %lu blocks\n",
+ JOURNAL_BLOCK_COUNT, reiserfs_get_journal_orig_size(p_s_sb)) ;
+ return 1 ;
+ }
+ SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
+
+ if (!SB_JOURNAL(p_s_sb)) {
+ printk("journal-1256: unable to get memory for journal structure\n") ;
+ return 1 ;
+ }
+ memset(SB_JOURNAL(p_s_sb), 0, sizeof(struct reiserfs_journal)) ;
+
+ SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
+ SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
+
+ /* clear out the journal list array */
+ memset(SB_JOURNAL_LIST(p_s_sb), 0, sizeof(struct reiserfs_journal_list) * JOURNAL_LIST_COUNT) ;
+ journal_list_init(p_s_sb) ;
+
+ memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+ memset(journal_writers, 0, sizeof(char *) * 512) ; /* debug code */
+
+ INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
+ reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap,
+ SB_BMAP_NR(p_s_sb)) ;
+ allocate_bitmap_nodes(p_s_sb) ;
+
+ /* reset the running-transaction state */
+ SB_JOURNAL(p_s_sb)->j_start = 0 ;
+ SB_JOURNAL(p_s_sb)->j_len = 0 ;
+ SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
+ SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
+ SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
+ SB_JOURNAL(p_s_sb)->j_last = NULL ;
+ SB_JOURNAL(p_s_sb)->j_first = NULL ;
+ init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
+
+ SB_JOURNAL(p_s_sb)->j_trans_id = 10 ;
+ SB_JOURNAL(p_s_sb)->j_mount_id = 10 ;
+ SB_JOURNAL(p_s_sb)->j_state = 0 ;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 0) ;
+ SB_JOURNAL(p_s_sb)->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
+ SB_JOURNAL(p_s_sb)->j_cnode_free_orig = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
+ SB_JOURNAL(p_s_sb)->j_cnode_free = SB_JOURNAL(p_s_sb)->j_cnode_free_list ? num_cnodes : 0 ;
+ SB_JOURNAL(p_s_sb)->j_cnode_used = 0 ;
+ SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
+ init_journal_hash(p_s_sb) ;
+ SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb)) ;
+ if (!(SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap)) {
+ reiserfs_warning("journal-2005, get_list_bitmap failed for journal list 0\n") ;
+ return 1 ;
+ }
+ if (journal_read(p_s_sb) < 0) {
+ reiserfs_warning("Replay Failure, unable to mount\n") ;
+ free_journal_ram(p_s_sb) ;
+ return 1 ;
+ }
+ SB_JOURNAL_LIST_INDEX(p_s_sb) = 0 ; /* once the read is done, we can set this where it belongs */
+
+ if (reiserfs_dont_log (p_s_sb))
+ return 0;
+
+ /* start the shared commit thread when the first journaled FS mounts */
+ reiserfs_mounted_fs_count++ ;
+ if (reiserfs_mounted_fs_count <= 1) {
+ kernel_thread((void *)(void *)reiserfs_journal_commit_thread, NULL,
+ CLONE_FS | CLONE_FILES | CLONE_VM) ;
+ }
+ return 0 ;
+}
+
+/*
+** test for a polite end of the current transaction. Used by file_write, and should
+** be used by delete to make sure they don't write more than can fit inside a single
+** transaction
+**
+** returns 1 if the caller should end the transaction before allocating
+** 'new_alloc' more blocks, 0 otherwise (always 0 when logging is off).
+*/
+int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
+ time_t now = CURRENT_TIME ;
+ if (reiserfs_dont_log(th->t_super))
+ return 0 ;
+ /* end if: a flush is pending, the batch would overflow, the journal is
+ ** locked, the transaction is too old, or cnodes are running low
+ */
+ if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
+ (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= JOURNAL_MAX_BATCH ||
+ atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
+ (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE ||
+ SB_JOURNAL(th->t_super)->j_cnode_free < (JOURNAL_TRANS_MAX * 3)) {
+ return 1 ;
+ }
+ return 0 ;
+}
+
+/* this must be called inside a transaction, and requires the
+** kernel_lock to be held
+**
+** blocks new writers by setting WRITERS_BLOCKED in j_state; paired with
+** reiserfs_allow_writes
+*/
+void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
+ struct super_block *s = th->t_super ;
+ SB_JOURNAL(s)->j_must_wait = 1 ;
+ set_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
+ return ;
+}
+
+/* this must be called without a transaction started, and does not
+** require BKL
+**
+** clears WRITERS_BLOCKED and wakes everyone waiting in
+** reiserfs_wait_on_write_block
+*/
+void reiserfs_allow_writes(struct super_block *s) {
+ clear_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
+ wake_up(&SB_JOURNAL(s)->j_join_wait) ;
+}
+
+/* this must be called without a transaction started, and does not
+** require BKL
+**
+** sleeps on j_join_wait until WRITERS_BLOCKED is cleared
+*/
+void reiserfs_wait_on_write_block(struct super_block *s) {
+ wait_event(SB_JOURNAL(s)->j_join_wait,
+ !test_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state)) ;
+}
+
+/* join == true if you must join an existing transaction.
+** join == false if you can deal with waiting for others to finish
+**
+** this will block until the transaction is joinable. send the number of blocks you
+** expect to use in nblocks.
+**
+** on success (always 0) the handle 'th' is filled in and the caller is
+** counted as a writer (j_wcount incremented).
+*/
+static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
+ time_t now = CURRENT_TIME ;
+ int old_trans_id ;
+
+ reiserfs_check_lock_depth("journal_begin") ;
+#ifdef CONFIG_REISERFS_CHECK
+ if (p_s_sb->s_flags & MS_RDONLY) {
+ printk("clm-2078: calling journal_begin on readonly FS\n") ;
+ BUG() ;
+ }
+#endif
+
+ if (reiserfs_dont_log(p_s_sb)) {
+ th->t_super = p_s_sb ; /* others will check this for the don't log flag */
+ return 0 ;
+ }
+
+relock:
+ lock_journal(p_s_sb) ;
+
+ /* writers are blocked (see reiserfs_block_writes); drop the lock and wait */
+ if (test_bit(WRITERS_BLOCKED, &SB_JOURNAL(p_s_sb)->j_state)) {
+ unlock_journal(p_s_sb) ;
+ reiserfs_wait_on_write_block(p_s_sb) ;
+ goto relock ;
+ }
+
+ /* if there is no room in the journal OR
+ ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
+ ** we don't sleep if there aren't other writers
+ */
+
+
+ if ( (!join && SB_JOURNAL(p_s_sb)->j_must_wait > 0) ||
+ ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= JOURNAL_MAX_BATCH) ||
+ (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0 && SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
+ (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) ||
+ (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) ) ||
+ (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (JOURNAL_TRANS_MAX * 3))) {
+
+ unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
+
+ /* if writer count is 0, we can just force this transaction to end, and start
+ ** a new one afterwards.
+ */
+ if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
+ struct reiserfs_transaction_handle myth ;
+ journal_join(&myth, p_s_sb, 1) ;
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+ do_journal_end(&myth, p_s_sb,1,COMMIT_NOW) ;
+ } else {
+ /* but if the writer count isn't zero, we have to wait for the current writers to finish.
+ ** They won't batch on transaction end once we set j_jlock
+ */
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
+ old_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
+ /* sleep until j_jlock clears or a new transaction has started */
+ while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
+ SB_JOURNAL(p_s_sb)->j_trans_id == old_trans_id) {
+ sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ }
+ }
+ lock_journal(p_s_sb) ; /* relock to continue */
+ }
+
+ if (SB_JOURNAL(p_s_sb)->j_trans_start_time == 0) { /* we are the first writer, set trans_id */
+ SB_JOURNAL(p_s_sb)->j_trans_start_time = now ;
+ }
+ /* register ourselves as a writer and fill in the handle */
+ atomic_inc(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
+ SB_JOURNAL(p_s_sb)->j_len_alloc += nblocks ;
+ th->t_blocks_logged = 0 ;
+ th->t_blocks_allocated = nblocks ;
+ th->t_super = p_s_sb ;
+ th->t_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
+ th->t_caller = "Unknown" ;
+ unlock_journal(p_s_sb) ;
+ p_s_sb->s_dirt = 1;
+ return 0 ;
+}
+
+
+/* begin a transaction, joining the currently-running one if necessary */
+int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+ return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
+}
+
+/* begin a transaction, waiting for the current one to end first if it is full or old */
+int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
+ return do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
+}
+
+/* not used at all -- kept as a no-op stub for the journal interface */
+int journal_prepare(struct super_block * p_s_sb, struct buffer_head *bh) {
+ return 0 ;
+}
+
+/*
+** puts bh into the current transaction. If it was already there, reorders removes the
+** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
+**
+** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
+** transaction is committed.
+**
+** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+*/
+int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
+ struct reiserfs_journal_cnode *cn = NULL;
+ int count_already_incd = 0 ;
+ int prepared = 0 ;
+
+ /* with logging off, just dirty the buffer the normal way */
+ if (reiserfs_dont_log(th->t_super)) {
+ mark_buffer_dirty(bh) ;
+ return 0 ;
+ }
+
+ if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
+ reiserfs_panic(th->t_super, "journal-1577: handle trans id %d != current trans id %d\n",
+ th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
+ }
+ p_s_sb->s_dirt = 1 ;
+
+ prepared = test_and_clear_bit(BH_JPrepared, &bh->b_state) ;
+ /* already in this transaction, we are done */
+ if (buffer_journaled(bh)) {
+ return 0 ;
+ }
+
+ /* this must be turned into a panic instead of a warning. We can't allow
+ ** a dirty or journal_dirty or locked buffer to be logged, as some changes
+ ** could get to disk too early. NOT GOOD.
+ */
+ if (!prepared || buffer_locked(bh)) {
+ printk("journal-1777: buffer %lu bad state %cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT\n", bh->b_blocknr, prepared ? ' ' : '!',
+ buffer_locked(bh) ? ' ' : '!',
+ buffer_dirty(bh) ? ' ' : '!',
+ buffer_journal_dirty(bh) ? ' ' : '!') ;
+ show_reiserfs_locks() ;
+ }
+ count_already_incd = clear_prepared_bits(bh) ;
+
+ if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
+ printk("journal-1409: journal_mark_dirty returning because j_wcount was %d\n", atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount))) ;
+ return 1 ;
+ }
+ /* this error means I've screwed up, and we've overflowed the transaction.
+ ** Nothing can be done here, except make the FS readonly or panic.
+ */
+ if (SB_JOURNAL(p_s_sb)->j_len >= JOURNAL_TRANS_MAX) {
+ reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
+ }
+
+ if (buffer_journal_dirty(bh)) {
+ count_already_incd = 1 ;
+ mark_buffer_notjournal_dirty(bh) ;
+ }
+
+ /* keep it out of the normal writeback path until the transaction commits */
+ if (buffer_dirty(bh)) {
+ clear_bit(BH_Dirty, &bh->b_state) ;
+ }
+
+ if (buffer_journaled(bh)) { /* must double check after getting lock */
+ goto done ;
+ }
+
+ if (SB_JOURNAL(p_s_sb)->j_len > SB_JOURNAL(p_s_sb)->j_len_alloc) {
+ SB_JOURNAL(p_s_sb)->j_len_alloc = SB_JOURNAL(p_s_sb)->j_len + JOURNAL_PER_BALANCE_CNT ;
+ }
+
+ set_bit(BH_JDirty, &bh->b_state) ;
+
+ /* now put this guy on the end */
+ /* NOTE(review): cn is initialized to NULL above and never assigned
+ ** before this point, so this branch always runs — confirm whether the
+ ** guard is a leftover from an earlier hash-lookup version
+ */
+ if (!cn) {
+ cn = get_cnode(p_s_sb) ;
+ if (!cn) {
+ reiserfs_panic(p_s_sb, "get_cnode failed!\n");
+ }
+
+ if (th->t_blocks_logged == th->t_blocks_allocated) {
+ th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
+ SB_JOURNAL(p_s_sb)->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
+ }
+ th->t_blocks_logged++ ;
+ SB_JOURNAL(p_s_sb)->j_len++ ;
+
+ cn->bh = bh ;
+ cn->blocknr = bh->b_blocknr ;
+ cn->dev = bh->b_dev ;
+ cn->jlist = NULL ;
+ insert_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, cn) ;
+ if (!count_already_incd) {
+ atomic_inc(&(bh->b_count)) ;
+ }
+ }
+ /* append the cnode to the running transaction's list */
+ cn->next = NULL ;
+ cn->prev = SB_JOURNAL(p_s_sb)->j_last ;
+ cn->bh = bh ;
+ if (SB_JOURNAL(p_s_sb)->j_last) {
+ SB_JOURNAL(p_s_sb)->j_last->next = cn ;
+ SB_JOURNAL(p_s_sb)->j_last = cn ;
+ } else {
+ SB_JOURNAL(p_s_sb)->j_first = cn ;
+ SB_JOURNAL(p_s_sb)->j_last = cn ;
+ }
+done:
+ return 0 ;
+}
+
+/*
+** if buffer already in current transaction, do a journal_mark_dirty
+** otherwise, just mark it dirty and move on. Used for writes to meta blocks
+** that don't need journaling
+*/
+int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
+ if (reiserfs_dont_log(th->t_super) || buffer_journaled(bh) ||
+ buffer_journal_dirty(bh)) {
+ return journal_mark_dirty(th, p_s_sb, bh) ;
+ }
+ /* also journal it if a previous transaction still references this block */
+ if (get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, bh->b_dev,bh->b_blocknr,bh->b_size)) {
+ return journal_mark_dirty(th, p_s_sb, bh) ;
+ }
+ mark_buffer_dirty(bh) ;
+ return 0 ;
+}
+
+/* end a transaction without forcing a commit (may batch with other writers) */
+int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+ return do_journal_end(th, p_s_sb, nblocks, 0) ;
+}
+
+/* removes from the current transaction, releasing and decrementing any counters.
+** also files the removed buffer directly onto the clean list
+**
+** called by journal_mark_freed when a block has been deleted
+**
+** returns 1 if it cleaned and released the buffer. 0 otherwise
+*/
+int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) {
+ struct buffer_head *bh ;
+ struct reiserfs_journal_cnode *cn ;
+ int ret = 0;
+
+ cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ if (!cn || !cn->bh) {
+ return ret ;
+ }
+ bh = cn->bh ;
+ /* unlink the cnode from the running transaction's list */
+ if (cn->prev) {
+ cn->prev->next = cn->next ;
+ }
+ if (cn->next) {
+ cn->next->prev = cn->prev ;
+ }
+ if (cn == SB_JOURNAL(p_s_sb)->j_first) {
+ SB_JOURNAL(p_s_sb)->j_first = cn->next ;
+ }
+ if (cn == SB_JOURNAL(p_s_sb)->j_last) {
+ SB_JOURNAL(p_s_sb)->j_last = cn->prev ;
+ }
+ remove_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, NULL, bh, 0) ;
+ mark_buffer_not_journaled(bh) ; /* don't log this one */
+
+ if (!already_cleaned) {
+ mark_buffer_notjournal_dirty(bh) ;
+ atomic_dec(&(bh->b_count)) ;
+ if (atomic_read(&(bh->b_count)) < 0) {
+ printk("journal-1752: remove from trans, b_count < 0\n") ;
+ }
+ if (!buffer_locked(bh)) reiserfs_clean_and_file_buffer(bh) ;
+ ret = 1 ;
+ }
+ SB_JOURNAL(p_s_sb)->j_len-- ;
+ SB_JOURNAL(p_s_sb)->j_len_alloc-- ;
+ free_cnode(p_s_sb, cn) ;
+ return ret ;
+}
+
+/* removes from a specific journal list hash; always returns 0 */
+int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed) {
+ remove_journal_hash(SB_JOURNAL(s)->j_list_hash_table, jl, bh, remove_freed) ;
+ return 0 ;
+}
+
+/*
+** for any cnode in a journal list, it can only be dirtied of all the
+** transactions that include it are committed to disk.
+** this checks through each transaction, and returns 1 if you are allowed to dirty,
+** and 0 if you aren't
+**
+** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
+** blocks for a given transaction on disk
+**
+*/
+static int can_dirty(struct reiserfs_journal_cnode *cn) {
+ kdev_t dev = cn->dev ;
+ unsigned long blocknr = cn->blocknr ;
+ struct reiserfs_journal_cnode *cur = cn->hprev ;
+ int can_dirty = 1 ;
+
+ /* first test hprev. These are all newer than cn, so any node here
+ ** with the same block number and dev means this node can't be sent
+ ** to disk right now.
+ */
+ while(cur && can_dirty) {
+ if (cur->jlist && cur->bh && cur->blocknr && cur->dev == dev &&
+ cur->blocknr == blocknr) {
+ can_dirty = 0 ;
+ }
+ cur = cur->hprev ;
+ }
+ /* then test hnext. These are all older than cn. As long as they
+ ** are committed to the log, it is safe to write cn to disk
+ */
+ cur = cn->hnext ;
+ while(cur && can_dirty) {
+ if (cur->jlist && cur->jlist->j_len > 0 &&
+ atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
+ cur->blocknr && cur->dev == dev && cur->blocknr == blocknr) {
+ can_dirty = 0 ;
+ }
+ cur = cur->hnext ;
+ }
+ return can_dirty ;
+}
+
+/* syncs the commit blocks, but does not force the real buffers to disk
+** will wait until the current transaction is done/committed before returning
+*/
+int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+ return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
+}
+
+#ifdef __KERNEL__
+/* debugging aid: dump the registered journal writers; always returns 0 */
+int show_reiserfs_locks(void) {
+
+ dump_journal_writers() ;
+#if 0 /* debugging code for when we are compiled static don't delete */
+ p_s_sb = sb_entry(super_blocks.next);
+ while (p_s_sb != sb_entry(&super_blocks)) {
+ if (reiserfs_is_super(p_s_sb)) {
+printk("journal lock is %d, join lock is %d, writers %d must wait is %d\n",
+ atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)),
+ atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)),
+ atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)),
+ SB_JOURNAL(p_s_sb)->j_must_wait) ;
+ printk("used cnodes %d, free cnodes %d\n", SB_JOURNAL(p_s_sb)->j_cnode_used, SB_JOURNAL(p_s_sb)->j_cnode_free) ;
+ }
+ p_s_sb = sb_entry(p_s_sb->s_list.next);
+ }
+#endif
+ return 0 ;
+}
+#endif
+
+/*
+** used to get memory back from async commits that are floating around
+** and to reclaim any blocks deleted but unusable because their commits
+** haven't hit disk yet. called from bitmap.c
+**
+** if it starts flushing things, it ors SCHEDULE_OCCURRED into repeat.
+** note, this is just if schedule has a chance of occuring. I need to
+** change flush_commit_lists to have a repeat parameter too.
+**
+*/
+void flush_async_commits(struct super_block *p_s_sb) {
+ int i ;
+
+ /* flush every journal list except the one currently being filled */
+ for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
+ if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
+ }
+ }
+}
+
+/*
+** flushes any old transactions to disk
+** ends the current transaction if it is too old
+**
+** also calls flush_journal_list with old_only == 1, which allows me to reclaim
+** memory and such from the journal lists whose real blocks are all on disk.
+**
+** called by sync_dev_journal from buffer.c
+**
+** immediate: when non-zero, commit lists are flushed regardless of age, and
+**            the current transaction is ended and synced even if young.
+** returns:   non-zero when dirty state should be kept (kupdate case).
+*/
+int flush_old_commits(struct super_block *p_s_sb, int immediate) {
+ int i ;
+ int count = 0; /* NOTE(review): counted but never read below */
+ int start ;
+ time_t now ;
+ int keep_dirty = 0 ;
+ struct reiserfs_transaction_handle th ;
+
+ start = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
+ now = CURRENT_TIME ;
+
+ /* safety check so we don't flush while we are replaying the log during mount */
+ if (SB_JOURNAL_LIST_INDEX(p_s_sb) < 0) {
+ return 0 ;
+ }
+ /* kupdate must not block on a sync; keep things dirty and come back later */
+ if (!strcmp(current->comm, "kupdate")) {
+ immediate = 0 ;
+ keep_dirty = 1 ;
+ }
+ /* starting with oldest, loop until we get to the start */
+ i = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
+ while(i != start) {
+ if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > JOURNAL_MAX_COMMIT_AGE ||
+ immediate)) {
+ /* we have to check again to be sure the current transaction did not change */
+ if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
+ }
+ }
+ i = (i + 1) % JOURNAL_LIST_COUNT ;
+ count++ ;
+ }
+ /* now, check the current transaction. If there are no writers, and it is too old, finish it, and
+ ** force the commit blocks to disk
+ */
+ if (!immediate && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&
+ SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
+ SB_JOURNAL(p_s_sb)->j_len > 0 &&
+ (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) {
+ /* join the old transaction, log the superblock, and end it async */
+ journal_join(&th, p_s_sb, 1) ;
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+ do_journal_end(&th, p_s_sb,1, COMMIT_NOW) ;
+ keep_dirty = 0 ;
+ } else if (immediate) { /* belongs above, but I wanted this to be very explicit as a special case. If they say to
+ flush, we must be sure old transactions hit the disk too. */
+ journal_join(&th, p_s_sb, 1) ;
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+ do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
+ }
+ keep_dirty |= reiserfs_journal_kupdate(p_s_sb) ;
+ return keep_dirty ;
+}
+
+/*
+** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
+**
+** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
+** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
+** flushes the commit list and returns 0.
+**
+** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
+**
+** Note, we can't allow the journal_end to proceed while there are still writers in the log.
+**
+** caller holds the journal lock; this function unlocks it on every path
+** that returns 0, and leaves it held when returning 1.
+*/
+static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
+ unsigned long nblocks, int flags) {
+
+ time_t now ;
+ int flush = flags & FLUSH_ALL ;
+ int commit_now = flags & COMMIT_NOW ;
+ int wait_on_commit = flags & WAIT ;
+
+ /* the handle must belong to the transaction currently being ended */
+ if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
+ reiserfs_panic(th->t_super, "journal-1577: handle trans id %d != current trans id %d\n",
+ th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
+ }
+
+ /* give back the log space this handle reserved but never used */
+ SB_JOURNAL(p_s_sb)->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
+ if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
+ atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
+ }
+
+ /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
+ ** will be dealt with by next transaction that actually writes something, but should be taken
+ ** care of in this trans
+ */
+ if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ int wcount = atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
+ unlock_journal(p_s_sb) ;
+ /* we were the last writer of an empty transaction: release the join
+ ** lock and wake anyone waiting to join
+ */
+ if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) > 0 && wcount <= 0) {
+ atomic_dec(&(SB_JOURNAL(p_s_sb)->j_jlock)) ;
+ wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ }
+ return 0 ;
+ }
+ /* if wcount > 0, and we are called to with flush or commit_now,
+ ** we wait on j_join_wait. We will wake up when the last writer has
+ ** finished the transaction, and started it on its way to the disk.
+ ** Then, we flush the commit or journal list, and just return 0
+ ** because the rest of journal end was already done for this transaction.
+ */
+ if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) {
+ if (flush || commit_now) {
+ int orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
+ /* j_jlock keeps new writers out while we wait for the old ones */
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
+ if (flush) {
+ SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
+ }
+ unlock_journal(p_s_sb) ;
+ /* sleep while the current transaction is still j_jlocked */
+ while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
+ SB_JOURNAL(p_s_sb)->j_trans_id == th->t_trans_id) {
+ sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ }
+ if (commit_now) {
+ if (wait_on_commit) {
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
+ } else {
+ commit_flush_async(p_s_sb, orig_jindex) ;
+ }
+ }
+ return 0 ;
+ }
+ unlock_journal(p_s_sb) ;
+ return 0 ;
+ }
+
+ /* deal with old transactions where we are the last writers */
+ now = CURRENT_TIME ;
+ if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) {
+ commit_now = 1 ;
+ SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
+ }
+ /* don't batch when someone is waiting on j_join_wait */
+ /* don't batch when syncing the commit or flushing the whole trans */
+ if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now &&
+ (SB_JOURNAL(p_s_sb)->j_len < JOURNAL_MAX_BATCH) &&
+ SB_JOURNAL(p_s_sb)->j_len_alloc < JOURNAL_MAX_BATCH && SB_JOURNAL(p_s_sb)->j_cnode_free > (JOURNAL_TRANS_MAX * 3)) {
+ /* batch: leave the transaction open for more writers */
+ SB_JOURNAL(p_s_sb)->j_bcount++ ;
+ unlock_journal(p_s_sb) ;
+ return 0 ;
+ }
+
+ if (SB_JOURNAL(p_s_sb)->j_start > JOURNAL_BLOCK_COUNT) {
+ reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%d) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
+ }
+ return 1 ;
+}
+
+/*
+** Does all the work that makes deleting blocks safe.
+** when deleting a block mark BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
+**
+** otherwise:
+** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
+** before this transaction has finished.
+**
+** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
+** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
+** the block can't be reallocated yet.
+**
+** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
+**
+** always returns 0.
+*/
+int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long blocknr) {
+ struct reiserfs_journal_cnode *cn = NULL ;
+ struct buffer_head *bh = NULL ;
+ struct reiserfs_list_bitmap *jb = NULL ;
+ int cleaned = 0 ;
+
+ /* with logging disabled there is nothing to track; just sanity-check that
+ ** the freed block isn't sitting dirty in the buffer cache
+ */
+ if (reiserfs_dont_log(th->t_super)) {
+ bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ if (bh && buffer_dirty (bh)) {
+ printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
+ BUG ();
+ }
+ brelse (bh);
+ return 0 ;
+ }
+ bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ /* if it is journal new, we just remove it from this transaction */
+ if (bh && buffer_journal_new(bh)) {
+ clear_prepared_bits(bh) ;
+ cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
+ } else {
+ /* set the bit for this block in the journal bitmap for this transaction */
+ jb = SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap ;
+ if (!jb) {
+ reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
+ }
+ set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;
+
+ /* Note, the entire while loop is not allowed to schedule. */
+
+ if (bh) {
+ clear_prepared_bits(bh) ;
+ }
+ cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
+
+ /* find all older transactions with this block, make sure they don't try to write it out */
+ cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ while (cn) {
+ if (p_s_sb->s_dev == cn->dev && blocknr == cn->blocknr) {
+ set_bit(BLOCK_FREED, &cn->state) ;
+ if (cn->bh) {
+ if (!cleaned) {
+ /* remove_from_transaction will brelse the buffer if it was
+ ** in the current trans
+ */
+ mark_buffer_notjournal_dirty(cn->bh) ;
+ if (!buffer_locked(cn->bh)) {
+ reiserfs_clean_and_file_buffer(cn->bh) ;
+ }
+ cleaned = 1 ;
+ atomic_dec(&(cn->bh->b_count)) ;
+ if (atomic_read(&(cn->bh->b_count)) < 0) {
+ printk("journal-2138: cn->bh->b_count < 0\n") ;
+ }
+ }
+ if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
+ atomic_dec(&(cn->jlist->j_nonzerolen)) ;
+ }
+ cn->bh = NULL ;
+ }
+ }
+ cn = cn->hnext ;
+ }
+ }
+
+ if (bh) {
+ atomic_dec(&(bh->b_count)) ; /* get_hash incs this */
+ if (atomic_read(&(bh->b_count)) < 0) {
+ printk("journal-2165: bh->b_count < 0\n") ;
+ }
+ }
+ return 0 ;
+}
+
+/* undo reiserfs_prepare_for_journal: clear the BH_JPrepared bit on bh so the
+** buffer may be written out normally again.  no-op when logging is disabled
+** or bh is NULL.
+*/
+void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
+ struct buffer_head *bh) {
+ if (reiserfs_dont_log (p_s_sb))
+ return;
+
+ if (!bh) {
+ return ;
+ }
+ clear_bit(BH_JPrepared, &bh->b_state) ;
+}
+
+extern struct tree_balance *cur_tb ;
+/*
+** before we can change a metadata block, we have to make sure it won't
+** be written to disk while we are altering it. So, we must:
+** clean it
+** wait on it.
+**
+** sets BH_JPrepared on bh; when wait is non-zero, also waits for any
+** in-flight I/O on bh to finish.  a buffer that is already journaled
+** (BH_JDirty) only needs the prepared bit set and we return immediately.
+*/
+void reiserfs_prepare_for_journal(struct super_block *p_s_sb,
+ struct buffer_head *bh, int wait) {
+ int retry_count = 0 ;
+
+ if (reiserfs_dont_log (p_s_sb))
+ return;
+
+ /* loop until the prepared bit sticks and (if waiting) the buffer is
+ ** unlocked
+ */
+ while(!test_bit(BH_JPrepared, &bh->b_state) ||
+ (wait && buffer_locked(bh))) {
+ if (buffer_journaled(bh)) {
+ set_bit(BH_JPrepared, &bh->b_state) ;
+ return ;
+ }
+ set_bit(BH_JPrepared, &bh->b_state) ;
+ if (wait) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (buffer_locked(bh) && cur_tb != NULL) {
+ printk("reiserfs_prepare_for_journal, waiting while do_balance was running\n") ;
+ BUG() ;
+ }
+#endif
+ wait_on_buffer(bh) ;
+ }
+ retry_count++ ;
+ }
+}
+
+/*
+ * Wait for a page to get unlocked.
+ *
+ * This must be called with the caller "holding" the page,
+ * ie with increased "page->count" so that the page won't
+ * go away during the wait..
+ *
+ * open-coded sleep loop on page->wait; also bails out early when the
+ * list entry's do_not_lock flag is set (the caller already owns the lock).
+ */
+static void ___reiserfs_wait_on_page(struct reiserfs_page_list *pl)
+{
+ struct task_struct *tsk = current;
+ struct page *page = pl->page ;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue(&page->wait, &wait);
+ do {
+ /* kick any queued I/O for this page before sleeping */
+ block_sync_page(page);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (!PageLocked(page) || pl->do_not_lock)
+ break;
+ schedule();
+ } while (PageLocked(page));
+ tsk->state = TASK_RUNNING;
+ remove_wait_queue(&page->wait, &wait);
+}
+
+/*
+ * Get an exclusive lock on the page..
+ * but, every time you get woken up, check the page to make sure
+ * someone hasn't called a journal_begin with it locked.
+ *
+ * the page should always be locked when this returns
+ *
+ * returns 0 if you've got the page locked
+ * returns 1 if it returns because someone else has called journal_begin
+ * with the page locked
+ * this is only useful to the code that flushes pages before a
+ * commit. Do not export this hack. Ever.
+ */
+static int reiserfs_try_lock_page(struct reiserfs_page_list *pl)
+{
+ struct page *page = pl->page ;
+ while (TryLockPage(page)) {
+ if (pl->do_not_lock) {
+ /* the page is locked, but we cannot have it */
+ return 1 ;
+ }
+ ___reiserfs_wait_on_page(pl);
+ }
+ /* we have the page locked */
+ return 0 ;
+}
+
+
+/*
+** This can only be called from do_journal_end.
+** it runs through the list things that need flushing before the
+** transaction can commit, and writes each of them to disk
+**
+*/
+
+static void flush_pages_before_commit(struct reiserfs_transaction_handle *th,
+ struct super_block *p_s_sb) {
+ struct reiserfs_page_list *pl = SB_JOURNAL(p_s_sb)->j_flush_pages ;
+ struct reiserfs_page_list *pl_tmp ;
+ struct buffer_head *bh, *head ;
+ int count = 0 ;
+
+ /* first write each dirty unlocked buffer in the list */
+
+ while(pl) {
+ /* ugly. journal_end can be called from get_block, which has a
+ ** page locked. So, we have to check to see if pl->page is the page
+ ** currently locked by the calling function, and if so, skip the
+ ** lock
+ */
+ if (reiserfs_try_lock_page(pl)) {
+ goto setup_next ;
+ }
+ if (!PageLocked(pl->page)) {
+ BUG() ;
+ }
+ if (pl->page->buffers) {
+ head = pl->page->buffers ;
+ bh = head ;
+ do {
+ if (bh->b_blocknr == pl->blocknr && buffer_dirty(bh) &&
+ !buffer_locked(bh) && buffer_uptodate(bh) ) {
+ ll_rw_block(WRITE, 1, &bh) ;
+ }
+ bh = bh->b_this_page ;
+ } while (bh != head) ;
+ }
+ if (!pl->do_not_lock) {
+ UnlockPage(pl->page) ;
+ }
+setup_next:
+ pl = pl->next ;
+ }
+
+ /* now wait on them */
+
+ pl = SB_JOURNAL(p_s_sb)->j_flush_pages ;
+ while(pl) {
+ if (reiserfs_try_lock_page(pl)) {
+ goto remove_page ;
+ }
+ if (!PageLocked(pl->page)) {
+ BUG() ;
+ }
+ if (pl->page->buffers) {
+ head = pl->page->buffers ;
+ bh = head ;
+ do {
+ if (bh->b_blocknr == pl->blocknr) {
+ count++ ;
+ wait_on_buffer(bh) ;
+ if (!buffer_uptodate(bh)) {
+ reiserfs_panic(p_s_sb, "journal-2443: flush_pages_before_commit, error writing block %lu\n", bh->b_blocknr) ;
+ }
+ }
+ bh = bh->b_this_page ;
+ } while (bh != head) ;
+ }
+ if (!pl->do_not_lock) {
+ UnlockPage(pl->page) ;
+ }
+remove_page:
+ /* we've waited on the I/O, we can remove the page from the
+ ** list, and free our pointer struct to it.
+ */
+ if (pl->prev) {
+ pl->prev->next = pl->next ;
+ }
+ if (pl->next) {
+ pl->next->prev = pl->prev ;
+ }
+ put_page(pl->page) ;
+ pl_tmp = pl ;
+ pl = pl->next ;
+ reiserfs_kfree(pl_tmp, sizeof(struct reiserfs_page_list), p_s_sb) ;
+ }
+ SB_JOURNAL(p_s_sb)->j_flush_pages = NULL ;
+}
+
+/*
+** called when an indirect item is converted back into a tail.
+**
+** The reiserfs part of the inode stores enough information to find
+** our page_list struct in the flush list. We remove it from the list
+** and free the struct.
+**
+** Note, it is possible for this to happen:
+**
+** reiserfs_add_page_to_flush_list(inode)
+** transaction ends, list is flushed
+** reiserfs_remove_page_from_flush_list(inode)
+**
+** This would be bad because the page_list pointer in the inode is not
+** updated when the list is flushed, so we can't know if the pointer is
+** valid. So, in the inode, we also store the transaction id when the
+** page was added. If we are trying to remove something from an old
+** transaction, we just clear out the pointer in the inode and return.
+**
+** Normal case is to use the reiserfs_page_list pointer in the inode to
+** find and remove the page from the flush list.
+**
+** always returns 0.
+*/
+int reiserfs_remove_page_from_flush_list(struct reiserfs_transaction_handle *th,
+ struct inode *inode) {
+ struct reiserfs_page_list *pl ;
+
+ /* was this conversion done in a previous transaction? If so, return */
+ if (inode->u.reiserfs_i.i_conversion_trans_id < th->t_trans_id) {
+ inode->u.reiserfs_i.i_converted_page = NULL ;
+ inode->u.reiserfs_i.i_conversion_trans_id = 0 ;
+ return 0 ;
+ }
+
+ /* remove the page_list struct from the list, release our hold on the
+ ** page, and free the page_list struct
+ */
+ pl = inode->u.reiserfs_i.i_converted_page ;
+ if (pl) {
+ if (pl->next) {
+ pl->next->prev = pl->prev ;
+ }
+ if (pl->prev) {
+ pl->prev->next = pl->next ;
+ }
+ /* fix up the list head if we were the first entry */
+ if (SB_JOURNAL(inode->i_sb)->j_flush_pages == pl) {
+ SB_JOURNAL(inode->i_sb)->j_flush_pages = pl->next ;
+ }
+ put_page(pl->page) ;
+ reiserfs_kfree(pl, sizeof(struct reiserfs_page_list), inode->i_sb) ;
+ inode->u.reiserfs_i.i_converted_page = NULL ;
+ inode->u.reiserfs_i.i_conversion_trans_id = 0 ;
+ }
+ return 0 ;
+}
+
+/*
+** Called after a direct to indirect transaction. The unformatted node
+** must be flushed to disk before the transaction commits, otherwise, we
+** risk losing the data from the direct item. This adds the page
+** containing the unformatted node to a list of pages that need flushing.
+**
+** it calls get_page(page), so the page won't disappear until we've
+** flushed or removed it from our list.
+**
+** pointers to the reiserfs_page_list struct are stored in the inode,
+** so this page can be quickly removed from the list after the tail is
+** converted back into a direct item.
+**
+** If we fail to find the memory for the reiserfs_page_list struct, we
+** just sync the page now. Not good, but safe.
+**
+** since this must be called with the page locked, we always set
+** the do_not_lock field in the page_list struct we allocate
+**
+** always returns 0.
+*/
+int reiserfs_add_page_to_flush_list(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ struct buffer_head *bh) {
+ struct reiserfs_page_list *new_pl ;
+
+/* debugging use ONLY. Do not define this on data you care about. */
+#ifdef REISERFS_NO_FLUSH_AFTER_CONVERT
+ return 0 ;
+#endif
+
+ get_page(bh->b_page) ;
+ new_pl = reiserfs_kmalloc(sizeof(struct reiserfs_page_list), GFP_BUFFER,
+ inode->i_sb) ;
+ if (!new_pl) {
+ /* allocation failed: fall back to writing the buffer synchronously */
+ put_page(bh->b_page) ;
+ reiserfs_warning("journal-2480: forced to flush page, out of memory\n") ;
+ ll_rw_block(WRITE, 1, &bh) ;
+ wait_on_buffer(bh) ;
+ if (!buffer_uptodate(bh)) {
+ reiserfs_panic(inode->i_sb, "journal-2484: error writing buffer %lu to disk\n", bh->b_blocknr) ;
+ }
+ inode->u.reiserfs_i.i_converted_page = NULL ;
+ return 0 ;
+ }
+
+ /* link the new entry at the head of the journal's flush list */
+ new_pl->page = bh->b_page ;
+ new_pl->do_not_lock = 1 ;
+ new_pl->blocknr = bh->b_blocknr ;
+ new_pl->next = SB_JOURNAL(inode->i_sb)->j_flush_pages;
+ if (new_pl->next) {
+ new_pl->next->prev = new_pl ;
+ }
+ new_pl->prev = NULL ;
+ SB_JOURNAL(inode->i_sb)->j_flush_pages = new_pl ;
+
+ /* if we have numbers from an old transaction, zero the converted
+ ** page, it has already been flushed and freed
+ */
+ if (inode->u.reiserfs_i.i_conversion_trans_id &&
+ inode->u.reiserfs_i.i_conversion_trans_id < th->t_trans_id) {
+ inode->u.reiserfs_i.i_converted_page = NULL ;
+ }
+ if (inode->u.reiserfs_i.i_converted_page) {
+ reiserfs_panic(inode->i_sb, "journal-2501: inode already had a converted page\n") ;
+ }
+ inode->u.reiserfs_i.i_converted_page = new_pl ;
+ inode->u.reiserfs_i.i_conversion_trans_id = th->t_trans_id ;
+ return 0 ;
+}
+
+/*
+** long and ugly. If flush, will not return until all commit
+** blocks and all real buffers in the trans are on disk.
+** If no_async, won't return until all commit blocks are on disk.
+**
+** keep reading, there are comments as you go along
+**
+** overall phases: decide whether to really end (check_journal_end),
+** build the desc/commit blocks, hash the real blocks into the journal
+** list, copy them into the log area, retire the transaction's cnodes,
+** flush converted pages, honor flush/commit_now, then reset journal
+** state for the next transaction and flush any lists in danger of
+** being overwritten.
+*/
+static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
+ int flags) {
+ struct reiserfs_journal_cnode *cn, *next, *jl_cn;
+ struct reiserfs_journal_cnode *last_cn = NULL;
+ struct reiserfs_journal_desc *desc ;
+ struct reiserfs_journal_commit *commit ;
+ struct buffer_head *c_bh ; /* commit bh */
+ struct buffer_head *d_bh ; /* desc bh */
+ int cur_write_start = 0 ; /* start index of current log write */
+ int cur_blocks_left = 0 ; /* number of journal blocks left to write */
+ int old_start ;
+ int i ;
+ int jindex ;
+ int orig_jindex ;
+ int flush = flags & FLUSH_ALL ;
+ int commit_now = flags & COMMIT_NOW ;
+ int wait_on_commit = flags & WAIT ;
+ struct reiserfs_super_block *rs ;
+
+ if (reiserfs_dont_log(th->t_super)) {
+ return 0 ;
+ }
+
+ lock_journal(p_s_sb) ;
+ /* pick up flush/commit requests queued by earlier callers */
+ if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
+ flags |= FLUSH_ALL ;
+ flush = 1 ;
+ }
+ if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
+ flags |= COMMIT_NOW ;
+ commit_now = 1 ;
+ }
+
+ /* check_journal_end locks the journal, and unlocks if it does not return 1
+ ** it tells us if we should continue with the journal_end, or just return
+ */
+ if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
+ return 0 ;
+ }
+
+ /* check_journal_end might set these, check again */
+ if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
+ flush = 1 ;
+ }
+ if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
+ commit_now = 1 ;
+ }
+ /*
+ ** j must wait means we have to flush the log blocks, and the real blocks for
+ ** this transaction
+ */
+ if (SB_JOURNAL(p_s_sb)->j_must_wait > 0) {
+ flush = 1 ;
+ }
+
+ rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
+ /* setup description block */
+ d_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start, p_s_sb->s_blocksize) ;
+ mark_buffer_uptodate(d_bh, 1) ;
+ desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
+ memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
+ memcpy(desc->j_magic, JOURNAL_DESC_MAGIC, 8) ;
+ desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
+
+ /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
+ c_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) +
+ ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT),
+ p_s_sb->s_blocksize) ;
+ commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
+ memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
+ commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
+ mark_buffer_uptodate(c_bh, 1) ;
+
+ /* init this journal list */
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_timestamp = SB_JOURNAL(p_s_sb)->j_trans_start_time ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_bh = c_bh ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_start = SB_JOURNAL(p_s_sb)->j_start ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len = SB_JOURNAL(p_s_sb)->j_len ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_nonzerolen), SB_JOURNAL(p_s_sb)->j_len) ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_left), SB_JOURNAL(p_s_sb)->j_len + 2);
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = NULL ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
+
+ /* which is faster, locking/unlocking at the start and end of the for
+ ** or locking once per iteration around the insert_journal_hash?
+ ** eitherway, we are write locking insert_journal_hash. The ENTIRE FOR
+ ** LOOP MUST not cause schedule to occur.
+ */
+
+ /* for each real block, add it to the journal list hash,
+ ** copy into real block index array in the commit or desc block
+ */
+ for (i = 0, cn = SB_JOURNAL(p_s_sb)->j_first ; cn ; cn = cn->next, i++) {
+ if (test_bit(BH_JDirty, &cn->bh->b_state) ) {
+ jl_cn = get_cnode(p_s_sb) ;
+ if (!jl_cn) {
+ reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
+ }
+ if (i == 0) {
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = jl_cn ;
+ }
+ jl_cn->prev = last_cn ;
+ jl_cn->next = NULL ;
+ if (last_cn) {
+ last_cn->next = jl_cn ;
+ }
+ last_cn = jl_cn ;
+ /* a block inside the log area must never itself be logged */
+ if (cn->bh->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
+ cn->bh->b_blocknr < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
+ reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
+ }
+ jl_cn->blocknr = cn->bh->b_blocknr ;
+ jl_cn->state = 0 ;
+ jl_cn->dev = cn->bh->b_dev ;
+ jl_cn->bh = cn->bh ;
+ jl_cn->jlist = SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb) ;
+ insert_journal_hash(SB_JOURNAL(p_s_sb)->j_list_hash_table, jl_cn) ;
+ /* first half of the real block numbers go in the desc block, the
+ ** rest in the commit block
+ */
+ if (i < JOURNAL_TRANS_HALF) {
+ desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
+ } else {
+ commit->j_realblock[i - JOURNAL_TRANS_HALF] = cpu_to_le32(cn->bh->b_blocknr) ;
+ }
+ } else {
+ i-- ;
+ }
+ }
+
+ desc->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len) ;
+ desc->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
+ desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
+ commit->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len) ;
+
+ /* special check in case all buffers in the journal were marked for not logging */
+ if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ brelse(d_bh) ;
+ brelse(c_bh) ;
+ unlock_journal(p_s_sb) ;
+printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
+ wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ return 0 ;
+ }
+
+ /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
+ cur_write_start = SB_JOURNAL(p_s_sb)->j_start ;
+ cur_blocks_left = SB_JOURNAL(p_s_sb)->j_len ;
+ cn = SB_JOURNAL(p_s_sb)->j_first ;
+ jindex = 1 ; /* start at one so we don't get the desc again */
+ while(cur_blocks_left > 0) {
+ /* copy all the real blocks into log area. dirty log blocks */
+ if (test_bit(BH_JDirty, &cn->bh->b_state)) {
+ struct buffer_head *tmp_bh ;
+ tmp_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) +
+ ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT),
+ p_s_sb->s_blocksize) ;
+ mark_buffer_uptodate(tmp_bh, 1) ;
+ memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
+ jindex++ ;
+ } else {
+ /* JDirty cleared sometime during transaction. don't log this one */
+ printk("journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!\n") ;
+ }
+ cn = cn->next ;
+ cur_blocks_left-- ;
+ }
+
+ /* we are done with both the c_bh and d_bh, but
+ ** c_bh must be written after all other commit blocks,
+ ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
+ */
+
+ /* now loop through and mark all buffers from this transaction as JDirty_wait
+ ** clear the JDirty bit, clear BH_JNew too.
+ ** if they weren't JDirty, they weren't logged, just relse them and move on
+ */
+ cn = SB_JOURNAL(p_s_sb)->j_first ;
+ while(cn) {
+ clear_bit(BH_JNew, &(cn->bh->b_state)) ;
+ if (test_bit(BH_JDirty, &(cn->bh->b_state))) {
+ set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ;
+ clear_bit(BH_JDirty, &(cn->bh->b_state)) ;
+ } else {
+ brelse(cn->bh) ;
+ }
+ next = cn->next ;
+ free_cnode(p_s_sb, cn) ;
+ cn = next ;
+ }
+
+ /* unlock the journal list for committing and flushing */
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 0) ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 0) ;
+
+ /* advance to the next journal list slot for the next transaction */
+ orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
+ jindex = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
+ SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;
+
+ /* make sure to flush any data converted from direct items to
+ ** indirect items before allowing the commit blocks to reach the
+ ** disk
+ */
+ flush_pages_before_commit(th, p_s_sb) ;
+
+ /* honor the flush and async wishes from the caller */
+ if (flush) {
+
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
+ flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex , 1) ;
+ } else if (commit_now) {
+ if (wait_on_commit) {
+ flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
+ } else {
+ commit_flush_async(p_s_sb, orig_jindex) ;
+ }
+ }
+
+ /* reset journal values for the next transaction */
+ old_start = SB_JOURNAL(p_s_sb)->j_start ;
+ SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % JOURNAL_BLOCK_COUNT;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
+ SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
+ SB_JOURNAL(p_s_sb)->j_last = NULL ;
+ SB_JOURNAL(p_s_sb)->j_first = NULL ;
+ SB_JOURNAL(p_s_sb)->j_len = 0 ;
+ SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
+ SB_JOURNAL(p_s_sb)->j_trans_id++ ;
+ SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
+ SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
+ SB_JOURNAL(p_s_sb)->j_next_full_flush = 0 ;
+ SB_JOURNAL(p_s_sb)->j_next_async_flush = 0 ;
+ init_journal_hash(p_s_sb) ;
+
+ /* if the next transaction has any chance of wrapping, flush
+ ** transactions that might get overwritten. If any journal lists are very
+ ** old flush them as well.
+ */
+ for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
+ jindex = i ;
+ if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && SB_JOURNAL(p_s_sb)->j_start <= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
+ if ((SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
+ flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
+ }
+ } else if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
+ (SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) > JOURNAL_BLOCK_COUNT) {
+ /* the next transaction wraps around the end of the log area */
+ if (((SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) % JOURNAL_BLOCK_COUNT) >=
+ SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
+ flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
+ }
+ }
+ /* this check should always be run, to send old lists to disk */
+ if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
+ SB_JOURNAL_LIST(p_s_sb)[jindex].j_timestamp <
+ (CURRENT_TIME - (JOURNAL_MAX_TRANS_AGE * 4))) {
+ flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
+ }
+ }
+
+ /* if the next journal_list is still in use, flush it */
+ if (SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len != 0) {
+ flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb), 1) ;
+ }
+
+ /* we don't want anyone flushing the new transaction's list */
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
+ atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
+ SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb) +
+ SB_JOURNAL_LIST_INDEX(p_s_sb)) ;
+
+ if (!(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap)) {
+ reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
+ }
+ unlock_journal(p_s_sb) ;
+ atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
+ /* wake up any body waiting to join. */
+ wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ return 0 ;
+}
+
+
+
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
new file mode 100644
index 000000000..c18f3bdd0
--- /dev/null
+++ b/fs/reiserfs/lbalance.c
@@ -0,0 +1,1326 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+/* these are used in do_balance.c */
+
+/* leaf_move_items
+ leaf_shift_left
+ leaf_shift_right
+ leaf_delete_items
+ leaf_insert_into_buf
+ leaf_paste_in_buffer
+ leaf_cut_from_buffer
+ leaf_paste_entries
+ */
+
+
+/* copy copy_count entries from source directory item to dest buffer (creating new item if needed) */
+static void leaf_copy_dir_entries (struct buffer_info * dest_bi, struct buffer_head * source,
+ int last_first, int item_num, int from, int copy_count)
+{
+ struct buffer_head * dest = dest_bi->bi_bh;
+ int item_num_in_dest; /* either the number of target item,
+ or if we must create a new item,
+ the number of the item we will
+ create it next to */
+ struct item_head * ih;
+ struct reiserfs_de_head * deh;
+ int copy_records_len; /* length of all records in item to be copied */
+ char * records;
+
+ ih = B_N_PITEM_HEAD (source, item_num);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!is_direntry_le_ih (ih))
+ reiserfs_panic(0, "vs-10000: leaf_copy_dir_entries: item must be directory item");
+#endif
+
+ /* length of all record to be copied and first byte of the last of them */
+ deh = B_I_DEH (source, ih);
+ if (copy_count) {
+ copy_records_len = (from ? deh[from - 1].deh_location : ih->ih_item_len) -
+ deh[from + copy_count - 1].deh_location;
+ records = source->b_data + ih->ih_item_location + deh[from + copy_count - 1].deh_location;
+ } else {
+ copy_records_len = 0;
+ records = 0;
+ }
+
+ /* when copy last to first, dest buffer can contain 0 items */
+ item_num_in_dest = (last_first == LAST_TO_FIRST) ? (( B_NR_ITEMS(dest) ) ? 0 : -1) : (B_NR_ITEMS(dest) - 1);
+
+ /* if there are no items in dest or the first/last item in dest is not item of the same directory */
+ if ( (item_num_in_dest == - 1) ||
+#ifdef REISERFS_FSCK
+ (last_first == FIRST_TO_LAST && are_items_mergeable (B_N_PITEM_HEAD (dest, item_num_in_dest), ih, dest->b_size) == 0) ||
+ (last_first == LAST_TO_FIRST && are_items_mergeable (ih, B_N_PITEM_HEAD (dest, item_num_in_dest), dest->b_size) == 0)) {
+#else
+ (last_first == FIRST_TO_LAST && le_key_k_offset (ih_version (ih), &(ih->ih_key)) == DOT_OFFSET) ||
+ (last_first == LAST_TO_FIRST && comp_short_le_keys/*COMP_SHORT_KEYS*/ (&ih->ih_key, B_N_PKEY (dest, item_num_in_dest)))) {
+#endif
+ /* create new item in dest */
+ struct item_head new_ih;
+
+ /* form item header */
+ memcpy (&new_ih.ih_key, &ih->ih_key, KEY_SIZE);
+ new_ih.ih_version = cpu_to_le16 (ITEM_VERSION_1);
+ /* calculate item len */
+ new_ih.ih_item_len = cpu_to_le16 (DEH_SIZE * copy_count + copy_records_len);
+ I_ENTRY_COUNT(&new_ih) = 0;
+
+ if (last_first == LAST_TO_FIRST) {
+ /* form key by the following way */
+ if (from < I_ENTRY_COUNT(ih)) {
+ set_le_ih_k_offset (&new_ih, cpu_to_le32 (le32_to_cpu (deh[from].deh_offset)));
+ /*memcpy (&new_ih.ih_key.k_offset, &deh[from].deh_offset, SHORT_KEY_SIZE);*/
+ } else {
+ /* no entries will be copied to this item in this function */
+ set_le_ih_k_offset (&new_ih, cpu_to_le32 (U32_MAX));
+ /* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we -1 */
+ }
+ set_le_key_k_type (ITEM_VERSION_1, &(new_ih.ih_key), TYPE_DIRENTRY);
+ }
+
+ /* insert item into dest buffer */
+ leaf_insert_into_buf (dest_bi, (last_first == LAST_TO_FIRST) ? 0 : B_NR_ITEMS(dest), &new_ih, NULL, 0);
+ } else {
+ /* prepare space for entries */
+ leaf_paste_in_buffer (dest_bi, (last_first==FIRST_TO_LAST) ? (B_NR_ITEMS(dest) - 1) : 0, MAX_US_INT,
+ DEH_SIZE * copy_count + copy_records_len, records, 0
+ );
+ }
+
+ item_num_in_dest = (last_first == FIRST_TO_LAST) ? (B_NR_ITEMS(dest)-1) : 0;
+
+ leaf_paste_entries (dest_bi->bi_bh, item_num_in_dest,
+ (last_first == FIRST_TO_LAST) ? I_ENTRY_COUNT(B_N_PITEM_HEAD (dest, item_num_in_dest)) : 0,
+ copy_count, deh + from, records,
+ DEH_SIZE * copy_count + copy_records_len
+ );
+}
+
+
+/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
+ part of it or nothing (see the return 0 below) from SOURCE to the end
+ (if last_first) or beginning (!last_first) of the DEST */
+/* returns 1 if anything was copied, else 0 */
+static int leaf_copy_boundary_item (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+ int bytes_or_entries)
+{
+ struct buffer_head * dest = dest_bi->bi_bh;
+ int dest_nr_item, src_nr_item; /* number of items in the source and destination buffers */
+ struct item_head * ih;
+ struct item_head * dih;
+
+ dest_nr_item = B_NR_ITEMS(dest);
+
+ if ( last_first == FIRST_TO_LAST ) {
+ /* if ( DEST is empty or first item of SOURCE and last item of DEST are the items of different objects
+ or of different types ) then there is no need to treat this item differently from the other items
+ that we copy, so we return */
+ ih = B_N_PITEM_HEAD (src, 0);
+ dih = B_N_PITEM_HEAD (dest, dest_nr_item - 1);
+#ifdef REISERFS_FSCK
+ if (!dest_nr_item || (are_items_mergeable (dih, ih, src->b_size) == 0))
+#else
+ if (!dest_nr_item || (!op_is_left_mergeable (&(ih->ih_key), src->b_size)))
+#endif
+ /* there is nothing to merge */
+ return 0;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! ih->ih_item_len )
+ reiserfs_panic (0, "vs-10010: leaf_copy_boundary_item: item can not have empty dynamic length");
+#endif
+
+ if ( is_direntry_le_ih (ih) ) {
+ if ( bytes_or_entries == -1 )
+ /* copy all entries to dest */
+ bytes_or_entries = le16_to_cpu (ih->u.ih_entry_count);
+ leaf_copy_dir_entries (dest_bi, src, FIRST_TO_LAST, 0, 0, bytes_or_entries);
+ return 1;
+ }
+
+ /* copy part of the body of the first item of SOURCE to the end of the body of the last item of the DEST
+ part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; don't create new item header
+ */
+ if ( bytes_or_entries == -1 )
+ bytes_or_entries = le16_to_cpu (ih->ih_item_len);
+
+#ifdef CONFIG_REISERFS_CHECK
+ else {
+ if (bytes_or_entries == le16_to_cpu (ih->ih_item_len) && is_indirect_le_ih(ih))
+ if (get_ih_free_space (ih))
+ reiserfs_panic (0, "vs-10020: leaf_copy_boundary_item: "
+ "last unformatted node must be filled entirely (%h)",
+ ih);
+ }
+#endif
+
+ /* merge first item (or its part) of src buffer with the last
+ item of dest buffer. Both are of the same file */
+ leaf_paste_in_buffer (dest_bi,
+ dest_nr_item - 1, dih->ih_item_len, bytes_or_entries, B_I_PITEM(src,ih), 0
+ );
+
+ if (is_indirect_le_ih (dih)) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (get_ih_free_space (dih))
+ reiserfs_panic (0, "vs-10030: leaf_copy_boundary_item: "
+ "merge to left: last unformatted node of non-last indirect item %h must have zerto free space",
+ ih);
+#endif
+ if (bytes_or_entries == le16_to_cpu (ih->ih_item_len))
+ set_ih_free_space (dih, get_ih_free_space (ih));
+ }
+
+ return 1;
+ }
+
+
+ /* copy boundary item to right (last_first == LAST_TO_FIRST) */
+
+ /* ( DEST is empty or last item of SOURCE and first item of DEST
+ are the items of different object or of different types )
+ */
+ src_nr_item = B_NR_ITEMS (src);
+ ih = B_N_PITEM_HEAD (src, src_nr_item - 1);
+ dih = B_N_PITEM_HEAD (dest, 0);
+
+#ifdef REISERFS_FSCK
+ if (!dest_nr_item || are_items_mergeable (ih, dih, src->b_size) == 0)
+#else
+ if (!dest_nr_item || !op_is_left_mergeable (&(dih->ih_key), src->b_size))
+#endif
+ return 0;
+
+ if ( is_direntry_le_ih (ih)) {
+ if ( bytes_or_entries == -1 )
+ /* bytes_or_entries = entries number in last item body of SOURCE */
+ bytes_or_entries = le16_to_cpu (ih->u.ih_entry_count);
+
+ leaf_copy_dir_entries (dest_bi, src, LAST_TO_FIRST, src_nr_item - 1, le16_to_cpu (ih->u.ih_entry_count) - bytes_or_entries, bytes_or_entries);
+ return 1;
+ }
+
+ /* copy part of the body of the last item of SOURCE to the begin of the body of the first item of the DEST;
+     part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; change first item key of the DEST;
+ don't create new item header
+ */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (is_indirect_le_ih(ih) && get_ih_free_space (ih))
+ reiserfs_panic (0, "vs-10040: leaf_copy_boundary_item: "
+ "merge to right: last unformatted node of non-last indirect item must be filled entirely (%h)",
+ ih);
+#endif
+
+ if ( bytes_or_entries == -1 ) {
+ /* bytes_or_entries = length of last item body of SOURCE */
+ bytes_or_entries = ih->ih_item_len;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (le_ih_k_offset (dih) != le_ih_k_offset (ih) + op_bytes_number (ih, src->b_size))
+ reiserfs_panic (0, "vs-10050: leaf_copy_boundary_item: items %h and %h do not match", ih, dih);
+#endif
+
+ /* change first item key of the DEST */
+ set_le_ih_k_offset (dih, le_ih_k_offset (ih));
+
+ /* item becomes non-mergeable */
+ /* or mergeable if left item was */
+ set_le_ih_k_type (dih, le_ih_k_type (ih));
+ } else {
+ /* merge to right only part of item */
+#ifdef CONFIG_REISERFS_CHECK
+ if ( le16_to_cpu (ih->ih_item_len) <= bytes_or_entries )
+ reiserfs_panic (0, "vs-10060: leaf_copy_boundary_item: no so much bytes %lu (needed %lu)",
+ ih->ih_item_len, bytes_or_entries);
+#endif
+
+ /* change first item key of the DEST */
+ if ( is_direct_le_ih (dih) ) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (le_ih_k_offset (dih) <= (unsigned long)bytes_or_entries)
+ reiserfs_panic (0, "vs-10070: leaf_copy_boundary_item: dih %h, bytes_or_entries(%d)",
+ dih, bytes_or_entries);
+#endif
+ set_le_ih_k_offset (dih, le_ih_k_offset (dih) - bytes_or_entries);
+ } else {
+#ifdef CONFIG_REISERFS_CHECK
+ if (le_ih_k_offset (dih) <= (bytes_or_entries / UNFM_P_SIZE) * dest->b_size )
+ reiserfs_panic (0, "vs-10080: leaf_copy_boundary_item: dih %h, bytes_or_entries(%d)",
+ dih, (bytes_or_entries/UNFM_P_SIZE)*dest->b_size);
+#endif
+ set_le_ih_k_offset (dih, le_ih_k_offset (dih) - ((bytes_or_entries / UNFM_P_SIZE) * dest->b_size));
+ }
+ }
+
+ leaf_paste_in_buffer (dest_bi, 0, 0, bytes_or_entries, B_I_PITEM(src,ih) + ih->ih_item_len - bytes_or_entries, 0);
+ return 1;
+}
+
+
+/* copy cpy_num items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means, that we copy cpy_num items beginning from first-th item in src to tail of dest
+ * last_first == LAST_TO_FIRST means, that we copy cpy_num items beginning from first-th item in src to head of dest
+ */
+static void leaf_copy_items_entirely (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+ int first, int cpy_num)
+{
+ struct buffer_head * dest;
+ int nr;
+ int dest_before;
+ int last_loc, last_inserted_loc, location;
+ int i, j;
+ struct block_head * blkh;
+ struct item_head * ih;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (last_first != LAST_TO_FIRST && last_first != FIRST_TO_LAST)
+ reiserfs_panic (0, "vs-10090: leaf_copy_items_entirely: bad last_first parameter %d", last_first);
+
+ if (B_NR_ITEMS (src) - first < cpy_num)
+ reiserfs_panic (0, "vs-10100: leaf_copy_items_entirely: too few items in source %d, required %d from %d",
+ B_NR_ITEMS(src), cpy_num, first);
+
+ if (cpy_num < 0)
+ reiserfs_panic (0, "vs-10110: leaf_copy_items_entirely: can not copy negative amount of items");
+
+ if ( ! dest_bi )
+ reiserfs_panic (0, "vs-10120: leaf_copy_items_entirely: can not copy negative amount of items");
+#endif
+
+ dest = dest_bi->bi_bh;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( ! dest )
+ reiserfs_panic (0, "vs-10130: leaf_copy_items_entirely: can not copy negative amount of items");
+#endif
+
+ if (cpy_num == 0)
+ return;
+
+ nr = le16_to_cpu ((blkh = B_BLK_HEAD(dest))->blk_nr_item);
+
+  /* we will insert items before 0-th or nr-th item in dest buffer. It depends on the last_first parameter */
+ dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;
+
+ /* location of head of first new item */
+ ih = B_N_PITEM_HEAD (dest, dest_before);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (le16_to_cpu (blkh->blk_free_space) < cpy_num * IH_SIZE) {
+ reiserfs_panic (0, "vs-10140: leaf_copy_items_entirely: "
+ "not enough free space for headers %d (needed %d)",
+ B_FREE_SPACE (dest), cpy_num * IH_SIZE);
+ }
+#endif
+
+ /* prepare space for headers */
+ memmove (ih + cpy_num, ih, (nr-dest_before) * IH_SIZE);
+
+ /* copy item headers */
+ memcpy (ih, B_N_PITEM_HEAD (src, first), cpy_num * IH_SIZE);
+
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - IH_SIZE * cpy_num);
+
+ /* location of unmovable item */
+ j = location = (dest_before == 0) ? dest->b_size : (ih-1)->ih_item_location;
+ for (i = dest_before; i < nr + cpy_num; i ++)
+ ih[i-dest_before].ih_item_location =
+ (location -= ih[i-dest_before].ih_item_len);
+
+ /* prepare space for items */
+ last_loc = ih[nr+cpy_num-1-dest_before].ih_item_location;
+ last_inserted_loc = ih[cpy_num-1].ih_item_location;
+
+ /* check free space */
+#ifdef CONFIG_REISERFS_CHECK
+ if (le16_to_cpu (blkh->blk_free_space) < j - last_inserted_loc) {
+ reiserfs_panic (0, "vs-10150: leaf_copy_items_entirely: not enough free space for items %d (needed %d)",
+ le16_to_cpu (blkh->blk_free_space), j - last_inserted_loc);
+ }
+#endif
+
+ memmove (dest->b_data + last_loc,
+ dest->b_data + last_loc + j - last_inserted_loc,
+ last_inserted_loc - last_loc);
+
+ /* copy items */
+ memcpy (dest->b_data + last_inserted_loc, B_N_PITEM(src,(first + cpy_num - 1)),
+ j - last_inserted_loc);
+
+ /* sizes, item number */
+ blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) + cpy_num);
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - (j - last_inserted_loc));
+
+ do_balance_mark_leaf_dirty (dest_bi->tb, dest, 0);
+
+ if (dest_bi->bi_parent) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (B_N_CHILD (dest_bi->bi_parent, dest_bi->bi_position)->dc_block_number != dest->b_blocknr) {
+ reiserfs_panic (0, "vs-10160: leaf_copy_items_entirely: "
+ "block number in bh does not match to field in disk_child structure %lu and %lu",
+ dest->b_blocknr, B_N_CHILD (dest_bi->bi_parent, dest_bi->bi_position)->dc_block_number);
+ }
+#endif
+ B_N_CHILD (dest_bi->bi_parent, dest_bi->bi_position)->dc_size +=
+ j - last_inserted_loc + IH_SIZE * cpy_num;
+
+ do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent, 0);
+ }
+}
+
+
+/* This function splits the (liquid) item into two items (useful when
+ shifting part of an item into another node.) */
+static void leaf_item_bottle (struct buffer_info * dest_bi, struct buffer_head * src, int last_first,
+ int item_num, int cpy_bytes)
+{
+ struct buffer_head * dest = dest_bi->bi_bh;
+ struct item_head * ih;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( cpy_bytes == -1 )
+ reiserfs_panic (0, "vs-10170: leaf_item_bottle: bytes == - 1 means: do not split item");
+#endif
+
+ if ( last_first == FIRST_TO_LAST ) {
+ /* if ( if item in position item_num in buffer SOURCE is directory item ) */
+ if (is_direntry_le_ih (ih = B_N_PITEM_HEAD(src,item_num)))
+ leaf_copy_dir_entries (dest_bi, src, FIRST_TO_LAST, item_num, 0, cpy_bytes);
+ else {
+ struct item_head n_ih;
+
+ /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
+ part defined by 'cpy_bytes'; create new item header; change old item_header (????);
+ n_ih = new item_header;
+ */
+ memcpy (&n_ih, ih, IH_SIZE);
+ n_ih.ih_item_len = cpu_to_le16 (cpy_bytes);
+ if (is_indirect_le_ih (ih)) {
+#ifdef CONFIG_REISERFS_CHECK
+ if (cpy_bytes == le16_to_cpu (ih->ih_item_len) && get_ih_free_space (ih))
+ reiserfs_panic (0, "vs-10180: leaf_item_bottle: "
+ "when whole indirect item is bottle to left neighbor, it must have free_space==0 (not %lu)",
+ get_ih_free_space (ih));
+#endif
+ set_ih_free_space (&n_ih, 0);
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (op_is_left_mergeable (&(ih->ih_key), src->b_size))
+ reiserfs_panic (0, "vs-10190: leaf_item_bottle: bad mergeability of item %h", ih);
+#endif
+ n_ih.ih_version = ih->ih_version;;
+ leaf_insert_into_buf (dest_bi, B_NR_ITEMS(dest), &n_ih, B_N_PITEM (src, item_num), 0);
+ }
+ } else {
+ /* if ( if item in position item_num in buffer SOURCE is directory item ) */
+ if (is_direntry_le_ih(ih = B_N_PITEM_HEAD (src, item_num)))
+ leaf_copy_dir_entries (dest_bi, src, LAST_TO_FIRST, item_num, I_ENTRY_COUNT(ih) - cpy_bytes, cpy_bytes);
+ else {
+ struct item_head n_ih;
+
+ /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
+ part defined by 'cpy_bytes'; create new item header;
+ n_ih = new item_header;
+ */
+ memcpy (&n_ih, ih, SHORT_KEY_SIZE);
+ n_ih.ih_version = cpu_to_le16 (ih_version (ih));
+ if (is_direct_le_ih (ih)) {
+ set_le_ih_k_offset (&n_ih, le_ih_k_offset (ih) + le16_to_cpu (ih->ih_item_len) - cpy_bytes);
+ set_le_ih_k_type (&n_ih, TYPE_DIRECT);
+ set_ih_free_space (&n_ih, MAX_US_INT);
+ } else {
+ /* indirect item */
+#ifdef CONFIG_REISERFS_CHECK
+ if (!cpy_bytes && get_ih_free_space (ih))
+ reiserfs_panic (0, "vs-10200: leaf_item_bottle: ih->ih_free_space must be 0 when indirect item will be appended");
+#endif
+ set_le_ih_k_offset (&n_ih, le_ih_k_offset (ih) + (le16_to_cpu (ih->ih_item_len) - cpy_bytes) / UNFM_P_SIZE * dest->b_size);
+ set_le_ih_k_type (&n_ih, TYPE_INDIRECT);
+ set_ih_free_space (&n_ih, get_ih_free_space (ih));
+ }
+
+ /* set item length */
+ n_ih.ih_item_len = cpu_to_le16 (cpy_bytes);
+ n_ih.ih_version = cpu_to_le16 (le16_to_cpu (ih->ih_version));
+ leaf_insert_into_buf (dest_bi, 0, &n_ih, B_N_PITEM(src,item_num) + le16_to_cpu (ih->ih_item_len) - cpy_bytes, 0);
+ }
+ }
+}
+
+
+/* If cpy_bytes equals minus one then copy cpy_num whole items from SOURCE to DEST.
+   If cpy_bytes is not equal to minus one then copy cpy_num-1 whole items from SOURCE to DEST.
+ From last item copy cpy_num bytes for regular item and cpy_num directory entries for
+ directory item. */
+static int leaf_copy_items (struct buffer_info * dest_bi, struct buffer_head * src, int last_first, int cpy_num,
+ int cpy_bytes)
+{
+ struct buffer_head * dest;
+ int pos, i, src_nr_item, bytes;
+
+ dest = dest_bi->bi_bh;
+#ifdef CONFIG_REISERFS_CHECK
+ if (!dest || !src)
+ reiserfs_panic (0, "vs-10210: leaf_copy_items: !dest || !src");
+
+ if ( last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST )
+ reiserfs_panic (0, "vs-10220: leaf_copy_items: last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST");
+
+ if ( B_NR_ITEMS(src) < cpy_num )
+ reiserfs_panic (0, "vs-10230: leaf_copy_items: No enough items: %d, required %d", B_NR_ITEMS(src), cpy_num);
+
+ if ( cpy_num < 0 )
+ reiserfs_panic (0, "vs-10240: leaf_copy_items: cpy_num < 0 (%d)", cpy_num);
+#endif
+
+ if ( cpy_num == 0 )
+ return 0;
+
+ if ( last_first == FIRST_TO_LAST ) {
+ /* copy items to left */
+ pos = 0;
+ if ( cpy_num == 1 )
+ bytes = cpy_bytes;
+ else
+ bytes = -1;
+
+    /* copy the first item, or part of it, or nothing to the end of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes)) */
+ i = leaf_copy_boundary_item (dest_bi, src, FIRST_TO_LAST, bytes);
+ cpy_num -= i;
+ if ( cpy_num == 0 )
+ return i;
+ pos += i;
+ if ( cpy_bytes == -1 )
+ /* copy first cpy_num items starting from position 'pos' of SOURCE to end of DEST */
+ leaf_copy_items_entirely (dest_bi, src, FIRST_TO_LAST, pos, cpy_num);
+ else {
+ /* copy first cpy_num-1 items starting from position 'pos-1' of the SOURCE to the end of the DEST */
+ leaf_copy_items_entirely (dest_bi, src, FIRST_TO_LAST, pos, cpy_num-1);
+
+ /* copy part of the item which number is cpy_num+pos-1 to the end of the DEST */
+ leaf_item_bottle (dest_bi, src, FIRST_TO_LAST, cpy_num+pos-1, cpy_bytes);
+ }
+ } else {
+ /* copy items to right */
+ src_nr_item = B_NR_ITEMS (src);
+ if ( cpy_num == 1 )
+ bytes = cpy_bytes;
+ else
+ bytes = -1;
+
+    /* copy the last item, or part of it, or nothing to the beginning of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes)); */
+ i = leaf_copy_boundary_item (dest_bi, src, LAST_TO_FIRST, bytes);
+
+ cpy_num -= i;
+ if ( cpy_num == 0 )
+ return i;
+
+ pos = src_nr_item - cpy_num - i;
+ if ( cpy_bytes == -1 ) {
+ /* starting from position 'pos' copy last cpy_num items of SOURCE to begin of DEST */
+ leaf_copy_items_entirely (dest_bi, src, LAST_TO_FIRST, pos, cpy_num);
+ } else {
+ /* copy last cpy_num-1 items starting from position 'pos+1' of the SOURCE to the begin of the DEST; */
+ leaf_copy_items_entirely (dest_bi, src, LAST_TO_FIRST, pos+1, cpy_num-1);
+
+ /* copy part of the item which number is pos to the begin of the DEST */
+ leaf_item_bottle (dest_bi, src, LAST_TO_FIRST, pos, cpy_bytes);
+ }
+ }
+ return i;
+}
+
+
+/* there are types of coping: from S[0] to L[0], from S[0] to R[0],
+ from R[0] to L[0]. for each of these we have to define parent and
+ positions of destination and source buffers */
+static void leaf_define_dest_src_infos (int shift_mode, struct tree_balance * tb, struct buffer_info * dest_bi,
+ struct buffer_info * src_bi, int * first_last,
+ struct buffer_head * Snew)
+{
+#ifdef CONFIG_REISERFS_CHECK
+ memset (dest_bi, 0, sizeof (struct buffer_info));
+ memset (src_bi, 0, sizeof (struct buffer_info));
+#endif
+
+ /* define dest, src, dest parent, dest position */
+ switch (shift_mode) {
+ case LEAF_FROM_S_TO_L: /* it is used in leaf_shift_left */
+ src_bi->tb = tb;
+ src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+ src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0); /* src->b_item_order */
+ dest_bi->tb = tb;
+ dest_bi->bi_bh = tb->L[0];
+ dest_bi->bi_parent = tb->FL[0];
+ dest_bi->bi_position = get_left_neighbor_position (tb, 0);
+ *first_last = FIRST_TO_LAST;
+ break;
+
+ case LEAF_FROM_S_TO_R: /* it is used in leaf_shift_right */
+ src_bi->tb = tb;
+ src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+ src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);
+ dest_bi->tb = tb;
+ dest_bi->bi_bh = tb->R[0];
+ dest_bi->bi_parent = tb->FR[0];
+ dest_bi->bi_position = get_right_neighbor_position (tb, 0);
+ *first_last = LAST_TO_FIRST;
+ break;
+
+ case LEAF_FROM_R_TO_L: /* it is used in balance_leaf_when_delete */
+ src_bi->tb = tb;
+ src_bi->bi_bh = tb->R[0];
+ src_bi->bi_parent = tb->FR[0];
+ src_bi->bi_position = get_right_neighbor_position (tb, 0);
+ dest_bi->tb = tb;
+ dest_bi->bi_bh = tb->L[0];
+ dest_bi->bi_parent = tb->FL[0];
+ dest_bi->bi_position = get_left_neighbor_position (tb, 0);
+ *first_last = FIRST_TO_LAST;
+ break;
+
+ case LEAF_FROM_L_TO_R: /* it is used in balance_leaf_when_delete */
+ src_bi->tb = tb;
+ src_bi->bi_bh = tb->L[0];
+ src_bi->bi_parent = tb->FL[0];
+ src_bi->bi_position = get_left_neighbor_position (tb, 0);
+ dest_bi->tb = tb;
+ dest_bi->bi_bh = tb->R[0];
+ dest_bi->bi_parent = tb->FR[0];
+ dest_bi->bi_position = get_right_neighbor_position (tb, 0);
+ *first_last = LAST_TO_FIRST;
+ break;
+
+ case LEAF_FROM_S_TO_SNEW:
+ src_bi->tb = tb;
+ src_bi->bi_bh = PATH_PLAST_BUFFER (tb->tb_path);
+ src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, 0);
+ src_bi->bi_position = PATH_H_B_ITEM_ORDER (tb->tb_path, 0);
+ dest_bi->tb = tb;
+ dest_bi->bi_bh = Snew;
+ dest_bi->bi_parent = 0;
+ dest_bi->bi_position = 0;
+ *first_last = LAST_TO_FIRST;
+ break;
+
+ default:
+ reiserfs_panic (0, "vs-10250: leaf_define_dest_src_infos: shift type is unknown (%d)", shift_mode);
+ }
+#ifdef CONFIG_REISERFS_CHECK
+ if (src_bi->bi_bh == 0 || dest_bi->bi_bh == 0) {
+ reiserfs_panic (0, "vs-10260: leaf_define_dest_src_etc: mode==%d, source (%p) or dest (%p) buffer is initialized incorrectly",
+ shift_mode, src_bi->bi_bh, dest_bi->bi_bh);
+ }
+#endif
+}
+
+
+
+
+/* copy mov_num items and mov_bytes of the (mov_num-1)th item to
+ neighbor. Delete them from source */
+int leaf_move_items (int shift_mode, struct tree_balance * tb, int mov_num, int mov_bytes, struct buffer_head * Snew)
+{
+ int ret_value;
+ struct buffer_info dest_bi, src_bi;
+ int first_last;
+
+ leaf_define_dest_src_infos (shift_mode, tb, &dest_bi, &src_bi, &first_last, Snew);
+
+ ret_value = leaf_copy_items (&dest_bi, src_bi.bi_bh, first_last, mov_num, mov_bytes);
+
+ leaf_delete_items (&src_bi, first_last, (first_last == FIRST_TO_LAST) ? 0 : (B_NR_ITEMS(src_bi.bi_bh) - mov_num), mov_num, mov_bytes);
+
+
+ return ret_value;
+}
+
+
+/* Shift shift_num items (and shift_bytes of last shifted item if shift_bytes != -1)
+ from S[0] to L[0] and replace the delimiting key */
+int leaf_shift_left (struct tree_balance * tb, int shift_num, int shift_bytes)
+{
+ struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
+ int i;
+
+ /* move shift_num (and shift_bytes bytes) items from S[0] to left neighbor L[0] */
+ i = leaf_move_items (LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, 0);
+
+ if ( shift_num ) {
+ if (B_NR_ITEMS (S0) == 0) { /* number of items in S[0] == 0 */
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( shift_bytes != -1 )
+ reiserfs_panic (tb->tb_sb, "vs-10270: leaf_shift_left: S0 is empty now, but shift_bytes != -1 (%d)", shift_bytes);
+
+ if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) {
+ print_cur_tb ("vs-10275");
+ reiserfs_panic (tb->tb_sb, "vs-10275: leaf_shift_left: balance condition corrupted (%c)", tb->tb_mode);
+ }
+#endif
+
+ if (PATH_H_POSITION (tb->tb_path, 1) == 0)
+ replace_key (tb, tb->CFL[0], tb->lkey[0], PATH_H_PPARENT (tb->tb_path, 0), 0);
+
+#if 0
+ /* change right_delimiting_key field in L0's block header */
+ copy_key (B_PRIGHT_DELIM_KEY(tb->L[0]), B_PRIGHT_DELIM_KEY (S0));
+#endif
+ } else {
+ /* replace lkey in CFL[0] by 0-th key from S[0]; */
+ replace_key (tb, tb->CFL[0], tb->lkey[0], S0, 0);
+
+#if 0
+ /* change right_delimiting_key field in L0's block header */
+ copy_key (B_PRIGHT_DELIM_KEY(tb->L[0]), B_N_PKEY (S0, 0));
+#endif
+#ifdef CONFIG_REISERFS_CHECK
+ if (shift_bytes != -1 && !(is_direntry_le_ih (B_N_PITEM_HEAD (S0, 0))
+ && !I_ENTRY_COUNT (B_N_PITEM_HEAD (S0, 0)))) {
+ if (!op_is_left_mergeable (B_N_PKEY (S0, 0), S0->b_size)) {
+ reiserfs_panic (tb->tb_sb, "vs-10280: leaf_shift_left: item must be mergeable");
+ }
+ }
+#endif
+ }
+ }
+
+ return i;
+}
+
+
+
+
+
+/* CLEANING STOPPED HERE */
+
+
+
+
+/* Shift shift_num (shift_bytes) items from S[0] to the right neighbor, and replace the delimiting key */
+int leaf_shift_right(
+ struct tree_balance * tb,
+ int shift_num,
+ int shift_bytes
+ )
+{
+ // struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
+ int ret_value;
+
+ /* move shift_num (and shift_bytes) items from S[0] to right neighbor R[0] */
+ ret_value = leaf_move_items (LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, 0);
+
+ /* replace rkey in CFR[0] by the 0-th key from R[0] */
+ if (shift_num) {
+ replace_key (tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+#if 0
+ /* change right_delimiting_key field in S0's block header */
+ copy_key (B_PRIGHT_DELIM_KEY(S0), B_N_PKEY (tb->R[0], 0));
+#endif
+ }
+
+ return ret_value;
+}
+
+
+
+static void leaf_delete_items_entirely (struct buffer_info * bi,
+ int first, int del_num);
+/* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
+ If not.
+ If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
+ the first item. Part defined by del_bytes. Don't delete first item header
+ If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
+ the last item . Part defined by del_bytes. Don't delete last item header.
+*/
+void leaf_delete_items (struct buffer_info * cur_bi, int last_first,
+ int first, int del_num, int del_bytes)
+{
+ struct buffer_head * bh;
+ int item_amount = B_NR_ITEMS (bh = cur_bi->bi_bh);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if ( !bh )
+ reiserfs_panic (0, "leaf_delete_items: 10155: bh is not defined");
+
+ if ( del_num < 0 )
+ reiserfs_panic (0, "leaf_delete_items: 10160: del_num can not be < 0. del_num==%d", del_num);
+
+ if ( first < 0 || first + del_num > item_amount )
+ reiserfs_panic (0, "leaf_delete_items: 10165: invalid number of first item to be deleted (%d) or "
+ "no so much items (%d) to delete (only %d)", first, first + del_num, item_amount);
+#endif
+
+ if ( del_num == 0 )
+ return;
+
+ if ( first == 0 && del_num == item_amount && del_bytes == -1 ) {
+ make_empty_node (cur_bi);
+ do_balance_mark_leaf_dirty (cur_bi->tb, bh, 0);
+ return;
+ }
+
+ if ( del_bytes == -1 )
+ /* delete del_num items beginning from item in position first */
+ leaf_delete_items_entirely (cur_bi, first, del_num);
+ else {
+ if ( last_first == FIRST_TO_LAST ) {
+ /* delete del_num-1 items beginning from item in position first */
+ leaf_delete_items_entirely (cur_bi, first, del_num-1);
+
+ /* delete the part of the first item of the bh
+ do not delete item header
+ */
+ leaf_cut_from_buffer (cur_bi, 0, 0, del_bytes);
+ } else {
+ struct item_head * ih;
+ int len;
+
+ /* delete del_num-1 items beginning from item in position first+1 */
+ leaf_delete_items_entirely (cur_bi, first+1, del_num-1);
+
+ if (is_direntry_le_ih (ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh)-1))) /* the last item is directory */
+ /* len = numbers of directory entries in this item */
+ len = le16_to_cpu (ih->u.ih_entry_count);
+ else
+ /* len = body len of item */
+ len = le16_to_cpu (ih->ih_item_len);
+
+ /* delete the part of the last item of the bh
+ do not delete item header
+ */
+ leaf_cut_from_buffer (cur_bi, B_NR_ITEMS(bh)-1, len - del_bytes, del_bytes);
+ }
+ }
+}
+
+
+/* insert item into the leaf node in position before */
+void leaf_insert_into_buf (struct buffer_info * bi, int before,
+ struct item_head * inserted_item_ih,
+ const char * inserted_item_body,
+ int zeros_number)
+{
+ struct buffer_head * bh = bi->bi_bh;
+ int nr;
+ struct block_head * blkh;
+ struct item_head * ih;
+ int i;
+ int last_loc, unmoved_loc;
+ char * to;
+
+
+ nr = le16_to_cpu ((blkh = B_BLK_HEAD (bh))->blk_nr_item);
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* check free space */
+ if (le16_to_cpu (blkh->blk_free_space) < le16_to_cpu (inserted_item_ih->ih_item_len) + IH_SIZE)
+ reiserfs_panic (0, "leaf_insert_into_buf: 10170: "
+ "not enough free space in block %z, new item %h",
+ bh, inserted_item_ih);
+ if (zeros_number > inserted_item_ih->ih_item_len)
+ reiserfs_panic (0, "vs-10172: leaf_insert_into_buf: "
+ "zero number == %d, item length == %d", zeros_number, inserted_item_ih->ih_item_len);
+#endif /* CONFIG_REISERFS_CHECK */
+
+
+ /* get item new item must be inserted before */
+ ih = B_N_PITEM_HEAD (bh, before);
+
+ /* prepare space for the body of new item */
+ last_loc = nr ? ih[nr - before - 1].ih_item_location : bh->b_size;
+ unmoved_loc = before ? (ih-1)->ih_item_location : bh->b_size;
+
+ memmove (bh->b_data + last_loc - inserted_item_ih->ih_item_len,
+ bh->b_data + last_loc, unmoved_loc - last_loc);
+
+ to = bh->b_data + unmoved_loc - inserted_item_ih->ih_item_len;
+ memset (to, 0, zeros_number);
+ to += zeros_number;
+
+ /* copy body to prepared space */
+ if (inserted_item_body)
+ memmove (to, inserted_item_body, inserted_item_ih->ih_item_len - zeros_number);
+ else
+ memset(to, '\0', inserted_item_ih->ih_item_len - zeros_number);
+
+ /* insert item header */
+ memmove (ih + 1, ih, IH_SIZE * (nr - before));
+ memmove (ih, inserted_item_ih, IH_SIZE);
+
+ /* change locations */
+ for (i = before; i < nr + 1; i ++)
+ ih[i-before].ih_item_location =
+ (unmoved_loc -= ih[i-before].ih_item_len);
+
+ /* sizes, free space, item number */
+ blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) + 1);
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) -
+ (IH_SIZE + inserted_item_ih->ih_item_len));
+
+ do_balance_mark_leaf_dirty (bi->tb, bh, 1);
+
+ if (bi->bi_parent) {
+ B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size += (IH_SIZE + inserted_item_ih->ih_item_len);
+ do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+ }
+}
+
+
+/* paste paste_size bytes to affected_item_num-th item.
+ When item is a directory, this only prepare space for new entries */
+/* bi - leaf buffer and its parent info; pos_in_item - byte offset of
+ the paste inside the item (0 means paste at the head); body may be
+ NULL, in which case the prepared space is zero-filled; the first
+ zeros_number bytes of the pasted region are always cleared before
+ copying from body.
+ NOTE(review): ih_item_location / ih_item_len are adjusted here without
+ le16_to_cpu/cpu_to_le16, unlike leaf_cut_from_buffer below --
+ presumably correct only on little-endian hosts; confirm against the
+ on-disk format conversion rules. */
+void leaf_paste_in_buffer (struct buffer_info * bi, int affected_item_num,
+ int pos_in_item, int paste_size,
+ const char * body,
+ int zeros_number)
+{
+ struct buffer_head * bh = bi->bi_bh;
+ int nr;
+ struct block_head * blkh;
+ struct item_head * ih;
+ int i;
+ int last_loc, unmoved_loc;
+
+
+ nr = le16_to_cpu ((blkh = B_BLK_HEAD(bh))->blk_nr_item);
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* check free space */
+ if (le16_to_cpu (blkh->blk_free_space) < paste_size)
+ reiserfs_panic (0, "leaf_paste_in_buffer: 10175: not enough free space: needed %d, available %d",
+ paste_size, le16_to_cpu (blkh->blk_free_space));
+ if (zeros_number > paste_size) {
+ print_cur_tb ("10177");
+ reiserfs_panic (0, "vs-10177: leaf_paste_in_buffer: zero number == %d, paste_size == %d",
+ zeros_number, paste_size);
+ }
+#endif /* CONFIG_REISERFS_CHECK */
+
+
+ /* item to be appended */
+ ih = B_N_PITEM_HEAD(bh, affected_item_num);
+
+ /* item bodies grow downward from the end of the block: last_loc is
+ the lowest used byte, unmoved_loc the first byte that stays put */
+ last_loc = ih[nr - affected_item_num - 1].ih_item_location;
+ unmoved_loc = affected_item_num ? (ih-1)->ih_item_location : bh->b_size;
+
+ /* prepare space */
+ memmove (bh->b_data + last_loc - paste_size, bh->b_data + last_loc,
+ unmoved_loc - last_loc);
+
+
+ /* change locations */
+ for (i = affected_item_num; i < nr; i ++)
+ ih[i-affected_item_num].ih_item_location -= paste_size;
+
+ if ( body ) {
+ /* directory items only get space prepared here; the caller fills
+ entry heads/records via leaf_paste_entries afterwards */
+ if (!is_direntry_le_ih (ih)) {
+ if (!pos_in_item) {
+ /* shift data to right */
+ memmove (bh->b_data + ih->ih_item_location + paste_size,
+ bh->b_data + ih->ih_item_location, ih->ih_item_len);
+ /* paste data in the head of item */
+ memset (bh->b_data + ih->ih_item_location, 0, zeros_number);
+ memcpy (bh->b_data + ih->ih_item_location + zeros_number, body, paste_size - zeros_number);
+ } else {
+ memset (bh->b_data + unmoved_loc - paste_size, 0, zeros_number);
+ memcpy (bh->b_data + unmoved_loc - paste_size + zeros_number, body, paste_size - zeros_number);
+ }
+ }
+ }
+ else
+ memset(bh->b_data + unmoved_loc - paste_size,'\0',paste_size);
+
+ ih->ih_item_len += paste_size;
+
+ /* change free space */
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) - paste_size);
+
+ do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+
+ if (bi->bi_parent) {
+ B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size += paste_size;
+ do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+ }
+}
+
+
+/* cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
+ does not have free space, so it moves DEHs and remaining records as
+ necessary. Return value is size of removed part of directory item
+ in bytes. */
+/* Caller (leaf_cut_from_buffer) later shrinks ih_item_len by the
+ returned amount; this function only compacts the interior layout.
+ NOTE(review): deh_location fields are adjusted without le16 byte-order
+ conversion -- presumably little-endian only; confirm. */
+static int leaf_cut_entries (
+ struct buffer_head * bh,
+ struct item_head * ih,
+ int from,
+ int del_count
+ )
+{
+ char * item;
+ struct reiserfs_de_head * deh;
+ int prev_record_offset; /* offset of record, that is (from-1)th */
+ char * prev_record; /* */
+ int cut_records_len; /* length of all removed records */
+ int i;
+
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* make sure, that item is directory and there are enough entries to
+ remove */
+ if (!is_direntry_le_ih (ih))
+ reiserfs_panic (0, "leaf_cut_entries: 10180: item is not directory item");
+
+ if (I_ENTRY_COUNT(ih) < from + del_count)
+ reiserfs_panic (0, "leaf_cut_entries: 10185: item contains not enough entries: entry_cout = %d, from = %d, to delete = %d",
+ I_ENTRY_COUNT(ih), from, del_count);
+#endif
+
+ if (del_count == 0)
+ return 0;
+
+ /* first byte of item */
+ item = bh->b_data + ih->ih_item_location;
+
+ /* entry head array */
+ deh = B_I_DEH (bh, ih);
+
+ /* first byte of remaining entries, those are BEFORE cut entries
+ (prev_record) and length of all removed records (cut_records_len) */
+ prev_record_offset = (from ? deh[from - 1].deh_location : ih->ih_item_len);
+ cut_records_len = prev_record_offset/*from_record*/ - deh[from + del_count - 1].deh_location;
+ prev_record = item + prev_record_offset;
+
+
+ /* adjust locations of remaining entries */
+ /* entries AFTER the cut only lose the removed entry heads ... */
+ for (i = I_ENTRY_COUNT(ih) - 1; i > from + del_count - 1; i --)
+ deh[i].deh_location -= (DEH_SIZE * del_count);
+
+ /* ... entries BEFORE the cut also lose the removed record bytes
+ (records are stored in reverse order at the tail of the item) */
+ for (i = 0; i < from; i ++)
+ deh[i].deh_location -= DEH_SIZE * del_count + cut_records_len;
+
+ I_ENTRY_COUNT(ih) -= del_count;
+
+ /* shift entry head array and entries those are AFTER removed entries */
+ memmove ((char *)(deh + from),
+ deh + from + del_count,
+ prev_record - cut_records_len - (char *)(deh + from + del_count));
+
+ /* shift records, those are BEFORE removed entries */
+ memmove (prev_record - cut_records_len - DEH_SIZE * del_count,
+ prev_record, item + ih->ih_item_len - prev_record);
+
+ return DEH_SIZE * del_count + cut_records_len;
+}
+
+
+/* when cut item is part of regular file
+ pos_in_item - first byte that must be cut
+ cut_size - number of bytes to be cut beginning from pos_in_item
+
+ when cut item is part of directory
+ pos_in_item - number of first deleted entry
+ cut_size - count of deleted entries
+ */
+/* Shrinks the cut_item_num-th item of the leaf in bi, compacting item
+ bodies and updating block header, free space, and the parent child
+ pointer size. For directories, cut_size is converted from an entry
+ count to a byte count by leaf_cut_entries. */
+void leaf_cut_from_buffer (struct buffer_info * bi, int cut_item_num,
+ int pos_in_item, int cut_size)
+{
+ int nr;
+ struct buffer_head * bh = bi->bi_bh;
+ struct block_head * blkh;
+ struct item_head * ih;
+ int last_loc, unmoved_loc;
+ int i;
+
+ nr = le16_to_cpu ((blkh = B_BLK_HEAD (bh))->blk_nr_item);
+
+ /* item head of truncated item */
+ ih = B_N_PITEM_HEAD (bh, cut_item_num);
+
+ if (is_direntry_le_ih (ih)) {
+ /* first cut entry ()*/
+ /* from here on cut_size is in bytes, not entries */
+ cut_size = leaf_cut_entries (bh, ih, pos_in_item, cut_size);
+ if (pos_in_item == 0) {
+ /* change key */
+#ifdef CONFIG_REISERFS_CHECK
+ if (cut_item_num)
+ reiserfs_panic (0, "leaf_cut_from_buffer: 10190: "
+ "when 0-th enrty of item is cut, that item must be first in the node, not %d-th", cut_item_num);
+#endif
+ /* change item key by key of first entry in the item */
+ set_le_ih_k_offset (ih, le32_to_cpu (B_I_DEH (bh, ih)->deh_offset));
+ /*memcpy (&ih->ih_key.k_offset, &(B_I_DEH (bh, ih)->deh_offset), SHORT_KEY_SIZE);*/
+ }
+ } else {
+ /* item is direct or indirect */
+#ifdef CONFIG_REISERFS_CHECK
+ if (is_statdata_le_ih (ih))
+ reiserfs_panic (0, "leaf_cut_from_buffer: 10195: item is stat data");
+
+ if (pos_in_item && pos_in_item + cut_size != le16_to_cpu (ih->ih_item_len) )
+ reiserfs_panic (0, "cut_from_buf: 10200: invalid offset (%lu) or trunc_size (%lu) or ih_item_len (%lu)",
+ pos_in_item, cut_size, le16_to_cpu (ih->ih_item_len));
+#endif
+
+ /* shift item body to left if cut is from the head of item */
+ if (pos_in_item == 0) {
+ memmove (bh->b_data + le16_to_cpu (ih->ih_item_location), bh->b_data + le16_to_cpu (ih->ih_item_location) + cut_size,
+ le16_to_cpu (ih->ih_item_len) - cut_size);
+
+ /* change key of item */
+ if (is_direct_le_ih (ih))
+ set_le_ih_k_offset (ih, le_ih_k_offset (ih) + cut_size);
+ else {
+ /* indirect item: each unformatted node pointer removed
+ advances the key offset by one block of file data */
+ set_le_ih_k_offset (ih, le_ih_k_offset (ih) + (cut_size / UNFM_P_SIZE) * bh->b_size);
+#ifdef CONFIG_REISERFS_CHECK
+ if ( le16_to_cpu (ih->ih_item_len) == cut_size && get_ih_free_space (ih) )
+ reiserfs_panic (0, "leaf_cut_from_buf: 10205: invalid ih_free_space (%h)", ih);
+#endif
+ }
+ }
+ }
+
+
+ /* location of the last item */
+ last_loc = le16_to_cpu (ih[nr - cut_item_num - 1].ih_item_location);
+
+ /* location of the item, which is remaining at the same place */
+ unmoved_loc = cut_item_num ? le16_to_cpu ((ih-1)->ih_item_location) : bh->b_size;
+
+
+ /* shift */
+ memmove (bh->b_data + last_loc + cut_size, bh->b_data + last_loc,
+ unmoved_loc - last_loc - cut_size);
+
+ /* change item length */
+/* ih->ih_item_len -= cut_size;*/
+ ih->ih_item_len = cpu_to_le16 (le16_to_cpu (ih->ih_item_len) - cut_size);
+
+ if (is_indirect_le_ih (ih)) {
+ if (pos_in_item)
+ set_ih_free_space (ih, 0);
+ }
+
+ /* change locations */
+ for (i = cut_item_num; i < nr; i ++)
+/* ih[i-cut_item_num].ih_item_location += cut_size;*/
+ ih[i-cut_item_num].ih_item_location =
+ cpu_to_le16 (le16_to_cpu (ih[i-cut_item_num].ih_item_location) + cut_size);
+
+ /* size, free space */
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) + cut_size);
+
+ do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+
+ if (bi->bi_parent) {
+ B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size -= cut_size;
+ do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+ }
+}
+
+
+/* delete del_num items from buffer starting from the first'th item */
+/* Removes whole items (headers and bodies) and compacts the leaf.
+ When all items are deleted the node is simply reinitialized empty.
+ NOTE(review): ih_item_location is read/written here without le16
+ conversion, unlike leaf_cut_from_buffer -- presumably little-endian
+ only; confirm. */
+static void leaf_delete_items_entirely (struct buffer_info * bi,
+ int first, int del_num)
+{
+ struct buffer_head * bh = bi->bi_bh;
+ int nr;
+ int i, j;
+ int last_loc, last_removed_loc;
+ struct block_head * blkh;
+ struct item_head * ih;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (bh == NULL)
+ reiserfs_panic (0, "leaf_delete_items_entirely: 10210: buffer is 0");
+
+ if (del_num < 0)
+ reiserfs_panic (0, "leaf_delete_items_entirely: 10215: del_num less than 0 (%d)", del_num);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if (del_num == 0)
+ return;
+
+ nr = le16_to_cpu ((blkh = B_BLK_HEAD(bh))->blk_nr_item);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (first < 0 || first + del_num > nr)
+ reiserfs_panic (0, "leaf_delete_items_entirely: 10220: first=%d, number=%d, there is %d items", first, del_num, nr);
+#endif /* CONFIG_REISERFS_CHECK */
+
+ if (first == 0 && del_num == nr) {
+ /* this does not work */
+ /* NOTE(review): comment above is from the original author; the
+ code below does appear to empty the node -- verify intent */
+ make_empty_node (bi);
+
+ do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+ return;
+ }
+
+ ih = B_N_PITEM_HEAD (bh, first);
+
+ /* location of unmovable item */
+ /* j is the upper boundary of the deleted items' body region */
+ j = (first == 0) ? bh->b_size : (ih-1)->ih_item_location;
+
+ /* delete items */
+ last_loc = ih[nr-1-first].ih_item_location;
+ last_removed_loc = ih[del_num-1].ih_item_location;
+
+ /* slide surviving bodies up by the total size of removed bodies */
+ memmove (bh->b_data + last_loc + j - last_removed_loc,
+ bh->b_data + last_loc, last_removed_loc - last_loc);
+
+ /* delete item headers */
+ memmove (ih, ih + del_num, (nr - first - del_num) * IH_SIZE);
+
+ /* change item location */
+ for (i = first; i < nr - del_num; i ++)
+ ih[i-first].ih_item_location += j - last_removed_loc;
+
+ /* sizes, item number */
+ blkh->blk_nr_item = cpu_to_le16 (le16_to_cpu (blkh->blk_nr_item) - del_num);
+ blkh->blk_free_space = cpu_to_le16 (le16_to_cpu (blkh->blk_free_space) + (j - last_removed_loc + IH_SIZE * del_num));
+
+ do_balance_mark_leaf_dirty (bi->tb, bh, 0);
+
+ if (bi->bi_parent) {
+ B_N_CHILD (bi->bi_parent, bi->bi_position)->dc_size -= j - last_removed_loc + IH_SIZE * del_num;
+ do_balance_mark_internal_dirty (bi->tb, bi->bi_parent, 0);
+ }
+}
+
+
+
+
+
+/* paste new_entry_count entries (new_dehs, records) into position before to item_num-th item */
+/* Caller must already have grown the item by paste_size bytes (via
+ leaf_paste_in_buffer); this routine lays out the new entry heads and
+ records inside that space and fixes up deh_location fields. */
+void leaf_paste_entries (
+ struct buffer_head * bh,
+ int item_num,
+ int before,
+ int new_entry_count,
+ struct reiserfs_de_head * new_dehs,
+ const char * records,
+ int paste_size
+ )
+{
+ struct item_head * ih;
+ char * item;
+ struct reiserfs_de_head * deh;
+ char * insert_point;
+ int i, old_entry_num;
+
+ if (new_entry_count == 0)
+ return;
+
+ ih = B_N_PITEM_HEAD(bh, item_num);
+
+#ifdef CONFIG_REISERFS_CHECK
+ /* make sure, that item is directory, and there are enough records in it */
+ if (!is_direntry_le_ih (ih))
+ reiserfs_panic (0, "leaf_paste_entries: 10225: item is not directory item");
+
+ if (I_ENTRY_COUNT (ih) < before)
+ reiserfs_panic (0, "leaf_paste_entries: 10230: there are no entry we paste entries before. entry_count = %d, before = %d",
+ I_ENTRY_COUNT (ih), before);
+#endif
+
+
+ /* first byte of dest item */
+ item = bh->b_data + ih->ih_item_location;
+
+ /* entry head array */
+ deh = B_I_DEH (bh, ih);
+
+ /* new records will be pasted at this point */
+ /* ih_item_len already includes paste_size, so subtract it to get
+ the old end of the item when pasting after the last entry */
+ insert_point = item + (before ? deh[before - 1].deh_location : (ih->ih_item_len - paste_size));
+
+ /* adjust locations of records that will be AFTER new records */
+ for (i = I_ENTRY_COUNT(ih) - 1; i >= before; i --)
+ deh[i].deh_location += DEH_SIZE * new_entry_count;
+
+ /* adjust locations of records that will be BEFORE new records */
+ for (i = 0; i < before; i ++)
+ deh[i].deh_location += paste_size;
+
+ old_entry_num = I_ENTRY_COUNT(ih);
+ I_ENTRY_COUNT(ih) += new_entry_count;
+
+ /* prepare space for pasted records */
+ memmove (insert_point + paste_size, insert_point, item + (ih->ih_item_len - paste_size) - insert_point);
+
+ /* copy new records */
+ memcpy (insert_point + DEH_SIZE * new_entry_count, records,
+ paste_size - DEH_SIZE * new_entry_count);
+
+ /* prepare space for new entry heads */
+ deh += before;
+ memmove ((char *)(deh + new_entry_count), deh, insert_point - (char *)deh);
+
+ /* copy new entry heads */
+ /* (the cast below is a no-op kept from the original source) */
+ deh = (struct reiserfs_de_head *)((char *)deh);
+ memcpy (deh, new_dehs, DEH_SIZE * new_entry_count);
+
+ /* set locations of new records */
+ /* rebase the copied deh_location values (which were relative to the
+ source buffer) onto this item's layout */
+ for (i = 0; i < new_entry_count; i ++)
+ deh[i].deh_location +=
+ (- new_dehs[new_entry_count - 1].deh_location + insert_point + DEH_SIZE * new_entry_count - item);
+
+
+ /* change item key if neccessary (when we paste before 0-th entry */
+ if (!before)
+ {
+#ifdef CONFIG_REISERFS_CHECK
+/*
+ if ( old_entry_num && COMP_SHORT_KEYS ((unsigned long *)&ih->ih_key.k_offset,
+ &(new_dehs->deh_offset)) <= 0)
+ reiserfs_panic (0, "leaf_paste_entries: 10235: new key must be less, that old key");
+*/
+#endif
+ set_le_ih_k_offset (ih, le32_to_cpu (new_dehs->deh_offset));
+/* memcpy (&ih->ih_key.k_offset,
+ &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ {
+ int prev, next;
+ /* check record locations */
+ /* records must be strictly decreasing in location as entry index
+ grows (records are stored in reverse order) */
+ deh = B_I_DEH (bh, ih);
+ for (i = 0; i < I_ENTRY_COUNT(ih); i ++) {
+ next = (i < I_ENTRY_COUNT(ih) - 1) ? deh[i + 1].deh_location : 0;
+ prev = (i != 0) ? deh[i - 1].deh_location : 0;
+
+ if (prev && prev <= deh[i].deh_location)
+ reiserfs_warning ("vs-10240: leaf_paste_entries: directory item (%h) corrupted (prev %a, cur(%d) %a)\n",
+ ih, deh + i - 1, i, deh + i);
+ if (next && next >= deh[i].deh_location)
+ reiserfs_warning ("vs-10250: leaf_paste_entries: directory item (%h) corrupted (cur(%d) %a, next %a)\n",
+ ih, i, deh + i, deh + i + 1);
+ }
+ }
+#endif
+
+}
+
+
+
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
new file mode 100644
index 000000000..ffb8920bd
--- /dev/null
+++ b/fs/reiserfs/namei.c
@@ -0,0 +1,1221 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+ /* there should be an overview right
+ here, as there should be in every
+ conceptual grouping of code. This
+ should be combined with dir.c and
+ called dir.c (naming will become
+ too large to be called one file in
+ a few years), stop senselessly
+ imitating the incoherent
+ structuring of code used by other
+ filesystems. */
+
+/* Directory link-count helpers: a directory whose i_nlink reaches
+ REISERFS_LINK_MAX is pinned to the sentinel value 1, after which the
+ count is no longer maintained (neither bumped nor dropped).
+ NOTE(review): inferred from the wrap-to-1 logic below -- confirm the
+ sentinel convention against fsck/other users of i_nlink == 1. */
+#define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { i->i_nlink++; if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
+#define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) i->i_nlink--;
+
+// directory item contains array of entry headers. This performs
+// binary search through that array
+// Searches de->de_ih's entry-head array for the entry whose offset
+// (third key component: hash + generation) equals 'off'. On a match
+// sets de->de_entry_num to that entry and returns NAME_FOUND; else
+// sets de->de_entry_num to the insertion point and returns
+// NAME_NOT_FOUND. Requires de->de_ih and de->de_deh to be set.
+static int bin_search_in_dir_item (struct reiserfs_dir_entry * de, loff_t off)
+{
+ struct item_head * ih = de->de_ih;
+ struct reiserfs_de_head * deh = de->de_deh;
+ int rbound, lbound, j;
+
+ lbound = 0;
+ rbound = I_ENTRY_COUNT (ih) - 1;
+
+ for (j = (rbound + lbound) / 2; lbound <= rbound; j = (rbound + lbound) / 2) {
+ if (off < deh_offset (deh + j)) {
+ rbound = j - 1;
+ continue;
+ }
+ if (off > deh_offset (deh + j)) {
+ lbound = j + 1;
+ continue;
+ }
+ // this is not name found, but matched third key component
+ de->de_entry_num = j;
+ return NAME_FOUND;
+ }
+
+ de->de_entry_num = lbound;
+ return NAME_NOT_FOUND;
+}
+
+
+// comment? maybe something like set de to point to what the path points to?
+// Initializes de's buffer/item fields (de_bh, de_ih, de_deh,
+// de_item_num) from the position that 'path' currently points at.
+static inline void set_de_item_location (struct reiserfs_dir_entry * de, struct path * path)
+{
+ de->de_bh = get_bh (path);
+ de->de_ih = get_ih (path);
+ de->de_deh = B_I_DEH (de->de_bh, de->de_ih);
+ de->de_item_num = PATH_LAST_POSITION (path);
+}
+
+
+// de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
+// Fills de_entrylen, de_namelen and de_name for entry de_entry_num.
+// Names may be padded with trailing NULs to a 4-byte boundary on disk;
+// the strlen() call below trims that padding off de_namelen.
+inline void set_de_name_and_namelen (struct reiserfs_dir_entry * de)
+{
+ struct reiserfs_de_head * deh = de->de_deh + de->de_entry_num;
+
+ if (de->de_entry_num >= ih_entry_count (de->de_ih))
+ BUG ();
+
+ de->de_entrylen = entry_length (de->de_bh, de->de_ih, de->de_entry_num);
+ /* entries carrying an embedded stat data reserve SD_SIZE bytes */
+ de->de_namelen = de->de_entrylen - (de_with_sd (deh) ? SD_SIZE : 0);
+ de->de_name = B_I_PITEM (de->de_bh, de->de_ih) + le16_to_cpu (deh->deh_location);
+ if (de->de_name[de->de_namelen - 1] == 0)
+ de->de_namelen = strlen (de->de_name);
+}
+
+
+// what entry points to
+// Copies the (dir_id, objectid) key of the object referenced by entry
+// de_entry_num into de, converting from on-disk little-endian.
+static inline void set_de_object_key (struct reiserfs_dir_entry * de)
+{
+ if (de->de_entry_num >= ih_entry_count (de->de_ih))
+ BUG ();
+ de->de_dir_id = le32_to_cpu (de->de_deh[de->de_entry_num].deh_dir_id);
+ de->de_objectid = le32_to_cpu (de->de_deh[de->de_entry_num].deh_objectid);
+}
+
+
+/* Builds de->de_entry_key -- the cpu-order key of the directory ENTRY
+ itself (directory's dir_id/objectid plus this entry's offset) -- so
+ the entry can be located again after the path is released. */
+static inline void store_de_entry_key (struct reiserfs_dir_entry * de)
+{
+ struct reiserfs_de_head * deh = de->de_deh + de->de_entry_num;
+
+ if (de->de_entry_num >= ih_entry_count (de->de_ih))
+ BUG ();
+
+ /* store key of the found entry */
+ de->de_entry_key.version = ITEM_VERSION_1;
+ de->de_entry_key.on_disk_key.k_dir_id = le32_to_cpu (de->de_ih->ih_key.k_dir_id);
+ de->de_entry_key.on_disk_key.k_objectid = le32_to_cpu (de->de_ih->ih_key.k_objectid);
+ set_cpu_key_k_offset (&(de->de_entry_key), deh_offset (deh));
+ set_cpu_key_k_type (&(de->de_entry_key), TYPE_DIRENTRY);
+}
+
+
+/* We assign a key to each directory item, and place multiple entries
+in a single directory item. A directory item has a key equal to the
+key of the first directory entry in it.
+
+This function first calls search_by_key, then, if item whose first
+entry matches is not found it looks for the entry inside directory
+item found by search_by_key. Fills the path to the entry, and to the
+entry position in the item
+
+*/
+
+/* The function is NOT SCHEDULE-SAFE! */
+/* Returns NAME_FOUND / NAME_NOT_FOUND / IO_ERROR. On success, 'path'
+ points at the directory item containing the entry and de describes
+ the entry position (and, if found, its name and object key). */
+int search_by_entry_key (struct super_block * sb, struct cpu_key * key,
+ struct path * path, struct reiserfs_dir_entry * de)
+{
+ int retval;
+
+ retval = search_item (sb, key, path);
+ switch (retval) {
+ case ITEM_NOT_FOUND:
+ /* the entry may live inside the preceding item: step the path
+ back one position and search within that item */
+ if (!PATH_LAST_POSITION (path)) {
+ reiserfs_warning ("vs-7000: search_by_entry_key: search_by_key returned item position == 0");
+ pathrelse(path) ;
+ return IO_ERROR ;
+ }
+ PATH_LAST_POSITION (path) --;
+ /* fall through -- continue with the (adjusted) found item */
+
+ case ITEM_FOUND:
+ break;
+
+ case IO_ERROR:
+ return retval;
+
+ default:
+ pathrelse (path);
+ reiserfs_warning ("vs-7002: search_by_entry_key: no path to here");
+ return IO_ERROR;
+ }
+
+ set_de_item_location (de, path);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (!is_direntry_le_ih (de->de_ih) ||
+ COMP_SHORT_KEYS (&(de->de_ih->ih_key), key)) {
+ print_block (de->de_bh, 0, -1, -1);
+ reiserfs_panic (sb, "vs-7005: search_by_entry_key: found item %h is not directory item or "
+ "does not belong to the same directory as key %k", de->de_ih, key);
+ }
+#endif /* CONFIG_REISERFS_CHECK */
+
+ /* binary search in directory item by third componen t of the
+ key. sets de->de_entry_num of de */
+ retval = bin_search_in_dir_item (de, cpu_key_k_offset (key));
+ path->pos_in_item = de->de_entry_num;
+ if (retval != NAME_NOT_FOUND) {
+ // ugly, but rename needs de_bh, de_deh, de_name, de_namelen, de_objectid set
+ set_de_name_and_namelen (de);
+ set_de_object_key (de);
+ }
+ return retval;
+}
+
+
+
+/* Keyed 32-bit hash function using TEA in a Davis-Meyer function */
+
+/* The third component is hashed, and you can choose from more than
+ one hash function. Per directory hashes are not yet implemented
+ but are thought about. This function should be moved to hashes.c
+ Jedi, please do so. -Hans */
+
+/* Computes the third key component (the hash part of a directory entry
+ offset) for 'name'. "." and ".." get fixed reserved offsets; all
+ other names are hashed with the per-superblock hash function. */
+static __u32 get_third_component (struct super_block * s,
+ const char * name, int len)
+{
+ __u32 res;
+
+ if (!len || (len == 1 && name[0] == '.'))
+ return DOT_OFFSET;
+ if (len == 2 && name[0] == '.' && name[1] == '.')
+ return DOT_DOT_OFFSET;
+
+ res = s->u.reiserfs_sb.s_hash_function (name, len);
+
+ // take bits from 7-th to 30-th including both bounds
+ res = GET_HASH_VALUE(res);
+ if (res == 0)
+ // needed to have no names before "." and ".." those have hash
+ // value == 0 and generation conters 1 and 2 accordingly
+ res = 128;
+ /* shift hash into the offset field past the generation-number bits;
+ NOTE(review): addition (not OR) of MAX_GENERATION_NUMBER is from
+ the original -- confirm it matches SET_GENERATION_NUMBER layout */
+ return res + MAX_GENERATION_NUMBER;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* Compares the candidate entry in 'de' (de_name/de_namelen already set)
+ against name/namelen. Returns NAME_FOUND or NAME_FOUND_INVISIBLE on a
+ byte-exact match depending on the entry's visibility bit, otherwise
+ NAME_NOT_FOUND. */
+static int reiserfs_match (struct reiserfs_dir_entry * de,
+ const char * name, int namelen)
+{
+ int retval = NAME_NOT_FOUND;
+
+ if ((namelen == de->de_namelen) &&
+ !memcmp(de->de_name, name, de->de_namelen))
+ retval = (de_visible (de->de_deh + de->de_entry_num) ? NAME_FOUND : NAME_FOUND_INVISIBLE);
+
+ return retval;
+}
+
+
+/* de's de_bh, de_ih, de_deh, de_item_num, de_entry_num are set already */
+
+ /* used when hash collisions exist */
+
+
+/* Scans BACKWARD from de->de_entry_num through all entries sharing the
+ key's hash value, comparing names (hash collisions make this linear
+ pass necessary after the binary search). Also records every
+ generation number seen in de_gen_number_bit_string, for callers that
+ need to allocate a fresh generation number. Returns NAME_FOUND /
+ NAME_FOUND_INVISIBLE / NAME_NOT_FOUND, or GOTO_PREVIOUS_ITEM when the
+ scan hit the start of the item and the collision run may continue in
+ the left neighbor. */
+static int linear_search_in_dir_item (struct cpu_key * key, struct reiserfs_dir_entry * de,
+ const char * name, int namelen)
+{
+ struct reiserfs_de_head * deh = de->de_deh;
+ int retval;
+ int i;
+
+ i = de->de_entry_num;
+
+ /* bin_search left us at the insertion point; step back onto the last
+ entry of the matching-hash run (if any) */
+ if (i == I_ENTRY_COUNT (de->de_ih) ||
+ GET_HASH_VALUE (deh_offset (deh + i)) != GET_HASH_VALUE (cpu_key_k_offset (key))) {
+ i --;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (de->de_deh != B_I_DEH (de->de_bh, de->de_ih))
+ reiserfs_panic (0, "vs-7010: linear_search_in_dir_item: array of entry headers not found");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ deh += i;
+
+ for (; i >= 0; i --, deh --) {
+ if (GET_HASH_VALUE (deh_offset (deh)) !=
+ GET_HASH_VALUE (cpu_key_k_offset (key))) {
+ // hash value does not match, no need to check whole name
+ return NAME_NOT_FOUND;
+ }
+
+ /* mark, that this generation number is used */
+ if (de->de_gen_number_bit_string)
+ set_bit (GET_GENERATION_NUMBER (deh_offset (deh)), de->de_gen_number_bit_string);
+
+ // calculate pointer to name and namelen
+ de->de_entry_num = i;
+ set_de_name_and_namelen (de);
+
+ if ((retval = reiserfs_match (de, name, namelen)) != NAME_NOT_FOUND) {
+ // de's de_name, de_namelen, de_recordlen are set. Fill the rest:
+
+ // key of pointed object
+ set_de_object_key (de);
+
+ store_de_entry_key (de);
+
+ // retval can be NAME_FOUND or NAME_FOUND_INVISIBLE
+ return retval;
+ }
+ }
+
+ if (GET_GENERATION_NUMBER (le_ih_k_offset (de->de_ih)) == 0)
+ /* we have reached left most entry in the node. In common we
+ have to go to the left neighbor, but if generation counter
+ is 0 already, we know for sure, that there is no name with
+ the same hash value */
+ // FIXME: this work correctly only because hash value can not
+ // be 0. Btw, in case of Yura's hash it is probably possible,
+ // so, this is a bug
+ return NAME_NOT_FOUND;
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (de->de_item_num)
+ reiserfs_panic (0, "vs-7015: linear_search_in_dir_item: "
+ "two diritems of the same directory in one node?");
+#endif /* CONFIG_REISERFS_CHECK */
+
+ return GOTO_PREVIOUS_ITEM;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+// may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
+// FIXME: should add something like IOERROR
+/* Looks up 'name' in directory 'dir'. On NAME_FOUND / NAME_FOUND_INVISIBLE,
+ path_to_entry and de describe the entry; caller must pathrelse().
+ May loop leftward across items when a hash-collision run spans nodes. */
+static int reiserfs_find_entry (struct inode * dir, const char * name, int namelen,
+ struct path * path_to_entry, struct reiserfs_dir_entry * de)
+{
+ struct cpu_key key_to_search;
+ int retval;
+
+
+ if (namelen > REISERFS_MAX_NAME_LEN (dir->i_sb->s_blocksize))
+ return NAME_NOT_FOUND;
+
+ /* we will search for this key in the tree */
+ make_cpu_key (&key_to_search, dir,
+ get_third_component (dir->i_sb, name, namelen), TYPE_DIRENTRY, 3);
+
+ while (1) {
+ retval = search_by_entry_key (dir->i_sb, &key_to_search, path_to_entry, de);
+ /* NOTE(review): the braceless if below has BUG() as its sole body;
+ the intervening comment blocks make that easy to misread */
+ if (retval == IO_ERROR)
+ // FIXME: still has to be dealt with
+
+ /* I want you to conform to our error
+ printing standard. How many times
+ do I have to ask? -Hans */
+
+ BUG ();
+
+ /* compare names for all entries having given hash value */
+ retval = linear_search_in_dir_item (&key_to_search, de, name, namelen);
+ if (retval != GOTO_PREVIOUS_ITEM) {
+ /* there is no need to scan directory anymore. Given entry found or does not exist */
+ path_to_entry->pos_in_item = de->de_entry_num;
+ return retval;
+ }
+
+ /* there is left neighboring item of this directory and given entry can be there */
+ set_cpu_key_k_offset (&key_to_search, le_ih_k_offset (de->de_ih) - 1);
+ pathrelse (path_to_entry);
+
+ } /* while (1) */
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS lookup: resolves dentry in dir. Per the 2.4 lookup() contract,
+ returns NULL after d_add() on both hit and negative-entry miss, and an
+ ERR_PTR only on error. */
+struct dentry * reiserfs_lookup (struct inode * dir, struct dentry * dentry)
+{
+ int retval;
+ struct inode * inode = 0;
+ struct reiserfs_dir_entry de;
+ INITIALIZE_PATH (path_to_entry);
+
+ reiserfs_check_lock_depth("lookup") ;
+
+ if (dentry->d_name.len > REISERFS_MAX_NAME_LEN (dir->i_sb->s_blocksize))
+ return ERR_PTR(-ENAMETOOLONG);
+
+ de.de_gen_number_bit_string = 0;
+ retval = reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path_to_entry, &de);
+ pathrelse (&path_to_entry);
+ if (retval == NAME_FOUND) {
+ /* de.de_dir_id/de.de_objectid are adjacent, forming the key --
+ hence the cast of &de.de_dir_id to a cpu_key pointer */
+ inode = reiserfs_iget (dir->i_sb, (struct cpu_key *)&(de.de_dir_id));
+ if (!inode) {
+ return ERR_PTR(-EACCES);
+ }
+ }
+
+ /* inode == NULL here creates a negative dentry */
+ d_add(dentry, inode);
+ return NULL;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+
+/* add entry to the directory (entry can be hidden).
+
+insert definition of when hidden directories are used here -Hans
+
+ Does not mark dir inode dirty, do it after successesfull call to it */
+
+static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct inode * dir,
+ const char * name, int namelen, struct inode * inode,
+ int visible)
+{
+ struct cpu_key entry_key;
+ struct reiserfs_de_head * deh;
+ INITIALIZE_PATH (path);
+ struct reiserfs_dir_entry de;
+ int bit_string [MAX_GENERATION_NUMBER / (sizeof(int) * 8) + 1];
+ int gen_number;
+ char small_buf[32+DEH_SIZE] ; /* 48 bytes now and we avoid kmalloc
+ if we create file with short name */
+ char * buffer;
+ int buflen, paste_size;
+ int retval;
+
+
+ /* cannot allow items to be added into a busy deleted directory */
+ if (!namelen)
+ return -EINVAL;
+
+ if (namelen > REISERFS_MAX_NAME_LEN (dir->i_sb->s_blocksize))
+ return -ENAMETOOLONG;
+
+ /* each entry has unique key. compose it */
+ make_cpu_key (&entry_key, dir,
+ get_third_component (dir->i_sb, name, namelen), TYPE_DIRENTRY, 3);
+
+ /* get memory for composing the entry */
+ /* new format pads the name to a 4-byte boundary; old format pastes
+ exactly DEH_SIZE + namelen (see paste_size below) */
+ buflen = DEH_SIZE + ROUND_UP (namelen);
+ if (buflen > sizeof (small_buf)) {
+ buffer = reiserfs_kmalloc (buflen, GFP_BUFFER, dir->i_sb);
+ if (buffer == 0)
+ return -ENOMEM;
+ } else
+ buffer = small_buf;
+
+ paste_size = (old_format_only (dir->i_sb)) ? (DEH_SIZE + namelen) : buflen;
+
+ /* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */
+ deh = (struct reiserfs_de_head *)buffer;
+ deh->deh_location = 0;
+ deh->deh_offset = cpu_to_le32 (cpu_key_k_offset (&entry_key));
+ deh->deh_state = 0;
+ /* put key (ino analog) to de */
+ deh->deh_dir_id = INODE_PKEY (inode)->k_dir_id;
+ deh->deh_objectid = INODE_PKEY (inode)->k_objectid;
+
+ /* copy name */
+ memcpy ((char *)(deh + 1), name, namelen);
+ /* padd by 0s to the 4 byte boundary */
+ padd_item ((char *)(deh + 1), ROUND_UP (namelen), namelen);
+
+ /* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */
+ mark_de_without_sd (deh);
+ visible ? mark_de_visible (deh) : mark_de_hidden (deh);
+
+ /* find the proper place for the new entry */
+ /* reiserfs_find_entry also fills bit_string with the generation
+ numbers already used by entries sharing this hash value */
+ memset (bit_string, 0, sizeof (bit_string));
+ de.de_gen_number_bit_string = (char *)bit_string;
+ if (reiserfs_find_entry (dir, name, namelen, &path, &de) == NAME_FOUND) {
+ if (buffer != small_buf)
+ reiserfs_kfree (buffer, buflen, dir->i_sb);
+ pathrelse (&path);
+ return -EEXIST;
+ }
+
+ if (find_first_nonzero_bit (bit_string, MAX_GENERATION_NUMBER + 1) < MAX_GENERATION_NUMBER + 1) {
+ /* there are few names with given hash value */
+ gen_number = find_first_zero_bit (bit_string, MAX_GENERATION_NUMBER + 1);
+ if (gen_number > MAX_GENERATION_NUMBER) {
+ /* there is no free generation number */
+ reiserfs_warning ("reiserfs_add_entry: Congratulations! we have got hash function screwed up\n");
+ if (buffer != small_buf)
+ reiserfs_kfree (buffer, buflen, dir->i_sb);
+ pathrelse (&path);
+ return -EHASHCOLLISION;//EBADSLT
+ }
+ /* adjust offset of directory enrty */
+ deh->deh_offset = cpu_to_le32 (SET_GENERATION_NUMBER (deh_offset (deh), gen_number));
+ set_cpu_key_k_offset (&entry_key, le32_to_cpu (deh->deh_offset));
+
+ /* find place for new entry */
+ /* re-search with the collision-adjusted key to reposition path */
+ if (search_by_entry_key (dir->i_sb, &entry_key, &path, &de) == NAME_FOUND) {
+ reiserfs_warning ("vs-7032: reiserfs_add_entry: "
+ "entry with this key (%k) already exists", &entry_key);
+ if (buffer != small_buf)
+ reiserfs_kfree (buffer, buflen, dir->i_sb);
+ pathrelse (&path);
+ return -EHASHCOLLISION;
+ }
+ } else {
+ deh->deh_offset = cpu_to_le32 (SET_GENERATION_NUMBER (le32_to_cpu (deh->deh_offset), 0));
+ set_cpu_key_k_offset (&entry_key, le32_to_cpu (deh->deh_offset));
+ }
+
+ /* perform the insertion of the entry that we have prepared */
+ retval = reiserfs_paste_into_item (th, &path, &entry_key, buffer, paste_size);
+ if (buffer != small_buf)
+ reiserfs_kfree (buffer, buflen, dir->i_sb);
+ if (retval) {
+ reiserfs_check_path(&path) ;
+ return retval;
+ }
+
+ dir->i_size += paste_size;
+ dir->i_blocks = ((dir->i_size + 511) >> 9);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ if (!S_ISDIR (inode->i_mode) && visible)
+ // reiserfs_mkdir or reiserfs_rename will do that by itself
+ reiserfs_update_sd (th, dir);
+
+ reiserfs_check_path(&path) ;
+ return 0;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS create: allocates a new regular-file inode inside a journal
+ transaction, wires up its operations, links it into dir, and
+ instantiates the dentry. On add_entry failure the inode's link count
+ is dropped and it is iput() after the transaction ends. */
+int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
+{
+ int retval;
+ struct inode * inode;
+ int windex ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 ;
+ struct reiserfs_transaction_handle th ;
+
+
+ inode = get_empty_inode() ;
+ if (!inode) {
+ return -ENOMEM ;
+ }
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ th.t_caller = "create" ;
+ windex = push_journal_writer("reiserfs_create") ;
+ /* on failure reiserfs_new_inode frees the passed inode and returns
+ NULL with the error in retval */
+ inode = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode, &retval);
+ if (!inode) {
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+ inode->i_op = &reiserfs_file_inode_operations;
+ inode->i_fop = &reiserfs_file_operations;
+ inode->i_mapping->a_ops = &reiserfs_address_space_operations ;
+
+ retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+ inode, 1/*visible*/);
+ if (retval) {
+ inode->i_nlink--;
+ reiserfs_update_sd (&th, inode);
+ pop_journal_writer(windex) ;
+ // FIXME: should we put iput here and have stat data deleted
+ // in the same transactioin
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ iput (inode);
+ return retval;
+ }
+
+ d_instantiate(dentry, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->mknod(): create a special file (device node, fifo or socket)
+** named by 'dentry' in 'dir'.  Returns 0 or a negative errno. */
+int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, int rdev)
+{
+ int retval;
+ struct inode * inode;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+ inode = get_empty_inode() ;
+ if (!inode) {
+ return -ENOMEM ;
+ }
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_mknod") ;
+
+ inode = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode, &retval);
+ if (!inode) {
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+ /* fills in i_op/i_fop (and device number) for the special file type */
+ init_special_inode(inode, mode, rdev) ;
+
+ //FIXME: needed for block and char devices only
+ reiserfs_update_sd (&th, inode);
+
+ retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+ inode, 1/*visible*/);
+ if (retval) {
+ /* entry insertion failed: roll back the link count */
+ inode->i_nlink--;
+ reiserfs_update_sd (&th, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ iput (inode);
+ return retval;
+ }
+
+ d_instantiate(dentry, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->mkdir(): create a directory named by 'dentry' in 'dir'.
+** The new directory's size is the empty-dir size for the disk format
+** in use.  Note: reiserfs_add_entry does NOT update dir's stat data
+** here (see reiserfs_add_entry); it is written explicitly below. */
+int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
+{
+ int retval;
+ struct inode * inode;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+ inode = get_empty_inode() ;
+ if (!inode) {
+ return -ENOMEM ;
+ }
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_mkdir") ;
+
+ /* inc the link count now, so another writer doesn't overflow it while
+ ** we sleep later on.
+ */
+ INC_DIR_INODE_NLINK(dir)
+
+ mode = S_IFDIR | mode;
+ inode = reiserfs_new_inode (&th, dir, mode, 0/*symlink*/,
+ old_format_only (dir->i_sb) ? EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
+ dentry, inode, &retval);
+ if (!inode) {
+ pop_journal_writer(windex) ;
+ /* undo the early INC_DIR_INODE_NLINK above */
+ dir->i_nlink-- ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+ inode->i_op = &reiserfs_dir_inode_operations;
+ inode->i_fop = &reiserfs_dir_operations;
+
+ // note, _this_ add_entry will not update dir's stat data
+ retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+ inode, 1/*visible*/);
+ if (retval) {
+ inode->i_nlink = 0;
+ DEC_DIR_INODE_NLINK(dir);
+ reiserfs_update_sd (&th, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ iput (inode);
+ return retval;
+ }
+
+ // the above add_entry did not update dir's stat data
+ reiserfs_update_sd (&th, dir);
+
+ d_instantiate(dentry, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
+/* Non-zero iff the directory contains only "." and "..".
+** We can cheat because an old format dir cannot have EMPTY_DIR_SIZE
+** and a new format dir cannot have EMPTY_DIR_SIZE_V1, so matching
+** either size means empty regardless of the disk format version. */
+static inline int reiserfs_empty_dir(struct inode *inode) {
+    return (inode->i_size == EMPTY_DIR_SIZE ||
+            inode->i_size == EMPTY_DIR_SIZE_V1) ? 1 : 0;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->rmdir(): remove the (empty) directory named by 'dentry' from
+** 'dir'.  Returns 0 on success; -ENOENT if the name is gone, -EIO if
+** the entry points at a different object, -ENOTEMPTY if the victim
+** still has entries. */
+int reiserfs_rmdir (struct inode * dir, struct dentry *dentry)
+{
+ int retval;
+ struct inode * inode;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+ INITIALIZE_PATH (path);
+ struct reiserfs_dir_entry de;
+
+
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_rmdir") ;
+
+ de.de_gen_number_bit_string = 0;
+ if (reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path, &de) == NAME_NOT_FOUND) {
+ retval = -ENOENT;
+ goto end_rmdir;
+ }
+ inode = dentry->d_inode;
+
+ if (de.de_objectid != inode->i_ino) {
+ // FIXME: compare key of an object and a key found in the
+ // entry
+ retval = -EIO;
+ goto end_rmdir;
+ }
+ if (!reiserfs_empty_dir(inode)) {
+ retval = -ENOTEMPTY;
+ goto end_rmdir;
+ }
+
+ /* cut entry from dir directory */
+ retval = reiserfs_cut_from_item (&th, &path, &(de.de_entry_key), dir,
+ NULL, /* page */
+ 0/*new file size - not used here*/);
+ if (retval < 0)
+ goto end_rmdir;
+
+ /* an empty dir should hold exactly "." and its parent's entry */
+ if ( inode->i_nlink != 2 && inode->i_nlink != 1 )
+ printk ("reiserfs_rmdir: empty directory has nlink != 2 (%d)\n", inode->i_nlink);
+
+ inode->i_nlink = 0;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ reiserfs_update_sd (&th, inode);
+
+ /* parent loses the victim's ".." back-reference and the entry */
+ DEC_DIR_INODE_NLINK(dir)
+ dir->i_size -= (DEH_SIZE + de.de_entrylen);
+ dir->i_blocks = ((dir->i_size + 511) >> 9);
+ reiserfs_update_sd (&th, dir);
+
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ reiserfs_check_path(&path) ;
+ return 0;
+
+ end_rmdir:
+ /* we must release path, because we did not call
+ reiserfs_cut_from_item, or reiserfs_cut_from_item does not
+ release path if operation was not complete */
+ pathrelse (&path);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->unlink(): remove the entry named by 'dentry' from 'dir' and
+** drop one link on the target inode.  Returns 0 or a negative errno. */
+int reiserfs_unlink (struct inode * dir, struct dentry *dentry)
+{
+ int retval;
+ struct inode * inode;
+ struct reiserfs_dir_entry de;
+ INITIALIZE_PATH (path);
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_unlink") ;
+
+ de.de_gen_number_bit_string = 0;
+ if (reiserfs_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &path, &de) == NAME_NOT_FOUND) {
+ retval = -ENOENT;
+ goto end_unlink;
+ }
+ inode = dentry->d_inode;
+
+ if (de.de_objectid != inode->i_ino) {
+ // FIXME: compare key of an object and a key found in the
+ // entry
+ retval = -EIO;
+ goto end_unlink;
+ }
+
+ /* corrupted count: force it to 1 so the decrement below lands on 0 */
+ if (!inode->i_nlink) {
+ printk("reiserfs_unlink: deleting nonexistent file (%s:%lu), %d\n",
+ kdevname(inode->i_dev), inode->i_ino, inode->i_nlink);
+ inode->i_nlink = 1;
+ }
+
+ retval = reiserfs_cut_from_item (&th, &path, &(de.de_entry_key), dir, NULL, 0);
+ if (retval < 0)
+ goto end_unlink;
+
+ inode->i_nlink--;
+ inode->i_ctime = CURRENT_TIME;
+ reiserfs_update_sd (&th, inode);
+
+ dir->i_size -= (de.de_entrylen + DEH_SIZE);
+ dir->i_blocks = ((dir->i_size + 511) >> 9);
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ reiserfs_update_sd (&th, dir);
+
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ reiserfs_check_path(&path) ;
+ return 0;
+
+ end_unlink:
+ pathrelse (&path);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ reiserfs_check_path(&path) ;
+ return retval;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->symlink(): create a symlink named by 'dentry' in 'dir' whose
+** body is 'symname'.  The link body is stored as a single item, so it
+** must fit in one block; otherwise -ENAMETOOLONG. */
+int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const char * symname)
+{
+ int retval;
+ struct inode * inode;
+ char * name;
+ int item_len;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+
+ inode = get_empty_inode() ;
+ if (!inode) {
+ return -ENOMEM ;
+ }
+
+ item_len = ROUND_UP (strlen (symname));
+ if (item_len > MAX_ITEM_LEN (dir->i_sb->s_blocksize)) {
+ iput(inode) ;
+ return -ENAMETOOLONG;
+ }
+
+ /* build a padded copy of the link body to hand to reiserfs_new_inode */
+ name = kmalloc (item_len, GFP_BUFFER);
+ if (!name) {
+ iput(inode) ;
+ return -ENOMEM;
+ }
+ memcpy (name, symname, strlen (symname));
+ padd_item (name, item_len, strlen (symname));
+
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_symlink") ;
+
+ inode = reiserfs_new_inode (&th, dir, S_IFLNK | S_IRWXUGO, name, strlen (symname), dentry,
+ inode, &retval);
+ kfree (name);
+ if (inode == 0) { /* reiserfs_new_inode iputs for us */
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &reiserfs_address_space_operations;
+
+ // must be sure this inode is written with this transaction
+ //
+ //reiserfs_update_sd (&th, inode, READ_BLOCKS);
+
+ retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+ inode, 1/*visible*/);
+ if (retval) {
+ /* entry insertion failed: roll back the link count */
+ inode->i_nlink--;
+ reiserfs_update_sd (&th, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ iput (inode);
+ return retval;
+ }
+
+ d_instantiate(dentry, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS ->link(): add a new name 'dentry' in 'dir' for the inode behind
+** 'old_dentry'.  Directories cannot be hard-linked (-EPERM); the link
+** count is bounded by REISERFS_LINK_MAX (-EMLINK). */
+int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry * dentry)
+{
+ int retval;
+ struct inode *inode = old_dentry->d_inode;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+
+ if (S_ISDIR(inode->i_mode))
+ return -EPERM;
+
+ if (inode->i_nlink >= REISERFS_LINK_MAX) {
+ //FIXME: sd_nlink is 32 bit for new files
+ return -EMLINK;
+ }
+
+ journal_begin(&th, dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_link") ;
+
+ /* create new entry */
+ retval = reiserfs_add_entry (&th, dir, dentry->d_name.name, dentry->d_name.len,
+ inode, 1/*visible*/);
+ if (retval) {
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+ inode->i_nlink++;
+ inode->i_ctime = CURRENT_TIME;
+ reiserfs_update_sd (&th, inode);
+
+ /* d_instantiate consumes a reference, so take one for the new name */
+ atomic_inc(&inode->i_count) ;
+ d_instantiate(dentry, inode);
+ pop_journal_writer(windex) ;
+ journal_end(&th, dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
+
+// de contains information pointing to an entry which
+// Re-validate a previously located directory entry: recompute its name
+// pointer and length from the current buffer contents and check that
+// it still carries the name we looked up.  Returns 1 if valid, 0 if not.
+static int de_still_valid (const char * name, int len, struct reiserfs_dir_entry * de)
+{
+    struct reiserfs_dir_entry tmp = *de;
+
+    // recalculate pointer to name and name length
+    set_de_name_and_namelen (&tmp);
+    // FIXME: could check more
+    // compare against the freshly computed tmp.de_name — the pointer
+    // cached in de may be stale if items were shifted while we slept
+    if (tmp.de_namelen != len || memcmp (tmp.de_name, name, len))
+	return 0;
+    return 1;
+}
+
+
+// Check that the entry described by 'de' still holds 'name' and points
+// to 'inode'.  With inode == NULL the entry is expected to be the
+// hidden entry added earlier (see reiserfs_rename); with a non-NULL
+// inode it must be visible and carry the inode's objectid.
+static int entry_points_to_object (const char * name, int len, struct reiserfs_dir_entry * de, struct inode * inode)
+{
+    if (!de_still_valid (name, len, de))
+	return 0;
+
+    if (inode) {
+	if (!de_visible (de->de_deh + de->de_entry_num))
+	    reiserfs_panic (0, "vs-7042: entry_points_to_object: entry must be visible");
+	return (de->de_objectid == inode->i_ino) ? 1 : 0;
+    }
+
+    /* this must be added hidden entry */
+    /* panic message fixed: this branch fires when the entry is
+    ** visible, i.e. it was required to be INVISIBLE */
+    if (de_visible (de->de_deh + de->de_entry_num))
+	reiserfs_panic (0, "vs-7043: entry_points_to_object: entry must be invisible");
+
+    return 1;
+}
+
+
+/* sets key of objectid the entry has to point to */
+/* Point the directory entry 'de' at the object identified by 'key':
+** copy the (dir id, objectid) pair into the entry's on-disk header. */
+static void set_ino_in_dir_entry (struct reiserfs_dir_entry * de, struct key * key)
+{
+    int num = de->de_entry_num;
+
+    de->de_deh[num].deh_dir_id = key->k_dir_id;
+    de->de_deh[num].deh_objectid = key->k_objectid;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+
+/*
+ * process, that is going to call fix_nodes/do_balance must hold only
+ * one path. If it holds 2 or more, it can get into endless waiting in
+ * get_empty_nodes or its clones
+ */
+/* VFS ->rename(): move old_dentry from old_dir to new_dentry in
+** new_dir.  The strategy: add the new name as a hidden entry first,
+** then in a retry loop re-locate and journal-prepare every buffer
+** involved, verifying nothing was shifted by another process before
+** committing; finally flip visibility and cut the old name. */
+int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry,
+ struct inode * new_dir, struct dentry *new_dentry)
+{
+ int retval;
+ INITIALIZE_PATH (old_entry_path);
+ INITIALIZE_PATH (new_entry_path);
+ INITIALIZE_PATH (dot_dot_entry_path);
+ struct item_head new_entry_ih, old_entry_ih ;
+ struct reiserfs_dir_entry old_de, new_de, dot_dot_de;
+ struct inode * old_inode, * new_inode;
+ int windex ;
+ struct reiserfs_transaction_handle th ;
+ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+
+
+ old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
+
+ // make sure, that oldname still exists and points to an object we
+ // are going to rename
+ old_de.de_gen_number_bit_string = 0;
+ retval = reiserfs_find_entry (old_dir, old_dentry->d_name.name, old_dentry->d_name.len,
+ &old_entry_path, &old_de);
+ pathrelse (&old_entry_path);
+ if (retval != NAME_FOUND || old_de.de_objectid != old_inode->i_ino) {
+ // FIXME: IO error is possible here
+ return -ENOENT;
+ }
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ // make sure, that directory being renamed has correct ".."
+ // and that its new parent directory has not too many links
+ // already
+
+ if (new_inode) {
+ if (!reiserfs_empty_dir(new_inode)) {
+ return -ENOTEMPTY;
+ }
+ }
+
+ /* directory is renamed, its parent directory will be changed,
+ ** so find ".." entry
+ */
+ dot_dot_de.de_gen_number_bit_string = 0;
+ retval = reiserfs_find_entry (old_inode, "..", 2, &dot_dot_entry_path, &dot_dot_de);
+ pathrelse (&dot_dot_entry_path);
+ if (retval != NAME_FOUND)
+ return -EIO;
+
+ /* inode number of .. must equal old_dir->i_ino */
+ if (dot_dot_de.de_objectid != old_dir->i_ino)
+ return -EIO;
+ }
+
+ journal_begin(&th, old_dir->i_sb, jbegin_count) ;
+ windex = push_journal_writer("reiserfs_rename") ;
+
+ /* add new entry (or find the existing one) */
+ /* the entry is added HIDDEN (last arg 0) so a crash before the
+ ** commit below leaves the namespace unchanged */
+ retval = reiserfs_add_entry (&th, new_dir, new_dentry->d_name.name, new_dentry->d_name.len,
+ old_inode, 0);
+ if (retval == -EEXIST) {
+ // FIXME: is it possible, that new_inode == 0 here? If yes, it
+ // is not clear how does ext2 handle that
+ if (!new_inode) {
+ printk ("reiserfs_rename: new entry is found, new inode == 0\n");
+ BUG ();
+ }
+ } else if (retval) {
+ pop_journal_writer(windex) ;
+ journal_end(&th, old_dir->i_sb, jbegin_count) ;
+ return retval;
+ }
+
+
+ /* retry loop: gather and journal-prepare all buffers, then verify
+ ** nothing moved while we may have slept; restart on any change */
+ while (1) {
+ // look for old name using corresponding entry key (found by reiserfs_find_entry)
+ if (search_by_entry_key (new_dir->i_sb, &old_de.de_entry_key, &old_entry_path, &old_de) != NAME_FOUND)
+ BUG ();
+
+ copy_item_head(&old_entry_ih, get_ih(&old_entry_path)) ;
+
+ // look for new name by reiserfs_find_entry
+ new_de.de_gen_number_bit_string = 0;
+ retval = reiserfs_find_entry (new_dir, new_dentry->d_name.name, new_dentry->d_name.len,
+ &new_entry_path, &new_de);
+ if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND)
+ BUG ();
+
+ copy_item_head(&new_entry_ih, get_ih(&new_entry_path)) ;
+
+ reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1) ;
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ if (search_by_entry_key (new_dir->i_sb, &dot_dot_de.de_entry_key, &dot_dot_entry_path, &dot_dot_de) != NAME_FOUND)
+ BUG ();
+ // node containing ".." gets into transaction
+ reiserfs_prepare_for_journal(old_inode->i_sb, dot_dot_de.de_bh, 1) ;
+ }
+ /* we should check seals here, not do
+ this stuff, yes? Then, having
+ gathered everything into RAM we
+ should lock the buffers, yes? -Hans */
+ /* probably. our rename needs to hold more
+ ** than one path at once. The seals would
+ ** have to be written to deal with multi-path
+ ** issues -chris
+ */
+ /* sanity checking before doing the rename - avoid races many
+ ** of the above checks could have scheduled. We have to be
+ ** sure our items haven't been shifted by another process.
+ */
+ if (!entry_points_to_object(new_dentry->d_name.name,
+ new_dentry->d_name.len,
+ &new_de, new_inode) ||
+ item_moved(&new_entry_ih, &new_entry_path) ||
+ item_moved(&old_entry_ih, &old_entry_path) ||
+ !entry_points_to_object (old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ &old_de, old_inode)) {
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, new_de.de_bh);
+ if (S_ISDIR(old_inode->i_mode))
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, dot_dot_de.de_bh);
+#if 0
+ // FIXME: do we need this? shouldn't we simply continue?
+ run_task_queue(&tq_disk);
+ current->policy |= SCHED_YIELD;
+ /*current->counter = 0;*/
+ schedule();
+#endif
+ continue;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (S_ISDIR(old_inode->i_mode) &&
+ (!entry_points_to_object ("..", 2, &dot_dot_de, old_dir) ||
+ !reiserfs_buffer_prepared(dot_dot_de.de_bh))) {
+ // this should be not changed
+ BUG ();
+ }
+#endif
+
+ break;
+ }
+
+ /* ok, all the changes can be done in one fell swoop when we
+ have claimed all the buffers needed.*/
+
+ mark_de_visible (new_de.de_deh + new_de.de_entry_num);
+ set_ino_in_dir_entry (&new_de, INODE_PKEY (old_inode));
+ journal_mark_dirty (&th, old_dir->i_sb, new_de.de_bh);
+
+ mark_de_hidden (old_de.de_deh + old_de.de_entry_num);
+ old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+ new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME;
+
+ if (new_inode) {
+ // adjust link number of the victim
+ /* NOTE(review): a directory victim is decremented again in the
+ ** S_ISDIR(old_inode) block below — two drops take an empty
+ ** dir's nlink from 2 to 0 */
+ if (S_ISDIR(new_inode->i_mode)) {
+ DEC_DIR_INODE_NLINK(new_inode)
+ } else {
+ new_inode->i_nlink--;
+ }
+ new_inode->i_ctime = CURRENT_TIME;
+ }
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ //if (dot_dot_de.de_bh) {
+ // adjust ".." of renamed directory
+ set_ino_in_dir_entry (&dot_dot_de, INODE_PKEY (new_dir));
+ journal_mark_dirty (&th, new_dir->i_sb, dot_dot_de.de_bh);
+
+ DEC_DIR_INODE_NLINK(old_dir)
+ if (new_inode) {
+ if (S_ISDIR(new_inode->i_mode)) {
+ DEC_DIR_INODE_NLINK(new_inode)
+ } else {
+ new_inode->i_nlink--;
+ }
+ } else {
+ INC_DIR_INODE_NLINK(new_dir)
+ }
+ }
+
+ // looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
+ pathrelse (&new_entry_path);
+ pathrelse (&dot_dot_entry_path);
+
+ // FIXME: this reiserfs_cut_from_item's return value may screw up
+ // anybody, but it will panic if will not be able to find the
+ // entry. This needs one more clean up
+ if (reiserfs_cut_from_item (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL, 0) < 0)
+ reiserfs_warning ("vs-: reiserfs_rename: coudl not cut old name. Fsck later?\n");
+
+ old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;
+ old_dir->i_blocks = ((old_dir->i_size + 511) >> 9);
+
+ reiserfs_update_sd (&th, old_dir);
+ reiserfs_update_sd (&th, new_dir);
+ if (new_inode)
+ reiserfs_update_sd (&th, new_inode);
+
+ pop_journal_writer(windex) ;
+ journal_end(&th, old_dir->i_sb, jbegin_count) ;
+ return 0;
+}
+
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
new file mode 100644
index 000000000..0bcbd4559
--- /dev/null
+++ b/fs/reiserfs/objectid.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/locks.h>
+#include <linux/sched.h>
+#include <linux/reiserfs_fs.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+// find where objectid map starts
+#define objectid_map(s,rs) (old_format_only (s) ? \
+ (__u32 *)((struct reiserfs_super_block_v1 *)rs + 1) :\
+ (__u32 *)(rs + 1))
+
+
+#ifdef CONFIG_REISERFS_CHECK
+
+/* Basic consistency check of the on-disk objectid map: its first
+** element must always be 1; panic otherwise. */
+static void check_objectid_map (struct super_block * s, __u32 * map)
+{
+ if (le32_to_cpu (map[0]) != 1)
+ reiserfs_panic (s, "vs-15010: check_objectid_map: map corrupted");
+
+ // FIXME: add something else here
+}
+
+#endif
+
+
+/* When we allocate objectids we allocate the first unused objectid.
+ Each sequence of objectids in use (the odd sequences) is followed
+ by a sequence of objectids not in use (the even sequences). We
+ only need to record the last objectid in each of these sequences
+ (both the odd and even sequences) in order to fully define the
+ boundaries of the sequences. A consequence of allocating the first
+ objectid not in use is that under most conditions this scheme is
+ extremely compact. The exception is immediately after a sequence
+ of operations which deletes a large number of objects of
+ non-sequential objectids, and even then it will become compact
+ again as soon as more objects are created. Note that many
+ interesting optimizations of layout could result from complicating
+ objectid assignment, but we have deferred making them for now. */
+
+
+/* get unique object identifier */
+/* get unique object identifier */
+/* Allocate the first unused objectid from the map stored in the super
+** block, journaling the super block buffer.  Returns the new objectid,
+** or 0 when the id space is exhausted. */
+__u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th)
+{
+ struct super_block * s = th->t_super;
+ struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+ __u32 * map = objectid_map (s, rs);
+ __u32 unused_objectid;
+
+
+#ifdef CONFIG_REISERFS_CHECK
+ check_objectid_map (s, map);
+#endif
+
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+ /* comment needed -Hans */
+ /* map[1] is the first objectid not in use (start of the first
+ ** "free" sequence) */
+ unused_objectid = le32_to_cpu (map[1]);
+ if (unused_objectid == U32_MAX) {
+ printk ("REISERFS: get_objectid: no more object ids\n");
+ reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s)) ;
+ return 0;
+ }
+
+ /* This incrementation allocates the first unused objectid. That
+ is to say, the first entry on the objectid map is the first
+ unused objectid, and by incrementing it we use it. See below
+ where we check to see if we eliminated a sequence of unused
+ objectids.... */
+ map[1] = cpu_to_le32 (unused_objectid + 1);
+
+ /* Now we check to see if we eliminated the last remaining member of
+ the first even sequence (and can eliminate the sequence by
+ eliminating its last objectid from oids), and can collapse the
+ first two odd sequences into one sequence. If so, then the net
+ result is to eliminate a pair of objectids from oids. We do this
+ by shifting the entire map to the left. */
+ if (le16_to_cpu (rs->s_oid_cursize) > 2 && map[1] == map[2]) {
+ memmove (map + 1, map + 3, (le16_to_cpu (rs->s_oid_cursize) - 3) * sizeof(__u32));
+ //rs->s_oid_cursize -= 2;
+ rs->s_oid_cursize = cpu_to_le16 (le16_to_cpu (rs->s_oid_cursize) - 2);
+ }
+
+ journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s));
+ s->s_dirt = 1;
+ return unused_objectid;
+}
+
+
+/* makes object identifier unused */
+/* makes object identifier unused */
+/* Return 'objectid_to_release' to the objectid map in the super block.
+** The map shrinks when releasing merges two free sequences, and grows
+** (if there is room) when a hole is punched in a used sequence. */
+void reiserfs_release_objectid (struct reiserfs_transaction_handle *th,
+ __u32 objectid_to_release)
+{
+ struct super_block * s = th->t_super;
+ struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+ __u32 * map = objectid_map (s, rs);
+ int i = 0;
+
+ //return;
+#ifdef CONFIG_REISERFS_CHECK
+ check_objectid_map (s, map);
+#endif
+
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+ journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s));
+ s->s_dirt = 1;
+
+
+ /* start at the beginning of the objectid map (i = 0) and go to
+ the end of it (i = disk_sb->s_oid_cursize). Linear search is
+ what we use, though it is possible that binary search would be
+ more efficient after performing lots of deletions (which is
+ when oids is large.) We only check even i's. */
+ while (i < le16_to_cpu (rs->s_oid_cursize)) {
+ if (objectid_to_release == le32_to_cpu (map[i])) {
+ /* This incrementation unallocates the objectid. */
+ //map[i]++;
+ map[i] = cpu_to_le32 (le32_to_cpu (map[i]) + 1);
+
+ /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
+ if (map[i] == map[i+1]) {
+ /* shrink objectid map */
+ memmove (map + i, map + i + 2,
+ (le16_to_cpu (rs->s_oid_cursize) - i - 2) * sizeof (__u32));
+ //disk_sb->s_oid_cursize -= 2;
+ rs->s_oid_cursize = cpu_to_le16 (le16_to_cpu (rs->s_oid_cursize) - 2);
+
+#ifdef CONFIG_REISERFS_CHECK
+ if (le16_to_cpu (rs->s_oid_cursize) < 2 ||
+ le16_to_cpu (rs->s_oid_cursize) > le16_to_cpu (rs->s_oid_maxsize))
+ reiserfs_panic (s, "vs-15005: reiserfs_release_objectid: "
+ "objectid map corrupted cur_size == %d (max == %d)",
+ le16_to_cpu (rs->s_oid_cursize), le16_to_cpu (rs->s_oid_maxsize));
+#endif
+ }
+ return;
+ }
+
+ if (objectid_to_release > le32_to_cpu (map[i]) &&
+ objectid_to_release < le32_to_cpu (map[i + 1])) {
+ /* size of objectid map is not changed */
+ if (objectid_to_release + 1 == le32_to_cpu (map[i + 1])) {
+ //objectid_map[i+1]--;
+ map[i + 1] = cpu_to_le32 (le32_to_cpu (map[i + 1]) - 1);
+ return;
+ }
+
+ /* raw little-endian equality compare is fine here */
+ if (rs->s_oid_cursize == rs->s_oid_maxsize)
+ /* objectid map must be expanded, but there is no space */
+ return;
+
+ /* expand the objectid map*/
+ memmove (map + i + 3, map + i + 1,
+ (le16_to_cpu (rs->s_oid_cursize) - i - 1) * sizeof(__u32));
+ map[i + 1] = cpu_to_le32 (objectid_to_release);
+ map[i + 2] = cpu_to_le32 (objectid_to_release + 1);
+ rs->s_oid_cursize = cpu_to_le16 (le16_to_cpu (rs->s_oid_cursize) + 2);
+ return;
+ }
+ i += 2;
+ }
+
+ reiserfs_warning ("vs-15010: reiserfs_release_objectid: tried to free free object id (%lu)",
+ objectid_to_release);
+}
+
+
+/* Convert a v1 (old format) objectid map to live past the larger v2
+** super block: shrink it if needed, relocate it past the new super,
+** and clamp the max size to what now fits in the block. */
+int reiserfs_convert_objectid_map_v1(struct super_block *s) {
+ struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK (s);
+ int cur_size = le16_to_cpu(disk_sb->s_oid_cursize) ;
+ int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2 ;
+ int old_max = le16_to_cpu(disk_sb->s_oid_maxsize) ;
+ struct reiserfs_super_block_v1 *disk_sb_v1 ;
+ __u32 *objectid_map, *new_objectid_map ;
+ int i ;
+
+ disk_sb_v1=(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
+ objectid_map = (__u32 *)(disk_sb_v1 + 1) ;
+ new_objectid_map = (__u32 *)(disk_sb + 1) ;
+
+ if (cur_size > new_size) {
+ /* mark everyone used that was listed as free at the end of the objectid
+ ** map
+ */
+ objectid_map[new_size - 1] = objectid_map[cur_size - 1] ;
+ disk_sb->s_oid_cursize = cpu_to_le16(new_size) ;
+ }
+ /* move the smaller objectid map past the end of the new super */
+ /* copy backwards since source and destination ranges may overlap */
+ for (i = new_size - 1 ; i >= 0 ; i--) {
+ objectid_map[i + (old_max - new_size)] = objectid_map[i] ;
+ }
+
+
+ /* set the max size so we don't overflow later */
+ disk_sb->s_oid_maxsize = cpu_to_le16(new_size) ;
+
+ /* finally, zero out the unused chunk of the new super */
+ memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused)) ;
+ return 0 ;
+}
+
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
new file mode 100644
index 000000000..40cabc500
--- /dev/null
+++ b/fs/reiserfs/prints.c
@@ -0,0 +1,881 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/string.h>
+
+#else
+
+#include "nokernel.h"
+#include <limits.h>
+
+#endif
+
+#include <stdarg.h>
+
+static char error_buf[1024];
+static char fmt_buf[1024];
+static char off_buf[80];
+
+
+static char * cpu_offset (struct cpu_key * key)
+{
+ if (cpu_key_k_type(key) == TYPE_DIRENTRY)
+ sprintf (off_buf, "%Lu(%Lu)",
+ (unsigned long long)GET_HASH_VALUE (cpu_key_k_offset (key)),
+ (unsigned long long)GET_GENERATION_NUMBER (cpu_key_k_offset (key)));
+ else
+ sprintf (off_buf, "0x%Lx", (unsigned long long)cpu_key_k_offset (key));
+ return off_buf;
+}
+
+
+static char * le_offset (struct key * key)
+{
+ int version;
+
+ version = le_key_version (key);
+ if (le_key_k_type (version, key) == TYPE_DIRENTRY)
+ sprintf (off_buf, "%Lu(%Lu)",
+ (unsigned long long)GET_HASH_VALUE (le_key_k_offset (version, key)),
+ (unsigned long long)GET_GENERATION_NUMBER (le_key_k_offset (version, key)));
+ else
+ sprintf (off_buf, "0x%Lx", (unsigned long long)le_key_k_offset (version, key));
+ return off_buf;
+}
+
+
+static char * cpu_type (struct cpu_key * key)
+{
+ if (cpu_key_k_type (key) == TYPE_STAT_DATA)
+ return "SD";
+ if (cpu_key_k_type (key) == TYPE_DIRENTRY)
+ return "DIR";
+ if (cpu_key_k_type (key) == TYPE_DIRECT)
+ return "DIRECT";
+ if (cpu_key_k_type (key) == TYPE_INDIRECT)
+ return "IND";
+ return "UNKNOWN";
+}
+
+
+static char * le_type (struct key * key)
+{
+ int version;
+
+ version = le_key_version (key);
+
+ if (le_key_k_type (version, key) == TYPE_STAT_DATA)
+ return "SD";
+ if (le_key_k_type (version, key) == TYPE_DIRENTRY)
+ return "DIR";
+ if (le_key_k_type (version, key) == TYPE_DIRECT)
+ return "DIRECT";
+ if (le_key_k_type (version, key) == TYPE_INDIRECT)
+ return "IND";
+ return "UNKNOWN";
+}
+
+
+/* %k */
+static void sprintf_le_key (char * buf, struct key * key)
+{
+ if (key)
+ sprintf (buf, "[%d %d %s %s]", le32_to_cpu (key->k_dir_id),
+ le32_to_cpu (key->k_objectid), le_offset (key), le_type (key));
+ else
+ sprintf (buf, "[NULL]");
+}
+
+
+/* %K */
+static void sprintf_cpu_key (char * buf, struct cpu_key * key)
+{
+ if (key)
+ sprintf (buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
+ key->on_disk_key.k_objectid, cpu_offset (key), cpu_type (key));
+ else
+ sprintf (buf, "[NULL]");
+}
+
+
+static void sprintf_item_head (char * buf, struct item_head * ih)
+{
+ if (ih) {
+ sprintf (buf, "%s", (ih_version (ih) == ITEM_VERSION_2) ? "*NEW* " : "*OLD*");
+ sprintf_le_key (buf + strlen (buf), &(ih->ih_key));
+ sprintf (buf + strlen (buf), ", item_len %d, item_location %d, "
+ "free_space(entry_count) %d",
+ ih->ih_item_len, ih->ih_item_location, ih_free_space (ih));
+ } else
+ sprintf (buf, "[NULL]");
+}
+
+
+static void sprintf_direntry (char * buf, struct reiserfs_dir_entry * de)
+{
+ char name[20];
+
+ memcpy (name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
+ name [de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
+ sprintf (buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+}
+
+
+static void sprintf_block_head (char * buf, struct buffer_head * bh)
+{
+ sprintf (buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
+ B_LEVEL (bh), B_NR_ITEMS (bh), B_FREE_SPACE (bh));
+#if 0
+ if (B_LEVEL (bh) == DISK_LEAF_NODE_LEVEL)
+ sprintf_le_key (buf + strlen (buf), B_PRIGHT_DELIM_KEY (bh));
+#endif
+}
+
+
+static void sprintf_buffer_head (char * buf, struct buffer_head * bh)
+{
+ sprintf (buf, "dev %s, size %d, blocknr %ld, count %d, list %d, state 0x%lx, page %p, (%s, %s, %s)",
+ kdevname (bh->b_dev), bh->b_size, bh->b_blocknr, atomic_read (&(bh->b_count)), bh->b_list,
+ bh->b_state, bh->b_page,
+ buffer_uptodate (bh) ? "UPTODATE" : "!UPTODATE",
+ buffer_dirty (bh) ? "DIRTY" : "CLEAN",
+ buffer_locked (bh) ? "LOCKED" : "UNLOCKED");
+}
+
+
+static void sprintf_disk_child (char * buf, struct disk_child * dc)
+{
+ sprintf (buf, "[dc_number=%d, dc_size=%u]", dc->dc_block_number, dc->dc_size);
+}
+
+
+static char * is_there_reiserfs_struct (char * fmt, int * what, int * skip)
+{
+ char * k = fmt;
+
+ *skip = 0;
+
+ while (1) {
+ k = strstr (k, "%");
+ if (!k)
+ break;
+ if (k && (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' ||
+ k[1] == 'z' || k[1] == 'b' || k[1] == 'y')) {
+ *what = k[1];
+ break;
+ }
+ (*skip) ++;
+ k ++;
+ }
+ return k;
+}
+
+
+/* debugging reiserfs we used to print out a lot of different
+ variables, like keys, item headers, buffer heads etc. Values of
+ most fields matter. So it took a long time just to write
+ appropriative printk. With this reiserfs_warning you can use format
+ specification for complex structures like you used to do with
+ printfs for integers, doubles and pointers. For instance, to print
+ out key structure you have to write just:
+ reiserfs_warning ("bad key %k", key);
+ instead of
+ printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+ key->k_offset, key->k_uniqueness);
+*/
+
+#define do_reiserfs_warning \
+{\
+ char * fmt1 = fmt_buf;\
+ va_list args;\
+ int i, j;\
+ char * k;\
+ char * p = error_buf;\
+ int what, skip;\
+\
+ strcpy (fmt1, fmt);\
+ va_start(args, fmt);\
+\
+ while (1) {\
+ k = is_there_reiserfs_struct (fmt1, &what, &skip);\
+ if (k != 0) {\
+ *k = 0;\
+ p += vsprintf (p, fmt1, args);\
+\
+ for (i = 0; i < skip; i ++)\
+ j = va_arg (args, int);\
+\
+ switch (what) {\
+ case 'k':\
+ sprintf_le_key (p, va_arg(args, struct key *));\
+ break;\
+ case 'K':\
+ sprintf_cpu_key (p, va_arg(args, struct cpu_key *));\
+ break;\
+ case 'h':\
+ sprintf_item_head (p, va_arg(args, struct item_head *));\
+ break;\
+ case 't':\
+ sprintf_direntry (p, va_arg(args, struct reiserfs_dir_entry *));\
+ break;\
+ case 'y':\
+ sprintf_disk_child (p, va_arg(args, struct disk_child *));\
+ break;\
+ case 'z':\
+ sprintf_block_head (p, va_arg(args, struct buffer_head *));\
+ break;\
+ case 'b':\
+ sprintf_buffer_head (p, va_arg(args, struct buffer_head *));\
+ break;\
+ }\
+ p += strlen (p);\
+ fmt1 = k + 2;\
+ } else {\
+ i = vsprintf (p, fmt1, args);\
+ break;\
+ }\
+ }\
+\
+ va_end(args);\
+}
+
+
+/* in addition to usual conversion specifiers this accepts reiserfs
+ specific conversion specifiers:
+ %k to print little endian key,
+ %K to print cpu key,
+ %h to print item_head,
+ %t to print directory entry
+ %z to print block head (arg must be struct buffer_head *
+ %b to print buffer_head
+*/
+void reiserfs_warning (const char * fmt, ...)
+{
+ do_reiserfs_warning;
+ /* console_print (error_buf); */
+ printk ("%s", error_buf);
+}
+
+void reiserfs_debug (struct super_block *s, int level, const char * fmt, ...)
+{
+#ifdef CONFIG_REISERFS_CHECK
+ do_reiserfs_warning;
+ printk ("%s", error_buf);
+#else
+ ;
+#endif
+}
+
+/* The format:
+
+ maintainer-errorid: [function-name:] message
+
+ where errorid is unique to the maintainer and function-name is
+ optional, is recommended, so that anyone can easily find the bug
+ with a simple grep for the short to type string
+ maintainer-errorid. Don't bother with reusing errorids, there are
+ lots of numbers out there.
+
+ Example:
+
+ reiserfs_panic(
+ p_sb, "reiser-29: reiserfs_new_blocknrs: "
+ "one of search_start or rn(%d) is equal to MAX_B_NUM,"
+ "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
+ rn, bh
+ );
+
+ Regular panic()s sometimes clear the screen before the message can
+ be read, thus the need for the while loop.
+
+ Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
+ pointless complexity):
+
+ panics in reiserfs_fs.h have numbers from 1000 to 1999
+ super.c 2000 to 2999
+ preserve.c 3000 to 3999
+ bitmap.c 4000 to 4999
+ stree.c 5000 to 5999
+ prints.c 6000 to 6999
+ namei.c 7000 to 7999
+ fix_nodes.c 8000 to 8999
+ dir.c 9000 to 9999
+ lbalance.c 10000 to 10999
+ ibalance.c 11000 to 11999 not ready
+ do_balan.c 12000 to 12999
+ inode.c 13000 to 13999
+ file.c 14000 to 14999
+ objectid.c 15000 - 15999
+ buffer.c 16000 - 16999
+ symlink.c 17000 - 17999
+
+ . */
+
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+void reiserfs_panic (struct super_block * sb, const char * fmt, ...)
+{
+#ifdef __KERNEL__
+ show_reiserfs_locks() ;
+#endif
+ do_reiserfs_warning;
+ printk ("%s", error_buf);
+ BUG ();
+ // console_print (error_buf);
+ // for (;;);
+
+#ifdef __KERNEL__
+
+ /* comment before release */
+ //for (;;);
+
+#if 0 /* this is not needed, the state is ignored */
+ if (sb && !(sb->s_flags & MS_RDONLY)) {
+ sb->u.reiserfs_sb.s_mount_state |= REISERFS_ERROR_FS;
+ sb->u.reiserfs_sb.s_rs->s_state = REISERFS_ERROR_FS;
+
+ mark_buffer_dirty(sb->u.reiserfs_sb.s_sbh) ;
+ sb->s_dirt = 1;
+ }
+#endif
+
+ /* this is to prevent panic from syncing this filesystem */
+ if (sb && sb->s_lock)
+ sb->s_lock=0;
+ if (sb)
+ sb->s_flags |= MS_RDONLY;
+
+ panic ("REISERFS: panic (device %s): %s\n",
+ sb ? kdevname(sb->s_dev) : "sb == 0", error_buf);
+#else
+ exit (0);
+#endif
+}
+
+
+void print_virtual_node (struct virtual_node * vn)
+{
+ int i;
+ struct virtual_item * vi;
+
+ printk ("VIRTUAL NODE CONTAINS %d items, has size %d,%s,%s, ITEM_POS=%d POS_IN_ITEM=%d MODE=\'%c\'\n",
+ vn->vn_nr_item, vn->vn_size,
+ (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE )? "left mergeable" : "",
+ (vn->vn_vi[vn->vn_nr_item - 1].vi_type & VI_TYPE_RIGHT_MERGEABLE) ? "right mergeable" : "",
+ vn->vn_affected_item_num, vn->vn_pos_in_item, vn->vn_mode);
+
+ vi = vn->vn_vi;
+ for (i = 0; i < vn->vn_nr_item; i ++, vi ++)
+ op_print_vi (vi);
+
+}
+
+
+void print_path (struct tree_balance * tb, struct path * path)
+{
+ int h = 0;
+ struct buffer_head * bh;
+
+ if (tb) {
+ while (tb->insert_size[h]) {
+ bh = PATH_H_PBUFFER (path, h);
+ printk ("block %lu (level=%d), position %d\n", bh ? bh->b_blocknr : 0,
+ bh ? B_LEVEL (bh) : 0, PATH_H_POSITION (path, h));
+ h ++;
+ }
+ } else {
+ int offset = path->path_length;
+ struct buffer_head * bh;
+ printk ("Offset Bh (b_blocknr, b_count) Position Nr_item\n");
+ while ( offset > ILLEGAL_PATH_ELEMENT_OFFSET ) {
+ bh = PATH_OFFSET_PBUFFER (path, offset);
+ printk ("%6d %10p (%9lu, %7d) %8d %7d\n", offset,
+ bh, bh ? bh->b_blocknr : 0, bh ? atomic_read (&(bh->b_count)) : 0,
+ PATH_OFFSET_POSITION (path, offset), bh ? B_NR_ITEMS (bh) : -1);
+
+ offset --;
+ }
+ }
+
+}
+
+
+/* this prints internal nodes (4 keys/items in line) (dc_number,
+ dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
+ dc_size)...*/
+static int print_internal (struct buffer_head * bh, int first, int last)
+{
+ struct key * key;
+ struct disk_child * dc;
+ int i;
+ int from, to;
+
+ if (!B_IS_KEYS_LEVEL (bh))
+ return 1;
+
+ check_internal (bh);
+
+ if (first == -1) {
+ from = 0;
+ to = B_NR_ITEMS (bh);
+ } else {
+ from = first;
+ to = last < B_NR_ITEMS (bh) ? last : B_NR_ITEMS (bh);
+ }
+
+ reiserfs_warning ("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);
+
+ dc = B_N_CHILD (bh, from);
+ reiserfs_warning ("PTR %d: %y ", from, dc);
+
+ for (i = from, key = B_N_PDELIM_KEY (bh, from), dc ++; i < to; i ++, key ++, dc ++) {
+ reiserfs_warning ("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
+ if (i && i % 4 == 0)
+ printk ("\n");
+ }
+ printk ("\n");
+ return 0;
+}
+
+
+
+
+
+static int print_leaf (struct buffer_head * bh, int print_mode, int first, int last)
+{
+ struct block_head * blkh;
+ struct item_head * ih;
+ int i;
+ int from, to;
+
+ if (!B_IS_ITEMS_LEVEL (bh))
+ return 1;
+
+ check_leaf (bh);
+
+ blkh = B_BLK_HEAD (bh);
+ ih = B_N_PITEM_HEAD (bh,0);
+
+ printk ("\n===================================================================\n");
+ reiserfs_warning ("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);
+
+ if (!(print_mode & PRINT_LEAF_ITEMS)) {
+ reiserfs_warning ("FIRST ITEM_KEY: %k, LAST ITEM KEY: %k\n",
+ &(ih->ih_key), &((ih + le16_to_cpu (blkh->blk_nr_item) - 1)->ih_key));
+ return 0;
+ }
+
+ if (first < 0 || first > le16_to_cpu (blkh->blk_nr_item) - 1)
+ from = 0;
+ else
+ from = first;
+
+ if (last < 0 || last > le16_to_cpu (blkh->blk_nr_item))
+ to = le16_to_cpu (blkh->blk_nr_item);
+ else
+ to = last;
+
+ ih += from;
+ printk ("-------------------------------------------------------------------------------\n");
+ printk ("|##| type | key | ilen | free_space | version | loc |\n");
+ for (i = from; i < to; i++, ih ++) {
+ printk ("-------------------------------------------------------------------------------\n");
+ reiserfs_warning ("|%2d| %h |\n", i, ih);
+ if (print_mode & PRINT_LEAF_ITEMS)
+ op_print_item (ih, B_I_PITEM (bh, ih));
+ }
+
+ printk ("===================================================================\n");
+
+ return 0;
+}
+
+static char * reiserfs_version (char * buf)
+{
+ __u16 * pversion;
+
+ pversion = (__u16 *)(buf) + 36;
+ if (*pversion == 0)
+ return "0";
+ if (*pversion == 2)
+ return "2";
+ return "Unknown";
+}
+
+
+/* return 1 if this is not super block */
+static int print_super_block (struct buffer_head * bh)
+{
+ struct reiserfs_super_block * rs = (struct reiserfs_super_block *)(bh->b_data);
+ int skipped, data_blocks;
+
+
+ if (strncmp (rs->s_magic, REISERFS_SUPER_MAGIC_STRING, strlen ( REISERFS_SUPER_MAGIC_STRING)) &&
+ strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING, strlen ( REISER2FS_SUPER_MAGIC_STRING)))
+ return 1;
+
+ printk ("%s\'s super block in block %ld\n======================\n", kdevname (bh->b_dev), bh->b_blocknr);
+ printk ("Reiserfs version %s\n", reiserfs_version (bh->b_data));
+ printk ("Block count %u\n", le32_to_cpu (rs->s_block_count));
+ printk ("Blocksize %d\n", le16_to_cpu (rs->s_blocksize));
+ printk ("Free blocks %u\n", le32_to_cpu (rs->s_free_blocks));
+ skipped = bh->b_blocknr; // FIXME: this would be confusing if
+ // someone stores reiserfs super block in some data block ;)
+ data_blocks = le32_to_cpu (rs->s_block_count) - skipped - 1 -
+ le16_to_cpu (rs->s_bmap_nr) - (le32_to_cpu (rs->s_orig_journal_size) + 1) -
+ le32_to_cpu (rs->s_free_blocks);
+ printk ("Busy blocks (skipped %d, bitmaps - %d, journal blocks - %d\n"
+ "1 super blocks, %d data blocks\n",
+ skipped, le16_to_cpu (rs->s_bmap_nr),
+ (le32_to_cpu (rs->s_orig_journal_size) + 1), data_blocks);
+ printk ("Root block %u\n", le32_to_cpu (rs->s_root_block));
+ printk ("Journal block (first) %d\n", le32_to_cpu (rs->s_journal_block));
+ printk ("Journal dev %d\n", le32_to_cpu (rs->s_journal_dev));
+ printk ("Journal orig size %d\n", le32_to_cpu (rs->s_orig_journal_size));
+ printk ("Filesystem state %s\n",
+ (le16_to_cpu (rs->s_state) == REISERFS_VALID_FS) ? "VALID" : "ERROR");
+ printk ("Hash function \"%s\"\n", le16_to_cpu (rs->s_hash_function_code) == TEA_HASH ? "tea" :
+ ((le16_to_cpu (rs->s_hash_function_code) == YURA_HASH) ? "rupasov" : "unknown"));
+
+#if 0
+ __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
+ __u32 s_journal_block_count ; /* total size of the journal. can change over time */
+ __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
+ __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
+ __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
+#endif
+ printk ("Tree height %d\n", rs->s_tree_height);
+ return 0;
+}
+
+
+static int print_desc_block (struct buffer_head * bh)
+{
+ struct reiserfs_journal_desc * desc;
+
+ desc = (struct reiserfs_journal_desc *)(bh->b_data);
+ if (memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8))
+ return 1;
+
+ printk ("Desc block %lu (j_trans_id %d, j_mount_id %d, j_len %d)",
+ bh->b_blocknr, desc->j_trans_id, desc->j_mount_id, desc->j_len);
+
+ return 0;
+}
+
+
+void print_block (struct buffer_head * bh, ...)//int print_mode, int first, int last)
+{
+ va_list args;
+ int mode, first, last;
+
+ va_start (args, bh);
+
+ if ( ! bh ) {
+ printk("print_block: buffer is NULL\n");
+ return;
+ }
+
+ mode = va_arg (args, int);
+ first = va_arg (args, int);
+ last = va_arg (args, int);
+ if (print_leaf (bh, mode, first, last))
+ if (print_internal (bh, first, last))
+ if (print_super_block (bh))
+ if (print_desc_block (bh))
+ printk ("Block %ld contains unformatted data\n", bh->b_blocknr);
+}
+
+
+
+char print_tb_buf[2048];
+
+/* this stores initial state of tree balance in the print_tb_buf */
+void store_print_tb (struct tree_balance * tb)
+{
+ int h = 0;
+ int i;
+ struct buffer_head * tbSh, * tbFh;
+
+ if (!tb)
+ return;
+
+ sprintf (print_tb_buf, "\n"
+ "BALANCING %d\n"
+ "MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n"
+ "=====================================================================\n"
+ "* h * S * L * R * F * FL * FR * CFL * CFR *\n",
+ tb->tb_sb->u.reiserfs_sb.s_do_balance,
+ tb->tb_mode, PATH_LAST_POSITION (tb->tb_path), tb->tb_path->pos_in_item);
+
+ for (h = 0; h < sizeof(tb->insert_size) / sizeof (tb->insert_size[0]); h ++) {
+ if (PATH_H_PATH_OFFSET (tb->tb_path, h) <= tb->tb_path->path_length &&
+ PATH_H_PATH_OFFSET (tb->tb_path, h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
+ tbSh = PATH_H_PBUFFER (tb->tb_path, h);
+ tbFh = PATH_H_PPARENT (tb->tb_path, h);
+ } else {
+ tbSh = 0;
+ tbFh = 0;
+ }
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "* %d * %3ld(%2d) * %3ld(%2d) * %3ld(%2d) * %5ld * %5ld * %5ld * %5ld * %5ld *\n",
+ h,
+ (tbSh) ? (tbSh->b_blocknr):(-1),
+ (tbSh) ? atomic_read (&(tbSh->b_count)) : -1,
+ (tb->L[h]) ? (tb->L[h]->b_blocknr):(-1),
+ (tb->L[h]) ? atomic_read (&(tb->L[h]->b_count)) : -1,
+ (tb->R[h]) ? (tb->R[h]->b_blocknr):(-1),
+ (tb->R[h]) ? atomic_read (&(tb->R[h]->b_count)) : -1,
+ (tbFh) ? (tbFh->b_blocknr):(-1),
+ (tb->FL[h]) ? (tb->FL[h]->b_blocknr):(-1),
+ (tb->FR[h]) ? (tb->FR[h]->b_blocknr):(-1),
+ (tb->CFL[h]) ? (tb->CFL[h]->b_blocknr):(-1),
+ (tb->CFR[h]) ? (tb->CFR[h]->b_blocknr):(-1));
+ }
+
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "=====================================================================\n"
+ "* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
+ "* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
+ tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],tb->rbytes, tb->blknum[0],
+ tb->s0num, tb->s1num,tb->s1bytes, tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0], tb->rkey[0]);
+
+ /* this prints balance parameters for non-leaf levels */
+ h = 0;
+ do {
+ h++;
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "* %d * %4d * %2d * * %2d * * %2d *\n",
+ h, tb->insert_size[h], tb->lnum[h], tb->rnum[h], tb->blknum[h]);
+ } while (tb->insert_size[h]);
+
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "=====================================================================\n"
+ "FEB list: ");
+
+ /* print FEB list (list of buffers in form (bh (b_blocknr, b_count), that will be used for new nodes) */
+ h = 0;
+ for (i = 0; i < sizeof (tb->FEB) / sizeof (tb->FEB[0]); i ++)
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "%p (%lu %d)%s", tb->FEB[i], tb->FEB[i] ? tb->FEB[i]->b_blocknr : 0,
+ tb->FEB[i] ? atomic_read (&(tb->FEB[i]->b_count)) : 0,
+ (i == sizeof (tb->FEB) / sizeof (tb->FEB[0]) - 1) ? "\n" : ", ");
+
+ sprintf (print_tb_buf + strlen (print_tb_buf),
+ "======================== the end ====================================\n");
+}
+
+void print_cur_tb (char * mes)
+{
+ printk ("%s\n%s", mes, print_tb_buf);
+}
+
+
+#ifndef __KERNEL__
+
+void print_bmap_block (int i, char * data, int size, int silent)
+{
+ int j, k;
+ int bits = size * 8;
+ int zeros = 0, ones = 0;
+
+
+ if (test_bit (0, data)) {
+ /* first block addressed by this bitmap block is used */
+ ones ++;
+ if (!silent)
+ printf ("Busy (%d-", i * bits);
+ for (j = 1; j < bits; j ++) {
+ while (test_bit (j, data)) {
+ ones ++;
+ if (j == bits - 1) {
+ if (!silent)
+ printf ("%d)\n", j + i * bits);
+ goto end;
+ }
+ j++;
+ }
+ if (!silent)
+ printf ("%d) Free(%d-", j - 1 + i * bits, j + i * bits);
+
+ while (!test_bit (j, data)) {
+ zeros ++;
+ if (j == bits - 1) {
+ if (!silent)
+ printf ("%d)\n", j + i * bits);
+ goto end;
+ }
+ j++;
+ }
+ if (!silent)
+ printf ("%d) Busy(%d-", j - 1 + i * bits, j + i * bits);
+
+ j --;
+ end:
+ }
+ } else {
+ /* first block addressed by this bitmap is free */
+ zeros ++;
+ if (!silent)
+ printf ("Free (%d-", i * bits);
+ for (j = 1; j < bits; j ++) {
+ k = 0;
+ while (!test_bit (j, data)) {
+ k ++;
+ if (j == bits - 1) {
+ if (!silent)
+ printf ("%d)\n", j + i * bits);
+ zeros += k;
+ goto end2;
+ }
+ j++;
+ }
+ zeros += k;
+ if (!silent)
+ printf ("%d) Busy(%d-", j - 1 + i * bits, j + i * bits);
+
+ k = 0;
+ while (test_bit (j, data)) {
+ ones ++;
+ if (j == bits - 1) {
+ if (!silent)
+ printf ("%d)\n", j + i * bits);
+ ones += k;
+ goto end2;
+ }
+ j++;
+ }
+ ones += k;
+ if (!silent)
+ printf ("%d) Busy(%d-", j - 1 + i * bits, j + i * bits);
+
+ j --;
+ end2:
+ }
+ }
+
+ printf ("used %d, free %d\n", ones, zeros);
+}
+
+
+/* if silent == 1, do not print details */
+void print_bmap (struct super_block * s, int silent)
+{
+ int bmapnr = SB_BMAP_NR (s);
+ int i;
+
+ printf ("Bitmap blocks are:\n");
+ for (i = 0; i < bmapnr; i ++) {
+ printf ("#%d: block %lu: ", i, SB_AP_BITMAP(s)[i]->b_blocknr);
+ print_bmap_block (i, SB_AP_BITMAP(s)[i]->b_data, s->s_blocksize, silent);
+ }
+
+}
+
+
+
+
+void print_objectid_map (struct super_block * s)
+{
+ int i;
+ struct reiserfs_super_block * rs;
+ unsigned long * omap;
+
+ rs = SB_DISK_SUPER_BLOCK (s);
+ omap = (unsigned long *)(rs + 1);
+ printk ("Map of objectids\n");
+
+ for (i = 0; i < rs->s_oid_cursize; i ++) {
+ if (i % 2 == 0)
+ printk ("busy(%lu-%lu) ", omap[i], omap[i+1] - 1);
+ else
+ printk ("free(%lu-%lu) ",
+ omap[i], ((i+1) == rs->s_oid_cursize) ? -1 : omap[i+1] - 1);
+ }
+ printk ("\n");
+
+ printk ("Object id array has size %d (max %d):", rs->s_oid_cursize,
+ rs->s_oid_maxsize);
+
+ for (i = 0; i < rs->s_oid_cursize; i ++)
+ printk ("%lu ", omap[i]);
+ printk ("\n");
+
+}
+
+#endif /* #ifndef __KERNEL__ */
+
+
+static void check_leaf_block_head (struct buffer_head * bh)
+{
+ struct block_head * blkh;
+
+ blkh = B_BLK_HEAD (bh);
+ if (le16_to_cpu (blkh->blk_nr_item) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+ reiserfs_panic (0, "vs-6010: check_leaf_block_head: invalid item number %z", bh);
+ if (le16_to_cpu (blkh->blk_free_space) >
+ bh->b_size - BLKH_SIZE - IH_SIZE * le16_to_cpu (blkh->blk_nr_item))
+ reiserfs_panic (0, "vs-6020: check_leaf_block_head: invalid free space %z", bh);
+
+}
+
+static void check_internal_block_head (struct buffer_head * bh)
+{
+ struct block_head * blkh;
+
+ blkh = B_BLK_HEAD (bh);
+ if (!(B_LEVEL (bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL (bh) <= MAX_HEIGHT))
+ reiserfs_panic (0, "vs-6025: check_internal_block_head: invalid level %z", bh);
+
+ if (B_NR_ITEMS (bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+ reiserfs_panic (0, "vs-6030: check_internal_block_head: invalid item number %z", bh);
+
+ if (B_FREE_SPACE (bh) !=
+ bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS (bh) - DC_SIZE * (B_NR_ITEMS (bh) + 1))
+ reiserfs_panic (0, "vs-6040: check_internal_block_head: invalid free space %z", bh);
+
+}
+
+
+void check_leaf (struct buffer_head * bh)
+{
+ int i;
+ struct item_head * ih;
+
+ if (!bh)
+ return;
+ check_leaf_block_head (bh);
+ for (i = 0, ih = B_N_PITEM_HEAD (bh, 0); i < B_NR_ITEMS (bh); i ++, ih ++)
+ op_check_item (ih, B_I_PITEM (bh, ih));
+}
+
+
+void check_internal (struct buffer_head * bh)
+{
+ if (!bh)
+ return;
+ check_internal_block_head (bh);
+}
+
+
+void print_statistics (struct super_block * s)
+{
+
+ /*
+ printk ("reiserfs_put_super: session statistics: balances %d, fix_nodes %d, preserve list freeings %d, \
+bmap with search %d, without %d, dir2ind %d, ind2dir %d\n",
+ s->u.reiserfs_sb.s_do_balance, s->u.reiserfs_sb.s_fix_nodes, s->u.reiserfs_sb.s_preserve_list_freeings,
+ s->u.reiserfs_sb.s_bmaps, s->u.reiserfs_sb.s_bmaps_without_search,
+ s->u.reiserfs_sb.s_direct2indirect, s->u.reiserfs_sb.s_indirect2direct);
+ */
+
+}
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
new file mode 100644
index 000000000..87ebd6ee6
--- /dev/null
+++ b/fs/reiserfs/resize.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/*
+ * Written by Alexander Zarochentcev.
+ *
+ * The kernel part of the (on-line) reiserfs resizer.
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/locks.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_fs_sb.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
+{
+ struct reiserfs_super_block * sb;
+ struct buffer_head ** bitmap, * bh;
+ struct reiserfs_transaction_handle th;
+ unsigned int bmap_nr_new, bmap_nr;
+ unsigned int block_r_new, block_r;
+
+ struct reiserfs_list_bitmap * jb;
+ struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
+
+ unsigned long int block_count, free_blocks;
+ int i;
+ int copy_size ;
+
+ sb = SB_DISK_SUPER_BLOCK(s);
+
+ if (SB_BLOCK_COUNT(s) >= block_count_new) {
+ printk("can\'t shrink filesystem on-line\n");
+ return -EINVAL;
+ }
+
+ /* check the device size */
+ bh = bread(s->s_dev, block_count_new - 1, s->s_blocksize);
+ if (!bh) {
+ printk("reiserfs_resize: can\'t read last block\n");
+ return -EINVAL;
+ }
+ bforget(bh);
+
+ /* old disk layout detection; those partitions can be mounted, but
+ * cannot be resized */
+ if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
+ != REISERFS_DISK_OFFSET_IN_BYTES ) {
+ printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
+ return -ENOTSUPP;
+ }
+
+ /* count used bits in last bitmap block */
+ block_r = SB_BLOCK_COUNT(s) -
+ (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
+
+ /* count bitmap blocks in new fs */
+ bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
+ block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
+ if (block_r_new)
+ bmap_nr_new++;
+ else
+ block_r_new = s->s_blocksize * 8;
+
+ /* save old values */
+ block_count = SB_BLOCK_COUNT(s);
+ bmap_nr = SB_BMAP_NR(s);
+
+ /* resizing of reiserfs bitmaps (journal and real), if needed */
+ if (bmap_nr_new > bmap_nr) {
+ /* reallocate journal bitmaps */
+ if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
+ printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
+ unlock_super(s) ;
+ return -ENOMEM ;
+ }
+ /* the new journal bitmaps are zero filled, now we copy in the bitmap
+ ** node pointers from the old journal bitmap structs, and then
+ ** transfer the new data structures into the journal struct.
+ **
+ ** using the copy_size var below allows this code to work for
+ ** both shrinking and expanding the FS.
+ */
+ copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
+ copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
+ for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
+ struct reiserfs_bitmap_node **node_tmp ;
+ jb = SB_JOURNAL(s)->j_list_bitmap + i ;
+ memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;
+
+ /* just in case vfree schedules on us, copy the new
+ ** pointer into the journal struct before freeing the
+ ** old one
+ */
+ node_tmp = jb->bitmaps ;
+ jb->bitmaps = jbitmap[i].bitmaps ;
+ vfree(node_tmp) ;
+ }
+
+ /* allocate additional bitmap blocks, reallocate array of bitmap
+ * block pointers */
+ bitmap = reiserfs_kmalloc(sizeof(struct buffer_head *) * bmap_nr_new, GFP_KERNEL, s);
+ if (!bitmap) {
+ printk("reiserfs_resize: unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < bmap_nr; i++)
+ bitmap[i] = SB_AP_BITMAP(s)[i];
+ for (i = bmap_nr; i < bmap_nr_new; i++) {
+ bitmap[i] = reiserfs_getblk(s->s_dev, i * s->s_blocksize * 8, s->s_blocksize);
+ memset(bitmap[i]->b_data, 0, sb->s_blocksize);
+ reiserfs_test_and_set_le_bit(0, bitmap[i]->b_data);
+
+ mark_buffer_dirty(bitmap[i]) ;
+ mark_buffer_uptodate(bitmap[i], 1);
+ ll_rw_block(WRITE, 1, bitmap + i);
+ wait_on_buffer(bitmap[i]);
+ }
+ /* free old bitmap blocks array */
+ reiserfs_kfree(SB_AP_BITMAP(s),
+ sizeof(struct buffer_head *) * bmap_nr, s);
+ SB_AP_BITMAP(s) = bitmap;
+ }
+
+ /* begin transaction */
+ journal_begin(&th, s, 10);
+
+ /* correct last bitmap blocks in old and new disk layout */
+ reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1], 1);
+ for (i = block_r; i < s->s_blocksize * 8; i++)
+ reiserfs_test_and_clear_le_bit(i,
+ SB_AP_BITMAP(s)[bmap_nr - 1]->b_data);
+ journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1]);
+
+ reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1], 1);
+ for (i = block_r_new; i < s->s_blocksize * 8; i++)
+ reiserfs_test_and_set_le_bit(i,
+ SB_AP_BITMAP(s)[bmap_nr_new - 1]->b_data);
+ journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1]);
+
+ /* update super */
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+ free_blocks = SB_FREE_BLOCKS(s);
+ PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
+ PUT_SB_BLOCK_COUNT(s, block_count_new);
+ PUT_SB_BMAP_NR(s, bmap_nr_new);
+ s->s_dirt = 1;
+
+ journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+
+ SB_JOURNAL(s)->j_must_wait = 1;
+ journal_end(&th, s, 10);
+
+ return 0;
+}
+
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
new file mode 100644
index 000000000..f4b24b590
--- /dev/null
+++ b/fs/reiserfs/stree.c
@@ -0,0 +1,2078 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+/*
+ * Written by Anatoly P. Pinchuk pap@namesys.botik.ru
+ * Program Systems Institute
+ * Pereslavl-Zalessky Russia
+ */
+
+/*
+ * This file contains functions dealing with S+tree
+ *
+ * B_IS_IN_TREE
+ * copy_short_key
+ * copy_item_head
+ * comp_short_keys
+ * comp_keys
+ * comp_cpu_keys
+ * comp_short_le_keys
+ * comp_short_cpu_keys
+ * cpu_key2cpu_key
+ * le_key2cpu_key
+ * comp_le_keys
+ * bin_search
+ * get_lkey
+ * get_rkey
+ * key_in_buffer
+ * decrement_bcount
+ * decrement_counters_in_path
+ * reiserfs_check_path
+ * pathrelse_and_restore
+ * pathrelse
+ * search_by_key_reada
+ * search_by_key
+ * search_for_position_by_key
+ * comp_items
+ * prepare_for_direct_item
+ * prepare_for_direntry_item
+ * prepare_for_delete_or_cut
+ * calc_deleted_bytes_number
+ * init_tb_struct
+ * padd_item
+ * reiserfs_delete_item
+ * reiserfs_delete_solid_item
+ * reiserfs_delete_object
+ * maybe_indirect_to_direct
+ * indirect_to_direct_roll_back
+ * reiserfs_cut_from_item
+ * truncate_directory
+ * reiserfs_do_truncate
+ * reiserfs_paste_into_item
+ * reiserfs_insert_item
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/locks.h>
+#include <linux/pagemap.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+
+/* Does the buffer contain a disk block which is in the tree. */
+inline int B_IS_IN_TREE (struct buffer_head * p_s_bh)
+{
+
+#ifdef CONFIG_REISERFS_CHECK
+
+  /* a formatted node can never sit deeper than the tree itself */
+  if ( B_LEVEL (p_s_bh) > MAX_HEIGHT ) {
+    reiserfs_panic(0, "PAP-1010: B_IS_IN_TREE: block (%b) has too big level (%z)",
+		   p_s_bh, p_s_bh);
+  }
+#endif
+
+  /* blocks removed from the tree carry FREE_LEVEL in their block head */
+  return ( B_LEVEL (p_s_bh) != FREE_LEVEL );
+}
+
+
+
+
+/* Copy only the short part of a key (the first SHORT_KEY_SIZE bytes:
+   dir_id and objectid); offset/uniqueness are left untouched in 'to'. */
+inline void copy_short_key (void * to, void * from)
+{
+    memcpy (to, from, SHORT_KEY_SIZE);
+}
+
+//
+// to gets item head in le form
+//
+/* Copy a whole on-disk item head (IH_SIZE bytes); no endian conversion
+   is performed, the destination receives the little-endian image. */
+inline void copy_item_head(void * p_v_to, void * p_v_from)
+{
+    memcpy (p_v_to, p_v_from, IH_SIZE);
+}
+
+
+/* Compare the short part (dir_id, objectid) of an on-disk key against a
+   cpu key.  le_key points to a little-endian on-disk structure, cpu_key
+   to a cpu-order variable.  For keys of items of the same object this
+   returns 0.
+   Returns: -1 if key1 < key2
+	     0 if key1 == key2
+	     1 if key1 > key2 */
+inline int comp_short_keys (struct key * le_key, struct cpu_key * cpu_key)
+{
+    __u32 * le_word = (__u32 *)le_key;
+    __u32 * cpu_word = (__u32 *)cpu_key;
+    int i;
+
+    /* walk the REISERFS_SHORT_KEY_LEN leading 32-bit fields in order;
+       the first difference decides the ordering */
+    for (i = 0; i < REISERFS_SHORT_KEY_LEN; i++, le_word++, cpu_word++) {
+	__u32 le_val = le32_to_cpu (*le_word);
+
+	if (le_val < *cpu_word)
+	    return -1;
+	if (le_val > *cpu_word)
+	    return 1;
+    }
+
+    return 0;
+}
+
+
+/* k1 is pointer to on-disk structure which is stored in little-endian
+   form. k2 is pointer to cpu variable.
+   Compare keys using all 4 key fields.
+   Returns: -1 if key1 < key2
+	     0 if key1 == key2
+	     1 if key1 > key2 */
+inline int comp_keys (struct key * le_key, struct cpu_key * cpu_key)
+{
+  int retval;
+
+  /* dir_id and objectid are the most significant fields */
+  retval = comp_short_keys (le_key, cpu_key);
+  if (retval)
+      return retval;
+  /* the le key offset is decoded using the version carried by the cpu key */
+  if (le_key_k_offset (cpu_key->version, le_key) < cpu_key_k_offset (cpu_key))
+      return -1;
+  if (le_key_k_offset (cpu_key->version, le_key) > cpu_key_k_offset (cpu_key))
+      return 1;
+
+  /* a 3-component cpu key does not participate in type comparison */
+  if (cpu_key->key_length == 3)
+      return 0;
+
+  /* this part is needed only when tail conversion is in progress */
+  if (le_key_k_type (cpu_key->version, le_key) < cpu_key_k_type (cpu_key))
+      return -1;
+
+  if (le_key_k_type (cpu_key->version, le_key) > cpu_key_k_type (cpu_key))
+      return 1;
+
+  return 0;
+}
+
+
+//
+// FIXME: not used yet
+//
+/* Compare two cpu-order keys field by field (dir_id, objectid, offset,
+   then type).  Returns -1, 0 or 1.  The warning fires whenever the
+   comparison has to fall through to the type fields. */
+inline int comp_cpu_keys (struct cpu_key * key1, struct cpu_key * key2)
+{
+    if (key1->on_disk_key.k_dir_id < key2->on_disk_key.k_dir_id)
+	return -1;
+    if (key1->on_disk_key.k_dir_id > key2->on_disk_key.k_dir_id)
+	return 1;
+
+    if (key1->on_disk_key.k_objectid < key2->on_disk_key.k_objectid)
+	return -1;
+    if (key1->on_disk_key.k_objectid > key2->on_disk_key.k_objectid)
+	return 1;
+
+    if (cpu_key_k_offset (key1) < cpu_key_k_offset (key2))
+	return -1;
+    if (cpu_key_k_offset (key1) > cpu_key_k_offset (key2))
+	return 1;
+
+    reiserfs_warning ("comp_cpu_keys: type are compared for %k and %k\n",
+		      key1, key2);
+
+    if (cpu_key_k_type (key1) < cpu_key_k_type (key2))
+	return -1;
+    if (cpu_key_k_type (key1) > cpu_key_k_type (key2))
+	return 1;
+    return 0;
+}
+
+/* Compare the short parts (dir_id, objectid) of two on-disk
+   (little-endian) keys.  Returns -1, 0 or 1. */
+inline int comp_short_le_keys (struct key * key1, struct key * key2)
+{
+    __u32 * word1 = (__u32 *)key1;
+    __u32 * word2 = (__u32 *)key2;
+    int i;
+
+    /* both operands need byte-swapping before an ordered comparison */
+    for (i = 0; i < REISERFS_SHORT_KEY_LEN; i++, word1++, word2++) {
+	__u32 v1 = le32_to_cpu (*word1);
+	__u32 v2 = le32_to_cpu (*word2);
+
+	if (v1 < v2)
+	    return -1;
+	if (v1 > v2)
+	    return 1;
+    }
+    return 0;
+}
+
+/* Compare the short parts (dir_id, objectid) of two cpu-order keys.
+   No byte swapping is needed here.  Returns -1, 0 or 1. */
+inline int comp_short_cpu_keys (struct cpu_key * key1,
+				struct cpu_key * key2)
+{
+    __u32 * word1 = (__u32 *)key1;
+    __u32 * word2 = (__u32 *)key2;
+    int i;
+
+    for (i = 0; i < REISERFS_SHORT_KEY_LEN; i++, word1++, word2++) {
+	if (*word1 < *word2)
+	    return -1;
+	if (*word1 > *word2)
+	    return 1;
+    }
+    return 0;
+}
+
+
+
+/* Duplicate a cpu key; plain structure assignment copies every field. */
+inline void cpu_key2cpu_key (struct cpu_key * to, struct cpu_key * from)
+{
+    *to = *from;
+}
+
+
+/* Convert an on-disk (little-endian) key into cpu form, filling 'to'.
+   The key version determines which offset layout (v1 or v2) is decoded. */
+inline void le_key2cpu_key (struct cpu_key * to, struct key * from)
+{
+    to->on_disk_key.k_dir_id = le32_to_cpu (from->k_dir_id);
+    to->on_disk_key.k_objectid = le32_to_cpu (from->k_objectid);
+
+    // find out version of the key
+    to->version = le_key_version (from);
+    if (to->version == ITEM_VERSION_1) {
+	/* v1: 32-bit offset plus 32-bit uniqueness field */
+	to->on_disk_key.u.k_offset_v1.k_offset = le32_to_cpu (from->u.k_offset_v1.k_offset);
+	to->on_disk_key.u.k_offset_v1.k_uniqueness = le32_to_cpu (from->u.k_offset_v1.k_uniqueness);
+    } else {
+	/* v2: 64-bit offset plus a small type field */
+	to->on_disk_key.u.k_offset_v2.k_offset = le64_to_cpu (from->u.k_offset_v2.k_offset);
+	to->on_disk_key.u.k_offset_v2.k_type = le16_to_cpu (from->u.k_offset_v2.k_type);
+    }
+}
+
+
+
+// this does not say which one is bigger, it only returns 1 if keys
+// are not equal, 0 otherwise
+/* Raw byte comparison of two on-disk keys; only equality/inequality is
+   meaningful (memcmp ordering of le data is not a key ordering). */
+inline int comp_le_keys (struct key * k1, struct key * k2)
+{
+    return memcmp (k1, k2, sizeof (struct key));
+}
+
+/**************************************************************************
+ * Binary search toolkit function *
+ * Search for an item in the array by the item key *
+ * Returns: 1 if found, 0 if not found; *
+ * *p_n_pos = number of the searched element if found, else the *
+ * number of the first element that is larger than p_v_key. *
+ **************************************************************************/
+/* For those not familiar with binary search: n_lbound is the leftmost item that it
+ could be, n_rbound the rightmost item that it could be. We examine the item
+ halfway between n_lbound and n_rbound, and that tells us either that we can increase
+ n_lbound, or decrease n_rbound, or that we have found it, or if n_lbound <= n_rbound that
+ there are no possible items, and we have not found it. With each examination we
+ cut the number of possible items it could be by one more than half rounded down,
+ or we find it. */
+inline int bin_search (
+              void    * p_v_key,    /* Key to search for. */
+	      void    * p_v_base,   /* First item in the array. */
+	      int       p_n_num,    /* Number of items in the array. */
+	      int       p_n_width,  /* Item size in the array.
+				       searched. Lest the reader be
+				       confused, note that this is crafted
+				       as a general function, and when it
+				       is applied specifically to the array
+				       of item headers in a node, p_n_width
+				       is actually the item header size not
+				       the item size. */
+	      int     * p_n_pos     /* Number of the searched for element. */
+            ) {
+    int n_lbound = 0;
+    int n_rbound = p_n_num - 1;
+
+    /* classic binary search: shrink [n_lbound, n_rbound] around the key */
+    while ( n_lbound <= n_rbound ) {
+	int n_j = (n_lbound + n_rbound) / 2;
+
+	switch ( COMP_KEYS((struct key *)((char *)p_v_base + n_j * p_n_width),
+			   (struct cpu_key *)p_v_key) ) {
+	case -1:
+	    n_lbound = n_j + 1;
+	    break;
+	case 1:
+	    n_rbound = n_j - 1;
+	    break;
+	case 0:
+	    /* Key found in the array. */
+	    *p_n_pos = n_j;
+	    return ITEM_FOUND;
+	}
+    }
+
+    /* bin_search did not find given key, it returns position of key,
+       that is minimal and greater than the given one. */
+    *p_n_pos = n_lbound;
+    return ITEM_NOT_FOUND;
+}
+
+#ifdef CONFIG_REISERFS_CHECK
+extern struct tree_balance * cur_tb;
+#endif
+
+
+
+/* Minimal possible key. It is never in the tree. */
+struct key MIN_KEY = {0, 0, {{0, 0},}};
+
+/* Maximal possible key. It is never in the tree. */
+struct key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}};
+
+
+/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
+   of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
+   the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
+   case we return a special key, either MIN_KEY or MAX_KEY. */
+inline	struct  key * get_lkey  (
+	                struct path         * p_s_chk_path,
+                        struct super_block  * p_s_sb
+                      ) {
+  int                   n_position, n_path_offset = p_s_chk_path->path_length;
+  struct buffer_head  * p_s_parent;
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( n_path_offset < FIRST_PATH_ELEMENT_OFFSET )
+    reiserfs_panic(p_s_sb,"PAP-5010: get_lkey: illegal offset in the path");
+#endif
+
+  /* While not higher in path than first element. */
+  while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+    if ( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      reiserfs_panic(p_s_sb, "PAP-5020: get_lkey: parent is not uptodate");
+#endif
+
+    /* Parent at the path is not in the tree now. */
+    if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      return &MAX_KEY;
+    /* Check whether position in the parent is correct. */
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
+       return &MAX_KEY;
+    /* Check whether parent at the path really points to the child. */
+    if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
+	 PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
+      return &MAX_KEY;
+    /* Return delimiting key if position in the parent is not equal to zero. */
+    if ( n_position )
+      return B_N_PDELIM_KEY(p_s_parent, n_position - 1);
+  }
+  /* Return MIN_KEY if we are in the root of the buffer tree. */
+  if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+       SB_ROOT_BLOCK (p_s_sb) )
+    return &MIN_KEY;
+  /* MAX_KEY here signals "path is stale, repeat the search". */
+  return &MAX_KEY;
+}
+
+
+/* Get delimiting key of the buffer at the path and its right neighbor. */
+inline	struct  key * get_rkey  (
+	                struct path         * p_s_chk_path,
+                        struct super_block  * p_s_sb
+                      ) {
+  int                   n_position,
+    			n_path_offset = p_s_chk_path->path_length;
+  struct buffer_head  * p_s_parent;
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( n_path_offset < FIRST_PATH_ELEMENT_OFFSET )
+    reiserfs_panic(p_s_sb,"PAP-5030: get_rkey: illegal offset in the path");
+#endif
+
+  /* walk upwards from the bottom of the path, mirroring get_lkey */
+  while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+    if ( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      reiserfs_panic(p_s_sb, "PAP-5040: get_rkey: parent is not uptodate");
+#endif
+
+    /* Parent at the path is not in the tree now. */
+    if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
+      return &MIN_KEY;
+    /* Check whether position in the parent is correct. */
+    if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
+      return &MIN_KEY;
+    /* Check whether parent at the path really points to the child. */
+    if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
+	 PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
+      return &MIN_KEY;
+    /* Return delimiting key if position in the parent is not the last one. */
+    if ( n_position != B_NR_ITEMS(p_s_parent) )
+      return B_N_PDELIM_KEY(p_s_parent, n_position);
+  }
+  /* Return MAX_KEY if we are in the root of the buffer tree. */
+  if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+       SB_ROOT_BLOCK (p_s_sb) )
+    return &MAX_KEY;
+  /* MIN_KEY here signals "path is stale, repeat the search". */
+  return &MIN_KEY;
+}
+
+
+/* Check whether a key is contained in the tree rooted from a buffer at a path. */
+/* This works by looking at the left and right delimiting keys for the buffer in the last path_element in
+   the path.  These delimiting keys are stored at least one level above that buffer in the tree.  If the
+   buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in
+   this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
+static  inline  int key_in_buffer (
+                      struct path         * p_s_chk_path, /* Path which should be checked.  */
+                      struct cpu_key      * p_s_key,      /* Key which should be checked.   */
+                      struct super_block  * p_s_sb        /* Super block pointer.	   */
+		      ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( ! p_s_key || p_s_chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET ||
+       p_s_chk_path->path_length > MAX_HEIGHT )
+    reiserfs_panic(p_s_sb, "PAP-5050: key_in_buffer:  pointer to the key(%p) is NULL or illegal path length(%d)",
+		   p_s_key, p_s_chk_path->path_length);
+  
+  if ( PATH_PLAST_BUFFER(p_s_chk_path)->b_dev == NODEV )
+    reiserfs_panic(p_s_sb, "PAP-5060: key_in_buffer: device must not be NODEV");
+#endif
+
+  if ( COMP_KEYS(get_lkey(p_s_chk_path, p_s_sb), p_s_key) == 1 )
+    /* left delimiting key is bigger than the key we look for */
+    return 0;
+  //  if ( COMP_KEYS(p_s_key, get_rkey(p_s_chk_path, p_s_sb)) != -1 )
+  if ( COMP_KEYS(get_rkey(p_s_chk_path, p_s_sb), p_s_key) != 1 )
+    /* p_s_key must be less than right delimiting key */
+    return 0;
+  return 1;
+}
+
+
+/* Drop one reference on a buffer, panicking if the count is already
+   zero.  A NULL buffer pointer is silently ignored. */
+inline void decrement_bcount(
+              struct buffer_head  * p_s_bh
+            ) { 
+  if ( p_s_bh ) {
+    if ( atomic_read (&(p_s_bh->b_count)) ) {
+      atomic_dec (&(p_s_bh->b_count));
+      return;
+    }
+    reiserfs_panic(NULL, "PAP-5070: decrement_bcount: trying to free free buffer %b", p_s_bh);
+  }
+}
+
+
+/* Decrement b_count field of the all buffers in the path.  After this
+   the path is marked empty (path_length = ILLEGAL_PATH_ELEMENT_OFFSET). */
+void decrement_counters_in_path (
+              struct path * p_s_search_path
+            ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET ||
+       n_path_offset > EXTENDED_MAX_HEIGHT - 1 )
+    reiserfs_panic(NULL, "PAP-5080: decrement_counters_in_path: illegal path offset of %d", n_path_offset);
+#endif
+
+  /* release from the bottom of the path upwards */
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET ) {
+    struct buffer_head * bh;
+
+    bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
+    decrement_bcount (bh);
+  }
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+
+/* Debug helper: BUG if the path still holds buffers (i.e. was not
+   released with pathrelse/decrement_counters_in_path).  Always returns 0
+   so it can be used in return statements. */
+int reiserfs_check_path(struct path *p) {
+#ifdef CONFIG_REISERFS_CHECK 
+  if (p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET) {
+    reiserfs_warning("check_path, path not properly relsed\n") ;
+    BUG() ;
+  }
+#endif
+  return 0 ;
+}
+
+
+/* Release all buffers in the path. Restore dirty bits clean
+** when preparing the buffer for the log
+**
+** only called from fix_nodes()
+*/
+void  pathrelse_and_restore (
+	struct super_block *s, 
+        struct path * p_s_search_path
+      ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET )
+    reiserfs_panic(NULL, "clm-4000: pathrelse: illegal path offset");
+#endif
+
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET )  {
+    /* undo reiserfs_prepare_for_journal before dropping the reference */
+    reiserfs_restore_prepared_buffer(s, PATH_OFFSET_PBUFFER(p_s_search_path, 
+                                     n_path_offset));
+    brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
+  }
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+/* Release all buffers in the path and mark the path empty. */
+void  pathrelse (
+        struct path * p_s_search_path
+      ) {
+  int n_path_offset = p_s_search_path->path_length;
+
+#ifdef CONFIG_REISERFS_CHECK
+  if ( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET )
+    reiserfs_panic(NULL, "PAP-5090: pathrelse: illegal path offset");
+#endif
+  
+  while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET ) 
+    brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
+
+  p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+}
+
+
+
+/* Sanity-check that 'buf' holds a plausible leaf node: level, item
+   count, free space accounting and the item-head table must all be
+   consistent.  Returns 1 if the block looks like a leaf, 0 otherwise. */
+static int is_leaf (char * buf, int blocksize, struct buffer_head * bh)
+{
+    struct block_head * blkh;
+    struct item_head * ih;
+    int used_space;
+    int prev_location;
+    int i;
+    int nr;
+
+    blkh = (struct block_head *)buf;
+    if (le16_to_cpu (blkh->blk_level) != DISK_LEAF_NODE_LEVEL) {
+	printk ("is_leaf: this should be caught earlier\n");
+	return 0;
+    }
+
+    nr = le16_to_cpu (blkh->blk_nr_item);
+    if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
+	/* item number is too big or too small */
+	reiserfs_warning ("is_leaf: nr_item seems wrong: %z\n", bh);
+	return 0;
+    }
+    /* item bodies grow down from the end of the block; the last item
+       head gives the lowest used location */
+    ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
+    used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location (ih));
+    if (used_space != blocksize - le16_to_cpu (blkh->blk_free_space)) {
+	/* free space does not match to calculated amount of use space */
+	reiserfs_warning ("is_leaf: free space seems wrong: %z\n", bh);
+	return 0;
+    }
+
+    // FIXME: it is_leaf will hit performance too much - we may have
+    // return 1 here
+
+    /* check tables of item heads */
+    ih = (struct item_head *)(buf + BLKH_SIZE);
+    prev_location = blocksize;
+    for (i = 0; i < nr; i ++, ih ++) {
+	if (ih_location (ih) >= blocksize || ih_location (ih) < IH_SIZE * nr) {
+	    reiserfs_warning ("is_leaf: item location seems wrong: %h\n", ih);
+	    return 0;
+	}
+	if (ih_item_len (ih) < 1 || ih_item_len (ih) > MAX_ITEM_LEN (blocksize)) {
+	    reiserfs_warning ("is_leaf: item length seems wrong: %h\n", ih);
+	    return 0;
+	}
+	/* item bodies must be packed back-to-back, each ending where the
+	   previous one starts */
+	if (prev_location - ih_location (ih) != ih_item_len (ih)) {
+	    reiserfs_warning ("is_leaf: item location seems wrong (second one): %h\n", ih);
+	    return 0;
+	}
+	prev_location = ih_location (ih);
+    }
+
+    // one may imagine much more checks
+    return 1;
+}
+
+
+/* returns 1 if buf looks like an internal node, 0 otherwise */
+static int is_internal (char * buf, int blocksize, struct buffer_head * bh)
+{
+    struct block_head * blkh;
+    int nr;
+    int used_space;
+
+    blkh = (struct block_head *)buf;
+    if (le16_to_cpu (blkh->blk_level) <= DISK_LEAF_NODE_LEVEL ||
+	le16_to_cpu (blkh->blk_level) > MAX_HEIGHT) {
+	/* this level is not possible for internal nodes */
+	printk ("is_internal: this should be caught earlier\n");
+	return 0;
+    }
+    
+    nr = le16_to_cpu (blkh->blk_nr_item);
+    if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
+	/* for internal which is not root we might check min number of keys */
+	reiserfs_warning ("is_internal: number of key seems wrong: %z\n", bh);
+	return 0;
+    }
+
+    /* an internal node with nr keys carries nr+1 child pointers */
+    used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
+    if (used_space != blocksize - le16_to_cpu (blkh->blk_free_space)) {
+	reiserfs_warning ("is_internal: free space seems wrong: %z\n", bh);
+	return 0;
+    }
+
+    // one may imagine much more checks
+    return 1;
+}
+
+
+// make sure that bh contains formatted node of reiserfs tree of
+// 'level'-th level
+/* Returns 1 if bh passes the level check plus the appropriate leaf or
+   internal node validation, 0 otherwise. */
+static int is_tree_node (struct buffer_head * bh, int level)
+{
+    if (B_LEVEL (bh) != level) {
+	printk ("is_tree_node: node level %d does not match to the expected one %d\n",
+		B_LEVEL (bh), level);
+	return 0;
+    }
+    if (level == DISK_LEAF_NODE_LEVEL)
+	return is_leaf (bh->b_data, bh->b_size, bh);
+
+    return is_internal (bh->b_data, bh->b_size, bh);
+}
+
+
+
+#ifdef SEARCH_BY_KEY_READA
+
+/* The function is NOT SCHEDULE-SAFE! */
+/* Kick off a read-ahead of one block; blocknr == 0 means "no neighbor". */
+static void search_by_key_reada (struct super_block * s, int blocknr)
+{
+    struct buffer_head * bh;
+  
+    if (blocknr == 0)
+	return;
+
+    bh = reiserfs_getblk (s->s_dev, blocknr, s->s_blocksize);
+  
+    if (!buffer_uptodate (bh)) {
+	ll_rw_block (READA, 1, &bh);
+    }
+    /* NOTE(review): drops the getblk reference with a plain decrement
+       rather than brelse/atomic_dec as used elsewhere — presumably safe
+       because the function is declared not schedule-safe; verify. */
+    bh->b_count --;
+}
+
+#endif
+
+/**************************************************************************
+ * Algorithm SearchByKey *
+ * look for item in the Disk S+Tree by its key *
+ * Input: p_s_sb - super block *
+ * p_s_key - pointer to the key to search *
+ * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR *
+ * p_s_search_path - path from the root to the needed leaf *
+ **************************************************************************/
+
+/* This function fills up the path from the root to the leaf as it
+ descends the tree looking for the key. It uses reiserfs_bread to
+ try to find buffers in the cache given their block number. If it
+ does not find them in the cache it reads them from disk. For each
+ node search_by_key finds using reiserfs_bread it then uses
+ bin_search to look through that node. bin_search will find the
+ position of the block_number of the next node if it is looking
+ through an internal node. If it is looking through a leaf node
+ bin_search will find the position of the item which has key either
+ equal to given key, or which is the maximal key less than the given
+ key. search_by_key returns a path that must be checked for the
+ correctness of the top of the path but need not be checked for the
+ correctness of the bottom of the path */
+/* The function is NOT SCHEDULE-SAFE! */
+/* Descend the S+tree from the root towards n_stop_level, filling
+   p_s_search_path on the way.  Returns ITEM_FOUND/ITEM_NOT_FOUND from
+   bin_search at the stop level, or IO_ERROR on read/format failure.
+   Restarts from the root whenever a schedule invalidated the path. */
+int search_by_key (struct super_block  * p_s_sb,
+		   struct cpu_key  * p_s_key, /* Key to search. */
+		   struct path         * p_s_search_path, /* This structure was
+							     allocated and initialized
+							     by the calling
+							     function. It is filled up
+							     by this function.  */
+		   int n_stop_level /* How far down the tree to search. To
+                                       stop at leaf level - set to
+                                       DISK_LEAF_NODE_LEVEL */
+    ) {
+    kdev_t n_dev = p_s_sb->s_dev;
+    int  n_block_number = SB_ROOT_BLOCK (p_s_sb),
+      expected_level = SB_TREE_HEIGHT (p_s_sb),
+      n_block_size    = p_s_sb->s_blocksize;
+    struct buffer_head  *       p_s_bh;
+    struct path_element *       p_s_last_element;
+    int				n_node_level, n_retval;
+    int 			right_neighbor_of_leaf_node;
+    int				fs_gen;
+
+#ifdef CONFIG_REISERFS_CHECK
+    int n_repeat_counter = 0;
+#endif
+
+    /* As we add each node to a path we increase its count.  This means that
+       we must be careful to release all nodes in a path before we either
+       discard the path struct or re-use the path struct, as we do here. */
+
+    decrement_counters_in_path(p_s_search_path);
+
+    right_neighbor_of_leaf_node = 0;
+
+    /* With each iteration of this loop we search through the items in the
+       current node, and calculate the next current node(next path element)
+       for the next iteration of this loop.. */
+    while ( 1 ) {
+
+#ifdef CONFIG_REISERFS_CHECK
+	if ( !(++n_repeat_counter % 50000) )
+	    reiserfs_warning ("PAP-5100: search_by_key: %s:"
+			      "there were %d iterations of while loop "
+			      "looking for key %K\n",
+			      current->comm, n_repeat_counter, p_s_key);
+#endif
+
+	/* prep path to have another element added to it. */
+	p_s_last_element = PATH_OFFSET_PELEMENT(p_s_search_path, ++p_s_search_path->path_length);
+	fs_gen = get_generation (p_s_sb);
+	/* each descent takes us one level closer to the leaves */
+	expected_level --;
+
+#ifdef SEARCH_BY_KEY_READA
+	/* schedule read of right neighbor */
+	search_by_key_reada (p_s_sb, right_neighbor_of_leaf_node);
+#endif
+
+	/* Read the next tree node, and set the last element in the path to
+           have a pointer to it. */
+	if ( ! (p_s_bh = p_s_last_element->pe_buffer =
+		reiserfs_bread(n_dev, n_block_number, n_block_size)) ) {
+	    /* undo the pre-increment before releasing the path */
+	    p_s_search_path->path_length --;
+	    pathrelse(p_s_search_path);
+	    return IO_ERROR;
+	}
+
+	/* It is possible that schedule occured. We must check whether the key
+	   to search is still in the tree rooted from the current buffer. If
+	   not then repeat search from the root. */
+	if ( fs_changed (fs_gen, p_s_sb) && 
+	    (!B_IS_IN_TREE (p_s_bh) || !key_in_buffer(p_s_search_path, p_s_key, p_s_sb)) ) {
+	    decrement_counters_in_path(p_s_search_path);
+	    
+	    /* Get the root block number so that we can repeat the search
+               starting from the root. */
+	    n_block_number = SB_ROOT_BLOCK (p_s_sb);
+	    expected_level = SB_TREE_HEIGHT (p_s_sb);
+	    right_neighbor_of_leaf_node = 0;
+	    
+	    /* repeat search from the root */
+	    continue;
+	}
+
+#ifdef CONFIG_REISERFS_CHECK
+
+	if ( ! key_in_buffer(p_s_search_path, p_s_key, p_s_sb) )
+	    reiserfs_panic(p_s_sb, "PAP-5130: search_by_key: key is not in the buffer");
+	if ( cur_tb ) {
+	    print_cur_tb ("5140");
+	    reiserfs_panic(p_s_sb, "PAP-5140: search_by_key: schedule occurred in do_balance!");
+	}
+      
+#endif
+
+	// make sure, that the node contents look like a node of
+	// certain level
+	if (!is_tree_node (p_s_bh, expected_level)) {
+	    reiserfs_warning ("vs-5150: search_by_key: "
+			      "invalid format found in block %d. Fsck?\n", p_s_bh->b_blocknr);
+	    pathrelse (p_s_search_path);
+	    return IO_ERROR;
+	}
+	
+	/* ok, we have acquired next formatted node in the tree */
+	n_node_level = B_LEVEL (p_s_bh);
+
+#ifdef CONFIG_REISERFS_CHECK
+
+	if (n_node_level < n_stop_level)
+	    reiserfs_panic (p_s_sb, "vs-5152: search_by_key: tree level is less than stop level (%d)",
+			    n_node_level, n_stop_level);
+
+#endif
+
+	n_retval = bin_search (p_s_key, B_N_PITEM_HEAD(p_s_bh, 0), B_NR_ITEMS(p_s_bh),
+			       ( n_node_level == DISK_LEAF_NODE_LEVEL ) ? IH_SIZE : KEY_SIZE, &(p_s_last_element->pe_position));
+	if (n_node_level == n_stop_level) {
+	    return n_retval;
+	}
+
+	/* we are not in the stop level */
+	if (n_retval == ITEM_FOUND)
+	    /* item has been found, so we choose the pointer which is to the right of the found one */
+	    p_s_last_element->pe_position++;
+
+	/* if item was not found we choose the position which is to
+	   the left of the found item. This requires no code,
+	   bin_search did it already.*/
+
+	/* So we have chosen a position in the current node which is
+	   an internal node.  Now we calculate child block number by
+	   position in the node. */
+	n_block_number = B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position);
+
+#ifdef SEARCH_BY_KEY_READA
+	/* if we are going to read leaf node, then calculate its right neighbor if possible */
+	if (n_node_level == DISK_LEAF_NODE_LEVEL + 1 && p_s_last_element->pe_position < B_NR_ITEMS (p_s_bh))
+	    right_neighbor_of_leaf_node = B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position + 1);
+#endif
+    }
+}
+
+
+/* Form the path to an item and position in this item which contains
+ file byte defined by p_s_key. If there is no such item
+ corresponding to the key, we point the path to the item with
+ maximal key less than p_s_key, and *p_n_pos_in_item is set to one
+ past the last entry/byte in the item. If searching for entry in a
+ directory item, and it is not found, *p_n_pos_in_item is set to one
+ entry more than the entry with maximal key which is less than the
+ sought key.
+
+ Note that if there is no entry in this same node which is one more,
+ then we point to an imaginary entry. for direct items, the
+ position is in units of bytes, for indirect items the position is
+ in units of blocknr entries, for directory items the position is in
+ units of directory entries. */
+
+/* The function is NOT SCHEDULE-SAFE! */
+/* Find the item containing the byte addressed by p_cpu_key and set
+   pos_in_item accordingly.  Returns POSITION_FOUND, POSITION_NOT_FOUND,
+   FILE_NOT_FOUND or IO_ERROR; directory keys are delegated to
+   search_by_entry_key. */
+int search_for_position_by_key (struct super_block  * p_s_sb,         /* Pointer to the super block.          */
+				struct cpu_key      * p_cpu_key,      /* Key to search (cpu variable)         */
+				struct path         * p_s_search_path /* Filled up by this function.          */
+    ) {
+    struct item_head    * p_le_ih; /* pointer to on-disk structure */
+    int                   n_blk_size;
+    loff_t item_offset, offset;
+    struct reiserfs_dir_entry de;
+    int retval;
+
+    /* If searching for directory entry. */
+    if ( is_direntry_cpu_key (p_cpu_key) )
+	return  search_by_entry_key (p_s_sb, p_cpu_key, p_s_search_path, &de);
+
+    /* If not searching for directory entry. */
+    
+    /* If item is found. */
+    retval = search_item (p_s_sb, p_cpu_key, p_s_search_path);
+    if (retval == IO_ERROR)
+	return retval;
+    if ( retval == ITEM_FOUND )  {
+
+#ifdef CONFIG_REISERFS_CHECK
+	if ( ! B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path),
+			      PATH_LAST_POSITION(p_s_search_path))->ih_item_len )
+	    reiserfs_panic(p_s_sb, "PAP-5165: search_for_position_by_key: item length equals zero");
+#endif
+
+	/* exact key match means the wanted byte is the first of the item */
+	pos_in_item(p_s_search_path) = 0;
+	return POSITION_FOUND;
+    }
+
+#ifdef CONFIG_REISERFS_CHECK
+    if ( ! PATH_LAST_POSITION(p_s_search_path) )
+	reiserfs_panic(p_s_sb, "PAP-5170: search_for_position_by_key: position equals zero");
+#endif
+
+    /* Item is not found. Set path to the previous item. */
+    p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path), --PATH_LAST_POSITION(p_s_search_path));
+    n_blk_size = p_s_sb->s_blocksize;
+
+    /* previous item belongs to a different object: the file has no such item */
+    if (comp_short_keys (&(p_le_ih->ih_key), p_cpu_key)) {
+	return FILE_NOT_FOUND;
+    }
+
+#if 0
+/*#ifdef CONFIG_REISERFS_CHECK*/
+
+    /* we expect to find stat data or item of the same type */
+    if ( ! is_statdata_le_ih(p_le_ih) && ((is_indirect_cpu_key(p_cpu_key) && ! is_indirect_le_ih(p_le_ih)) ||
+					  (is_direct_cpu_key(p_cpu_key) && ! is_direct_le_ih(p_le_ih))) ) {
+	print_block (PATH_PLAST_BUFFER(p_s_search_path), PRINT_LEAF_ITEMS, 
+		     PATH_LAST_POSITION (p_s_search_path) - 2,
+		     PATH_LAST_POSITION (p_s_search_path) + 2);
+	reiserfs_panic(p_s_sb, "PAP-5190: search_for_position_by_key: "
+		       "found item %h type does not match to the expected one %k",
+		       p_le_ih, p_cpu_key);
+    }
+/*#endif*/
+#endif
+
+    // FIXME: quite ugly this far
+
+    item_offset = le_ih_k_offset (p_le_ih);
+    offset = cpu_key_k_offset (p_cpu_key);
+
+    /* Needed byte is contained in the item pointed to by the path.*/
+    if (item_offset <= offset &&
+	item_offset + op_bytes_number (p_le_ih, n_blk_size) > offset) {
+	pos_in_item (p_s_search_path) = offset - item_offset;
+	if ( is_indirect_le_ih(p_le_ih) ) {
+	    /* indirect items are positioned in units of block pointers */
+	    pos_in_item (p_s_search_path) /= n_blk_size;
+	}
+	return POSITION_FOUND;
+    }
+
+    /* Needed byte is not contained in the item pointed to by the
+       path. Set pos_in_item out of the item. */
+    if ( is_indirect_le_ih (p_le_ih) )
+	pos_in_item (p_s_search_path) = le16_to_cpu (p_le_ih->ih_item_len) / UNFM_P_SIZE;
+    else
+	pos_in_item (p_s_search_path) = le16_to_cpu (p_le_ih->ih_item_len);
+  
+    return POSITION_NOT_FOUND;
+}
+
+
+/* Compare given item and item pointed to by the path.
+   Returns 0 when the path still points at the same item, non-zero when
+   the path is stale (buffer dropped out of the tree, position invalid,
+   or the item head changed). */
+int comp_items (struct item_head  * stored_ih, struct path * p_s_path)
+{
+    struct buffer_head  * p_s_bh;
+    struct item_head * ih;
+
+    /* Last buffer at the path is not in the tree. */
+    if ( ! B_IS_IN_TREE(p_s_bh = PATH_PLAST_BUFFER(p_s_path)) )
+	return 1;
+
+    /* Last path position is invalid. */
+    if ( PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(p_s_bh) )
+	return 1;
+
+    /* we need only to know, whether it is the same item */
+    ih = get_ih (p_s_path);
+    return memcmp (stored_ih, ih, IH_SIZE);
+
+#if 0
+    /* Get item at the path. */
+    p_s_path_item = PATH_PITEM_HEAD(p_s_path);
+    /* Compare keys. */
+    if ( COMP_KEYS(&(p_s_path_item->ih_key), &(p_cpu_ih->ih_key)) )
+	return 1;
+
+    /* Compare other items fields. */
+    if ( le16_to_cpu (p_s_path_item->u.ih_entry_count) != p_cpu_ih->u.ih_entry_count ||
+	 le16_to_cpu (p_s_path_item->ih_item_len) != p_cpu_ih->ih_item_len ||
+	 le16_to_cpu ( p_s_path_item->ih_item_location) != p_cpu_ih->ih_item_location )
+	return 1;
+
+    /* Items are equal. */
+    return 0;
+#endif
+}
+
+
+/* unformatted nodes are not logged anymore, ever. This is safe
+** now
+*/
+#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
+
+// block can not be forgotten as it is in I/O or held by someone
+#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
+
+
+
+// prepare for delete or cut of direct item
+/* Decide, for a direct item, whether truncation to new_file_length
+   deletes the whole item (M_DELETE) or cuts its tail (M_CUT).  Sets
+   *cut_size to the (negative) number of bytes to remove and, for M_CUT,
+   sets pos_in_item to the first byte to cut. */
+static inline int prepare_for_direct_item (struct path * path,
+					   struct item_head * le_ih,
+					   struct inode * inode,
+					   loff_t new_file_length,
+					   int * cut_size)
+{
+    loff_t round_len;
+
+
+    if ( new_file_length == max_reiserfs_offset (inode) ) {
+	/* item has to be deleted */
+	*cut_size = -(IH_SIZE + le16_to_cpu (le_ih->ih_item_len));
+	return M_DELETE;
+    }
+	
+    // new file gets truncated
+    if (inode_items_version (inode) == ITEM_VERSION_2) {
+	// 
+	/* v2 file lengths are rounded up before comparing with the item offset */
+	round_len = ROUND_UP (new_file_length); 
+	/* this was n_new_file_length < le_ih ... */
+	if ( round_len < le_ih_k_offset (le_ih) )  {
+	    *cut_size = -(IH_SIZE + le16_to_cpu (le_ih->ih_item_len));
+	    return M_DELETE; /* Delete this item. */
+	}
+	/* Calculate first position and size for cutting from item. */
+	pos_in_item (path) = round_len - (le_ih_k_offset (le_ih) - 1);
+	*cut_size = -(le16_to_cpu (le_ih->ih_item_len) - pos_in_item(path));
+	
+	return M_CUT; /* Cut from this item. */
+    }
+
+
+    // old file: items may have any length
+
+    if ( new_file_length < le_ih_k_offset (le_ih) )  {
+	*cut_size = -(IH_SIZE + le16_to_cpu (le_ih->ih_item_len));
+	return M_DELETE; /* Delete this item. */
+    }
+    /* Calculate first position and size for cutting from item. */
+    *cut_size = -(le16_to_cpu (le_ih->ih_item_len) -
+		  (pos_in_item (path) = new_file_length + 1 - le_ih_k_offset (le_ih)));
+    return M_CUT; /* Cut from this item. */
+}
+
+
+// Decide how to shrink/remove a directory item for a delete or cut.
+// Returns M_DELETE when the whole item goes away (empty directory, or
+// the item holds only one remaining entry) and M_CUT when a single
+// directory entry is removed from the item; *cut_size receives the
+// (negative) byte count to remove.
static inline int prepare_for_direntry_item (struct path * path,
					     struct item_head * le_ih,
					     struct inode * inode,
					     loff_t new_file_length,
					     int * cut_size)
{
    if (le_ih_k_offset (le_ih) == DOT_OFFSET &&
	new_file_length == max_reiserfs_offset (inode)) {
#ifdef CONFIG_REISERFS_CHECK
	/* an empty directory item must contain exactly "." and ".." */
	if (ih_entry_count (le_ih) != 2)
	    reiserfs_panic(inode->i_sb,"PAP-5220: prepare_for_delete_or_cut: "
			   "incorrect empty directory item (%h)", le_ih);
#endif
	*cut_size = -(IH_SIZE + le16_to_cpu (le_ih->ih_item_len));
	return M_DELETE; /* Delete the directory item containing "." and ".." entry. */
    }

    if ( ih_entry_count (le_ih) == 1 ) {
	/* Delete the directory item such as there is one record only
	   in this item*/
	*cut_size = -(IH_SIZE + le16_to_cpu (le_ih->ih_item_len));
	return M_DELETE;
    }

    /* Cut one record from the directory item. */
    *cut_size = -(DEH_SIZE + entry_length (get_bh (path), le_ih, pos_in_item (path)));
    return M_CUT;
}
+
+
+/* If the path points to a directory or direct item, calculate mode and the size cut, for balance.
+   If the path points to an indirect item, remove some number of its unformatted nodes.
+   In case of file truncate calculate whether this item must be deleted/truncated or last
+   unformatted node of this item will be converted to a direct item.
+   This function returns a determination of what balance mode the calling function should employ.
+
+   For indirect items this function also frees the unformatted blocks being
+   cut (zeroing their pointers in the item and logging the leaf buffer),
+   which may require repeating the tree search when the item moves or a
+   block is busy.  NOTE(review): on success for M_CUT of an indirect item,
+   pos_in_item is converted from a pointer index to a byte offset at the
+   very end of this function. */
static char prepare_for_delete_or_cut(
				      struct reiserfs_transaction_handle *th,
				      struct inode * inode,
				      struct path * p_s_path,
				      struct cpu_key * p_s_item_key,
				      int * p_n_removed,      /* Number of unformatted nodes which were removed
								 from end of the file. */
				      int * p_n_cut_size,
				      unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
    ) {
    struct super_block * p_s_sb = inode->i_sb;
    struct item_head    * p_le_ih = PATH_PITEM_HEAD(p_s_path);
    struct buffer_head  * p_s_bh = PATH_PLAST_BUFFER(p_s_path);

#ifdef CONFIG_REISERFS_CHECK
    int n_repeat_counter = 0;
#endif

    /* Stat_data item. */
    if ( is_statdata_le_ih (p_le_ih) ) {

#ifdef CONFIG_REISERFS_CHECK
	if ( n_new_file_length != max_reiserfs_offset (inode) )
	    reiserfs_panic(p_s_sb, "PAP-5210: prepare_for_delete_or_cut: mode must be M_DELETE");
#endif

	*p_n_cut_size = -(IH_SIZE + le16_to_cpu (p_le_ih->ih_item_len));
	return M_DELETE;
    }


    /* Directory item. */
    if ( is_direntry_le_ih (p_le_ih) )
	return prepare_for_direntry_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);

    /* Direct item. */
    if ( is_direct_le_ih (p_le_ih) )
	return prepare_for_direct_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);


    /* Case of an indirect item. */
    {
	int                   n_unfm_number,    /* Number of the item unformatted nodes. */
	    n_counter,
	    n_retry,        /* Set to one if there is unformatted node buffer in use. */
	    n_blk_size;
	__u32               * p_n_unfm_pointer; /* Pointer to the unformatted node number. */
	__u32 tmp;
	struct item_head      s_ih;           /* Item header. */
	char                  c_mode;           /* Returned mode of the balance. */
	struct buffer_head  * p_s_un_bh;
	int need_research;


	n_blk_size = p_s_sb->s_blocksize;

	/* Search for the needed object indirect item until there are no unformatted nodes to be removed. */
	do  {
	    need_research = 0;
            p_s_bh = PATH_PLAST_BUFFER(p_s_path);
	    /* Copy indirect item header to a temp variable. */
	    copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
	    /* Calculate number of unformatted nodes in this item. */
	    n_unfm_number = I_UNFM_NUM(&s_ih);

#ifdef CONFIG_REISERFS_CHECK
	    if ( ! is_indirect_le_ih(&s_ih) || ! n_unfm_number ||
		 pos_in_item (p_s_path) + 1 !=  n_unfm_number ) {
		printk("n_unfm_number = %d *p_n_pos_in_item = %d\n",n_unfm_number, pos_in_item (p_s_path));
		reiserfs_panic(p_s_sb, "PAP-5240: prepare_for_delete_or_cut: illegal item %h", &s_ih);
	    }
#endif

	    /* Calculate balance mode and position in the item to remove unformatted nodes. */
	    if ( n_new_file_length == max_reiserfs_offset (inode) ) {/* Case of delete. */
		pos_in_item (p_s_path) = 0;
		*p_n_cut_size = -(IH_SIZE + le16_to_cpu (s_ih.ih_item_len));
		c_mode = M_DELETE;
	    }
	    else  { /* Case of truncate. */
		if ( n_new_file_length < le_ih_k_offset (&s_ih) )  {
		    pos_in_item (p_s_path) = 0;
		    *p_n_cut_size = -(IH_SIZE + le16_to_cpu (s_ih.ih_item_len));
		    c_mode = M_DELETE; /* Delete this item. */
		}
		else  {
		    /* indirect item must be truncated starting from *p_n_pos_in_item-th position */
		    pos_in_item (p_s_path) = (n_new_file_length + n_blk_size - le_ih_k_offset (&s_ih) ) >> p_s_sb->s_blocksize_bits;

#ifdef CONFIG_REISERFS_CHECK
		    if ( pos_in_item (p_s_path) > n_unfm_number )
			reiserfs_panic(p_s_sb, "PAP-5250: prepare_for_delete_or_cut: illegal position in the item");
#endif

		    /* Either convert last unformatted node of indirect item to direct item or increase
		       its free space.  */
		    if ( pos_in_item (p_s_path) == n_unfm_number )  {
			*p_n_cut_size = 0; /* Nothing to cut. */
			return M_CONVERT; /* Maybe convert last unformatted node to the direct item. */
		    }
		    /* Calculate size to cut. */
		    *p_n_cut_size = -(s_ih.ih_item_len - pos_in_item (p_s_path) * UNFM_P_SIZE);

		    c_mode = M_CUT;     /* Cut from this indirect item. */
		}
	    }

#ifdef CONFIG_REISERFS_CHECK
	    if ( n_unfm_number <= pos_in_item (p_s_path) )
		reiserfs_panic(p_s_sb, "PAP-5260: prepare_for_delete_or_cut: illegal position in the indirect item");
#endif

	    /* pointers to be cut */
	    n_unfm_number -= pos_in_item (p_s_path);
	    /* Set pointer to the last unformatted node pointer that is to be cut. */
	    p_n_unfm_pointer = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1 - *p_n_removed;


	    /* We go through the unformatted nodes pointers of the indirect
	       item and look for the unformatted nodes in the cache. If we
	       found some of them we free it, zero corresponding indirect item
	       entry and log buffer containing that indirect item. For this we
	       need to prepare last path element for logging. If some
	       unformatted node has b_count > 1 we must not free this
	       unformatted node since it is in use. */
	    reiserfs_prepare_for_journal(p_s_sb, p_s_bh, 1);
	    // note: path could be changed, first line in for loop takes care
	    // of it

	    /* walk backwards over the pointers being cut, freeing blocks */
	    for ( n_retry = 0, n_counter = *p_n_removed;
		  n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {

		if (item_moved (&s_ih, p_s_path)) {
		    need_research = 1 ;
		    break;
		}
#ifdef CONFIG_REISERFS_CHECK
		if (p_n_unfm_pointer < (__u32 *)B_I_PITEM(p_s_bh, &s_ih) ||
		    p_n_unfm_pointer > (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1)
		    reiserfs_panic (p_s_sb, "vs-5265: prepare_for_delete_or_cut: pointer out of range");
#endif

		if ( ! *p_n_unfm_pointer )  { /* Hole, nothing to remove. */
		    if ( ! n_retry )
			(*p_n_removed)++;
		    continue;
		}
		/* Search for the buffer in cache. */
		p_s_un_bh = get_hash_table(p_s_sb->s_dev, *p_n_unfm_pointer, n_blk_size);

		if (p_s_un_bh && buffer_locked(p_s_un_bh)) {
		  /* waiting may schedule; re-check the item afterwards */
		  __wait_on_buffer(p_s_un_bh) ;
		  if ( item_moved (&s_ih, p_s_path) )  {
		      need_research = 1;
		      brelse(p_s_un_bh) ;
		      break ;
		  }
		}
		if ( p_s_un_bh && block_in_use (p_s_un_bh)) {
		    /* Block is locked or held more than by one holder and by
                       journal. */

#ifndef __KERNEL__
		    reiserfs_panic(p_s_sb, "PAP-5270: prepare_for_delete_or_cut: b_count != 1");
#endif

#ifdef CONFIG_REISERFS_CHECK
		    if (n_repeat_counter && (n_repeat_counter % 100000) == 0) {
		      printk("prepare_for_delete, waiting on buffer %lu, b_count %d, %s%cJDIRTY %cJDIRTY_WAIT\n",
			     p_s_un_bh->b_blocknr, atomic_read (&p_s_un_bh->b_count),
			     buffer_locked (p_s_un_bh) ? "locked, " : "",
			     buffer_journaled(p_s_un_bh) ? ' ' : '!',
			     buffer_journal_dirty(p_s_un_bh) ? ' ' : '!') ;

		    }
#endif
		    n_retry = 1;
		    brelse (p_s_un_bh);
		    continue;
		}

		if ( ! n_retry )
		    (*p_n_removed)++;

#ifdef CONFIG_REISERFS_CHECK
		if ( p_s_un_bh && (*p_n_unfm_pointer != p_s_un_bh->b_blocknr ))
		    // note: minix_truncate allows that. As truncate is
		    // protected by down (inode->i_sem), two truncates can not
		    // co-exist
		    reiserfs_panic(p_s_sb, "PAP-5280: prepare_for_delete_or_cut: blocks numbers are different");
#endif

		/* zero the pointer in the item, log it, and free the block */
		tmp = *p_n_unfm_pointer;
		*p_n_unfm_pointer = 0;
		journal_mark_dirty (th, p_s_sb, p_s_bh);
		bforget (p_s_un_bh);
		inode->i_blocks -= p_s_sb->s_blocksize / 512;
		reiserfs_free_block(th, tmp);
		if ( item_moved (&s_ih, p_s_path) )  {
		    need_research = 1;
		    break ;
#if 0
		    reiserfs_prepare_for_journal(p_s_sb,
		                                 PATH_PLAST_BUFFER(p_s_path),
						 1) ;
		    if ( comp_items(&s_ih, p_s_path) )  {
			reiserfs_restore_prepared_buffer(p_s_sb,
					 PATH_PLAST_BUFFER(p_s_path)) ;
			brelse(p_s_un_bh);
			break;
		    }
		    *p_n_unfm_pointer = 0;
		    journal_mark_dirty (th,p_s_sb,PATH_PLAST_BUFFER(p_s_path));

		    reiserfs_free_block(th, p_s_sb, block_addr);
		    if (p_s_un_bh) {
			mark_buffer_clean (p_s_un_bh);
			brelse (p_s_un_bh);
		    }
		    if ( comp_items(&s_ih, p_s_path) )  {
			break ;
		    }
#endif
		}

	    }

	    /* a trick.  If the buffer has been logged, this
	    ** will do nothing.  If we've broken the loop without
	    ** logging it, it will restore the buffer
	    **
	    */
	    reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh);

	    if ( n_retry ) {
		/* There is block in use. Wait, they should release it soon */

#ifdef CONFIG_REISERFS_CHECK
		if ( *p_n_removed >= n_unfm_number )
		    reiserfs_panic(p_s_sb, "PAP-5290: prepare_for_delete_or_cut: illegal case");
		if ( !(++n_repeat_counter % 500000) ) {
		    reiserfs_warning("PAP-5300: prepare_for_delete_or_cut: (pid %u): "
				     "could not delete item %k in (%d) iterations. New file length %Lu. (inode %Ld), Still trying\n",
				     current->pid, p_s_item_key, n_repeat_counter, n_new_file_length, inode->i_size);
		    if (n_repeat_counter == 5000000) {
			print_block (PATH_PLAST_BUFFER(p_s_path), 3,
				     PATH_LAST_POSITION (p_s_path) - 2, PATH_LAST_POSITION (p_s_path) + 2);
			reiserfs_panic(p_s_sb, "PAP-5305: prepare_for_delete_or_cut: key %k, new_file_length %Ld",
				       p_s_item_key, n_new_file_length);
		    }
		}
#endif

#ifdef __KERNEL__
		/* yield so the holder of the busy block can release it */
		run_task_queue(&tq_disk);
		current->policy |= SCHED_YIELD;
		schedule();
#endif
	    }
	    /* This loop can be optimized. */
	} while ( (*p_n_removed < n_unfm_number || need_research) &&
		  search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND );

#ifdef CONFIG_REISERFS_CHECK
	if ( *p_n_removed < n_unfm_number )
	    reiserfs_panic(p_s_sb, "PAP-5310: prepare_for_delete_or_cut: indirect item is not found");

	if (item_moved (&s_ih, p_s_path) ) {
	    printk("prepare_for_delete_or_cut: after while, comp failed, retry\n") ;
	    BUG ();
	}
#endif

	if (c_mode == M_CUT)
	    pos_in_item (p_s_path) *= UNFM_P_SIZE;
	return c_mode;
    }
}
+
+
+/* Calculate the number of bytes which will be deleted or cut in the balance.
+   For a stat-data item nothing is counted; for a directory item the whole
+   item length is returned (old-format directories have a different empty
+   size, so EMPTY_DIR_SIZE cannot be used); for an indirect item the pointer
+   bytes are converted to the number of file-data bytes they address. */
int calc_deleted_bytes_number(
    struct  tree_balance  * p_s_tb,
    char                    c_mode
    ) {
    int n_del_size;
    struct  item_head     * p_le_ih = PATH_PITEM_HEAD(p_s_tb->tb_path);

    if ( is_statdata_le_ih (p_le_ih) )
	return 0;

    if ( is_direntry_le_ih (p_le_ih) ) {
	// return EMPTY_DIR_SIZE; /* We delete emty directoris only. */
	// we can't use EMPTY_DIR_SIZE, as old format dirs have a different
	// empty size.  ick. FIXME, is this right?
	//
	return le16_to_cpu(p_le_ih->ih_item_len) ;
    }
    /* M_DELETE removes the whole item; M_CUT removes -insert_size[0] bytes */
    n_del_size = ( c_mode == M_DELETE ) ? le16_to_cpu (p_le_ih->ih_item_len) : -p_s_tb->insert_size[0];

    if ( is_indirect_le_ih (p_le_ih) )
	n_del_size = (n_del_size/UNFM_P_SIZE)*
	  (PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_size);// - get_ih_free_space (p_le_ih);
    return n_del_size;
}
+
+/* Zero-initialize a tree_balance structure and attach the transaction
+   handle, super block, search path and requested insert/cut size to it.
+   Every caller of fix_nodes/do_balance goes through this first. */
static void init_tb_struct(
    struct reiserfs_transaction_handle *th,
    struct tree_balance * p_s_tb,
    struct super_block  * p_s_sb,
    struct path         * p_s_path,
    int                   n_size
    ) {
    memset (p_s_tb,'\0',sizeof(struct tree_balance));
    p_s_tb->transaction_handle = th ;
    p_s_tb->tb_sb = p_s_sb;
    p_s_tb->tb_path = p_s_path;
    /* mark the illegal path slot as unused */
    PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
    PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
    p_s_tb->insert_size[0] = n_size;
}
+
+
+
+/* Zero-fill the tail of 'item': bytes [length, total_length) are cleared.
+   Used to pad an item out to its full size. */
void padd_item (char * item, int total_length, int length)
{
    int i;

    for (i = total_length; i > length; )
	item [--i] = 0;
}
+
+
+/* Delete object item.
+   Repeats prepare_for_delete_or_cut + fix_nodes until fix_nodes no longer
+   asks for a repeated search, then balances the tree with M_DELETE.
+   When p_s_un_bh is given (direct-to-indirect conversion), the direct
+   item's data is copied into the unformatted node before balancing.
+   Returns the number of deleted body bytes, or 0 when fix_nodes failed. */
int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
			  struct path * p_s_path, /* Path to the deleted item. */
			  struct cpu_key * p_s_item_key, /* Key to search for the deleted item. */
			  struct inode * p_s_inode,/* inode is here just to update i_blocks */
			  struct buffer_head  * p_s_un_bh)    /* NULL or unformatted node pointer. */
{
    struct super_block * p_s_sb =  p_s_inode->i_sb;
    struct tree_balance   s_del_balance;
    struct item_head      s_ih;
    int                   n_ret_value,
	n_del_size,
	n_removed;

#ifdef CONFIG_REISERFS_CHECK
    char                  c_mode;
    int			n_iter = 0;
#endif

    init_tb_struct(th, &s_del_balance, p_s_sb, p_s_path, 0/*size is unknown*/);

    while ( 1 ) {
	n_removed = 0;

#ifdef CONFIG_REISERFS_CHECK
	n_iter++;
	c_mode =
#endif
	    prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed, &n_del_size, max_reiserfs_offset (p_s_inode));

#ifdef CONFIG_REISERFS_CHECK
	if ( c_mode != M_DELETE )
	    reiserfs_panic(p_s_sb, "PAP-5320: reiserfs_delete_item: mode must be M_DELETE");
#endif

	copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
	s_del_balance.insert_size[0] = n_del_size;

	n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, 0);
	if ( n_ret_value != REPEAT_SEARCH )
	    break;

	// file system changed, repeat search
	n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
	if (n_ret_value == IO_ERROR)
	    break;
	if (n_ret_value == FILE_NOT_FOUND) {
	    reiserfs_warning ("vs-5340: reiserfs_delete_item: "
			      "no items of the file %K found\n", p_s_item_key);
	    break;
	}
    } /* while (1) */

    if ( n_ret_value != CARRY_ON ) {
	/* fix_nodes hit IO_ERROR/FILE_NOT_FOUND etc: release and give up */
	unfix_nodes(&s_del_balance);
	return 0;
    }

    // reiserfs_delete_item returns item length when success
    n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);

    if ( p_s_un_bh )  {
	int off;
	int block_off ;
	char *data ;

	/* We are in direct2indirect conversion, so move tail contents
           to the unformatted node */
	/* note, we do the copy before preparing the buffer because we
	** don't care about the contents of the unformatted node yet.
	** the only thing we really care about is the direct item's data
	** is in the unformatted node.
	**
	** Otherwise, we would have to call reiserfs_prepare_for_journal on
	** the unformatted node, which might schedule, meaning we'd have to
	** loop all the way back up to the start of the while loop.
	**
	** The unformatted node is prepared and logged after the do_balance.
	**
	** p_s_un_bh is from the page cache (all unformatted nodes are
	** from the page cache) and might be a highmem page.  So, we
	** can't use p_s_un_bh->b_data.  But, the page has already been
	** kmapped, so we can use page_address()
	** -clm
	*/

	data = page_address(p_s_un_bh->b_page) ;
	off = ((le_ih_k_offset (&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
	block_off = off & (p_s_un_bh->b_size - 1) ;
	memcpy(data + off,
	       B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);

	/* clear out the rest of the block past the end of the file. */
	if (block_off + n_ret_value < p_s_un_bh->b_size) {
	    memset(data + off + n_ret_value, 0,
		   p_s_un_bh->b_size - block_off - n_ret_value) ;
	}
    }

    /* Perform balancing after all resources have been collected at once. */
    do_balance(&s_del_balance, NULL, NULL, M_DELETE);

    /* see comment above for why this is after the do_balance */
    if (p_s_un_bh) {
	mark_buffer_dirty(p_s_un_bh) ;
    }

    /* Return deleted body length */
    return n_ret_value;
}
+
+
+/* Summary Of Mechanisms For Handling Collisions Between Processes:
+
+ deletion of the body of the object is performed by iput(), with the
+ result that if multiple processes are operating on a file, the
+ deletion of the body of the file is deferred until the last process
+ that has an open inode performs its iput().
+
+ writes and truncates are protected from collisions by use of
+ semaphores.
+
+ creates, linking, and mknod are protected from collisions with other
+ processes by making the reiserfs_add_entry() the last step in the
+ creation, and then rolling back all changes if there was a collision.
+ - Hans
+*/
+
+
+/* this deletes item which never gets split (stat data and empty
+   directory items): search for 'key', then run fix_nodes/do_balance
+   with M_DELETE, retrying the search while fix_nodes asks for it.
+   Failures are only warned about — the caller gets no error code. */
static void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
					struct key * key)
{
    struct tree_balance tb;
    INITIALIZE_PATH (path);
    int item_len;
    int tb_init = 0 ;
    struct cpu_key cpu_key;
    int retval;

    le_key2cpu_key (&cpu_key, key);

    while (1) {
	retval = search_item (th->t_super, &cpu_key, &path);
	if (retval == IO_ERROR) {
	    reiserfs_warning ("vs-: reiserfs_delete_solid_item: "
			      "i/o failure occured trying to delete %K\n", &cpu_key);
	    break;
	}
	if (retval != ITEM_FOUND) {
	    pathrelse (&path);
	    reiserfs_warning ("vs-: reiserfs_delete_solid_item: %k not found",
			      key);
	    break;
	}
	if (!tb_init) {
	    /* set up tree_balance once; item_len is fixed for this item */
	    tb_init = 1 ;
	    item_len = le16_to_cpu (PATH_PITEM_HEAD (&path)->ih_item_len);
	    init_tb_struct (th, &tb, th->t_super, &path, - (IH_SIZE + item_len));
	}

	retval = fix_nodes (M_DELETE, &tb, NULL, 0);
	if (retval == REPEAT_SEARCH)
	    continue;

	if (retval == CARRY_ON) {
	    do_balance (&tb, 0, 0, M_DELETE);
	    break;
	}

	// IO_ERROR, NO_DISK_SPACE, etc
	reiserfs_warning ("vs-: reiserfs_delete_solid_item: "
			  "could not delete %K due to fix_nodes failure\n", &cpu_key);
	unfix_nodes (&tb);
	break;
    }

    reiserfs_check_path(&path) ;
}
+
+
+/* Remove an object from the tree entirely: truncate its body to zero
+   (for a directory this removes the item containing "." and "..")
+   and then delete its stat data item. */
void reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * inode)
{
    inode->i_size = 0;

    /* for directory this deletes item containing "." and ".." */
    reiserfs_do_truncate (th, inode, NULL, 0/*no timestamp updates*/);

    /* delete stat data */
    /* this debug code needs to go away.  Trying to find a truncate race
    ** -- clm -- 4/1/2000
    */
#if 0
    if (inode->i_nlink != 0) {
        reiserfs_warning("clm-4001: deleting inode with link count==%d\n", inode->i_nlink) ;
    }
#endif
    reiserfs_delete_solid_item (th, INODE_PKEY (inode));
}
+
+
+/* Decide whether the tail of the file should be converted from an
+   unformatted node back to a direct item during truncate.  If the tail
+   must stay unformatted (inode shared, packing disabled, or no page),
+   *p_c_mode is set to M_SKIP_BALANCING and the number of tail bytes in
+   the last block is returned; otherwise indirect2direct performs the
+   conversion and its result is returned. */
static int maybe_indirect_to_direct (struct reiserfs_transaction_handle *th,
			      struct inode * p_s_inode,
			      struct page *page,
			      struct path         * p_s_path,
			      struct cpu_key      * p_s_item_key,
			      loff_t         n_new_file_size,
			      char                * p_c_mode
			      ) {
    struct super_block * p_s_sb = p_s_inode->i_sb;
    int n_block_size = p_s_sb->s_blocksize;
    int cut_bytes;

    if (n_new_file_size != p_s_inode->i_size)
	BUG ();

    /* the page being sent in could be NULL if there was an i/o error
    ** reading in the last block.  The user will hit problems trying to
    ** read the file, but for now we just skip the indirect2direct
    */
    if (atomic_read(&p_s_inode->i_count) > 1 ||
	!tail_has_to_be_packed (p_s_inode) ||
	!page || p_s_inode->u.reiserfs_i.nopack) {
	// leave tail in an unformatted node
	*p_c_mode = M_SKIP_BALANCING;
	cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1));
	pathrelse(p_s_path);
	return cut_bytes;
    }
    /* Permorm the conversion to a direct_item. */
    /*return indirect_to_direct (p_s_inode, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);*/
    return indirect2direct (th, p_s_inode, page, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);
}
+
+
+/* we did indirect_to_direct conversion. And we have inserted direct
+   item successfully, but there was no disk space to cut the unfm
+   pointer being converted. Therefore we have to delete the inserted
+   direct item(s): walk backwards over the tail, deleting each direct
+   item and shifting the search key by the number of bytes removed. */
static void indirect_to_direct_roll_back (struct reiserfs_transaction_handle *th, struct inode * inode, struct path * path)
{
    struct cpu_key tail_key;
    int tail_len;
    int removed;

    make_cpu_key (&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);// !!!!
    tail_key.key_length = 4;

    /* number of tail bytes stored in direct items (bytes past the last
       full block boundary) */
    tail_len = (cpu_key_k_offset (&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
    while (tail_len) {
	/* look for the last byte of the tail */
	if (search_for_position_by_key (inode->i_sb, &tail_key, path) == POSITION_NOT_FOUND)
	    reiserfs_panic (inode->i_sb, "vs-5615: indirect_to_direct_roll_back: found invalid item");
#ifdef CONFIG_REISERFS_CHECK
	if (path->pos_in_item != PATH_PITEM_HEAD (path)->ih_item_len - 1)
	    reiserfs_panic (inode->i_sb, "vs-5616: indirect_to_direct_roll_back: appended bytes found");
#endif
	PATH_LAST_POSITION (path) --;

	removed = reiserfs_delete_item (th, path, &tail_key, inode, 0/*unbh not needed*/);
#ifdef CONFIG_REISERFS_CHECK
	if (removed <= 0 || removed > tail_len)
	    reiserfs_panic (inode->i_sb, "vs-5617: indirect_to_direct_roll_back: "
			    "there was tail %d bytes, removed item length %d bytes",
			    tail_len, removed);
#endif
	tail_len -= removed;
	set_cpu_key_k_offset (&tail_key, cpu_key_k_offset (&tail_key) - removed);
    }
    printk ("indirect_to_direct_roll_back: indirect_to_direct conversion has been rolled back due to lack of disk space\n");
    //mark_file_without_tail (inode);
    mark_inode_dirty (inode);
}
+
+
+/* (Truncate or cut entry) or delete object item. Returns < 0 on failure.
+   On success returns the number of bytes removed from the item.  When the
+   last unformatted node of the file should become a direct item, a
+   M_CONVERT pass through maybe_indirect_to_direct is done first and the
+   remaining unformatted-node pointer is then cut in a second pass. */
int reiserfs_cut_from_item (struct reiserfs_transaction_handle *th,
			    struct path * p_s_path,
			    struct cpu_key * p_s_item_key,
			    struct inode * p_s_inode,
			    struct page *page,
			    loff_t n_new_file_size)
{
    struct super_block * p_s_sb = p_s_inode->i_sb;
    /* Every function which is going to call do_balance must first
       create a tree_balance structure.  Then it must fill up this
       structure by using the init_tb_struct and fix_nodes functions.
       After that we can make tree balancing. */
    struct tree_balance s_cut_balance;
    int n_cut_size = 0,        /* Amount to be cut. */
	n_ret_value = CARRY_ON,
	n_removed = 0,     /* Number of the removed unformatted nodes. */
	n_is_inode_locked = 0;
    char                c_mode;            /* Mode of the balance. */
    int retval2 = -1;


    init_tb_struct(th, &s_cut_balance, p_s_inode->i_sb, p_s_path, n_cut_size);


    /* Repeat this loop until we either cut the item without needing
       to balance, or we fix_nodes without schedule occuring */
    while ( 1 ) {
	/* Determine the balance mode, position of the first byte to
	   be cut, and size to be cut.  In case of the indirect item
	   free unformatted nodes which are pointed to by the cut
	   pointers. */

	c_mode = prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed,
					   &n_cut_size, n_new_file_size);
	if ( c_mode == M_CONVERT )  {
	    /* convert last unformatted node to direct item or leave
               tail in the unformatted node */
#ifdef CONFIG_REISERFS_CHECK
	    if ( n_ret_value != CARRY_ON )
		reiserfs_panic (p_s_sb, "PAP-5570: reiserfs_cut_from_item: can not convert twice");
#endif

	    n_ret_value = maybe_indirect_to_direct (th, p_s_inode, page, p_s_path, p_s_item_key,
						    n_new_file_size, &c_mode);
	    if ( c_mode == M_SKIP_BALANCING )
		/* tail has been left in the unformatted node */
		return n_ret_value;

	    n_is_inode_locked = 1;

	    /* removing of last unformatted node will change value we
               have to return to truncate. Save it */
	    retval2 = n_ret_value;
	    /*retval2 = p_s_sb->s_blocksize - (n_new_file_size & (p_s_sb->s_blocksize - 1));*/

	    /* So, we have performed the first part of the conversion:
	       inserting the new direct item.  Now we are removing the
	       last unformatted node pointer. Set key to search for
	       it. */
      	    set_cpu_key_k_type (p_s_item_key, TYPE_INDIRECT);
	    p_s_item_key->key_length = 4;
	    n_new_file_size -= (n_new_file_size & (p_s_sb->s_blocksize - 1));
	    set_cpu_key_k_offset (p_s_item_key, n_new_file_size + 1);
	    if ( search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND ){
		print_block (PATH_PLAST_BUFFER (p_s_path), 3, PATH_LAST_POSITION (p_s_path) - 1, PATH_LAST_POSITION (p_s_path) + 1);
		reiserfs_panic(p_s_sb, "PAP-5580: reiserfs_cut_from_item: item to convert does not exist (%k)", p_s_item_key);
	    }
	    continue;
	}
	if (n_cut_size == 0) {
	    /* nothing left to cut: done */
	    pathrelse (p_s_path);
	    return 0;
	}

	s_cut_balance.insert_size[0] = n_cut_size;

	n_ret_value = fix_nodes(c_mode, &s_cut_balance, NULL, 0);
      	if ( n_ret_value != REPEAT_SEARCH )
	    break;

	n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
	if (n_ret_value == POSITION_FOUND)
	    continue;

	reiserfs_warning ("PAP-5610: reiserfs_cut_from_item: item %K not found\n", p_s_item_key);
	pathrelse (p_s_path);
	return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
    } /* while */

    // check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
    if ( n_ret_value != CARRY_ON ) {
	if ( n_is_inode_locked ) {
	    // FIXME: this seems to be not needed: we are always able
	    // to cut item
	    indirect_to_direct_roll_back (th, p_s_inode, p_s_path);
	}
	if (n_ret_value == NO_DISK_SPACE)
	    reiserfs_warning ("");
	unfix_nodes (&s_cut_balance);
	return -EIO;
    }

    /* go ahead and perform balancing */

#ifdef CONFIG_REISERFS_CHECK
    if ( c_mode == M_PASTE || c_mode == M_INSERT )
	reiserfs_panic (p_s_sb, "PAP-5640: reiserfs_cut_from_item: illegal mode");
#endif

    /* Calculate number of bytes that need to be cut from the item. */
    if (retval2 == -1)
	n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode);
    else
	n_ret_value = retval2;

    if ( c_mode == M_DELETE ) {
	struct item_head * p_le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);

	if ( is_direct_le_ih (p_le_ih) && (le_ih_k_offset (p_le_ih) & (p_s_sb->s_blocksize - 1)) == 1 ) {
	    /* we delete first part of tail which was stored in direct
               item(s) */
	    // FIXME: this is to keep 3.5 happy
	    p_s_inode->u.reiserfs_i.i_first_direct_byte = U32_MAX;
	    p_s_inode->i_blocks -= p_s_sb->s_blocksize / 512;
	}
    }

#ifdef CONFIG_REISERFS_CHECK
    if (n_is_inode_locked) {
	struct item_head * le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);
	/* we are going to complete indirect2direct conversion. Make
           sure, that we exactly remove last unformatted node pointer
           of the item */
	if (!is_indirect_le_ih (le_ih))
	    reiserfs_panic (p_s_sb, "vs-5652: reiserfs_cut_from_item: "
			    "item must be indirect %h", le_ih);

	if (c_mode == M_DELETE && le16_to_cpu (le_ih->ih_item_len) != UNFM_P_SIZE)
	    reiserfs_panic (p_s_sb, "vs-5653: reiserfs_cut_from_item: "
			    "completing indirect2direct conversion indirect item %h"
			    "being deleted must be of 4 byte long", le_ih);

	if (c_mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
	    reiserfs_panic (p_s_sb, "vs-5654: reiserfs_cut_from_item: "
			    "can not complete indirect2direct conversion of %h (CUT, insert_size==%d)",
			    le_ih, s_cut_balance.insert_size[0]);
	}
	/* it would be useful to make sure, that right neighboring
           item is direct item of this file */
    }
#endif

    do_balance(&s_cut_balance, NULL, NULL, c_mode);
    if ( n_is_inode_locked ) {
	/* we've converted from indirect to direct, we must remove
	** ourselves from the list of pages that need flushing before
	** this transaction can commit
	*/
	reiserfs_remove_page_from_flush_list(th, p_s_inode) ;
	p_s_inode->u.reiserfs_i.i_pack_on_close = 0 ;
    }
    return n_ret_value;
}
+
+
+/* Remove the body of a directory being deleted: point the inode key at
+   the directory item ("."/".." live at DOT_OFFSET), delete it as a solid
+   item, then restore the key to the stat-data offset/type. */
static void truncate_directory (struct reiserfs_transaction_handle *th, struct inode * inode)
{
    if (inode->i_nlink)
	reiserfs_warning ("vs-5655: truncate_directory: link count != 0");

    set_le_key_k_offset (ITEM_VERSION_1, INODE_PKEY (inode), DOT_OFFSET);
    set_le_key_k_type (ITEM_VERSION_1, INODE_PKEY (inode), TYPE_DIRENTRY);
    reiserfs_delete_solid_item (th, INODE_PKEY (inode));

    /* put the key back so it addresses the stat data again */
    set_le_key_k_offset (ITEM_VERSION_1, INODE_PKEY (inode), SD_OFFSET);
    set_le_key_k_type (ITEM_VERSION_1, INODE_PKEY (inode), TYPE_STAT_DATA);
}
+
+
+
+
+/* Truncate file to the new size. Note, this must be called with a transaction
+   already started.
+
+   Finds the last item of the file, then repeatedly calls
+   reiserfs_cut_from_item until the on-disk size reaches the new i_size.
+   Because the loop can log many blocks, the transaction is politely
+   ended and restarted when it grows too large; the inode's stat data is
+   brought up to date before each restart and at the end. */
void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
			   struct  inode * p_s_inode, /* ->i_size contains new
                                                         size */
			   struct page *page, /* up to date for last block */
			   int update_timestamps  /* when it is called by
						     file_release to convert
						     the tail - no timestamps
						     should be updated */
    ) {
    INITIALIZE_PATH (s_search_path);       /* Path to the current object item. */
    struct item_head    * p_le_ih;         /* Pointer to an item header. */
    struct cpu_key      s_item_key;     /* Key to search for a previous file item. */
    loff_t         n_file_size,    /* Old file size. */
	n_new_file_size;/* New file size. */
    int                   n_deleted;      /* Number of deleted or truncated bytes. */
    int retval;

    if ( ! (S_ISREG(p_s_inode->i_mode) || S_ISDIR(p_s_inode->i_mode) || S_ISLNK(p_s_inode->i_mode)) )
	return;

    if (S_ISDIR(p_s_inode->i_mode)) {
	// deletion of directory - no need to update timestamps
	truncate_directory (th, p_s_inode);
	return;
    }

    /* Get new file size. */
    n_new_file_size = p_s_inode->i_size;

    // FIXME: note, that key type is unimportant here
    make_cpu_key (&s_item_key, p_s_inode, max_reiserfs_offset (p_s_inode), TYPE_DIRECT, 3);

    retval = search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path);
    if (retval == IO_ERROR) {
	reiserfs_warning ("vs-5657: reiserfs_do_truncate: "
			  "i/o failure occured trying to truncate %K\n", &s_item_key);
	return;
    }
    if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
	reiserfs_warning ("PAP-5660: reiserfs_do_truncate: "
			  "wrong result %d of search for %K\n", retval, &s_item_key);
	return;
    }

    /* searched past the end of the file: step back to the last item */
    s_search_path.pos_in_item --;

    /* Get real file size (total length of all file items) */
    p_le_ih = PATH_PITEM_HEAD(&s_search_path);
    if ( is_statdata_le_ih (p_le_ih) )
	n_file_size = 0;
    else {
	loff_t offset = le_ih_k_offset (p_le_ih);
	int bytes = op_bytes_number (p_le_ih,p_s_inode->i_sb->s_blocksize);

	/* this may mismatch with real file size: if last direct item
           had no padding zeros and last unformatted node had no free
           space, this file would have this file size */
	n_file_size = offset + bytes - 1;
    }

    if ( n_file_size == 0 || n_file_size < n_new_file_size ) {
	/* nothing to remove: file already small enough */
	pathrelse(&s_search_path);
	return;
    }
    /* Update key to search for the last file item. */
    set_cpu_key_k_offset (&s_item_key, n_file_size);

    do  {
	/* Cut or delete file item. */
	n_deleted = reiserfs_cut_from_item(th, &s_search_path, &s_item_key, p_s_inode,  page, n_new_file_size);
	if (n_deleted < 0) {
	    reiserfs_warning ("vs-5665: reiserfs_truncate_file: cut_from_item failed");
	    reiserfs_check_path(&s_search_path) ;
	    return;
	}

#ifdef CONFIG_REISERFS_CHECK
	if ( n_deleted > n_file_size ){
	    reiserfs_panic (p_s_inode->i_sb, "PAP-5670: reiserfs_truncate_file: "
			    "reiserfs_truncate_file returns too big number: deleted %d, file_size %lu, item_key %k",
			    n_deleted, n_file_size, &s_item_key);
	}
#endif

	/* Change key to search the last file item. */
	n_file_size -= n_deleted;

	set_cpu_key_k_offset (&s_item_key, n_file_size);

	/* While there are bytes to truncate and previous file item is presented in the tree. */

	/*
	** This loop could take a really long time, and could log
	** many more blocks than a transaction can hold.  So, we do a polite
	** journal end here, and if the transaction needs ending, we make
	** sure the file is consistent before ending the current trans
	** and starting a new one
	*/
        if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
	  int orig_len_alloc = th->t_blocks_allocated ;
	  decrement_counters_in_path(&s_search_path) ;

	  if (update_timestamps) {
	      p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME;
	      // FIXME: sd gets wrong size here
	  }
	  reiserfs_update_sd(th, p_s_inode) ;

	  journal_end(th, p_s_inode->i_sb, orig_len_alloc) ;
	  journal_begin(th, p_s_inode->i_sb, orig_len_alloc) ;
	}
    } while ( n_file_size > ROUND_UP (n_new_file_size) &&
	      search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path) == POSITION_FOUND )  ;

#ifdef CONFIG_REISERFS_CHECK
    if ( n_file_size > ROUND_UP (n_new_file_size) )
	reiserfs_panic (p_s_inode->i_sb, "PAP-5680: reiserfs_truncate_file: "
			"truncate did not finish: new_file_size %Ld, current %Ld, oid %d\n",
			n_new_file_size, n_file_size, s_item_key.on_disk_key.k_objectid);
#endif

    if (update_timestamps) {
	// this is truncate, not file closing
	p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME;
    }
    reiserfs_update_sd (th, p_s_inode);

    pathrelse(&s_search_path) ;
}
+
+
+#ifdef CONFIG_REISERFS_CHECK
+// Debug-only sanity check run after a re-search during paste: verifies
+// that the re-found position is still an __append__ point (exactly at
+// the end of the last item), i.e. that we neither overwrite existing
+// bytes nor create a hole. Panics on any mismatch.
+static void check_research_for_paste (struct path * path, struct cpu_key * p_s_key)
+{
+ struct item_head * found_ih = get_ih (path);
+
+ /* direct item: the paste key must point one byte past the item's last
+ byte, and the in-item position must equal the item's byte length */
+ if (is_direct_le_ih (found_ih)) {
+ if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_bh (path)->b_size) !=
+ cpu_key_k_offset (p_s_key) ||
+ op_bytes_number (found_ih, get_bh (path)->b_size) != pos_in_item (path))
+ reiserfs_panic (0, "PAP-5720: check_research_for_paste: "
+ "found direct item %h or position (%d) does not match to key %K",
+ found_ih, pos_in_item (path), p_s_key);
+ }
+ /* indirect item: same end-of-item condition, position must be the
+ unformatted-pointer count, and the item must have no free space
+ left in its last unformatted node */
+ if (is_indirect_le_ih (found_ih)) {
+ if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_bh (path)->b_size) != cpu_key_k_offset (p_s_key) ||
+ I_UNFM_NUM (found_ih) != pos_in_item (path) ||
+ get_ih_free_space (found_ih) != 0)
+ reiserfs_panic (0, "PAP-5730: check_research_for_paste: "
+ "found indirect item (%h) or position (%d) does not match to key (%K)",
+ found_ih, pos_in_item (path), p_s_key);
+ }
+}
+#endif /* config reiserfs check */
+
+
+/* Paste bytes onto the end of an existing item (append to a direct or
+ indirect item). p_s_search_path must already point at the item; the
+ key locates the paste position. Returns 0 on success, -EEXIST if the
+ target byte already exists after a re-search, -ENOSPC if fix_nodes
+ found no disk space, -EIO on I/O or internal error. */
+int reiserfs_paste_into_item (struct reiserfs_transaction_handle *th,
+ struct path * p_s_search_path, /* Path to the pasted item. */
+ struct cpu_key * p_s_key, /* Key to search for the needed item.*/
+ const char * p_c_body, /* Pointer to the bytes to paste. */
+ int n_pasted_size) /* Size of pasted bytes. */
+{
+ struct tree_balance s_paste_balance;
+ int retval;
+
+ init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path, n_pasted_size);
+
+ /* fix_nodes returns REPEAT_SEARCH when it had to schedule and the
+ tree may have changed under us; re-find the position and retry */
+ while ( (retval = fix_nodes(M_PASTE, &s_paste_balance, NULL, p_c_body)) == REPEAT_SEARCH ) {
+ /* file system changed while we were in the fix_nodes */
+ retval = search_for_position_by_key (th->t_super, p_s_key, p_s_search_path);
+ if (retval == IO_ERROR)
+ /* NOTE(review): these early returns do not call unfix_nodes()
+ on s_paste_balance -- presumably fix_nodes releases its
+ resources before returning REPEAT_SEARCH; confirm */
+ return -EIO;
+ if (retval == POSITION_FOUND) {
+ reiserfs_warning ("PAP-5710: reiserfs_paste_into_item: entry or pasted byte (%K) exists", p_s_key);
+ pathrelse (p_s_search_path);
+ return -EEXIST;
+ }
+
+#ifdef CONFIG_REISERFS_CHECK
+ check_research_for_paste (p_s_search_path, p_s_key);
+#endif
+ }
+
+ /* Perform balancing after all resources are collected by fix_nodes, and
+ accessing them will not risk triggering schedule. */
+ if ( retval == CARRY_ON ) {
+ do_balance(&s_paste_balance, NULL/*ih*/, p_c_body, M_PASTE);
+ return 0;
+ }
+
+ /* fix_nodes failed (NO_DISK_SPACE or error): release what it pinned */
+ unfix_nodes(&s_paste_balance);
+ return (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
+}
+
+
+/* Insert a new item (header p_s_ih + body p_c_body) into the tree at
+ the position given by p_s_path. Returns 0 on success, -EEXIST if the
+ key already exists after a re-search, -ENOSPC if fix_nodes found no
+ disk space, -EIO on I/O or internal error. */
+int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
+ struct path * p_s_path, /* Path to the inserted item. */
+ struct cpu_key * key,
+ struct item_head * p_s_ih, /* Pointer to the item header to insert.*/
+ const char * p_c_body) /* Pointer to the bytes to insert. */
+{
+ struct tree_balance s_ins_balance;
+ int retval;
+
+ /* space needed in the leaf: item header plus item body */
+ init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path, IH_SIZE + p_s_ih->ih_item_len);
+
+ /*
+ if (p_c_body == 0)
+ n_zeros_num = p_s_ih->ih_item_len;
+ */
+ // le_key2cpu_key (&key, &(p_s_ih->ih_key));
+
+ /* retry while fix_nodes reports the tree changed under us */
+ while ( (retval = fix_nodes(M_INSERT, &s_ins_balance, p_s_ih, p_c_body)) == REPEAT_SEARCH) {
+ /* file system changed while we were in the fix_nodes */
+ retval = search_item (th->t_super, key, p_s_path);
+ if (retval == IO_ERROR)
+ /* NOTE(review): no unfix_nodes() on this early return --
+ presumably fix_nodes released its resources before
+ returning REPEAT_SEARCH; confirm */
+ return -EIO;
+
+ if (retval == ITEM_FOUND) {
+ reiserfs_warning ("PAP-5760: reiserfs_insert_item: "
+ "key %K already exists in the tree\n", key);
+ pathrelse (p_s_path);
+ return -EEXIST;
+ }
+ }
+
+ /* make balancing after all resources will be collected at a time */
+ if ( retval == CARRY_ON ) {
+ do_balance (&s_ins_balance, p_s_ih, p_c_body, M_INSERT);
+ return 0;
+ }
+
+ /* fix_nodes failed: release pinned resources and map the error */
+ unfix_nodes(&s_ins_balance);
+ return (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
+}
+
+
+
+
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
new file mode 100644
index 000000000..9838a1dcd
--- /dev/null
+++ b/fs/reiserfs/super.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/locks.h>
+#include <linux/init.h>
+
+#else
+
+#include "nokernel.h"
+#include <stdlib.h> // for simple_strtoul
+
+#endif
+
+#define SUPPORT_OLD_FORMAT
+
+#define REISERFS_OLD_BLOCKSIZE 4096
+#define REISERFS_SUPER_MAGIC_STRING_OFFSET_NJ 20
+
+
+#if 0
+// this one is not used currently -- compiled out; kept only as a thin
+// wrapper around mark_buffer_dirty should a reiserfs-specific hook be
+// needed again
+inline void reiserfs_mark_buffer_dirty (struct buffer_head * bh, int flag)
+{
+ mark_buffer_dirty (bh, flag);
+}
+#endif
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. Its subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS write_super hook: flush old journal commits on read-write
+** mounts. s_dirt stays set only if flush_old_commits says more work
+** remains; read-only mounts are marked clean unconditionally. */
+void reiserfs_write_super (struct super_block * s)
+{
+    int still_dirty = 0;
+
+    lock_kernel();
+    if (!(s->s_flags & MS_RDONLY))
+        still_dirty = flush_old_commits(s, 1);
+    s->s_dirt = still_dirty;
+    unlock_kernel();
+}
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS write_super_lockfs hook: push the super block through the
+** journal and block further writes (reiserfs_unlockfs re-enables
+** them). Read-only mounts only get marked clean. */
+void reiserfs_write_super_lockfs (struct super_block * s)
+{
+    struct reiserfs_transaction_handle th ;
+
+    lock_kernel() ;
+    if (!(s->s_flags & MS_RDONLY)) {
+        journal_begin(&th, s, 1) ;
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+        reiserfs_block_writes(&th) ;
+        journal_end(&th, s, 1) ;
+    }
+    /* the old unused `dirty' local was always 0 -- removed */
+    s->s_dirt = 0;
+    unlock_kernel() ;
+}
+
+/* VFS unlockfs hook: re-allow writes that reiserfs_write_super_lockfs
+   blocked via reiserfs_block_writes(). */
+void reiserfs_unlockfs(struct super_block *s) {
+ reiserfs_allow_writes(s) ;
+}
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* there should be no suspected recipients already. True and cautious
+ bitmaps should not differ. We only have to free preserve list and
+ write both bitmaps */
+/* VFS put_super hook: on read-write mounts, write the saved umount
+** state back into the on-disk super block through the journal, then
+** release the journal, all bitmap buffers, the super block buffer,
+** and warn about any reiserfs_kmalloc memory still outstanding. */
+void reiserfs_put_super (struct super_block * s)
+{
+    int i;
+    struct reiserfs_transaction_handle th ;
+
+    /* change file system state to current state if it was mounted with read-write permissions */
+    if (!(s->s_flags & MS_RDONLY)) {
+        journal_begin(&th, s, 10) ;
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+        /* BUGFIX: storing a CPU value into the little-endian on-disk
+        ** super block must use cpu_to_le16; the old le16_to_cpu is
+        ** the same byte swap but semantically reversed (cf. the
+        ** matching store in reiserfs_remount) */
+        s->u.reiserfs_sb.s_rs->s_state = cpu_to_le16 (s->u.reiserfs_sb.s_mount_state);
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+    }
+
+    /* note, journal_release checks for readonly mount, and can decide not
+    ** to do a journal_end
+    */
+    journal_release(&th, s) ;
+
+    for (i = 0; i < SB_BMAP_NR (s); i ++)
+        brelse (SB_AP_BITMAP (s)[i]);
+
+    reiserfs_kfree (SB_AP_BITMAP (s), sizeof (struct buffer_head *) * SB_BMAP_NR (s), s);
+
+    brelse (SB_BUFFER_WITH_SB (s));
+
+    print_statistics (s);
+
+    if (s->u.reiserfs_sb.s_kmallocs != 0) {
+        /* BUGFIX: message typo "aloocated" corrected */
+        reiserfs_warning ("vs-2004: reiserfs_put_super: allocated memory left %d\n",
+                          s->u.reiserfs_sb.s_kmallocs);
+    }
+}
+
+/* reiserfs VFS super-block operation table (GNU C labeled-field
+   initializer syntax, as used throughout 2.4). Fields left out are
+   simply not provided by reiserfs. */
+struct super_operations reiserfs_sops =
+{
+ read_inode: reiserfs_read_inode,
+ read_inode2: reiserfs_read_inode2,
+ write_inode: reiserfs_write_inode,
+ dirty_inode: reiserfs_dirty_inode,
+ delete_inode: reiserfs_delete_inode,
+ put_super: reiserfs_put_super,
+ write_super: reiserfs_write_super,
+ write_super_lockfs: reiserfs_write_super_lockfs,
+ unlockfs: reiserfs_unlockfs,
+ statfs: reiserfs_statfs,
+ remount_fs: reiserfs_remount,
+
+};
+
+/* this was (ext2)parse_options */
+/* Parse the comma-separated mount option string. Recognized flags are
+   set as bits in *mount_options; a "resize=<n>" value is returned in
+   *blocks (0 if absent). Returns 1 on success (including a NULL
+   options string), 0 on any unrecognized or malformed option.
+   NOTE(review): uses strtok, which keeps global state and mutates the
+   input string -- presumably safe in this single-threaded mount path
+   under the big kernel lock; confirm before reuse elsewhere. */
+static int parse_options (char * options, unsigned long * mount_options, unsigned long * blocks)
+{
+ char * this_char;
+ char * value;
+
+ *blocks = 0;
+ if (!options)
+ /* use default configuration: create tails, journaling on, no
+ conversion to newest format */
+ return 1;
+ for (this_char = strtok (options, ","); this_char != NULL; this_char = strtok (NULL, ",")) {
+ /* split "name=value" in place; value == NULL for bare flags */
+ if ((value = strchr (this_char, '=')) != NULL)
+ *value++ = 0;
+ if (!strcmp (this_char, "notail")) {
+ set_bit (NOTAIL, mount_options);
+ } else if (!strcmp (this_char, "conv")) {
+ // if this is set, we update super block such that
+ // the partition will not be mountable by 3.5.x anymore
+ set_bit (REISERFS_CONVERT, mount_options);
+ } else if (!strcmp (this_char, "noborder")) {
+ /* this is used for benchmarking
+ experimental variations, it is not
+ intended for users to use, only for
+ developers who want to casually
+ hack in something to test */
+ set_bit (REISERFS_NO_BORDER, mount_options);
+ } else if (!strcmp (this_char, "no_unhashed_relocation")) {
+ set_bit (REISERFS_NO_UNHASHED_RELOCATION, mount_options);
+ } else if (!strcmp (this_char, "hashed_relocation")) {
+ set_bit (REISERFS_HASHED_RELOCATION, mount_options);
+ } else if (!strcmp (this_char, "test4")) {
+ set_bit (REISERFS_TEST4, mount_options);
+ } else if (!strcmp (this_char, "nolog")) {
+ /* accepted but ignored: warn and continue */
+ reiserfs_warning("reiserfs: nolog mount option not supported yet\n");
+ } else if (!strcmp (this_char, "replayonly")) {
+ set_bit (REPLAYONLY, mount_options);
+ } else if (!strcmp (this_char, "resize")) {
+ if (value && *value){
+ *blocks = simple_strtoul (value, &value, 0);
+ } else {
+ printk("reiserfs: resize option requires a value\n");
+ return 0;
+ }
+ } else if (!strcmp (this_char, "hash")) {
+ if (value && *value) {
+ /* if they specify any hash option, we force detection
+ ** to make sure they aren't using the wrong hash
+ */
+ if (!strcmp(value, "rupasov")) {
+ set_bit (FORCE_RUPASOV_HASH, mount_options);
+ set_bit (FORCE_HASH_DETECT, mount_options);
+ } else if (!strcmp(value, "tea")) {
+ set_bit (FORCE_TEA_HASH, mount_options);
+ set_bit (FORCE_HASH_DETECT, mount_options);
+ } else if (!strcmp(value, "r5")) {
+ set_bit (FORCE_R5_HASH, mount_options);
+ set_bit (FORCE_HASH_DETECT, mount_options);
+ } else if (!strcmp(value, "detect")) {
+ set_bit (FORCE_HASH_DETECT, mount_options);
+ } else {
+ printk("reiserfs: invalid hash function specified\n") ;
+ return 0 ;
+ }
+ } else {
+ printk("reiserfs: hash option requires a value\n");
+ return 0 ;
+ }
+ } else {
+ printk ("reiserfs: Unrecognized mount option %s\n", this_char);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+/* True iff this super block belongs to a mounted reiserfs: the device
+** must be set and the op table must be ours. */
+int reiserfs_is_super(struct super_block *s) {
+    if (s->s_dev == 0)
+        return 0;
+    return s->s_op == &reiserfs_sops;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS remount hook: re-parses options (handling online "resize="),
+** then performs rw->ro or ro->rw transitions, journaling the on-disk
+** umount-state update either way. Returns 0 on success or the
+** reiserfs_resize error code. */
+int reiserfs_remount (struct super_block * s, int * flags, char * data)
+{
+    struct reiserfs_super_block * rs;
+    struct reiserfs_transaction_handle th ;
+    unsigned long blocks;
+    /* BUGFIX: was uninitialized -- parse_options only ORs bits into it,
+    ** so stale stack bits leaked through */
+    unsigned long mount_options = 0;
+
+    rs = SB_DISK_SUPER_BLOCK (s);
+
+    if (!parse_options(data, &mount_options, &blocks))
+        return 0;
+
+    if (blocks) {
+        int rc = reiserfs_resize(s, blocks);
+        if (rc != 0)
+            return rc;
+    }
+
+    if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
+        /* there is nothing to do to remount read-only fs as read-only fs */
+        return 0;
+    }
+
+    if (*flags & MS_RDONLY) {
+        /* try to remount file system with read-only permissions */
+        if (le16_to_cpu (rs->s_state) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
+            return 0;
+        }
+
+        journal_begin(&th, s, 10) ;
+        /* Mounting a rw partition read-only. */
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+        rs->s_state = cpu_to_le16 (s->u.reiserfs_sb.s_mount_state);
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+        s->s_dirt = 0;
+    } else {
+        s->u.reiserfs_sb.s_mount_state = le16_to_cpu(rs->s_state) ;
+        s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
+        journal_begin(&th, s, 10) ;
+
+        /* Mount a partition which is read-only, read-write */
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+        s->u.reiserfs_sb.s_mount_state = le16_to_cpu (rs->s_state);
+        s->s_flags &= ~MS_RDONLY;
+        /* on disk the fs is marked "in error" until cleanly unmounted */
+        rs->s_state = cpu_to_le16 (REISERFS_ERROR_FS);
+        /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+        s->s_dirt = 0;
+        s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
+    }
+    /* this will force a full flush of all journal lists */
+    SB_JOURNAL(s)->j_must_wait = 1 ;
+    journal_end(&th, s, 10) ;
+    return 0;
+}
+
+
+/* Load the block-allocation bitmaps for the distributed-bitmap layout:
+** bitmap 0 sits right after the super block, bitmap i at block
+** i * (blocksize * 8). Returns 0 on success, 1 on failure; partially
+** read buffers are left for the caller's error path to release. */
+static int read_bitmaps (struct super_block * s)
+{
+    int i, bmp, dl ;
+    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK(s);
+
+    SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_BUFFER, s);
+    if (SB_AP_BITMAP (s) == 0)
+        return 1;
+    memset (SB_AP_BITMAP (s), 0, sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr));
+
+    /* reiserfs leaves the first 64k unused so that any partition
+       labeling scheme currently used will have enough space. Then we
+       need one block for the super. -Hans */
+    bmp = (REISERFS_DISK_OFFSET_IN_BYTES / s->s_blocksize) + 1; /* first of bitmap blocks */
+    SB_AP_BITMAP (s)[0] = reiserfs_bread (s->s_dev, bmp, s->s_blocksize);
+    if(!SB_AP_BITMAP(s)[0])
+        return 1;
+
+    /* BUGFIX: the stride was computed from the raw little-endian
+    ** rs->s_blocksize; use the CPU-order s->s_blocksize (identical on
+    ** little endian, correct on big endian). One bitmap block maps
+    ** blocksize * 8 blocks. */
+    dl = s->s_blocksize * 8;
+    for (i = 1, bmp = dl; i < le16_to_cpu (rs->s_bmap_nr); i ++) {
+        SB_AP_BITMAP (s)[i] = reiserfs_bread (s->s_dev, bmp, s->s_blocksize);
+        if (!SB_AP_BITMAP (s)[i])
+            return 1;
+        bmp += dl;
+    }
+
+    return 0;
+}
+
+/* Load the bitmaps of the old (3.5.x) layout, where all bitmap blocks
+** sit contiguously right after the old-location super block.
+** Returns 0 on success, 1 on failure. */
+static int read_old_bitmaps (struct super_block * s)
+{
+    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK(s);
+    int nr = le16_to_cpu (rs->s_bmap_nr);
+    int first = (REISERFS_OLD_DISK_OFFSET_IN_BYTES / s->s_blocksize) + 1; /* first of bitmap blocks */
+    int i;
+
+    /* read true bitmap */
+    SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * nr, GFP_BUFFER, s);
+    if (SB_AP_BITMAP (s) == 0)
+        return 1;
+
+    memset (SB_AP_BITMAP (s), 0, sizeof (struct buffer_head *) * nr);
+
+    for (i = 0; i < nr; i++) {
+        SB_AP_BITMAP (s)[i] = reiserfs_bread (s->s_dev, first + i, s->s_blocksize);
+        if (!SB_AP_BITMAP (s)[i])
+            return 1;
+    }
+
+    return 0;
+}
+
+/* Debug helper: count the free bits in all loaded bitmaps and compare
+** against the super block's free-block counter, warning on mismatch. */
+void check_bitmap (struct super_block * s)
+{
+    int bits_per_block = s->s_blocksize * 8;
+    int counted_free = 0;
+    int blk;
+    char * map;
+
+    for (blk = 0; blk < SB_BLOCK_COUNT (s); blk++) {
+        map = SB_AP_BITMAP (s)[blk / bits_per_block]->b_data;
+        if (!reiserfs_test_le_bit (blk % bits_per_block, map))
+            counted_free++;
+    }
+
+    if (counted_free != SB_FREE_BLOCKS (s))
+        reiserfs_warning ("vs-4000: check_bitmap: %d free blocks, must be %d\n",
+                          counted_free, SB_FREE_BLOCKS (s));
+}
+
+#ifdef SUPPORT_OLD_FORMAT
+
+/* support old disk layout: the 3.5.x super block lives in the 8-th 1k
+** block and the blocksize is always 4k. On success fills
+** SB_BUFFER_WITH_SB / SB_DISK_SUPER_BLOCK, sets s_op and the block
+** size, and returns 0; returns 1 on any failure. */
+static int read_old_super_block (struct super_block * s, int size)
+{
+    struct buffer_head * bh;
+    struct reiserfs_super_block * rs;
+
+    printk("read_old_super_block: try to find super block in old location\n");
+    /* there are only 4k-sized blocks in v3.5.10 */
+    if (size != REISERFS_OLD_BLOCKSIZE)
+        set_blocksize(s->s_dev, REISERFS_OLD_BLOCKSIZE);
+    bh = bread (s->s_dev,
+                REISERFS_OLD_DISK_OFFSET_IN_BYTES / REISERFS_OLD_BLOCKSIZE,
+                REISERFS_OLD_BLOCKSIZE);
+    if (!bh) {
+        printk("read_old_super_block: unable to read superblock on dev %s\n", kdevname(s->s_dev));
+        return 1;
+    }
+
+    rs = (struct reiserfs_super_block *)bh->b_data;
+    if (strncmp (rs->s_magic, REISERFS_SUPER_MAGIC_STRING, strlen ( REISERFS_SUPER_MAGIC_STRING))) {
+        /* pre-journaling version check */
+        if(!strncmp((char*)rs + REISERFS_SUPER_MAGIC_STRING_OFFSET_NJ,
+                    REISERFS_SUPER_MAGIC_STRING, strlen(REISERFS_SUPER_MAGIC_STRING))) {
+            /* BUGFIX: message misspelled the function name as
+            ** "read_old_super_blockr" */
+            printk("read_old_super_block: a pre-journaling reiserfs filesystem isn't suitable there.\n");
+            brelse(bh);
+            return 1;
+        }
+
+        brelse (bh);
+        printk ("read_old_super_block: can't find a reiserfs filesystem on dev %s.\n", kdevname(s->s_dev));
+        return 1;
+    }
+
+    if(REISERFS_OLD_BLOCKSIZE != le16_to_cpu (rs->s_blocksize)) {
+        printk("read_old_super_block: blocksize mismatch, super block corrupted\n");
+        brelse(bh);
+        return 1;
+    }
+
+    s->s_blocksize = REISERFS_OLD_BLOCKSIZE;
+    s->s_blocksize_bits = 0;
+    while ((1 << s->s_blocksize_bits) != s->s_blocksize)
+        s->s_blocksize_bits ++;
+
+    SB_BUFFER_WITH_SB (s) = bh;
+    SB_DISK_SUPER_BLOCK (s) = rs;
+    s->s_op = &reiserfs_sops;
+    return 0;
+}
+#endif
+
+//
+// FIXME: when mounting old filesystems we _must_ change the magic string to
+// make them unmountable by reiserfs 3.5.x
+//
+/* Locate and load the (new-layout) super block at the fixed 64k disk
+   offset. Reads it once at the caller's block size to learn the fs
+   block size, switches the device to that size, then re-reads it and
+   validates magic, block size and journal placement. On success fills
+   SB_BUFFER_WITH_SB / SB_DISK_SUPER_BLOCK and sets s_op; returns 0.
+   Returns 1 on any failure. */
+static int read_super_block (struct super_block * s, int size)
+{
+ struct buffer_head * bh;
+ struct reiserfs_super_block * rs;
+
+ /* first pass: read with the current device block size */
+ bh = bread (s->s_dev, REISERFS_DISK_OFFSET_IN_BYTES / size, size);
+ if (!bh) {
+ printk("read_super_block: unable to read superblock on dev %s\n", kdevname(s->s_dev));
+ return 1;
+ }
+
+ rs = (struct reiserfs_super_block *)bh->b_data;
+ if (!is_reiserfs_magic_string (rs)) {
+ printk ("read_super_block: can't find a reiserfs filesystem on dev %s\n",
+ kdevname(s->s_dev));
+ brelse (bh);
+ return 1;
+ }
+
+ //
+ // ok, reiserfs signature (old or new) found in 64-th 1k block of
+ // the device
+ //
+
+#ifndef SUPPORT_OLD_FORMAT
+ // with SUPPORT_OLD_FORMAT undefined - detect old format by
+ // checking super block version
+ if (le16_to_cpu (rs->s_version) != REISERFS_VERSION_2) {
+ brelse (bh);
+ printk ("read_super_block: unsupported version (%d) of reiserfs found on dev %s\n",
+ le16_to_cpu (rs->s_version), kdevname(s->s_dev));
+ return 1;
+ }
+#endif
+
+ /* adopt the block size recorded on disk (power of two) */
+ s->s_blocksize = le16_to_cpu (rs->s_blocksize);
+ s->s_blocksize_bits = 0;
+ while ((1 << s->s_blocksize_bits) != s->s_blocksize)
+ s->s_blocksize_bits ++;
+
+ brelse (bh);
+
+ /* second pass: re-read the super at its native block size */
+ if (s->s_blocksize != size)
+ set_blocksize (s->s_dev, s->s_blocksize);
+ bh = reiserfs_bread (s->s_dev, REISERFS_DISK_OFFSET_IN_BYTES / s->s_blocksize, s->s_blocksize);
+ if (!bh) {
+ printk("read_super_block: unable to read superblock on dev %s\n", kdevname(s->s_dev));
+ return 1;
+ }
+
+ rs = (struct reiserfs_super_block *)bh->b_data;
+ if (!is_reiserfs_magic_string (rs) ||
+ le16_to_cpu (rs->s_blocksize) != s->s_blocksize) {
+ brelse (bh);
+ printk ("read_super_block: can't find a reiserfs filesystem on dev %s.\n", kdevname(s->s_dev));
+ return 1;
+ }
+ /* must check to be sure we haven't pulled an old format super out
+ ** of the old format's log. This is a kludge of a check, but it
+ ** will work. If block we've just read in is inside the
+ ** journal for that super, it can't be valid.
+ */
+ if (bh->b_blocknr >= le32_to_cpu(rs->s_journal_block) &&
+ bh->b_blocknr < (le32_to_cpu(rs->s_journal_block) + JOURNAL_BLOCK_COUNT)) {
+ brelse(bh) ;
+ printk("super-459: read_super_block: super found at block %lu is within its own log. "
+ "It must not be of this format type.\n", bh->b_blocknr) ;
+ return 1 ;
+ }
+ SB_BUFFER_WITH_SB (s) = bh;
+ SB_DISK_SUPER_BLOCK (s) = rs;
+ s->s_op = &reiserfs_sops;
+ return 0;
+}
+
+/* after journal replay, reread all bitmap and super blocks */
+/* after journal replay, reread all bitmap and super blocks so the
+** in-core buffers match the replayed on-disk state.
+** Returns 0 on success, 1 if any buffer fails to come up to date. */
+static int reread_meta_blocks(struct super_block *s) {
+    int i;
+
+    ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+    wait_on_buffer(SB_BUFFER_WITH_SB(s));
+    if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+        printk("reread_meta_blocks, error reading the super\n") ;
+        return 1 ;
+    }
+
+    for (i = 0; i < SB_BMAP_NR(s); i++) {
+        ll_rw_block(READ, 1, &(SB_AP_BITMAP(s)[i]));
+        wait_on_buffer(SB_AP_BITMAP(s)[i]);
+        if (!buffer_uptodate(SB_AP_BITMAP(s)[i])) {
+            printk("reread_meta_blocks, error reading bitmap block number %d at %ld\n", i, SB_AP_BITMAP(s)[i]->b_blocknr) ;
+            return 1 ;
+        }
+    }
+
+    return 0 ;
+}
+
+
+/////////////////////////////////////////////////////
+// hash detection stuff
+
+
+// if root directory is empty - we set default - Yura's - hash and
+// warn about it
+// FIXME: we look at only one name in the directory. If tea and yura
+// both produce the same value for it, we ask the user to send a report
+// to the mailing list
+/* Detect which hash function sorted the names on disk by taking the
+   last directory entry of the root directory and comparing its stored
+   offset against the candidate hashes of its name. Returns YURA_HASH,
+   TEA_HASH, DEFAULT_HASH (empty fs), or UNSET_HASH when detection is
+   impossible (I/O error or ambiguous name). */
+__u32 find_hash_out (struct super_block * s)
+{
+ int retval;
+ struct inode * inode;
+ struct cpu_key key;
+ INITIALIZE_PATH (path);
+ struct reiserfs_dir_entry de;
+ __u32 hash = DEFAULT_HASH;
+
+ inode = s->s_root->d_inode;
+
+ /* single-pass loop: every branch ends in break or return */
+ while (1) {
+ /* key with maximal offset: lands at/after the last entry */
+ make_cpu_key (&key, inode, ~0, TYPE_DIRENTRY, 3);
+ retval = search_by_entry_key (s, &key, &path, &de);
+ if (retval == IO_ERROR) {
+ pathrelse (&path);
+ return UNSET_HASH ;
+ }
+ /* NAME_NOT_FOUND: step back to the last really existing entry */
+ if (retval == NAME_NOT_FOUND)
+ de.de_entry_num --;
+ set_de_name_and_namelen (&de);
+ if (le32_to_cpu (de.de_deh[de.de_entry_num].deh_offset) == DOT_DOT_OFFSET) {
+ /* only "." and ".." present -- nothing to detect from */
+ /* allow override in this case */
+ if (reiserfs_rupasov_hash(s)) {
+ hash = YURA_HASH ;
+ }
+ reiserfs_warning("reiserfs: FS seems to be empty, autodetect "
+ "is using the default hash\n");
+ break;
+ }
+ /* both hashes agree on this one name: ambiguous, give up */
+ if (GET_HASH_VALUE(yura_hash (de.de_name, de.de_namelen)) ==
+ GET_HASH_VALUE(keyed_hash (de.de_name, de.de_namelen))) {
+ reiserfs_warning ("reiserfs: Could not detect hash function "
+ "please mount with -o hash={tea,rupasov,r5}\n") ;
+ hash = UNSET_HASH ;
+ break;
+ }
+ /* stored offset was generated by whichever hash reproduces it */
+ if (GET_HASH_VALUE(le32_to_cpu(de.de_deh[de.de_entry_num].deh_offset))==
+ GET_HASH_VALUE (yura_hash (de.de_name, de.de_namelen)))
+ hash = YURA_HASH;
+ else
+ hash = TEA_HASH;
+ break;
+ }
+
+ pathrelse (&path);
+ return hash;
+}
+
+// finds out which hash names are sorted with
+/* Resolve the hash code for this mount: start from the code stored in
+   the super block, run detection when it is unset or a hash mount
+   option forces it, cross-check detection against the forced option,
+   and (on rw mounts) write a newly determined code back into the
+   in-core super block. Returns a hash code or UNSET_HASH on conflict. */
+static int what_hash (struct super_block * s)
+{
+ __u32 code;
+
+ code = le32_to_cpu (s->u.reiserfs_sb.s_rs->s_hash_function_code);
+
+ /* reiserfs_hash_detect() == true if any of the hash mount options
+ ** were used. We must check them to make sure the user isn't
+ ** using a bad hash value
+ */
+ if (code == UNSET_HASH || reiserfs_hash_detect(s))
+ code = find_hash_out (s);
+
+ if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
+ /* detection has found the hash, and we must check against the
+ ** mount options
+ */
+ /* NOTE(review): these messages name a specific detected hash
+ ** ("tea"/"rupasov") although `code' could also be any other
+ ** non-matching value -- the wording can mislead; verify */
+ if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
+ printk("REISERFS: Error, tea hash detected, "
+ "unable to force rupasov hash\n") ;
+ code = UNSET_HASH ;
+ } else if (reiserfs_tea_hash(s) && code != TEA_HASH) {
+ printk("REISERFS: Error, rupasov hash detected, "
+ "unable to force tea hash\n") ;
+ code = UNSET_HASH ;
+ } else if (reiserfs_r5_hash(s) && code != R5_HASH) {
+ printk("REISERFS: Error, r5 hash detected, "
+ "unable to force r5 hash\n") ;
+ code = UNSET_HASH ;
+ }
+ } else {
+ /* find_hash_out was not called or could not determine the hash */
+ if (reiserfs_rupasov_hash(s)) {
+ code = YURA_HASH ;
+ } else if (reiserfs_tea_hash(s)) {
+ code = TEA_HASH ;
+ } else if (reiserfs_r5_hash(s)) {
+ code = R5_HASH ;
+ }
+ }
+
+ /* if we are mounted RW, and we have a new valid hash code, update
+ ** the super
+ */
+ if (code != UNSET_HASH &&
+ !(s->s_flags & MS_RDONLY) &&
+ code != le32_to_cpu (s->u.reiserfs_sb.s_rs->s_hash_function_code)) {
+ s->u.reiserfs_sb.s_rs->s_hash_function_code = cpu_to_le32(code) ;
+ }
+ return code;
+}
+
+/* Map the detected hash code to the corresponding hash routine,
+** announcing the choice. Returns NULL when the hash is unset (the
+** caller aborts the mount in that case). */
+static hashf_t hash_function (struct super_block * s)
+{
+    hashf_t func = NULL;
+
+    switch (what_hash (s)) {
+    case TEA_HASH:
+        reiserfs_warning ("Using tea hash to sort names\n");
+        func = keyed_hash;
+        break;
+    case YURA_HASH:
+        reiserfs_warning ("Using rupasov hash to sort names\n");
+        func = yura_hash;
+        break;
+    case R5_HASH:
+        reiserfs_warning ("Using r5 hash to sort names\n");
+        func = r5_hash;
+        break;
+    }
+    return func;
+}
+
+/* Inverse of hash_function: map a hash routine back to its on-disk
+** code (used to stamp the code into old-format super blocks).
+** BUG()s on an unknown pointer. */
+int function2code (hashf_t func)
+{
+    int code = 0;
+
+    if (func == keyed_hash)
+        code = TEA_HASH;
+    else if (func == yura_hash)
+        code = YURA_HASH;
+    else if (func == r5_hash)
+        code = R5_HASH;
+    else
+        BUG() ; // should never happen
+
+    return code;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* Mount-time entry point: read the on-disk super block (new or old
+** layout), load the block bitmaps, initialize/replay the journal,
+** read the root inode, detect the directory hash, and on rw mounts
+** mark the fs in-use (converting 3.5.x volumes when requested).
+** Returns s on success; NULL on failure with all resources released. */
+struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent)
+{
+    int size;
+    struct inode *root_inode;
+    kdev_t dev = s->s_dev;
+    int j;
+    extern int *blksize_size[];
+    struct reiserfs_transaction_handle th ;
+    int old_format = 0;
+    unsigned long blocks;
+    int jinit_done = 0 ;
+    struct reiserfs_iget4_args args ;
+
+    memset (&s->u.reiserfs_sb, 0, sizeof (struct reiserfs_sb_info));
+
+    if (parse_options ((char *) data, &(s->u.reiserfs_sb.s_mount_opt), &blocks) == 0) {
+        return NULL;
+    }
+
+    if (blocks) {
+        /* BUGFIX: message typo "reserfs" corrected */
+        printk("reiserfs: resize option for remount only\n");
+        return NULL;
+    }
+
+    if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)] != 0) {
+        /* as blocksize is set for partition we use it */
+        size = blksize_size[MAJOR(dev)][MINOR(dev)];
+    } else {
+        size = BLOCK_SIZE;
+        set_blocksize (s->s_dev, BLOCK_SIZE);
+    }
+
+    /* read block (64-th 1k block), which can contain reiserfs super block */
+    if (read_super_block (s, size)) {
+#ifdef SUPPORT_OLD_FORMAT
+        /* try old format (undistributed bitmap, super block in 8-th 1k
+        ** block of a device).
+        ** BUGFIX: the old code fell through to `goto error' even when
+        ** the old-format super block was read successfully, so old
+        ** volumes could never be mounted. */
+        if (read_old_super_block(s, size))
+            goto error;
+        old_format = 1;
+#else
+        goto error ;
+#endif
+    }
+
+    s->u.reiserfs_sb.s_mount_state = le16_to_cpu (SB_DISK_SUPER_BLOCK (s)->s_state); /* journal victim */
+    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
+
+    if (old_format ? read_old_bitmaps(s) : read_bitmaps(s)) {
+        printk ("reiserfs_read_super: unable to read bitmap\n");
+        goto error;
+    }
+#ifdef CONFIG_REISERFS_CHECK
+    printk("reiserfs:warning: CONFIG_REISERFS_CHECK is set ON\n");
+    printk("reiserfs:warning: - it is slow mode for debugging.\n");
+#endif
+
+    // set_device_ro(s->s_dev, 1) ;
+    if (journal_init(s)) {
+        printk("reiserfs_read_super: unable to initialize journal space\n") ;
+        goto error ;
+    } else {
+        jinit_done = 1 ; /* once this is set, journal_release must be called
+                         ** if we error out of the mount
+                         */
+    }
+    if (reread_meta_blocks(s)) {
+        printk("reiserfs_read_super: unable to reread meta blocks after journal init\n") ;
+        goto error ;
+    }
+
+    /* "replayonly" mount: journal was replayed above, stop here */
+    if (replay_only (s))
+        goto error;
+
+    if (is_read_only(s->s_dev) && !(s->s_flags & MS_RDONLY)) {
+        printk("clm-7000: Detected readonly device, marking FS readonly\n") ;
+        s->s_flags |= MS_RDONLY ;
+    }
+    args.objectid = REISERFS_ROOT_PARENT_OBJECTID ;
+    root_inode = iget4 (s, REISERFS_ROOT_OBJECTID, 0, (void *)(&args));
+    if (!root_inode) {
+        printk ("reiserfs_read_super: get root inode failed\n");
+        goto error;
+    }
+
+    s->s_root = d_alloc_root(root_inode);
+    if (!s->s_root) {
+        iput(root_inode);
+        goto error;
+    }
+
+    // define and initialize hash function
+    s->u.reiserfs_sb.s_hash_function = hash_function (s);
+    if (s->u.reiserfs_sb.s_hash_function == NULL) {
+        dput(s->s_root) ;
+        s->s_root = NULL ;
+        goto error ;
+    }
+
+    if (!(s->s_flags & MS_RDONLY)) {
+        struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+
+        journal_begin(&th, s, 1) ;
+        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
+
+        /* on disk the fs stays "in error" until a clean unmount */
+        rs->s_state = cpu_to_le16 (REISERFS_ERROR_FS);
+
+        if (strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
+                     strlen ( REISER2FS_SUPER_MAGIC_STRING))) {
+            if (le16_to_cpu(rs->s_version) != 0)
+                BUG ();
+            // filesystem created under 3.5.x found
+            if (!old_format_only (s)) {
+                reiserfs_warning("reiserfs: converting 3.5.x filesystem to the new format\n") ;
+                // after this 3.5.x will not be able to mount this partition
+                memcpy (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
+                        sizeof (REISER2FS_SUPER_MAGIC_STRING));
+
+                reiserfs_convert_objectid_map_v1(s) ;
+            } else {
+                reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
+            }
+        } else {
+            // new format found
+            set_bit (REISERFS_CONVERT, &(s->u.reiserfs_sb.s_mount_opt));
+        }
+
+        // mark hash in super block: it could be unset. overwrite should be ok
+        rs->s_hash_function_code = cpu_to_le32 (function2code (s->u.reiserfs_sb.s_hash_function));
+
+        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+        journal_end(&th, s, 1) ;
+        s->s_dirt = 0;
+    } else {
+        struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+        if (strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
+                     strlen ( REISER2FS_SUPER_MAGIC_STRING))) {
+            reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
+        }
+    }
+
+    init_waitqueue_head (&(s->u.reiserfs_sb.s_wait));
+
+    printk("%s\n", reiserfs_get_version_string()) ;
+    return s;
+
+ error:
+    if (jinit_done) { /* kill the commit thread, free journal ram */
+        journal_release_error(NULL, s) ;
+    }
+    if (SB_DISK_SUPER_BLOCK (s)) {
+        for (j = 0; j < SB_BMAP_NR (s); j ++) {
+            if (SB_AP_BITMAP (s))
+                brelse (SB_AP_BITMAP (s)[j]);
+        }
+        if (SB_AP_BITMAP (s))
+            reiserfs_kfree (SB_AP_BITMAP (s), sizeof (struct buffer_head *) * SB_BMAP_NR (s), s);
+    }
+    if (SB_BUFFER_WITH_SB (s))
+        brelse(SB_BUFFER_WITH_SB (s));
+
+    return NULL;
+}
+
+
+//
+// a portion of this function, particularly the VFS interface portion,
+// was derived from minix or ext2's analog and evolved as the
+// prototype did. You should be able to tell which portion by looking
+// at the ext2 code and comparing. It's subfunctions contain no code
+// used as a template unless they are so labeled.
+//
+/* VFS statfs hook. Block counts exclude the bitmap blocks and the
+** super block itself; inode counts are reported as -1 (reiserfs has
+** no fixed inode table). Always returns 0. */
+int reiserfs_statfs (struct super_block * s, struct statfs * buf)
+{
+    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
+
+    /* changed to accomodate gcc folks.*/
+    buf->f_type = REISERFS_SUPER_MAGIC;
+    /* BUGFIX: s->s_blocksize is an in-core CPU-order field; the old
+    ** le32_to_cpu() here byte-swapped it on big-endian hosts */
+    buf->f_bsize = s->s_blocksize;
+    buf->f_blocks = le32_to_cpu (rs->s_block_count) - le16_to_cpu (rs->s_bmap_nr) - 1;
+    buf->f_bfree = le32_to_cpu (rs->s_free_blocks);
+    buf->f_bavail = buf->f_bfree;
+    buf->f_files = -1;
+    buf->f_ffree = -1;
+    buf->f_namelen = (REISERFS_MAX_NAME_LEN (s->s_blocksize));
+    return 0;
+}
+
+#ifdef __KERNEL__
+
+static DECLARE_FSTYPE_DEV(reiserfs_fs_type,"reiserfs",reiserfs_read_super);
+
+//
+// this is exactly what 2.3.99-pre9's init_ext2_fs is
+//
+/* Module/boot-time init: register the "reiserfs" filesystem type with
+   the VFS; propagates register_filesystem's return value. */
+static int __init init_reiserfs_fs (void)
+{
+ return register_filesystem(&reiserfs_fs_type);
+}
+
+EXPORT_NO_SYMBOLS;
+
+//
+// this is exactly what 2.3.99-pre9's init_ext2_fs is
+//
+/* Module unload: remove the "reiserfs" filesystem type from the VFS. */
+static void __exit exit_reiserfs_fs(void)
+{
+ unregister_filesystem(&reiserfs_fs_type);
+}
+
+module_init(init_reiserfs_fs) ;
+module_exit(exit_reiserfs_fs) ;
+
+#endif
+
+
+
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
new file mode 100644
index 000000000..25da132b6
--- /dev/null
+++ b/fs/reiserfs/tail_conversion.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/locks.h>
+
+#else
+
+#include "nokernel.h"
+
+#endif
+
+
+/* access to tail : when one is going to read tail it must make sure that the conversion is not running.
+   direct2indirect and indirect2direct can not run concurrently */
+
+
+/* Converts direct items to an unformatted node. Panics if file has no
+ tail. -ENOSPC if no disk space for conversion */
+/* path points to first direct item of the file regardless of how many of
+ them are there */
+int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inode,
+ struct path * path, struct buffer_head * unbh,
+ loff_t tail_offset)
+{
+ struct super_block * sb = inode->i_sb;
+ struct buffer_head *up_to_date_bh ;
+ struct item_head * p_le_ih = PATH_PITEM_HEAD (path);
+ struct cpu_key end_key; /* Key to search for the last byte of the
+ converted item. */
+ struct item_head ind_ih; /* new indirect item to be inserted or
+ key of unfm pointer to be pasted */
+ int n_blk_size,
+ n_retval; /* returned value for reiserfs_insert_item and clones */
+ struct unfm_nodeinfo unfm_ptr; /* Handle on an unformatted node
+ that will be inserted in the
+ tree. */
+
+
+ sb->u.reiserfs_sb.s_direct2indirect ++;
+
+ n_blk_size = sb->s_blocksize;
+
+ /* and key to search for append or insert pointer to the new
+ unformatted node. */
+ copy_item_head (&ind_ih, p_le_ih);
+ set_le_ih_k_offset (&ind_ih, tail_offset);
+ set_le_ih_k_type (&ind_ih, TYPE_INDIRECT);
+
+ /* Set the key to search for the place for new unfm pointer */
+ make_cpu_key (&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
+
+ // FIXME: we could avoid this
+ if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND )
+ reiserfs_panic (sb, "PAP-14030: direct2indirect: "
+ "pasted or inserted byte exists in the tree");
+
+ p_le_ih = PATH_PITEM_HEAD (path);
+
+ unfm_ptr.unfm_nodenum = cpu_to_le32 (unbh->b_blocknr);
+ unfm_ptr.unfm_freespace = 0; // ???
+
+ if ( is_statdata_le_ih (p_le_ih) ) {
+ /* Insert new indirect item. */
+	set_ih_free_space (&ind_ih, 0); /* to be deleted in the near future */
+ ind_ih.ih_item_len = cpu_to_le16 (UNFM_P_SIZE);
+ PATH_LAST_POSITION (path)++;
+ n_retval = reiserfs_insert_item (th, path, &end_key, &ind_ih,
+ (char *)&unfm_ptr);
+ } else {
+ /* Paste into last indirect item of an object. */
+ n_retval = reiserfs_paste_into_item(th, path, &end_key,
+ (char *)&unfm_ptr, UNFM_P_SIZE);
+ }
+ if ( n_retval ) {
+ return n_retval;
+ }
+
+ // note: from here there are two keys which have matching first
+ // three key components. They only differ by the fourth one.
+
+
+ /* Set the key to search for the direct items of the file */
+ make_cpu_key (&end_key, inode, max_reiserfs_offset (inode), TYPE_DIRECT, 4);
+
+ /* Move bytes from the direct items to the new unformatted node
+ and delete them. */
+ while (1) {
+ int item_len, first_direct;
+
+ /* end_key.k_offset is set so, that we will always have found
+ last item of the file */
+ if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND )
+ reiserfs_panic (sb, "PAP-14050: direct2indirect: "
+ "direct item (%k) not found", &end_key);
+ p_le_ih = PATH_PITEM_HEAD (path);
+#ifdef CONFIG_REISERFS_CHECK
+ if (!is_direct_le_ih (p_le_ih))
+ reiserfs_panic (sb, "vs-14055: direct2indirect: "
+ "direct item expected, found %h", p_le_ih);
+#endif
+ if ((le_ih_k_offset (p_le_ih) & (n_blk_size - 1)) == 1)
+ first_direct = 1;
+ else
+ first_direct = 0;
+ item_len = le16_to_cpu (p_le_ih->ih_item_len);
+
+ /* we only send the unbh pointer if the buffer is not up to date.
+ ** this avoids overwriting good data from writepage() with old data
+ ** from the disk or buffer cache
+ */
+ if (buffer_uptodate(unbh) || Page_Uptodate(unbh->b_page)) {
+ up_to_date_bh = NULL ;
+ } else {
+ up_to_date_bh = unbh ;
+ }
+ n_retval = reiserfs_delete_item (th, path, &end_key, inode,
+ up_to_date_bh) ;
+
+ if (first_direct && item_len == n_retval)
+ // done: file does not have direct items anymore
+ break;
+
+ }
+
+ inode->u.reiserfs_i.i_first_direct_byte = U32_MAX;
+
+ return 0;
+}
+
+
+/* stolen from fs/buffer.c */
+void reiserfs_unmap_buffer(struct buffer_head *bh) {
+ if (buffer_mapped(bh)) {
+ if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
+ BUG() ;
+ }
+ mark_buffer_clean(bh) ;
+ wait_on_buffer(bh) ;
+ // clear_bit(BH_Uptodate, &bh->b_state) ;
+ clear_bit(BH_Mapped, &bh->b_state) ;
+ clear_bit(BH_Req, &bh->b_state) ;
+ clear_bit(BH_New, &bh->b_state) ;
+ }
+}
+
+static void
+unmap_buffers(struct page *page, loff_t pos) {
+ struct buffer_head *bh ;
+ struct buffer_head *head ;
+ struct buffer_head *next ;
+ unsigned long tail_index ;
+ unsigned long cur_index ;
+
+ if (page) {
+ if (page->buffers) {
+ tail_index = pos & (PAGE_CACHE_SIZE - 1) ;
+ cur_index = 0 ;
+ head = page->buffers ;
+ bh = head ;
+ do {
+ next = bh->b_this_page ;
+
+ /* we want to unmap the buffers that contain the tail, and
+ ** all the buffers after it (since the tail must be at the
+ ** end of the file). We don't want to unmap file data
+ ** before the tail, since it might be dirty and waiting to
+ ** reach disk
+ */
+ cur_index += bh->b_size ;
+ if (cur_index > tail_index) {
+ reiserfs_unmap_buffer(bh) ;
+ }
+ bh = next ;
+ } while (bh != head) ;
+ }
+ }
+}
+
+/* this first locks inode (neither reads nor sync are permitted),
+ reads tail through page cache, insert direct item. When direct item
+ inserted successfully inode is left locked. Return value is always
+ what we expect from it (number of cut bytes). But when tail remains
+ in the unformatted node, we set mode to SKIP_BALANCING and unlock
+ inode */
+int indirect2direct (struct reiserfs_transaction_handle *th,
+ struct inode * p_s_inode,
+ struct page *page,
+ struct path * p_s_path, /* path to the indirect item. */
+ struct cpu_key * p_s_item_key, /* Key to look for unformatted node pointer to be cut. */
+ loff_t n_new_file_size, /* New file size. */
+ char * p_c_mode)
+{
+ struct super_block * p_s_sb = p_s_inode->i_sb;
+ struct item_head s_ih;
+ unsigned long n_block_size = p_s_sb->s_blocksize;
+ char * tail;
+ int tail_len, round_tail_len;
+ loff_t pos, pos1; /* position of first byte of the tail */
+ struct cpu_key key;
+
+ p_s_sb->u.reiserfs_sb.s_indirect2direct ++;
+
+ *p_c_mode = M_SKIP_BALANCING;
+
+ /* store item head path points to. */
+ copy_item_head (&s_ih, PATH_PITEM_HEAD(p_s_path));
+
+ tail_len = (n_new_file_size & (n_block_size - 1));
+ if (!old_format_only (p_s_sb))
+ round_tail_len = ROUND_UP (tail_len);
+ else
+ round_tail_len = tail_len;
+
+ pos = le_ih_k_offset (&s_ih) - 1 + (le16_to_cpu (s_ih.ih_item_len) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
+ pos1 = pos;
+
+    // we are protected by i_sem. The tail can not disappear, nor
+    // can append be done either
+    // we are in truncate or packing tail in file_release
+ // we are in truncate or packing tail in file_release
+
+ tail = (char *)kmap(page) ; /* this can schedule */
+
+ if (path_changed (&s_ih, p_s_path)) {
+ /* re-search indirect item */
+ if ( search_for_position_by_key (p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND )
+ reiserfs_panic(p_s_sb, "PAP-5520: indirect2direct: "
+ "item to be converted %k does not exist", p_s_item_key);
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+#ifdef CONFIG_REISERFS_CHECK
+ pos = le_ih_k_offset (&s_ih) - 1 +
+ (le16_to_cpu (s_ih.ih_item_len) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
+ if (pos != pos1)
+ reiserfs_panic (p_s_sb, "vs-5530: indirect2direct: "
+ "tail position changed while we were reading it");
+#endif
+ }
+
+
+ /* Set direct item header to insert. */
+ make_le_item_head (&s_ih, 0, inode_items_version (p_s_inode), pos1 + 1,
+ TYPE_DIRECT, round_tail_len, 0xffff/*ih_free_space*/);
+
+ /* we want a pointer to the first byte of the tail in the page.
+ ** the page was locked and this part of the page was up to date when
+ ** indirect2direct was called, so we know the bytes are still valid
+ */
+ tail = tail + (pos & (PAGE_CACHE_SIZE - 1)) ;
+
+ PATH_LAST_POSITION(p_s_path)++;
+
+ key = *p_s_item_key;
+ set_cpu_key_k_type (&key, TYPE_DIRECT);
+ key.key_length = 4;
+ /* Insert tail as new direct item in the tree */
+ if ( reiserfs_insert_item(th, p_s_path, &key, &s_ih,
+ tail ? tail : NULL) < 0 ) {
+ /* No disk memory. So we can not convert last unformatted node
+ to the direct item. In this case we used to adjust
+ indirect items's ih_free_space. Now ih_free_space is not
+ used, it would be ideal to write zeros to corresponding
+ unformatted node. For now i_size is considered as guard for
+ going out of file size */
+ kunmap(page) ;
+ return n_block_size - round_tail_len;
+ }
+ kunmap(page) ;
+
+ /* this will invalidate all the buffers in the page after
+ ** pos1
+ */
+ unmap_buffers(page, pos1) ;
+
+ // note: we have now the same as in above direct2indirect
+ // conversion: there are two keys which have matching first three
+    // key components. They only differ by the fourth one.
+
+ /* We have inserted new direct item and must remove last
+ unformatted node. */
+ p_s_inode->i_blocks += (p_s_sb->s_blocksize / 512);
+ *p_c_mode = M_CUT;
+
+ /* we store position of first direct item in the in-core inode */
+ //mark_file_with_tail (p_s_inode, pos1 + 1);
+ p_s_inode->u.reiserfs_i.i_first_direct_byte = pos1 + 1;
+
+ return n_block_size - round_tail_len;
+}
+
+
+
diff --git a/fs/reiserfs/version.c b/fs/reiserfs/version.c
new file mode 100644
index 000000000..361884e80
--- /dev/null
+++ b/fs/reiserfs/version.c
@@ -0,0 +1,7 @@
+/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+char *reiserfs_get_version_string(void) {
+ return "ReiserFS version 3.6.25" ;
+}
diff --git a/fs/super.c b/fs/super.c
index 1c0b48c11..6262c7675 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -54,6 +54,7 @@ extern void wait_for_keypress(void);
extern int root_mountflags;
static int do_remount_sb(struct super_block *sb, int flags, char * data);
+static int do_remount(const char *dir, int flags, char * data);
/* this is initialized in init/main.c */
kdev_t ROOT_DEV;
@@ -1025,15 +1026,12 @@ static int do_umount(struct vfsmount *mnt, int umount_root, int flags)
* call reboot(9). Then init(8) could umount root and exec /reboot.
*/
if (mnt == current->fs->rootmnt && !umount_root) {
- int retval = 0;
/*
* Special case for "unmounting" root ...
* we just try to remount it readonly.
*/
mntput(mnt);
- if (!(sb->s_flags & MS_RDONLY))
- retval = do_remount_sb(sb, MS_RDONLY, 0);
- return retval;
+ return do_remount("/", MS_RDONLY, NULL);
}
spin_lock(&dcache_lock);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 9ab57d370..02aa4f025 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -86,6 +86,7 @@ static int udf_adinicb_writepage(struct page *page)
brelse(bh);
SetPageUptodate(page);
kunmap(page);
+ UnlockPage(page);
return 0;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 3783b43fd..8c818666c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -203,7 +203,6 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
udf_release_data(bh);
inode->i_data.a_ops->writepage(page);
- UnlockPage(page);
page_cache_release(page);
mark_inode_dirty(inode);
diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h
index 724f03b77..fd5b8fafc 100644
--- a/include/asm-alpha/errno.h
+++ b/include/asm-alpha/errno.h
@@ -139,4 +139,6 @@
#define ENOMEDIUM 129 /* No medium found */
#define EMEDIUMTYPE 130 /* Wrong medium type */
#define EHASHCOLLISION 131 /* Number of hash collisions exceeds maximum generation counter value. */
+
#endif
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 4a5b98607..265c1b2dc 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -572,7 +572,6 @@ static inline long sync(void)
return sys_sync();
}
-extern long sys_wait4(int, int *, int, struct rusage *);
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
{
return sys_wait4(pid, wait_stat, flags, NULL);
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 4e77e5d8a..257f1f879 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -76,26 +76,23 @@ static void __init check_fpu(void)
}
/* Enable FXSR and company _before_ testing for FP problems. */
-#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
/*
* Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
*/
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
- panic("Kernel compiled for PII/PIII+ with FXSR, data not 16-byte aligned!");
-
+ if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+ extern void __buggy_fxsr_alignment(void);
+ __buggy_fxsr_alignment();
+ }
if (cpu_has_fxsr) {
printk(KERN_INFO "Enabling fast FPU save and restore... ");
set_in_cr4(X86_CR4_OSFXSR);
printk("done.\n");
}
-#endif
-#ifdef CONFIG_X86_XMM
if (cpu_has_xmm) {
printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
set_in_cr4(X86_CR4_OSXMMEXCPT);
printk("done.\n");
}
-#endif
/* Test for the divl bug.. */
__asm__("fninit\n\t"
@@ -203,14 +200,6 @@ static void __init check_config(void)
&& (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
#endif
-
-/*
- * If we configured ourselves for FXSR, we'd better have it.
- */
-#ifdef CONFIG_X86_FXSR
- if (!cpu_has_fxsr)
- panic("Kernel compiled for PII/PIII+, requires FXSR feature!");
-#endif
}
static void __init check_bugs(void)
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h
index 7cf599f4d..d22c4472b 100644
--- a/include/asm-i386/errno.h
+++ b/include/asm-i386/errno.h
@@ -128,5 +128,6 @@
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define EHASHCOLLISION 125 /* Number of hash collisions exceeds maximum generation counter value. */
#endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 04ba635e5..1cf8dc2ab 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -23,6 +23,10 @@ extern void init_fpu(void);
extern void save_init_fpu( struct task_struct *tsk );
extern void restore_fpu( struct task_struct *tsk );
+extern void kernel_fpu_begin(void);
+#define kernel_fpu_end() stts()
+
+
#define unlazy_fpu( tsk ) do { \
if ( tsk->flags & PF_USEDFPU ) \
save_init_fpu( tsk ); \
@@ -50,10 +54,8 @@ extern void set_fpu_twd( struct task_struct *tsk, unsigned short twd );
extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr );
#define load_mxcsr( val ) do { \
- if ( cpu_has_xmm ) { \
- unsigned long __mxcsr = ((unsigned long)(val) & 0xffff); \
- asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
- } \
+ unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
+ asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
} while (0)
/*
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index bf32a7449..a25f3bcfd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -140,7 +140,11 @@ extern unsigned long empty_zero_page[1024];
#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END (FIXADDR_START)
+#if CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
/*
* The 4MB page is guessing.. Detailed in the infamous "Chapter H"
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index d3b01ab8b..52e24682e 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,15 +267,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* I expect future Intel CPU's to have a weaker ordering,
* but I'd also expect them to finally get their act together
* and add some real memory barriers if so.
- *
- * The Pentium III does add a real memory barrier with the
- * sfence instruction, so we use that where appropriate.
*/
-#ifndef CONFIG_X86_XMM
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define mb() __asm__ __volatile__ ("sfence": : :"memory")
-#endif
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
diff --git a/include/asm-mips/errno.h b/include/asm-mips/errno.h
index 0763d0e99..736774592 100644
--- a/include/asm-mips/errno.h
+++ b/include/asm-mips/errno.h
@@ -1,11 +1,9 @@
/*
- * include/asm-mips/errno.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1999 by Ralf Baechle
+ * Copyright (C) 1995, 1999, 2001 by Ralf Baechle
*/
#ifndef _ASM_ERRNO_H
#define _ASM_ERRNO_H
@@ -143,6 +141,7 @@
*/
#define ENOMEDIUM 159 /* No medium found */
#define EMEDIUMTYPE 160 /* Wrong medium type */
#define EHASHCOLLISION 161 /* Number of hash collisions exceeds maximum generation counter value. */
#define EDQUOT 1133 /* Quota exceeded */
diff --git a/include/asm-mips64/errno.h b/include/asm-mips64/errno.h
index b2f507f4f..7864a4039 100644
--- a/include/asm-mips64/errno.h
+++ b/include/asm-mips64/errno.h
@@ -1,10 +1,10 @@
-/* $Id: errno.h,v 1.1 1999/08/18 23:37:51 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 1995, 2001 by Ralf Baechle
+ * Copyright (C) 2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_ERRNO_H
#define _ASM_ERRNO_H
@@ -142,6 +142,7 @@
*/
#define ENOMEDIUM 159 /* No medium found */
#define EMEDIUMTYPE 160 /* Wrong medium type */
#define EHASHCOLLISION 125 /* Number of hash collisions exceeds maximum generation counter value. NOTE(review): mips defines this as 161; 125 looks copy-pasted from the i386 numbering -- verify intended value */
#define EDQUOT 1133 /* Quota exceeded */
diff --git a/include/asm-ppc/delay.h b/include/asm-ppc/delay.h
index 2116a2f2c..8cc1fde24 100644
--- a/include/asm-ppc/delay.h
+++ b/include/asm-ppc/delay.h
@@ -2,6 +2,8 @@
#ifndef _PPC_DELAY_H
#define _PPC_DELAY_H
+#include <asm/param.h>
+
/*
* Copyright 1996, Paul Mackerras.
*
@@ -11,25 +13,38 @@
* 2 of the License, or (at your option) any later version.
*/
-extern unsigned long loops_per_sec;
+extern unsigned long loops_per_jiffy;
-extern __inline__ void __delay(unsigned int loops)
-{
- if (loops != 0)
- __asm__ __volatile__("mtctr %0; 1: bdnz 1b" : :
- "r" (loops) : "ctr");
-}
+/* maximum permitted argument to udelay */
+#define __MAX_UDELAY 1000000
-extern __inline__ void udelay(unsigned long usecs)
+extern void __delay(unsigned int loops);
+
+/* N.B. the `secs' parameter here is a fixed-point number with
+ the binary point to the left of the most-significant bit. */
+extern __inline__ void __const_udelay(unsigned int secs)
{
- unsigned long loops;
+ unsigned int loops;
- /* compute (usecs * 2^32 / 10^6) * loops_per_sec / 2^32 */
- usecs *= 0x10c6; /* 2^32 / 10^6 */
__asm__("mulhwu %0,%1,%2" : "=r" (loops) :
- "r" (usecs), "r" (loops_per_sec));
- __delay(loops);
+ "r" (secs), "r" (loops_per_jiffy));
+ __delay(loops * HZ);
}
+/*
+ * note that 4294 == 2^32 / 10^6, multiplying by 4294 converts from
+ * microseconds to a 32-bit fixed-point number of seconds.
+ */
+extern __inline__ void __udelay(unsigned int usecs)
+{
+ __const_udelay(usecs * 4294);
+}
+
+extern void __bad_udelay(void); /* deliberately undefined */
+
+#define udelay(n) (__builtin_constant_p(n)? \
+ ((n) > __MAX_UDELAY? __bad_udelay(): __const_udelay((n) * 4294u)) : \
+ __udelay(n))
+
#endif /* defined(_PPC_DELAY_H) */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/dma.h b/include/asm-ppc/dma.h
index 735d91d1f..56f56cd0a 100644
--- a/include/asm-ppc/dma.h
+++ b/include/asm-ppc/dma.h
@@ -102,13 +102,13 @@ extern unsigned long ISA_DMA_THRESHOLD;
/* used in nasty hack for sound - see prep_setup_arch() -- Cort */
extern long ppc_cs4232_dma, ppc_cs4232_dma2;
#if defined(CONFIG_CS4232)
-#if defined(CONFIG_PREP) || defined(CONFIG_ALL_PPC)
+#if defined(CONFIG_ALL_PPC)
#define SND_DMA1 ppc_cs4232_dma
#define SND_DMA2 ppc_cs4232_dma2
-#else /* !CONFIG_PREP && !CONFIG_ALL_PPC */
+#else /* !CONFIG_ALL_PPC */
#define SND_DMA1 -1
#define SND_DMA2 -1
-#endif /* !CONFIG_PREP */
+#endif /* CONFIG_ALL_PPC */
#elif defined(CONFIG_MSS)
#define SND_DMA1 CONFIG_MSS_DMA
#define SND_DMA2 CONFIG_MSS_DMA2
@@ -201,40 +201,8 @@ static __inline__ void release_dma_lock(unsigned long flags)
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
- /*
- * The Radstone PPC2 and PPC2a boards have inverted DREQ
- * lines (active low) so each command needs to be logically
- * ORed with 0x40
- */
unsigned char ucDmaCmd=0x00;
-#if defined(CONFIG_PREP) || defined(CONFIG_ALL_PPC)
- if(_prep_type==_PREP_Radstone)
- {
- switch(ucSystemType)
- {
- case RS_SYS_TYPE_PPC2:
- case RS_SYS_TYPE_PPC2a:
- case RS_SYS_TYPE_PPC2ep:
- {
- /*
- * DREQ lines are active low
- */
- ucDmaCmd=0x40;
- break;
- }
-
- default:
- {
- /*
- * DREQ lines are active high
- */
- break;
- }
- }
- }
-#endif /* CONFIG_PREP || CONFIG_ALL_PPC */
-
if (dmanr != 4)
{
dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
index 6a0e2e874..85124797a 100644
--- a/include/asm-ppc/elf.h
+++ b/include/asm-ppc/elf.h
@@ -70,5 +70,24 @@ typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+/*
+ * We need to put in some extra aux table entries to tell glibc what
+ * the cache block size is, so it can use the dcbz instruction safely.
+ */
+#define AT_DCACHEBSIZE 17
+#define AT_ICACHEBSIZE 18
+#define AT_UCACHEBSIZE 19
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+#define DLINFO_EXTRA_ITEMS 3
+#define EXTRA_DLINFO do { \
+ NEW_AUX_ENT(0, AT_DCACHEBSIZE, dcache_bsize); \
+ NEW_AUX_ENT(1, AT_ICACHEBSIZE, icache_bsize); \
+ NEW_AUX_ENT(2, AT_UCACHEBSIZE, ucache_bsize); \
+} while (0)
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-ppc/feature.h b/include/asm-ppc/feature.h
index 7a33ea8d3..9e9f831d5 100644
--- a/include/asm-ppc/feature.h
+++ b/include/asm-ppc/feature.h
@@ -86,6 +86,8 @@ extern void feature_set_usb_power(struct device_node* device, int power);
extern void feature_set_firewire_power(struct device_node* device, int power);
+extern void feature_core99_kick_cpu1(void);
+
/*
* Sleep related functions. At term, they should be high-priority notifiers
*/
diff --git a/include/asm-ppc/hardirq.h b/include/asm-ppc/hardirq.h
index 8a270c8c1..66aa6abeb 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-ppc/hardirq.h
@@ -47,7 +47,7 @@ typedef struct {
#include <asm/atomic.h>
extern unsigned char global_irq_holder;
-extern unsigned volatile int global_irq_lock;
+extern unsigned volatile long global_irq_lock;
extern atomic_t global_irq_count;
static inline void release_irqlock(int cpu)
@@ -66,8 +66,8 @@ static inline void hardirq_enter(int cpu)
++local_irq_count(cpu);
atomic_inc(&global_irq_count);
while (test_bit(0,&global_irq_lock)) {
- if (smp_processor_id() == global_irq_holder) {
- printk("uh oh, interrupt while we hold global irq lock!\n");
+ if (cpu == global_irq_holder) {
+ printk("uh oh, interrupt while we hold global irq lock! (CPU %d)\n", cpu);
#ifdef CONFIG_XMON
xmon(0);
#endif
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
index 7d4790171..54e4d7111 100644
--- a/include/asm-ppc/hw_irq.h
+++ b/include/asm-ppc/hw_irq.h
@@ -28,17 +28,23 @@ extern void __no_use_set_lost(unsigned long);
#define __cli() int_control.int_cli()
#define __sti() int_control.int_sti()
-#define __save_flags(flags) int_control.int_save_flags(&flags)
-#define __restore_flags(flags) int_control.int_restore_flags(flags)
+#define __save_flags(flags) int_control.int_save_flags((unsigned long *)&flags)
+#define __restore_flags(flags) int_control.int_restore_flags((unsigned long)flags)
#define __save_and_cli(flags) ({__save_flags(flags);__cli();})
-#define __set_lost(irq) ({ if ((ulong)int_control.int_set_lost) int_control.int_set_lost(irq); })
+#define __set_lost(irq) ({ if ((unsigned long)int_control.int_set_lost) int_control.int_set_lost(irq); })
extern void do_lost_interrupts(unsigned long);
-extern atomic_t ppc_n_lost_interrupts;
#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
-#define mask_and_ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
+#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
+
+/* Should we handle this via lost interrupts and IPIs or should we just not care like
+ * we do now ? --BenH.
+ */
+struct hw_interrupt_type;
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+
#endif /* _PPC_HW_IRQ_H */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/ide.h b/include/asm-ppc/ide.h
index 179bdec67..a9e3baf0d 100644
--- a/include/asm-ppc/ide.h
+++ b/include/asm-ppc/ide.h
@@ -37,6 +37,8 @@ extern ide_ioreg_t chrp_idedma_regbase; /* one for both channels */
extern unsigned int chrp_ide_irq;
extern void chrp_ide_probe(void);
+extern void ppc_generic_ide_fix_driveid(struct hd_driveid *id);
+
struct ide_machdep_calls {
void (*insw)(ide_ioreg_t port, void *buf, int ns);
void (*outsw)(ide_ioreg_t port, void *buf, int ns);
@@ -90,10 +92,9 @@ static __inline__ int ide_default_irq(ide_ioreg_t base)
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
- if ( ppc_ide_md.default_io_base )
+ if (ppc_ide_md.default_io_base)
return ppc_ide_md.default_io_base(index);
- else
- return -1;
+ return 0;
}
static __inline__ void ide_init_hwif_ports(hw_regs_t *hw,
@@ -124,10 +125,9 @@ static __inline__ void ide_init_default_hwifs(void)
static __inline__ int ide_check_region (ide_ioreg_t from, unsigned int extent)
{
- if ( ppc_ide_md.ide_check_region )
+ if (ppc_ide_md.ide_check_region)
return ppc_ide_md.ide_check_region(from, extent);
- else
- return -1;
+ return 0;
}
static __inline__ void ide_request_region (ide_ioreg_t from, unsigned int extent, const char *name)
@@ -148,19 +148,6 @@ static __inline__ void ide_fix_driveid (struct hd_driveid *id)
ppc_ide_md.fix_driveid(id);
}
-#if 0 /* inb/outb from io.h is OK now -- paulus */
-#undef inb
-#define inb(port) in_8((unsigned char *)((port) + ppc_ide_md.io_base))
-#undef inb_p
-#define inb_p(port) inb(port)
-
-#undef outb
-#define outb(val, port) \
- out_8((unsigned char *)((port) + ppc_ide_md.io_base), (val) )
-#undef outb_p
-#define outb_p(val, port) outb(val, port)
-#endif
-
typedef union {
unsigned all : 8; /* all of the bits together */
struct {
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 03f9db09a..602363511 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -222,20 +222,6 @@ extern inline void * bus_to_virt(unsigned long address)
}
/*
- * The PCI bus bridge can translate addresses issued by the processor(s)
- * into a different address on the PCI bus. On 32-bit cpus, we assume
- * this mapping is 1-1, but on 64-bit systems it often isn't.
- */
-#ifndef CONFIG_PPC64BRIDGE
-#define phys_to_bus(x) (x)
-#define bus_to_phys(x) (x)
-
-#else
-extern unsigned long phys_to_bus(unsigned long pa);
-extern unsigned long bus_to_phys(unsigned int ba, int busnr);
-#endif /* CONFIG_PPC64BRIDGE */
-
-/*
* Change virtual addresses to physical addresses and vv, for
* addresses in the area where the kernel has the RAM mapped.
*/
@@ -364,6 +350,13 @@ out:
return retval;
}
+/* Make some pcmcia drivers happy */
+static inline int isa_check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ return 0;
+}
+
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
diff --git a/include/asm-ppc/ioctls.h b/include/asm-ppc/ioctls.h
index 0f879c163..e700d29fd 100644
--- a/include/asm-ppc/ioctls.h
+++ b/include/asm-ppc/ioctls.h
@@ -9,6 +9,7 @@
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
#define TIOCGETP _IOR('t', 8, struct sgttyb)
#define TIOCSETP _IOW('t', 9, struct sgttyb)
diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h
index c3ae15763..139dfea0c 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-ppc/irq.h
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <asm/machdep.h> /* ppc_md */
+#include <asm/atomic.h>
extern void disable_irq(unsigned int);
extern void disable_irq_nosync(unsigned int);
@@ -163,9 +164,6 @@ extern irq_node_t *new_irq_node(void);
#ifndef CONFIG_8260
#define NUM_8259_INTERRUPTS 16
-#define IRQ_8259_CASCADE 16
-#define openpic_to_irq(n) ((n)+NUM_8259_INTERRUPTS)
-#define irq_to_openpic(n) ((n)-NUM_8259_INTERRUPTS)
#else /* CONFIG_8260 */
@@ -214,7 +212,10 @@ static __inline__ int irq_cannonicalize(int irq)
#endif
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-extern unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
+/* pedantic: these are long because they are used with set_bit --RR */
+extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+extern atomic_t ppc_n_lost_interrupts;
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/keylargo.h b/include/asm-ppc/keylargo.h
index 5408262a1..02d3d58b2 100644
--- a/include/asm-ppc/keylargo.h
+++ b/include/asm-ppc/keylargo.h
@@ -20,10 +20,19 @@
#define KEYLARGO_GPIO_CNT 17
/* Specific GPIO regs */
+
#define KL_GPIO_ETH_PHY_RESET (KEYLARGO_GPIO_0+0x10)
#define KL_GPIO_ETH_PHY_RESET_ASSERT 0x04
#define KL_GPIO_ETH_PHY_RESET_RELEASE 0x05
#define KL_GPIO_ETH_PHY_RESET_TRISTATE 0x00
+
+#define KL_GPIO_KICK_CPU1 (KEYLARGO_GPIO_0+0x0a)
+#define KL_GPIO_KICK_CPU1_UP 0x04
+#define KL_GPIO_KICK_CPU1_DOWN 0x38
+
+#define KL_GPIO_PMU_MESSAGE_IRQ (KEYLARGO_GPIO_EXTINT_0+0x09)
+#define KL_GPIO_PMU_MESSAGE_BIT 0x02
+
/*
* Bits in feature control register
*/
diff --git a/include/asm-ppc/linux_logo.h b/include/asm-ppc/linux_logo.h
index c35e6db5b..d3aa2d267 100644
--- a/include/asm-ppc/linux_logo.h
+++ b/include/asm-ppc/linux_logo.h
@@ -34,9 +34,6 @@ extern unsigned char linux_logo_green[];
extern unsigned char linux_logo_blue[];
extern unsigned char linux_logo[];
extern unsigned char linux_logo_bw[];
-extern unsigned char linux_logo16_red[];
-extern unsigned char linux_logo16_green[];
-extern unsigned char linux_logo16_blue[];
extern unsigned char linux_logo16[];
#endif
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index 67111965f..ca254d0e7 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -10,6 +10,7 @@
struct pt_regs;
struct pci_bus;
+struct pci_dev;
struct machdep_calls {
void (*setup_arch)(void);
@@ -45,7 +46,7 @@ struct machdep_calls {
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
-/* Tons of keyboard stuff. */
+ /* Tons of keyboard stuff. */
int (*kbd_setkeycode)(unsigned int scancode,
unsigned int keycode);
int (*kbd_getkeycode)(unsigned int scancode);
@@ -59,25 +60,25 @@ struct machdep_calls {
unsigned char *ppc_kbd_sysrq_xlate;
#endif
- /* PCI interfaces */
- int (*pcibios_read_config_byte)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned char *val);
- int (*pcibios_read_config_word)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned short *val);
- int (*pcibios_read_config_dword)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned int *val);
- int (*pcibios_write_config_byte)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned char val);
- int (*pcibios_write_config_word)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned short val);
- int (*pcibios_write_config_dword)(unsigned char bus,
- unsigned char dev_fn, unsigned char offset, unsigned int val);
+ /*
+ * optional PCI "hooks"
+ */
+
+ /* Called after scanning the bus, before allocating
+ * resources
+ */
void (*pcibios_fixup)(void);
- void (*pcibios_fixup_bus)(struct pci_bus *);
- void* (*pci_dev_io_base)(unsigned char bus, unsigned char devfn, int physical);
- void* (*pci_dev_mem_base)(unsigned char bus, unsigned char devfn);
- int (*pci_dev_root_bridge)(unsigned char bus, unsigned char devfn);
+ /* Called for each PCI bus in the system
+ * when it's probed
+ */
+ void (*pcibios_fixup_bus)(struct pci_bus *);
+
+ /* Called when pci_enable_device() is called (initial=0) or
+ * when a device with no assigned resource is found (initial=1).
+ * Returns 0 to allow assignement/enabling of the device
+ */
+ int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
/* this is for modules, since _machine can be a define -- Cort */
int ppc_machine;
diff --git a/include/asm-ppc/mman.h b/include/asm-ppc/mman.h
index 64abf0c58..1c0dbe205 100644
--- a/include/asm-ppc/mman.h
+++ b/include/asm-ppc/mman.h
@@ -13,6 +13,7 @@
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+#define MAP_LOCKED 0x80
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 3efe39d00..976ea6cc9 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -6,12 +6,13 @@
#ifndef _PPC_MMU_H_
#define _PPC_MMU_H_
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
-
#include <linux/config.h>
#ifndef __ASSEMBLY__
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
/* Hardware Page Table Entry */
typedef struct _PTE {
#ifdef CONFIG_PPC64BRIDGE
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index fed474d14..f8ef4e604 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -56,13 +56,6 @@ extern void mmu_context_overflow(void);
*/
extern void set_context(int context, void *pgd);
-#ifdef CONFIG_8xx
-extern inline void mmu_context_overflow(void)
-{
- atomic_set(&next_mmu_context, -1);
-}
-#endif
-
/*
* Get a new mmu context for task tsk if necessary.
*/
diff --git a/include/asm-ppc/parport.h b/include/asm-ppc/parport.h
new file mode 100644
index 000000000..11f96d3de
--- /dev/null
+++ b/include/asm-ppc/parport.h
@@ -0,0 +1,18 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_PPC_PARPORT_H
+#define _ASM_PPC_PARPORT_H
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASM_PPC_PARPORT_H) */
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index 9e5385e51..a6c8d8919 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -2,49 +2,70 @@
#ifndef _ASM_PCI_BRIDGE_H
#define _ASM_PCI_BRIDGE_H
-void pmac_find_bridges(void);
+struct device_node;
+struct pci_controller;
/*
* pci_io_base returns the memory address at which you can access
* the I/O space for PCI bus number `bus' (or NULL on error).
- *
- * NOTE: This doesn't handle the new Uni-N chip which requires
- * per-device io_base.
*/
-void *pci_io_base(unsigned int bus);
-
-/* This version handles the new Uni-N host bridge, the iobase is now
- * a per-device thing. I also added the memory base so PReP can
- * be fixed to return 0xc0000000 (I didn't actually implement it)
- *
- * pci_dev_io_base() returns either a virtual (ioremap'ed) address or
- * a physical address. In-kernel clients will use logical while the
- * sys_pciconfig_iobase syscall returns a physical one to userland.
+extern void *pci_bus_io_base(unsigned int bus);
+extern unsigned long pci_bus_io_base_phys(unsigned int bus);
+extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
+
+/*
+ * PCI <-> OF matching functions
*/
-void *pci_dev_io_base(unsigned char bus, unsigned char devfn, int physical);
-void *pci_dev_mem_base(unsigned char bus, unsigned char devfn);
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8* bus, u8* devfn);
+extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
-/* Returns the root-bridge number (Uni-N number) of a device */
-int pci_dev_root_bridge(unsigned char bus, unsigned char devfn);
+/* Get the PCI host controller for a bus */
+extern struct pci_controller* pci_bus_to_hose(int bus);
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node);
/*
- * pci_device_loc returns the bus number and device/function number
- * for a device on a PCI bus, given its device_node struct.
- * It returns 0 if OK, -1 on error.
+ * Structure of a PCI controller (host bridge)
*/
-int pci_device_loc(struct device_node *dev, unsigned char *bus_ptr,
- unsigned char *devfn_ptr);
+struct pci_controller {
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ void *io_base_virt;
+ unsigned long io_base_phys;
+
+ /* Some machines (PReP) have a non 1:1 mapping of
+ * the PCI memory space in the CPU bus space
+ */
+ unsigned long pci_mem_offset;
-struct bridge_data {
+ struct pci_ops *ops;
volatile unsigned int *cfg_addr;
volatile unsigned char *cfg_data;
- void *io_base; /* virtual */
- unsigned long io_base_phys;
- int bus_number;
- int max_bus;
- struct bridge_data *next;
- struct device_node *node;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
};
+/* These are used for config access before all the PCI probing
+ has been done. */
+int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn, int where, u8 *val);
+int early_read_config_word(struct pci_controller *hose, int bus, int dev_fn, int where, u16 *val);
+int early_read_config_dword(struct pci_controller *hose, int bus, int dev_fn, int where, u32 *val);
+int early_write_config_byte(struct pci_controller *hose, int bus, int dev_fn, int where, u8 val);
+int early_write_config_word(struct pci_controller *hose, int bus, int dev_fn, int where, u16 val);
+int early_write_config_dword(struct pci_controller *hose, int bus, int dev_fn, int where, u32 val);
+
#endif
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 1a661f050..a1cfa7d31 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -6,13 +6,11 @@
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
-/* Can be used to override the logic in pci_scan_bus for skipping
- * already-configured bus numbers - to be used for buggy BIOSes
- * or architectures with incomplete PCI setup by the loader.
- */
-#define pcibios_assign_all_busses() 0
+extern int pcibios_assign_all_busses(void);
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -27,6 +25,18 @@ extern inline void pcibios_penalize_isa_irq(int irq)
/* We don't do dynamic PCI IRQ allocation */
}
+extern unsigned long pci_resource_to_bus(struct pci_dev *pdev, struct resource *res);
+
+/*
+ * The PCI bus bridge can translate addresses issued by the processor(s)
+ * into a different address on the PCI bus. On 32-bit cpus, we assume
+ * this mapping is 1-1, but on 64-bit systems it often isn't.
+ *
+ * Obsolete ! Drivers should now use pci_resource_to_bus
+ */
+extern unsigned long pci_phys_to_bus(unsigned long pa, int busnr);
+extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
+
/* Dynamic DMA Mapping stuff
* ++ajoshi
*/
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 0f4579cd7..353f5c9bf 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -17,22 +17,22 @@ extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
-extern inline void flush_hash_page(unsigned context, unsigned long va)
+static inline void flush_hash_page(unsigned context, unsigned long va)
{ }
#elif defined(CONFIG_8xx)
#define __tlbia() asm volatile ("tlbia" : : )
-extern inline void local_flush_tlb_all(void)
+static inline void local_flush_tlb_all(void)
{ __tlbia(); }
-extern inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
-extern inline void local_flush_tlb_page(struct vm_area_struct *vma,
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ __tlbia(); }
-extern inline void local_flush_tlb_range(struct mm_struct *mm,
+static inline void local_flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{ __tlbia(); }
-extern inline void flush_hash_page(unsigned context, unsigned long va)
+static inline void flush_hash_page(unsigned context, unsigned long va)
{ }
#else
struct mm_struct;
@@ -49,7 +49,7 @@ extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* PPC has hw page tables. */
@@ -323,9 +323,9 @@ extern pte_t * __bad_pagetable(void);
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
-extern inline int pgd_none(pgd_t pgd) { return 0; }
-extern inline int pgd_bad(pgd_t pgd) { return 0; }
-extern inline int pgd_present(pgd_t pgd) { return 1; }
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
#define pgd_page(pgd) \
@@ -335,45 +335,45 @@ extern inline int pgd_present(pgd_t pgd) { return 1; }
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-extern inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-extern inline pte_t pte_rdprotect(pte_t pte) {
+static inline pte_t pte_rdprotect(pte_t pte) {
pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) {
+static inline pte_t pte_exprotect(pte_t pte) {
pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte) {
+static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) {
+static inline pte_t pte_mkclean(pte_t pte) {
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-extern inline pte_t pte_mkold(pte_t pte) {
+static inline pte_t pte_mkold(pte_t pte) {
pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_mkread(pte_t pte) {
+static inline pte_t pte_mkread(pte_t pte) {
pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) {
+static inline pte_t pte_mkexec(pte_t pte) {
pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= _PAGE_RW;
if (pte_val(pte) & _PAGE_DIRTY)
pte_val(pte) |= _PAGE_HWWRITE;
return pte;
}
-extern inline pte_t pte_mkdirty(pte_t pte)
+static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_DIRTY;
if (pte_val(pte) & _PAGE_RW)
pte_val(pte) |= _PAGE_HWWRITE;
return pte;
}
-extern inline pte_t pte_mkyoung(pte_t pte) {
+static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/* Certain architectures need to do special things when pte's
@@ -387,7 +387,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) {
* and a page entry and page directory to the page they refer to.
*/
-extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = physpage | pgprot_val(pgprot);
@@ -401,12 +401,73 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
pte; \
})
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
+/*
+ * Atomic PTE updates.
+ *
+ * pte_update clears and sets bit atomically, and returns
+ * the old pte value.
+ */
+static inline unsigned long pte_update(pte_t *p, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long old, tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %0,0,%3
+ andc %1,%0,%4
+ or %1,%1,%5
+ stwcx. %1,0,%3
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ return old;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+}
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ return (pte_update(ptep, _PAGE_DIRTY | _PAGE_HWWRITE, 0)
+ & _PAGE_DIRTY) != 0;
+}
+
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
+{
+ return __pte(pte_update(ptep, ~0UL, 0));
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);
+}
+
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ /*
+ * N.B. this doesn't set the _PAGE_HWWRITE bit in the case
+ * where _PAGE_RW is set and _PAGE_DIRTY was clear. This
+ * doesn't matter; all it will mean is that if the next call
+ * to hash_page for this page is for a read, it will put a
+ * readonly HPTE into the hash table rather than a R/W HPTE.
+ * A call to hash_page for a write to this page will set
+ * _PAGE_HWWRITE and put a R/W HPTE into the hash table.
+ * -- paulus.
+ */
+ pte_update(ptep, 0, _PAGE_DIRTY);
+}
+
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+
#define pmd_page(pmd) (pmd_val(pmd))
/* to find an entry in a kernel page-table-directory */
@@ -417,13 +478,13 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* Find an entry in the second-level page table.. */
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) dir;
}
/* Find an entry in the third-level page table.. */
-extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
@@ -486,8 +547,6 @@ extern void kernel_set_cachemode (unsigned long address, unsigned long size,
#define io_remap_page_range remap_page_range
-#include <asm-generic/pgtable.h>
-
-#endif __ASSEMBLY__
+#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/prep_nvram.h b/include/asm-ppc/prep_nvram.h
index 82ec21508..85694899c 100644
--- a/include/asm-ppc/prep_nvram.h
+++ b/include/asm-ppc/prep_nvram.h
@@ -21,6 +21,11 @@
#ifndef _PPC_PREP_NVRAM_H
#define _PPC_PREP_NVRAM_H
+#define MAX_PREP_NVRAM 0x8000
+#define PREP_NVRAM_AS0 0x74
+#define PREP_NVRAM_AS1 0x75
+#define PREP_NVRAM_DATA 0x77
+
#define NVSIZE 4096 /* size of NVRAM */
#define OSAREASIZE 512 /* size of OSArea space */
#define CONFSIZE 1024 /* guess at size of Configuration space */
diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h
index 5908d7f32..cd87f248e 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-ppc/processor.h
@@ -203,6 +203,12 @@
#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */
+#define L2CR_PIPE_LATEWR (0x01800000) /* late-write SRAM */
+#define L2CR_L2CTL (0x00100000) /* RAM control */
+#define L2CR_INST_DISABLE (0x00400000) /* disable for insn's */
+#define L2CR_L2I (0x00200000) /* global invalidate */
+#define L2CR_L2E (0x80000000) /* enable */
+#define L2CR_L2WT (0x00080000) /* write-through */
#define SPRN_LR 0x008 /* Link Register */
#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
@@ -233,14 +239,14 @@
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */
#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */
-#define SPRN_TBHI 0x3DC /* Time Base High */
-#define SPRN_TBHU 0x3CC /* Time Base High User-mode */
-#define SPRN_TBLO 0x3DD /* Time Base Low */
-#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */
-#define SPRN_TBRL 0x10D /* Time Base Read Lower Register */
-#define SPRN_TBRU 0x10C /* Time Base Read Upper Register */
-#define SPRN_TBWL 0x11D /* Time Base Write Lower Register */
-#define SPRN_TBWU 0x11C /* Time Base Write Upper Register */
+#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
+#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_TBWL 0x11C /* Time Base Lower Register (supervisor, R/W) */
+#define SPRN_TBWU 0x11D /* Time Base Upper Register (supervisor, R/W) */
+#define SPRN_TBHI 0x3DC /* Time Base High (4xx) */
+#define SPRN_TBHU 0x3CC /* Time Base High User-mode (4xx) */
+#define SPRN_TBLO 0x3DD /* Time Base Low (4xx) */
+#define SPRN_TBLU 0x3CD /* Time Base Low User-mode (4xx) */
#define SPRN_TCR 0x3DA /* Timer Control Register */
#define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */
#define WP_2_17 0 /* 2^17 clocks */
@@ -262,15 +268,17 @@
#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
#define TCR_ARE 0x00400000 /* Auto Reload Enable */
#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
-#define THRM1_TIN (1<<0)
-#define THRM1_TIV (1<<1)
-#define THRM1_THRES (0x7f<<2)
-#define THRM1_TID (1<<29)
-#define THRM1_TIE (1<<30)
-#define THRM1_V (1<<31)
+/* these bits were defined in inverted endian sense originally, ugh, confusing */
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) ((x&0x7f)<<23)
+#define THRM3_SITV(x) ((x&0x3fff)<<1)
+#define THRM1_TID (1<<2)
+#define THRM1_TIE (1<<1)
+#define THRM1_V (1<<0)
#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
-#define THRM3_E (1<<31)
+#define THRM3_E (1<<0)
#define SPRN_TSR 0x3D8 /* Timer Status Register */
#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
@@ -500,8 +508,8 @@
#define _MACH_fads 0x00000020 /* Motorola FADS board */
#define _MACH_rpxlite 0x00000040 /* RPCG RPX-Lite 8xx board */
#define _MACH_bseip 0x00000080 /* Bright Star Engineering ip-Engine */
-#define _MACH_yk 0x00000100 /* Motorola Yellowknife */
-#define _MACH_gemini 0x00000200 /* Synergy Microsystems gemini board */
+#define _MACH_unused0 0x00000100 /* Now free to be used */
+#define _MACH_unused1 0x00000200 /* Now free to be used */
#define _MACH_classic 0x00000400 /* RPCG RPX-Classic 8xx board */
#define _MACH_oak 0x00000800 /* IBM "Oak" 403 eval. board */
#define _MACH_walnut 0x00001000 /* IBM "Walnut" 405GP eval. board */
@@ -509,24 +517,11 @@
#define _MACH_tqm860 0x00004000 /* TQM860/L */
#define _MACH_tqm8xxL 0x00008000 /* TQM8xxL */
-
/* see residual.h for these */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
#define _PREP_Bull 0x03 /* bull prep */
-#define _PREP_Radstone 0x04 /* Radstone Technology PLC prep */
-
-/*
- * Radstone board types
- */
-#define RS_SYS_TYPE_PPC1 0
-#define RS_SYS_TYPE_PPC2 1
-#define RS_SYS_TYPE_PPC1a 2
-#define RS_SYS_TYPE_PPC2a 3
-#define RS_SYS_TYPE_PPC4 4
-#define RS_SYS_TYPE_PPC4a 5
-#define RS_SYS_TYPE_PPC2ep 6
/* these are arbitrary */
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
@@ -715,9 +710,6 @@ void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
#elif defined(CONFIG_APUS)
#define _machine _MACH_apus
#define have_of 0
-#elif defined(CONFIG_GEMINI)
-#define _machine _MACH_gemini
-#define have_of 0
#elif defined(CONFIG_8260)
#define _machine _MACH_8260
#define have_of 0
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h
index 0c57690ea..b80c460b3 100644
--- a/include/asm-ppc/prom.h
+++ b/include/asm-ppc/prom.h
@@ -80,14 +80,15 @@ extern struct device_node *find_type_devices(const char *type);
extern struct device_node *find_path_device(const char *path);
extern struct device_node *find_compatible_devices(const char *type,
const char *compat);
-extern struct device_node *find_pci_device_OFnode(unsigned char bus,
- unsigned char dev_fn);
extern struct device_node *find_phandle(phandle);
extern struct device_node *find_all_nodes(void);
extern int device_is_compatible(struct device_node *device, const char *);
extern int machine_is_compatible(const char *compat);
extern unsigned char *get_property(struct device_node *node, const char *name,
int *lenp);
+extern void prom_add_property(struct device_node* np, struct property* prop);
+extern void prom_get_irq_senses(unsigned char *, int, int);
+
extern void print_properties(struct device_node *node);
extern int call_rtas(const char *service, int nargs, int nret,
unsigned long *outputs, ...);
@@ -96,7 +97,8 @@ extern void prom_drawhex(unsigned long v);
extern void prom_drawchar(char c);
extern void map_bootx_text(void);
-
+extern void bootx_update_display(unsigned long phys, int width, int height,
+ int depth, int pitch);
#endif /* _PPC_PROM_H */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/raven.h b/include/asm-ppc/raven.h
index ee873ff82..e912088b0 100644
--- a/include/asm-ppc/raven.h
+++ b/include/asm-ppc/raven.h
@@ -31,5 +31,5 @@
extern struct hw_interrupt_type raven_pic;
extern int raven_init(void);
-#endif _ASMPPC_RAVEN_H
+#endif /* _ASMPPC_RAVEN_H */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/segment.h b/include/asm-ppc/segment.h
index 0eef1e5e7..0f2f7428d 100644
--- a/include/asm-ppc/segment.h
+++ b/include/asm-ppc/segment.h
@@ -1,7 +1 @@
-#ifndef __PPC_SEGMENT_H
-#define __PPC_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif
#include <asm/uaccess.h>
diff --git a/include/asm-ppc/serial.h b/include/asm-ppc/serial.h
index 60c1a14ff..721476591 100644
--- a/include/asm-ppc/serial.h
+++ b/include/asm-ppc/serial.h
@@ -5,10 +5,6 @@
#ifdef __KERNEL__
#include <linux/config.h>
-#ifdef CONFIG_GEMINI
-#include <asm/gemini_serial.h>
-#else
-
/*
* This assumes you have a 1.8432 MHz clock for your UART.
*
@@ -127,5 +123,4 @@
HUB6_SERIAL_PORT_DFNS \
MCA_SERIAL_PORT_DFNS
-#endif /* CONFIG_GEMINI */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index 4851e13fd..6dfc778b9 100644
--- a/include/asm-ppc/smp.h
+++ b/include/asm-ppc/smp.h
@@ -15,7 +15,7 @@
#ifndef __ASSEMBLY__
struct cpuinfo_PPC {
- unsigned long loops_per_sec;
+ unsigned long loops_per_jiffy;
unsigned long pvr;
unsigned long *pgd_cache;
unsigned long *pte_cache;
diff --git a/include/asm-ppc/smplock.h b/include/asm-ppc/smplock.h
index 5fdd5733b..4b7ba58e9 100644
--- a/include/asm-ppc/smplock.h
+++ b/include/asm-ppc/smplock.h
@@ -39,13 +39,13 @@ do { \
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+static __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff --git a/include/asm-ppc/termios.h b/include/asm-ppc/termios.h
index 2a9b8b025..22f53ce8b 100644
--- a/include/asm-ppc/termios.h
+++ b/include/asm-ppc/termios.h
@@ -43,6 +43,7 @@ struct ltchars {
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
#define TIOCGETP _IOR('t', 8, struct sgttyb)
#define TIOCSETP _IOW('t', 9, struct sgttyb)
diff --git a/include/asm-ppc/unistd.h b/include/asm-ppc/unistd.h
index 5c432792b..c0a69ef72 100644
--- a/include/asm-ppc/unistd.h
+++ b/include/asm-ppc/unistd.h
@@ -206,6 +206,10 @@
#define __NR_pciconfig_iobase 200
#define __NR_multiplexer 201
#define __NR_getdents64 202
+#define __NR_pivot_root 203
+#define __NR_fcntl64 204
+#define __NR_madvise 205
+#define __NR_mincore 206
#define __NR(n) #n
diff --git a/include/asm-sh/current.h b/include/asm-sh/current.h
index 355a2cd14..c690c233b 100644
--- a/include/asm-sh/current.h
+++ b/include/asm-sh/current.h
@@ -12,7 +12,7 @@ static __inline__ struct task_struct * get_current(void)
{
struct task_struct *current;
- __asm__("stc $r7_bank, %0"
+ __asm__("stc r7_bank, %0"
:"=r" (current));
return current;
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index b1a6f9461..a448c6f00 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -39,6 +39,7 @@ extern void paging_init(void);
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_cache_sigtramp(vaddr) do { } while (0)
#elif defined(__SH4__)
/*
* Caches are broken on SH-4, so we need them.
@@ -52,6 +53,7 @@ extern void flush_page_to_ram(struct page *page);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
+extern void flush_cache_sigtramp(unsigned long addr);
#endif
/*
@@ -125,11 +127,7 @@ extern unsigned long empty_zero_page[1024];
/* Hardware flags: SZ=1 (4k-byte) */
#define _PAGE_FLAGS_HARD 0x00000010
-#if defined(__sh3__)
-#define _PAGE_SHARED _PAGE_HW_SHARED
-#elif defined(__SH4__)
#define _PAGE_SHARED _PAGE_U0_SHARED
-#endif
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
diff --git a/include/asm-sh/sh_bios.h b/include/asm-sh/sh_bios.h
index cdb73bdea..0ca261956 100644
--- a/include/asm-sh/sh_bios.h
+++ b/include/asm-sh/sh_bios.h
@@ -13,5 +13,7 @@ extern void sh_bios_char_out(char ch);
extern int sh_bios_in_gdb_mode(void);
extern void sh_bios_gdb_detach(void);
+extern void sh_bios_get_node_addr(unsigned char *node_addr);
+extern void sh_bios_shutdown(unsigned int how);
#endif /* __ASM_SH_BIOS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 86786c730..a8f720582 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -6,6 +6,7 @@
*/
#include <linux/config.h>
+#include <linux/kernel.h>
/*
* switch_to() should switch tasks to task nr n, first
@@ -28,37 +29,37 @@ typedef struct {
register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
__asm__ __volatile__ (".balign 4\n\t" \
- "stc.l $gbr, @-$r15\n\t" \
- "sts.l $pr, @-$r15\n\t" \
- "mov.l $r8, @-$r15\n\t" \
- "mov.l $r9, @-$r15\n\t" \
- "mov.l $r10, @-$r15\n\t" \
- "mov.l $r11, @-$r15\n\t" \
- "mov.l $r12, @-$r15\n\t" \
- "mov.l $r13, @-$r15\n\t" \
- "mov.l $r14, @-$r15\n\t" \
- "mov.l $r15, @$r1 ! save SP\n\t" \
- "mov.l @$r6, $r15 ! change to new stack\n\t" \
- "mov.l %0, @-$r15 ! push R0 onto new stack\n\t" \
+ "stc.l gbr, @-r15\n\t" \
+ "sts.l pr, @-r15\n\t" \
+ "mov.l r8, @-r15\n\t" \
+ "mov.l r9, @-r15\n\t" \
+ "mov.l r10, @-r15\n\t" \
+ "mov.l r11, @-r15\n\t" \
+ "mov.l r12, @-r15\n\t" \
+ "mov.l r13, @-r15\n\t" \
+ "mov.l r14, @-r15\n\t" \
+ "mov.l r15, @r1 ! save SP\n\t" \
+ "mov.l @r6, r15 ! change to new stack\n\t" \
+ "mov.l %0, @-r15 ! push R0 onto new stack\n\t" \
"mova 1f, %0\n\t" \
- "mov.l %0, @$r2 ! save PC\n\t" \
+ "mov.l %0, @r2 ! save PC\n\t" \
"mov.l 2f, %0\n\t" \
"jmp @%0 ! call __switch_to\n\t" \
- " lds $r7, $pr ! with return to new PC\n\t" \
+ " lds r7, pr ! with return to new PC\n\t" \
".balign 4\n" \
"2:\n\t" \
".long " "__switch_to\n" \
"1:\n\t" \
- "mov.l @$r15+, %0 ! pop R0 from new stack\n\t" \
- "mov.l @$r15+, $r14\n\t" \
- "mov.l @$r15+, $r13\n\t" \
- "mov.l @$r15+, $r12\n\t" \
- "mov.l @$r15+, $r11\n\t" \
- "mov.l @$r15+, $r10\n\t" \
- "mov.l @$r15+, $r9\n\t" \
- "mov.l @$r15+, $r8\n\t" \
- "lds.l @$r15+, $pr\n\t" \
- "ldc.l @$r15+, $gbr\n\t" \
+ "mov.l @r15+, %0 ! pop R0 from new stack\n\t" \
+ "mov.l @r15+, r14\n\t" \
+ "mov.l @r15+, r13\n\t" \
+ "mov.l @r15+, r12\n\t" \
+ "mov.l @r15+, r11\n\t" \
+ "mov.l @r15+, r10\n\t" \
+ "mov.l @r15+, r9\n\t" \
+ "mov.l @r15+, r8\n\t" \
+ "lds.l @r15+, pr\n\t" \
+ "ldc.l @r15+, gbr\n\t" \
:"=&z" (__last) \
:"0" (prev), \
"r" (__ts1), "r" (__ts2), \
@@ -107,11 +108,11 @@ static __inline__ void __sti(void)
{
unsigned long __dummy0, __dummy1;
- __asm__ __volatile__("stc $sr, %0\n\t"
+ __asm__ __volatile__("stc sr, %0\n\t"
"and %1, %0\n\t"
- "stc $r6_bank, %1\n\t"
+ "stc r6_bank, %1\n\t"
"or %1, %0\n\t"
- "ldc %0, $sr"
+ "ldc %0, sr"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x000000f0)
: "memory");
@@ -120,9 +121,9 @@ static __inline__ void __sti(void)
static __inline__ void __cli(void)
{
unsigned long __dummy;
- __asm__ __volatile__("stc $sr, %0\n\t"
+ __asm__ __volatile__("stc sr, %0\n\t"
"or #0xf0, %0\n\t"
- "ldc %0, $sr"
+ "ldc %0, sr"
: "=&z" (__dummy)
: /* no inputs */
: "memory");
@@ -131,7 +132,7 @@ static __inline__ void __cli(void)
#define __save_flags(x) \
x = (__extension__ ({ unsigned long __sr; \
__asm__ __volatile__( \
- "stc $sr, %0" \
+ "stc sr, %0" \
: "=&r" (__sr) \
: /* no inputs */ \
: "memory"); \
@@ -140,10 +141,10 @@ x = (__extension__ ({ unsigned long __sr; \
#define __save_and_cli(x) \
x = (__extension__ ({ unsigned long __dummy,__sr; \
__asm__ __volatile__( \
- "stc $sr, %1\n\t" \
+ "stc sr, %1\n\t" \
"mov %1, %0\n\t" \
"or #0xf0, %0\n\t" \
- "ldc %0, $sr" \
+ "ldc %0, sr" \
: "=&z" (__dummy), "=&r" (__sr) \
: /* no inputs */ \
: "memory"); (__sr & 0x000000f0); }))
diff --git a/include/asm-sparc/mostek.h b/include/asm-sparc/mostek.h
index c9a10cd86..be60a9afa 100644
--- a/include/asm-sparc/mostek.h
+++ b/include/asm-sparc/mostek.h
@@ -1,4 +1,4 @@
-/* $Id: mostek.h,v 1.12 1999/08/31 18:51:41 davem Exp $
+/* $Id: mostek.h,v 1.13 2001/01/11 15:07:09 davem Exp $
* mostek.h: Describes the various Mostek time of day clock registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -64,6 +64,7 @@ struct mostek48t02 {
volatile unsigned char year; /* Year (0-99) */
};
+extern spinlock_t mostek_lock;
extern unsigned long mstk48t02_regs;
/* Control register values. */
diff --git a/include/asm-sparc64/mostek.h b/include/asm-sparc64/mostek.h
index e153a36cf..b000c1586 100644
--- a/include/asm-sparc64/mostek.h
+++ b/include/asm-sparc64/mostek.h
@@ -1,4 +1,4 @@
-/* $Id: mostek.h,v 1.3 1999/08/30 10:14:50 davem Exp $
+/* $Id: mostek.h,v 1.4 2001/01/11 15:07:09 davem Exp $
* mostek.h: Describes the various Mostek time of day clock registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -66,6 +66,7 @@ static __inline__ void mostek_write(unsigned long addr, u8 val)
#define MOSTEK_MONTH 0x07feUL
#define MOSTEK_YEAR 0x07ffUL
+extern spinlock_t mostek_lock;
extern unsigned long mstk48t02_regs;
/* Control register values. */
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index c17e9bf23..082626725 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -1,4 +1,4 @@
-/* $Id: pbm.h,v 1.22 2000/03/25 05:18:30 davem Exp $
+/* $Id: pbm.h,v 1.23 2001/01/11 16:26:45 davem Exp $
* pbm.h: UltraSparc PCI controller software state.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
@@ -18,11 +18,12 @@
/* The abstraction used here is that there are PCI controllers,
* each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
- * underneath. Each PCI controller has a single IOMMU shared
- * by the PCI bus modules underneath, and if a streaming buffer
+ * underneath. Each PCI bus module uses an IOMMU (shared by both
+ * PBMs of a controller, or per-PBM), and if a streaming buffer
* is present, each PCI bus module has it's own. (ie. the IOMMU
- * is shared between PBMs, the STC is not) Furthermore, each
- * PCI bus module controls it's own autonomous PCI bus.
+ * might be shared between PBMs, the STC is never shared)
+ * Furthermore, each PCI bus module controls it's own autonomous
+ * PCI bus.
*/
#define PBM_LOGCLUSTERS 3
@@ -150,6 +151,9 @@ struct pci_pbm_info {
/* This PBM's streaming buffer. */
struct pci_strbuf stc;
+ /* IOMMU state, potentially shared by both PBM segments. */
+ struct pci_iommu *iommu;
+
/* Now things for the actual PCI bus probes. */
unsigned int pci_first_busno;
unsigned int pci_last_busno;
@@ -189,9 +193,6 @@ struct pci_controller_info {
unsigned int pci_first_busno;
unsigned int pci_last_busno;
- /* IOMMU state shared by both PBM segments. */
- struct pci_iommu iommu;
-
void *starfire_cookie;
};
diff --git a/include/asm-sparc64/watchdog.h b/include/asm-sparc64/watchdog.h
new file mode 100644
index 000000000..4d5b03b60
--- /dev/null
+++ b/include/asm-sparc64/watchdog.h
@@ -0,0 +1,31 @@
+/* $Id: watchdog.h,v 1.1 2001/01/18 04:47:44 davem Exp $
+ *
+ * watchdog - Driver interface for the hardware watchdog timers
+ * present on Sun Microsystems boardsets
+ *
+ * Copyright (c) 2000 Eric Brower <ebrower@usa.net>
+ *
+ */
+
+#ifndef _SPARC64_WATCHDOG_H
+#define _SPARC64_WATCHDOG_H
+
+#include <linux/watchdog.h>
+
+/* Solaris compatibility ioctls--
+ * Ref. <linux/watchdog.h> for standard linux watchdog ioctls
+ */
+#define WIOCSTART _IO (WATCHDOG_IOCTL_BASE, 10) /* Start Timer */
+#define WIOCSTOP _IO (WATCHDOG_IOCTL_BASE, 11) /* Stop Timer */
+#define WIOCGSTAT _IOR(WATCHDOG_IOCTL_BASE, 12, int)/* Get Timer Status */
+
+/* Status flags from WIOCGSTAT ioctl
+ */
+#define WD_FREERUN 0x01 /* timer is running, interrupts disabled */
+#define WD_EXPIRED 0x02 /* timer has expired */
+#define WD_RUNNING 0x04 /* timer is running, interrupts enabled */
+#define WD_STOPPED 0x08 /* timer has not been started */
+#define WD_SERVICED 0x10 /* timer interrupt was serviced */
+
+#endif /* ifndef _SPARC64_WATCHDOG_H */
+
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ff1dcaf45..4fce3f847 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,21 +28,6 @@
#include <linux/wait.h>
#endif /* __KERNEL__ */
-u64 acpi_get_rsdp_ptr(void);
-
-/*
- * System sleep states
- */
-enum
-{
- ACPI_S0, /* working */
- ACPI_S1, /* sleep */
- ACPI_S2, /* sleep */
- ACPI_S3, /* sleep */
- ACPI_S4, /* non-volatile sleep */
- ACPI_S5, /* soft-off */
-};
-
typedef int acpi_sstate_t;
/*
diff --git a/include/linux/blk.h b/include/linux/blk.h
index 11db342a3..3f9c707c7 100644
--- a/include/linux/blk.h
+++ b/include/linux/blk.h
@@ -87,10 +87,6 @@ void initrd_init(void);
static inline void blkdev_dequeue_request(struct request * req)
{
- if (req->e) {
- req->e->dequeue_fn(req);
- req->e = NULL;
- }
list_del(&req->queue);
}
@@ -322,7 +318,7 @@ static void floppy_off(unsigned int nr);
#define DEVICE_NAME "ida"
#define TIMEOUT_VALUE (25*HZ)
-#define DEVICE_REQUEST do_ida_request0
+#define DEVICE_REQUEST do_ida_request
#define DEVICE_NR(device) (MINOR(device) >> 4)
#endif /* MAJOR_NR == whatever */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 851bf3c53..01cd38a83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -23,8 +23,6 @@ struct request {
int elevator_sequence;
struct list_head table;
- struct list_head *free_list;
-
volatile int rq_status; /* should split this into a few status bits */
#define RQ_INACTIVE (-1)
#define RQ_ACTIVE 1
@@ -47,7 +45,6 @@ struct request {
struct buffer_head * bh;
struct buffer_head * bhtail;
request_queue_t *q;
- elevator_t *e;
};
#include <linux/elevator.h>
@@ -67,9 +64,10 @@ typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);
/*
- * Default nr free requests per queue
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
*/
-#define QUEUE_NR_REQUESTS 256
+#define QUEUE_NR_REQUESTS 8192
struct request_queue
{
@@ -77,6 +75,8 @@ struct request_queue
* the queue request freelist, one for reads and one for writes
*/
struct list_head request_freelist[2];
+ struct list_head pending_freelist[2];
+ int pending_free[2];
/*
* Together with queue_head for cacheline sharing
@@ -116,7 +116,7 @@ struct request_queue
* Is meant to protect the queue in the future instead of
* io_request_lock
*/
- spinlock_t request_lock;
+ spinlock_t queue_lock;
/*
* Tasks wait here for free request
@@ -152,6 +152,7 @@ extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, lon
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t *blk_get_queue(kdev_t dev);
+extern inline request_queue_t *__blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);
/*
@@ -162,6 +163,7 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void generic_unplug_device(void *);
extern int * blk_size[MAX_BLKDEV];
@@ -175,9 +177,10 @@ extern int * max_sectors[MAX_BLKDEV];
extern int * max_segments[MAX_BLKDEV];
-#define MAX_SECTORS 254
+extern atomic_t queued_sectors;
-#define MAX_SEGMENTS MAX_SECTORS
+#define MAX_SEGMENTS 128
+#define MAX_SECTORS (MAX_SEGMENTS*8)
#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
@@ -203,5 +206,14 @@ static inline int get_hardsect_size(kdev_t dev)
return 512;
}
+#define blk_finished_io(nsects) \
+ atomic_sub(nsects, &queued_sectors); \
+ if (atomic_read(&queued_sectors) < 0) { \
+ printk("block: queued_sectors < 0\n"); \
+ atomic_set(&queued_sectors, 0); \
+ }
+
+#define blk_started_io(nsects) \
+ atomic_add(nsects, &queued_sectors);
#endif
diff --git a/include/linux/dn.h b/include/linux/dn.h
index c7448158b..782cae49e 100644
--- a/include/linux/dn.h
+++ b/include/linux/dn.h
@@ -45,7 +45,12 @@
#define DSO_LINKINFO 7 /* Set/Get link information */
#define DSO_STREAM 8 /* Set socket type to stream */
#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */
-#define DSO_MAX 10 /* Maximum option number */
+#define DSO_MAXWINDOW 11 /* Maximum window size allowed */
+#define DSO_NODELAY 12 /* Turn off nagle */
+#define DSO_CORK 13 /* Wait for more data! */
+#define DSO_SERVICES 14 /* NSP Services field */
+#define DSO_INFO 15 /* NSP Info field */
+#define DSO_MAX 15 /* Maximum option number */
/* LINK States */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 16fa9cae6..9e9b27dfa 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -7,34 +7,32 @@ typedef void (elevator_fn) (struct request *, elevator_t *,
struct list_head *,
struct list_head *, int);
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
- struct buffer_head *, int, int *, int *);
+typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
+ struct buffer_head *, int, int, int);
-typedef void (elevator_dequeue_fn) (struct request *);
+typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
+
+typedef void (elevator_merge_req_fn) (struct request *, struct request *);
struct elevator_s
{
- int sequence;
-
int read_latency;
int write_latency;
- int max_bomb_segments;
- unsigned int nr_segments;
- int read_pendings;
-
- elevator_fn * elevator_fn;
elevator_merge_fn *elevator_merge_fn;
- elevator_dequeue_fn *dequeue_fn;
+ elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
+ elevator_merge_req_fn *elevator_merge_req_fn;
unsigned int queue_ID;
};
-void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
-int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
-void elevator_noop_dequeue(struct request *);
-void elevator_linus(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
-int elevator_linus_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
+int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
+void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
+void elevator_noop_merge_req(struct request *, struct request *);
+
+int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
+void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
+void elevator_linus_merge_req(struct request *, struct request *);
typedef struct blkelv_ioctl_arg_s {
int queue_ID;
@@ -69,6 +67,10 @@ extern void elevator_init(elevator_t *, elevator_t);
(s1)->sector < (s2)->sector)) || \
(s1)->rq_dev < (s2)->rq_dev)
+#define BHRQ_IN_ORDER(bh, rq) \
+ (((bh)->b_rdev == (rq)->rq_dev && \
+ (bh)->b_rsector < (rq)->sector))
+
static inline int elevator_request_latency(elevator_t * elevator, int rw)
{
int latency;
@@ -80,36 +82,24 @@ static inline int elevator_request_latency(elevator_t * elevator, int rw)
return latency;
}
-#define ELEVATOR_NOOP \
-((elevator_t) { \
- 0, /* sequence */ \
- \
- 0, /* read_latency */ \
- 0, /* write_latency */ \
- 0, /* max_bomb_segments */ \
- \
- 0, /* nr_segments */ \
- 0, /* read_pendings */ \
- \
- elevator_noop, /* elevator_fn */ \
- elevator_noop_merge, /* elevator_merge_fn */ \
- elevator_noop_dequeue, /* dequeue_fn */ \
+#define ELEVATOR_NOOP \
+((elevator_t) { \
+ 0, /* read_latency */ \
+ 0, /* write_latency */ \
+ \
+ elevator_noop_merge, /* elevator_merge_fn */ \
+ elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \
+ elevator_noop_merge_req, /* elevator_merge_req_fn */ \
})
-#define ELEVATOR_LINUS \
-((elevator_t) { \
- 0, /* not used */ \
- \
- 1000000, /* read passovers */ \
- 2000000, /* write passovers */ \
- 0, /* max_bomb_segments */ \
- \
- 0, /* not used */ \
- 0, /* not used */ \
- \
- elevator_linus, /* elevator_fn */ \
- elevator_linus_merge, /* elevator_merge_fn */ \
- elevator_noop_dequeue, /* dequeue_fn */ \
+#define ELEVATOR_LINUS \
+((elevator_t) { \
+ 8192, /* read passovers */ \
+ 16384, /* write passovers */ \
+ \
+ elevator_linus_merge, /* elevator_merge_fn */ \
+ elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \
+ elevator_linus_merge_req, /* elevator_merge_req_fn */ \
})
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8032db992..686aef31a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -288,6 +288,7 @@ extern void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long
#include <linux/hfs_fs_i.h>
#include <linux/adfs_fs_i.h>
#include <linux/qnx4_fs_i.h>
+#include <linux/reiserfs_fs_i.h>
#include <linux/bfs_fs_i.h>
#include <linux/udf_fs_i.h>
#include <linux/ncp_fs_i.h>
@@ -450,6 +451,7 @@ struct inode {
struct hfs_inode_info hfs_i;
struct adfs_inode_info adfs_i;
struct qnx4_inode_info qnx4_i;
+ struct reiserfs_inode_info reiserfs_i;
struct bfs_inode_info bfs_i;
struct udf_inode_info udf_i;
struct ncp_inode_info ncpfs_i;
@@ -460,35 +462,6 @@ struct inode {
} u;
};
-/* Inode state bits.. */
-#define I_DIRTY_SYNC 1 /* Not dirty enough for O_DATASYNC */
-#define I_DIRTY_DATASYNC 2 /* Data-related inode changes pending */
-#define I_DIRTY_PAGES 4 /* Data-related inode changes pending */
-#define I_LOCK 8
-#define I_FREEING 16
-#define I_CLEAR 32
-
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
-
-extern void __mark_inode_dirty(struct inode *, int);
-static inline void mark_inode_dirty(struct inode *inode)
-{
- if ((inode->i_state & I_DIRTY) != I_DIRTY)
- __mark_inode_dirty(inode, I_DIRTY);
-}
-
-static inline void mark_inode_dirty_sync(struct inode *inode)
-{
- if (!(inode->i_state & I_DIRTY_SYNC))
- __mark_inode_dirty(inode, I_DIRTY_SYNC);
-}
-
-static inline void mark_inode_dirty_pages(struct inode *inode)
-{
- if (inode && !(inode->i_state & I_DIRTY_PAGES))
- __mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
struct fown_struct {
int pid; /* pid or -pgrp where SIGIO should be sent */
uid_t uid, euid; /* uid/euid of process setting the owner */
@@ -654,6 +627,7 @@ struct quota_mount_options
#include <linux/hfs_fs_sb.h>
#include <linux/adfs_fs_sb.h>
#include <linux/qnx4_fs_sb.h>
+#include <linux/reiserfs_fs_sb.h>
#include <linux/bfs_fs_sb.h>
#include <linux/udf_fs_sb.h>
#include <linux/ncp_fs_sb.h>
@@ -702,6 +676,7 @@ struct super_block {
struct hfs_sb_info hfs_sb;
struct adfs_sb_info adfs_sb;
struct qnx4_sb_info qnx4_sb;
+ struct reiserfs_sb_info reiserfs_sb;
struct bfs_sb_info bfs_sb;
struct udf_sb_info udf_sb;
struct ncp_sb_info ncpfs_sb;
@@ -815,17 +790,54 @@ struct inode_operations {
*/
struct super_operations {
void (*read_inode) (struct inode *);
+
+ /* reiserfs kludge. reiserfs needs 64 bits of information to
+ ** find an inode. We are using the read_inode2 call to get
+ ** that information. We don't like this, and are waiting on some
+ ** VFS changes for the real solution.
+ ** iget4 calls read_inode2, iff it is defined
+ */
+ void (*read_inode2) (struct inode *, void *) ;
+ void (*dirty_inode) (struct inode *);
void (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
void (*write_super) (struct super_block *);
+ void (*write_super_lockfs) (struct super_block *);
+ void (*unlockfs) (struct super_block *);
int (*statfs) (struct super_block *, struct statfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
};
+/* Inode state bits.. */
+#define I_DIRTY_SYNC 1 /* Not dirty enough for O_DATASYNC */
+#define I_DIRTY_DATASYNC 2 /* Data-related inode changes pending */
+#define I_DIRTY_PAGES 4 /* Data-related inode changes pending */
+#define I_LOCK 8
+#define I_FREEING 16
+#define I_CLEAR 32
+
+#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+
+extern void __mark_inode_dirty(struct inode *, int);
+static inline void mark_inode_dirty(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_DIRTY);
+}
+
+static inline void mark_inode_dirty_sync(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_DIRTY_SYNC);
+}
+
+static inline void mark_inode_dirty_pages(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_DIRTY_PAGES);
+}
+
struct dquot_operations {
void (*initialize) (struct inode *, short);
void (*drop) (struct inode *);
@@ -988,6 +1000,9 @@ extern int fs_may_remount_ro(struct super_block *);
extern int try_to_free_buffers(struct page *, int);
extern void refile_buffer(struct buffer_head * buf);
+/* reiserfs_writepage needs this */
+extern void set_buffer_async_io(struct buffer_head *bh) ;
+
#define BUF_CLEAN 0
#define BUF_LOCKED 1 /* Buffers scheduled for write */
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
diff --git a/include/linux/lvm.h b/include/linux/lvm.h
index 7b4ff94f9..4e95eb73d 100644
--- a/include/linux/lvm.h
+++ b/include/linux/lvm.h
@@ -9,6 +9,7 @@
* May-July 1998
* January-March,July,September,October,Dezember 1999
* January,February,July,November 2000
+ * January 2001
*
* lvm is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,6 +58,8 @@
* 26/06/2000 - implemented snapshot persistency and resizing support
* 02/11/2000 - added hash table size member to lv structure
* 12/11/2000 - removed unneeded timestamp definitions
+ * 24/12/2000 - removed LVM_TO_{CORE,DISK}*, use cpu_{from, to}_le*
+ * instead - Christoph Hellwig
*
*/
@@ -64,11 +67,10 @@
#ifndef _LVM_H_INCLUDE
#define _LVM_H_INCLUDE
-#define _LVM_KERNEL_H_VERSION "LVM 0.9 (13/11/2000)"
+#define _LVM_KERNEL_H_VERSION "LVM 0.9.1_beta2 (18/01/2001)"
#include <linux/config.h>
#include <linux/version.h>
-#include <endian.h>
/*
* preprocessor definitions
@@ -77,8 +79,7 @@
#define LVM_TOTAL_RESET
#ifdef __KERNEL__
-#define LVM_GET_INODE
-#undef LVM_HD_NAME /* display nice names in /proc/partitions */
+#undef LVM_HD_NAME /* display nice names in /proc/partitions */
/* lots of debugging output (see driver source)
#define DEBUG_LVM_GET_INFO
@@ -109,6 +110,7 @@
#ifdef __KERNEL__
#include <linux/spinlock.h>
+
#include <asm/semaphore.h>
#endif /* #ifdef __KERNEL__ */
@@ -216,15 +218,13 @@
*
* 1K volume group structure ~200 byte
*
- * 5K time stamp structure ~
- *
* 6K namelist of physical volumes 128 byte each
*
- * 6k + n * 128byte n logical volume structures ~300 byte each
+ * 6k + n * ~300byte n logical volume structures ~300 byte each
*
- * + m * 328byte m physical extent alloc. structs 4 byte each
+ * + m * 4byte m physical extent alloc. structs 4 byte each
*
- * End of disk - first physical extent typical 4 megabyte
+ * End of disk - first physical extent typically 4 megabyte
* PE total *
* PE size
*
@@ -292,7 +292,7 @@
#define LVM_MAX_PE_SIZE ( 16L * 1024L * 1024L / SECTOR_SIZE * 1024) /* 16GB in sectors */
#define LVM_DEFAULT_PE_SIZE ( 4096L * 1024 / SECTOR_SIZE) /* 4 MB in sectors */
#define LVM_DEFAULT_STRIPE_SIZE 16L /* 16 KB */
-#define LVM_MIN_STRIPE_SIZE ( PAGE_SIZE>>9) /* PAGESIZE in sectors */
+#define LVM_MIN_STRIPE_SIZE ( PAGE_SIZE/SECTOR_SIZE) /* PAGESIZE in sectors */
#define LVM_MAX_STRIPE_SIZE ( 512L * 1024 / SECTOR_SIZE) /* 512 KB in sectors */
#define LVM_MAX_STRIPES 128 /* max # of stripes */
#define LVM_MAX_SIZE ( 1024LU * 1024 / SECTOR_SIZE * 1024 * 1024) /* 1TB[sectors] */
@@ -326,51 +326,6 @@
COW_table_entries_per_PE - COW_table_chunks_per_PE;})
-/* to disk and to core data conversion macros */
-#if __BYTE_ORDER == __BIG_ENDIAN
-
-#define LVM_TO_CORE16(x) ( \
- ((uint16_t)((((uint16_t)(x) & 0x00FFU) << 8) | \
- (((uint16_t)(x) & 0xFF00U) >> 8))))
-
-#define LVM_TO_DISK16(x) LVM_TO_CORE16(x)
-
-#define LVM_TO_CORE32(x) ( \
- ((uint32_t)((((uint32_t)(x) & 0x000000FFU) << 24) | \
- (((uint32_t)(x) & 0x0000FF00U) << 8))) \
- (((uint32_t)(x) & 0x00FF0000U) >> 8))) \
- (((uint32_t)(x) & 0xFF000000U) >> 24))))
-
-#define LVM_TO_DISK32(x) LVM_TO_CORE32(x)
-
-#define LVM_TO_CORE64(x) \
- ((uint64_t)((((uint64_t)(x) & 0x00000000000000FFULL) << 56) | \
- (((uint64_t)(x) & 0x000000000000FF00ULL) << 40) | \
- (((uint64_t)(x) & 0x0000000000FF0000ULL) << 24) | \
- (((uint64_t)(x) & 0x00000000FF000000ULL) << 8) | \
- (((uint64_t)(x) & 0x000000FF00000000ULL) >> 8) | \
- (((uint64_t)(x) & 0x0000FF0000000000ULL) >> 24) | \
- (((uint64_t)(x) & 0x00FF000000000000ULL) >> 40) | \
- (((uint64_t)(x) & 0xFF00000000000000ULL) >> 56)))
-
-#define LVM_TO_DISK64(x) LVM_TO_CORE64(x)
-
-#elif __BYTE_ORDER == __LITTLE_ENDIAN
-
-#define LVM_TO_CORE16(x) x
-#define LVM_TO_DISK16(x) x
-#define LVM_TO_CORE32(x) x
-#define LVM_TO_DISK32(x) x
-#define LVM_TO_CORE64(x) x
-#define LVM_TO_DISK64(x) x
-
-#else
-
-#error "__BYTE_ORDER must be defined as __LITTLE_ENDIAN or __BIG_ENDIAN"
-
-#endif /* #if __BYTE_ORDER == __BIG_ENDIAN */
-
-
/*
* ioctls
*/
@@ -687,6 +642,8 @@ typedef struct lv_v4 {
wait_queue_head_t lv_snapshot_wait;
int lv_snapshot_use_rate;
void *vg;
+
+ uint lv_allocated_snapshot_le;
#else
char dummy[200];
#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 245201241..713b01316 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -386,6 +386,7 @@ extern void clear_page_tables(struct mm_struct *, unsigned long, int);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share);
struct file *shmem_file_setup(char * name, loff_t size);
+extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct vm_area_struct *);
extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
@@ -464,6 +465,7 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
#else
#define __GFP_HIGHMEM 0x0 /* noop */
#endif
+#define __GFP_VM 0x20
#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 1ee4dd616..4c77c2081 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -74,7 +74,7 @@ struct nfs_readargs {
struct nfs_readres {
struct nfs_fattr * fattr;
- unsigned int count;
+ __u32 count;
int eof;
};
@@ -84,7 +84,7 @@ struct nfs_readres {
#define NFS_WRITE_MAXIOV 8
struct nfs_writeargs {
struct nfs_fh * fh;
- __u32 offset;
+ __u64 offset;
__u32 count;
enum nfs3_stable_how stable;
unsigned int nriov;
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index c96b0e404..22a154380 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -22,6 +22,7 @@
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
+#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
/* configuration */
#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
new file mode 100644
index 000000000..277281d6d
--- /dev/null
+++ b/include/linux/reiserfs_fs.h
@@ -0,0 +1,2074 @@
+/*
+ * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details
+ */
+
+ /* this file has an amazingly stupid
+ name, yura please fix it to be
+ reiserfs.h, and merge all the rest
+ of our .h files that are in this
+ directory into it. */
+
+
+#ifndef _LINUX_REISER_FS_H
+#define _LINUX_REISER_FS_H
+
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/malloc.h>
+#include <linux/tqueue.h>
+#endif
+
+/*
+ * include/linux/reiser_fs.h
+ *
+ * Reiser File System constants and structures
+ *
+ */
+
+/* in reading the #defines, it may help to understand that they employ
+ the following abbreviations:
+
+ B = Buffer
+ I = Item header
+ H = Height within the tree (should be changed to LEV)
+ N = Number of the item in the node
+ STAT = stat data
+ DEH = Directory Entry Header
+ EC = Entry Count
+ E = Entry number
+ UL = Unsigned Long
+ BLKH = BLocK Header
+ UNFM = UNForMatted node
+ DC = Disk Child
+ P = Path
+
+ These #defines are named by concatenating these abbreviations,
+ where first comes the arguments, and last comes the return value,
+ of the macro.
+
+*/
+
+ /* Vladimir, what is the story with
+ new_get_new_buffer nowadays? I
+ want a complete explanation written
+ here. */
+
+/* NEW_GET_NEW_BUFFER will try to allocate new blocks better */
+/*#define NEW_GET_NEW_BUFFER*/
+#define OLD_GET_NEW_BUFFER
+
+ /* Vladimir, what about this one too? */
+/* if this is undefined, all inode changes get into stat data immediately, if it can be found in RAM */
+#define DIRTY_LATER
+
+/* enable journalling */
+#define ENABLE_JOURNAL
+
+#ifdef __KERNEL__
+
+/* #define REISERFS_CHECK */
+
+#define REISERFS_PREALLOCATE
+#endif
+#define PREALLOCATION_SIZE 8
+
+/* if this is undefined, all inode changes get into stat data
+ immediately, if it can be found in RAM */
+#define DIRTY_LATER
+
+
+/*#define READ_LOCK_REISERFS*/
+
+
+/* n must be power of 2 */
+#define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
+
+// to be ok for alpha and others we have to align structures to 8 byte
+// boundary.
+// FIXME: do not change 4 by anything else: there is code which relies on that
+ /* what 4? -Hans */
+#define ROUND_UP(x) _ROUND_UP(x,8LL)
+
+/* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
+** messages.
+*/
+#define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */
+
+/*
+ * Disk Data Structures
+ */
+
+/***************************************************************************/
+/* SUPER BLOCK */
+/***************************************************************************/
+
+/*
+ * Structure of super block on disk, a version of which in RAM is often accessed as s->u.reiserfs_sb.s_rs
+ * the version in RAM is part of a larger structure containing fields never written to disk.
+ */
+
+ /* used by gcc */
+#define REISERFS_SUPER_MAGIC 0x52654973
+ /* used by file system utilities that
+ look at the superblock, etc. */
+#define REISERFS_SUPER_MAGIC_STRING "ReIsErFs"
+#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
+
+extern inline int is_reiserfs_magic_string (struct reiserfs_super_block * rs)
+{
+ return (!strncmp (rs->s_magic, REISERFS_SUPER_MAGIC_STRING,
+ strlen ( REISERFS_SUPER_MAGIC_STRING)) ||
+ !strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
+ strlen ( REISER2FS_SUPER_MAGIC_STRING)));
+}
+
+ /* ReiserFS leaves the first 64k unused,
+ so that partition labels have enough
+ space. If someone wants to write a
+ fancy bootloader that needs more than
+ 64k, let us know, and this will be
+ increased in size. This number must
+ be larger than than the largest block
+ size on any platform, or code will
+ break. -Hans */
+#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
+#define REISERFS_FIRST_BLOCK unused_define
+
+/* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */
+#define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
+
+
+// reiserfs internal error code (used by search_by_key adn fix_nodes))
+#define CARRY_ON 0
+#define REPEAT_SEARCH -1
+#define IO_ERROR -2
+#define NO_DISK_SPACE -3
+#define NO_BALANCING_NEEDED (-4)
+#define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5)
+
+//#define SCHEDULE_OCCURRED 1
+//#define PATH_INCORRECT 2
+
+//#define NO_DISK_SPACE (-1)
+
+
+
+typedef unsigned long b_blocknr_t;
+typedef __u32 unp_t;
+
+ /* who is responsible for this
+ completely uncommented struct? */
+struct unfm_nodeinfo {
+ /* This is what? */
+ unp_t unfm_nodenum;
+ /* now this I know what it is, and
+ most of the people on our project
+ know what it is, but I bet nobody
+ new I hire will have a clue. */
+ unsigned short unfm_freespace;
+};
+
+
+/* when reiserfs_file_write is called with a byte count >= MIN_PACK_ON_CLOSE,
+** it sets the inode to pack on close, and when extending the file, will only
+** use unformatted nodes.
+**
+** This is a big speed up for the journal, which is badly hurt by direct->indirect
+** conversions (they must be logged).
+*/
+#define MIN_PACK_ON_CLOSE 512
+
+/* the defines below say, that if file size is >=
+ DIRECT_TAIL_SUPPRESSION_SIZE * blocksize, then if tail is longer
+ than MAX_BYTES_SUPPRESS_DIRECT_TAIL, it will be stored in
+ unformatted node */
+#define DIRECT_TAIL_SUPPRESSION_SIZE 1024
+#define MAX_BYTES_SUPPRESS_DIRECT_TAIL 1024
+
+#if 0
+
+//
+#define mark_file_with_tail(inode,offset) \
+{\
+inode->u.reiserfs_i.i_has_tail = 1;\
+}
+
+#define mark_file_without_tail(inode) \
+{\
+inode->u.reiserfs_i.i_has_tail = 0;\
+}
+
+#endif
+
+// this says about version of all items (but stat data) the object
+// consists of
+#define inode_items_version(inode) ((inode)->u.reiserfs_i.i_version)
+
+
+/* We store tail in unformatted node if it is too big to fit into a
+ formatted node or if DIRECT_TAIL_SUPPRESSION_SIZE,
+ MAX_BYTES_SUPPRESS_DIRECT_TAIL and file size say that. */
+/* #define STORE_TAIL_IN_UNFM(n_file_size,n_tail_size,n_block_size) \ */
+/* ( ((n_tail_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) || \ */
+/* ( ( (n_file_size) >= (n_block_size) * DIRECT_TAIL_SUPPRESSION_SIZE ) && \ */
+/* ( (n_tail_size) >= MAX_BYTES_SUPPRESS_DIRECT_TAIL ) ) ) */
+
+ /* This is an aggressive tail suppression policy, I am hoping it
+ improves our benchmarks. The principle behind it is that
+ percentage space saving is what matters, not absolute space
+ saving. This is non-intuitive, but it helps to understand it if
+ you consider that the cost to access 4 blocks is not much more
+ than the cost to access 1 block, if you have to do a seek and
+ rotate. A tail risks a non-linear disk access that is
+ significant as a percentage of total time cost for a 4 block file
+ and saves an amount of space that is less significant as a
+ percentage of space, or so goes the hypothesis. -Hans */
+#define STORE_TAIL_IN_UNFM(n_file_size,n_tail_size,n_block_size) \
+(\
+ (!(n_tail_size)) || \
+ (((n_tail_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) || \
+ ( (n_file_size) >= (n_block_size) * 4 ) || \
+ ( ( (n_file_size) >= (n_block_size) * 3 ) && \
+ ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/4) ) || \
+ ( ( (n_file_size) >= (n_block_size) * 2 ) && \
+ ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/2) ) || \
+ ( ( (n_file_size) >= (n_block_size) ) && \
+ ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
+)
+
+
+/*
+ * values for s_state field
+ */
+#define REISERFS_VALID_FS 1
+#define REISERFS_ERROR_FS 2
+
+
+
+/***************************************************************************/
+/* KEY & ITEM HEAD */
+/***************************************************************************/
+
+//
+// we do support for old format of reiserfs: the problem is to
+// distinuquish keys with 32 bit offset and keys with 60 bit ones. On
+// leaf level we use ih_version of struct item_head (was
+// ih_reserved). For all old items it is set to 0
+// (ITEM_VERSION_1). For new items it is ITEM_VERSION_2. On internal
+// levels we have to know version of item key belongs to.
+//
+#define ITEM_VERSION_1 0
+#define ITEM_VERSION_2 1
+
+
+/* loff_t - long long */
+
+
+//
+// directories use this key as well as old files
+//
+struct offset_v1 {
+ __u32 k_offset;
+ __u32 k_uniqueness;
+} __attribute__ ((__packed__));
+
+struct offset_v2 {
+ __u64 k_offset:60;
+ __u64 k_type: 4;
+} __attribute__ ((__packed__));
+
+
+
+/* Key of an item determines its location in the S+tree, and
+ is composed of 4 components */
+struct key {
+ __u32 k_dir_id; /* packing locality: by default parent
+ directory object id */
+ __u32 k_objectid; /* object identifier */
+ union {
+ struct offset_v1 k_offset_v1;
+ struct offset_v2 k_offset_v2;
+ } __attribute__ ((__packed__)) u;
+} __attribute__ ((__packed__));
+
+
+struct cpu_key {
+ struct key on_disk_key;
+ int version;
+ int key_length; /* 3 in all cases but direct2indirect and
+ indirect2direct conversion */
+};
+
+
+
+
+
+
+
+ /* Our function for comparing keys can compare keys of different
+ lengths. It takes as a parameter the length of the keys it is to
+ compare. These defines are used in determining what is to be
+ passed to it as that parameter. */
+#define REISERFS_FULL_KEY_LEN 4
+
+#define REISERFS_SHORT_KEY_LEN 2
+
+/* The result of the key compare */
+#define FIRST_GREATER 1
+#define SECOND_GREATER -1
+#define KEYS_IDENTICAL 0
+#define KEY_FOUND 1
+#define KEY_NOT_FOUND 0
+
+
+#define KEY_SIZE (sizeof(struct key))
+#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
+
+/* return values for search_by_key and clones */
+#define ITEM_FOUND 1
+#define ITEM_NOT_FOUND 0
+#define ENTRY_FOUND 1
+#define ENTRY_NOT_FOUND 0
+#define DIRECTORY_NOT_FOUND -1
+#define REGULAR_FILE_FOUND -2
+#define DIRECTORY_FOUND -3
+#define BYTE_FOUND 1
+#define BYTE_NOT_FOUND 0
+#define FILE_NOT_FOUND -1
+
+#define POSITION_FOUND 1
+#define POSITION_NOT_FOUND 0
+
+// return values for reiserfs_find_entry and search_by_entry_key
+#define NAME_FOUND 1
+#define NAME_NOT_FOUND 0
+#define GOTO_PREVIOUS_ITEM 2
+#define NAME_FOUND_INVISIBLE 3
+
+
+
+/* Everything in the filesystem is stored as a set of items. The
+ item head contains the key of the item, its free space (for
+ indirect items) and specifies the location of the item itself
+ within the block. */
+
+/* On-disk item header; all multi-byte fields are little-endian and
+   should be read/written through the ih_*()/put_ih_*() macros below. */
+struct item_head
+{
+	struct key ih_key; 	/* Everything in the tree is found by searching for it based on its key.*/
+
+	/* This is bloat, this should be part
+	   of the item not the item
+	   header. -Hans */
+	union {
+		__u16 ih_free_space_reserved; /* The free space in the last unformatted node of an indirect item if this
+						 is an indirect item. This equals 0xFFFF iff this is a direct item or
+						 stat data item. Note that the key, not this field, is used to determine
+						 the item type, and thus which field this union contains. */
+		__u16 ih_entry_count; /* Iff this is a directory item, this field equals the number of directory
+					 entries in the directory item. */
+	} __attribute__ ((__packed__)) u;
+	__u16 ih_item_len;           /* total size of the item body */
+	__u16 ih_item_location;      /* an offset to the item body within the block */
+	/* I thought we were going to use this
+	   for having lots of item types? Why
+	   don't you use this for item type
+	   not item version.  That is how you
+	   talked me into this field a year
+	   ago, remember?  I am still not
+	   convinced it needs to be 16 bits
+	   (for at least many years), but at
+	   least I can sympathize with that
+	   hope. Change the name from version
+	   to type, and tell people not to use
+	   FFFF in case 16 bits is someday too
+	   small and needs to be extended:-). */
+	__u16 ih_version;	     /* 0 for all old items, 2 for new
+					ones. Highest bit is set by fsck
+					temporary, cleaned after all done */
+} __attribute__ ((__packed__));
+/* size of item header */
+#define IH_SIZE (sizeof(struct item_head))
+
+#define ih_free_space(ih) le16_to_cpu((ih)->u.ih_free_space_reserved)
+#define ih_version(ih) le16_to_cpu((ih)->ih_version)
+#define ih_entry_count(ih) le16_to_cpu((ih)->u.ih_entry_count)
+#define ih_location(ih) le16_to_cpu((ih)->ih_item_location)
+#define ih_item_len(ih) le16_to_cpu((ih)->ih_item_len)
+
+#define put_ih_free_space(ih, val) do { (ih)->u.ih_free_space_reserved = cpu_to_le16(val); } while(0)
+#define put_ih_version(ih, val) do { (ih)->ih_version = cpu_to_le16(val); } while (0)
+#define put_ih_entry_count(ih, val) do { (ih)->u.ih_entry_count = cpu_to_le16(val); } while (0)
+#define put_ih_location(ih, val) do { (ih)->ih_item_location = cpu_to_le16(val); } while (0)
+#define put_ih_item_len(ih, val) do { (ih)->ih_item_len = cpu_to_le16(val); } while (0)
+
+
+/* fsck marks an item unreachable by setting the top bit of ih_version.
+   ih_version is little-endian on disk, so go through the ih_version()
+   accessor instead of reading the raw field (the old form only worked
+   on little-endian hosts, as its own FIXME noted). */
+#define unreachable_item(ih) (ih_version (ih) & (1 << 15))
+
+#define get_ih_free_space(ih) (ih_version (ih) == ITEM_VERSION_2 ? 0 : ih_free_space (ih))
+#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == ITEM_VERSION_2) ? 0 : (val)))
+
+
+//
+// there are 5 item types currently
+//
+#define TYPE_STAT_DATA 0
+#define TYPE_INDIRECT 1
+#define TYPE_DIRECT 2
+#define TYPE_DIRENTRY 3
+#define TYPE_ANY 15 // FIXME: comment is required
+
+//
+// in old version uniqueness field shows key type
+//
+#define V1_SD_UNIQUENESS 0
+#define V1_INDIRECT_UNIQUENESS 0xfffffffe
+#define V1_DIRECT_UNIQUENESS 0xffffffff
+#define V1_DIRENTRY_UNIQUENESS 500
+#define V1_ANY_UNIQUENESS 555 // FIXME: comment is required
+
+//
+// here are conversion routines
+//
+/* Map a v1 key "uniqueness" value to its TYPE_* item-type code.
+   Anything unrecognized is reported as TYPE_ANY. */
+extern inline int uniqueness2type (__u32 uniqueness)
+{
+    if (uniqueness == V1_SD_UNIQUENESS)
+	return TYPE_STAT_DATA;
+    if (uniqueness == V1_INDIRECT_UNIQUENESS)
+	return TYPE_INDIRECT;
+    if (uniqueness == V1_DIRECT_UNIQUENESS)
+	return TYPE_DIRECT;
+    if (uniqueness == V1_DIRENTRY_UNIQUENESS)
+	return TYPE_DIRENTRY;
+    return TYPE_ANY;
+}
+
+/* Inverse of uniqueness2type: map a TYPE_* code to the v1 key
+   "uniqueness" value; unknown types map to V1_ANY_UNIQUENESS. */
+extern inline __u32 type2uniqueness (int type)
+{
+    if (type == TYPE_STAT_DATA)
+	return V1_SD_UNIQUENESS;
+    if (type == TYPE_INDIRECT)
+	return V1_INDIRECT_UNIQUENESS;
+    if (type == TYPE_DIRECT)
+	return V1_DIRECT_UNIQUENESS;
+    if (type == TYPE_DIRENTRY)
+	return V1_DIRENTRY_UNIQUENESS;
+    return V1_ANY_UNIQUENESS;
+}
+
+
+//
+// key is pointer to on disk key which is stored in le, result is cpu,
+// there is no way to get version of object from key, so, provide
+// version to these defines
+//
+/* offset component of an on-disk (little-endian) key, in cpu order.
+   NOTE(review): for v2 keys le64_to_cpu is applied to a 60-bit
+   bitfield value -- verify this on big-endian targets. */
+extern inline loff_t le_key_k_offset (int version, struct key * key)
+{
+    return (version == ITEM_VERSION_1) ? key->u.k_offset_v1.k_offset :
+	le64_to_cpu (key->u.k_offset_v2.k_offset);
+}
+/* offset component of an item head's key, in cpu order */
+extern inline loff_t le_ih_k_offset (struct item_head * ih)
+{
+    return le_key_k_offset (ih_version (ih), &(ih->ih_key));
+}
+
+
+/* item type (TYPE_*) encoded in an on-disk key; for v1 keys the type is
+   derived from the uniqueness field.  NOTE(review): returns loff_t even
+   though the value is a small type code -- int would be the natural type. */
+extern inline loff_t le_key_k_type (int version, struct key * key)
+{
+    return (version == ITEM_VERSION_1) ? uniqueness2type (key->u.k_offset_v1.k_uniqueness) :
+	le16_to_cpu (key->u.k_offset_v2.k_type);
+}
+/* item type (TYPE_*) of an item head's key */
+extern inline loff_t le_ih_k_type (struct item_head * ih)
+{
+    return le_key_k_type (ih_version (ih), &(ih->ih_key));
+}
+
+
+/* Store the offset component into an on-disk (little-endian) key. */
+extern inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
+{
+    if (version == ITEM_VERSION_1)
+	key->u.k_offset_v1.k_offset = offset;
+    else
+	key->u.k_offset_v2.k_offset = cpu_to_le64 (offset);
+}
+/* store the offset component into an item head's key */
+extern inline void set_le_ih_k_offset (struct item_head * ih, loff_t offset)
+{
+    set_le_key_k_offset (ih_version (ih), &(ih->ih_key), offset);
+}
+
+
+
+/* Store the item type into an on-disk key; v1 keys encode it as a
+   uniqueness value, v2 keys as a little-endian type field. */
+extern inline void set_le_key_k_type (int version, struct key * key, int type)
+{
+    if (version == ITEM_VERSION_1)
+	key->u.k_offset_v1.k_uniqueness = type2uniqueness (type);
+    else
+	key->u.k_offset_v2.k_type = cpu_to_le16 (type);
+}
+/* store the item type into an item head's key */
+extern inline void set_le_ih_k_type (struct item_head * ih, int type)
+{
+    set_le_key_k_type (ih_version (ih), &(ih->ih_key), type);
+}
+
+
+#define is_direntry_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRENTRY)
+#define is_direct_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRECT)
+#define is_indirect_le_key(version,key) (le_key_k_type (version, key) == TYPE_INDIRECT)
+#define is_statdata_le_key(version,key) (le_key_k_type (version, key) == TYPE_STAT_DATA)
+
+//
+// item header has version.
+//
+#define is_direntry_le_ih(ih) is_direntry_le_key (ih_version (ih), &((ih)->ih_key))
+#define is_direct_le_ih(ih) is_direct_le_key (ih_version (ih), &((ih)->ih_key))
+#define is_indirect_le_ih(ih) is_indirect_le_key (ih_version(ih), &((ih)->ih_key))
+#define is_statdata_le_ih(ih) is_statdata_le_key (ih_version (ih), &((ih)->ih_key))
+
+
+
+//
+// key is pointer to cpu key, result is cpu
+//
+/* offset component of a cpu key (no byte swapping: cpu keys keep the
+   offset in cpu order) */
+extern inline loff_t cpu_key_k_offset (struct cpu_key * key)
+{
+    return (key->version == ITEM_VERSION_1) ? key->on_disk_key.u.k_offset_v1.k_offset :
+	key->on_disk_key.u.k_offset_v2.k_offset;
+}
+
+/* item type (TYPE_*) of a cpu key; v1 keys derive it from uniqueness */
+extern inline loff_t cpu_key_k_type (struct cpu_key * key)
+{
+    return (key->version == ITEM_VERSION_1) ? uniqueness2type (key->on_disk_key.u.k_offset_v1.k_uniqueness) :
+	key->on_disk_key.u.k_offset_v2.k_type;
+}
+
+/* Store the offset component into a cpu key (kept in cpu order). */
+extern inline void set_cpu_key_k_offset (struct cpu_key * key, loff_t offset)
+{
+    if (key->version == ITEM_VERSION_1)
+	key->on_disk_key.u.k_offset_v1.k_offset = offset;
+    else
+	key->on_disk_key.u.k_offset_v2.k_offset = offset;
+}
+
+
+/* Store the item type into a cpu key; v1 keys encode it as a
+   uniqueness value. */
+extern inline void set_cpu_key_k_type (struct cpu_key * key, int type)
+{
+    if (key->version == ITEM_VERSION_1)
+	key->on_disk_key.u.k_offset_v1.k_uniqueness = type2uniqueness (type);
+    else
+	key->on_disk_key.u.k_offset_v2.k_type = type;
+}
+
+/* step the offset component of a cpu key back by one */
+extern inline void cpu_key_k_offset_dec (struct cpu_key * key)
+{
+    if (key->version == ITEM_VERSION_1)
+	key->on_disk_key.u.k_offset_v1.k_offset --;
+    else
+	key->on_disk_key.u.k_offset_v2.k_offset --;
+}
+
+
+#define is_direntry_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRENTRY)
+#define is_direct_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRECT)
+#define is_indirect_cpu_key(key) (cpu_key_k_type (key) == TYPE_INDIRECT)
+#define is_statdata_cpu_key(key) (cpu_key_k_type (key) == TYPE_STAT_DATA)
+
+
+/* are these used ? */
+#define is_direntry_cpu_ih(ih) (is_direntry_cpu_key (&((ih)->ih_key)))
+#define is_direct_cpu_ih(ih) (is_direct_cpu_key (&((ih)->ih_key)))
+#define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
+#define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
+
+
+
+
+
+#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \
+ ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \
+ I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) )
+
+/* maximal length of item */
+#define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
+#define MIN_ITEM_LEN 1
+
+
+/* object identifier for root dir */
+#define REISERFS_ROOT_OBJECTID 2
+#define REISERFS_ROOT_PARENT_OBJECTID 1
+extern struct key root_key;
+
+
+
+
+/*
+ * Picture represents a leaf of the S+tree
+ * ______________________________________________________
+ * | | Array of | | |
+ * |Block | Object-Item | F r e e | Objects- |
+ * | head | Headers | S p a c e | Items |
+ * |______|_______________|___________________|___________|
+ */
+
+/* Header of a disk block. More precisely, header of a formatted leaf
+ or internal node, and not the header of an unformatted node. */
+/* fields are little-endian on disk; read them through the B_LEVEL /
+   B_NR_ITEMS / B_FREE_SPACE macros below */
+struct block_head {       
+    __u16 blk_level;        /* Level of a block in the tree. */
+    __u16 blk_nr_item;      /* Number of keys/items in a block. */
+    __u16 blk_free_space;   /* Block free space in bytes. */
+    __u16 blk_reserved;
+				/* dump this in v4/planA */
+    struct key  blk_right_delim_key; /* kept only for compatibility */
+};
+
+#define BLKH_SIZE (sizeof(struct block_head))
+
+/*
+ * values for blk_level field of the struct block_head
+ */
+
+#define FREE_LEVEL 0 /* when node gets removed from the tree its
+ blk_level is set to FREE_LEVEL. It is then
+ used to see whether the node is still in the
+ tree */
+
+#define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level.*/
+
+/* Given the buffer head of a formatted node, resolve to the block head of that node. */
+#define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data))
+/* Number of items that are in buffer. */
+#define B_NR_ITEMS(p_s_bh) (le16_to_cpu ( B_BLK_HEAD(p_s_bh)->blk_nr_item ))
+#define B_LEVEL(bh) (le16_to_cpu ( B_BLK_HEAD(bh)->blk_level ))
+#define B_FREE_SPACE(bh) (le16_to_cpu ( B_BLK_HEAD(bh)->blk_free_space ))
+
+/* NOTE(review): PUT_B_NR_ITEMS and PUT_B_FREE_SPACE referenced `val'
+   without declaring it as a macro parameter, so any expansion either
+   failed to compile or silently captured an unrelated `val' in scope.
+   Give both the explicit (bh, val) signature that PUT_B_LEVEL uses. */
+#define PUT_B_NR_ITEMS(p_s_bh, val) do { B_BLK_HEAD(p_s_bh)->blk_nr_item = cpu_to_le16(val); } while (0)
+#define PUT_B_LEVEL(bh, val) do { B_BLK_HEAD(bh)->blk_level = cpu_to_le16(val); } while (0)
+#define PUT_B_FREE_SPACE(bh, val) do { B_BLK_HEAD(bh)->blk_free_space = cpu_to_le16(val); } while (0)
+
+/* Get right delimiting key. */
+#define B_PRIGHT_DELIM_KEY(p_s_bh) ( &(B_BLK_HEAD(p_s_bh)->blk_right_delim_key) )
+
+/* Does the buffer contain a disk leaf. */
+#define B_IS_ITEMS_LEVEL(p_s_bh) ( B_BLK_HEAD(p_s_bh)->blk_level == DISK_LEAF_NODE_LEVEL )
+
+/* Does the buffer contain a disk internal node */
+#define B_IS_KEYS_LEVEL(p_s_bh) ( B_BLK_HEAD(p_s_bh)->blk_level > DISK_LEAF_NODE_LEVEL &&\
+ B_BLK_HEAD(p_s_bh)->blk_level <= MAX_HEIGHT )
+
+
+
+
+/***************************************************************************/
+/* STAT DATA */
+/***************************************************************************/
+
+
+//
+// old stat data is 32 bytes long. We are going to distinguish new one by
+// different size
+//
+struct stat_data_v1
+{
+ __u16 sd_mode; /* file type, permissions */
+ __u16 sd_nlink; /* number of hard links */
+ __u16 sd_uid; /* owner */
+ __u16 sd_gid; /* group */
+ __u32 sd_size; /* file size */
+ __u32 sd_atime; /* time of last access */
+ __u32 sd_mtime; /* time file was last modified */
+ __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+ union {
+ __u32 sd_rdev;
+ __u32 sd_blocks; /* number of blocks file uses */
+ } __attribute__ ((__packed__)) u;
+ __u32 sd_first_direct_byte; /* first byte of file which is stored
+ in a direct item: except that if it
+ equals 1 it is a symlink and if it
+ equals ~(__u32)0 there is no
+ direct item. The existence of this
+ field really grates on me. Let's
+ replace it with a macro based on
+ sd_size and our tail suppression
+ policy. Someday. -Hans */
+} __attribute__ ((__packed__));
+
+#define SD_V1_SIZE (sizeof(struct stat_data_v1))
+
+
+/* Stat Data on disk (reiserfs version of UFS disk inode minus the
+ address blocks) */
+struct stat_data {
+ __u16 sd_mode; /* file type, permissions */
+ __u16 sd_reserved;
+ __u32 sd_nlink; /* number of hard links */
+ __u64 sd_size; /* file size */
+ __u32 sd_uid; /* owner */
+ __u32 sd_gid; /* group */
+ __u32 sd_atime; /* time of last access */
+ __u32 sd_mtime; /* time file was last modified */
+ __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+ __u32 sd_blocks;
+ union {
+ __u32 sd_rdev;
+ //__u32 sd_first_direct_byte;
+ /* first byte of file which is stored in a
+ direct item: except that if it equals 1
+ it is a symlink and if it equals
+ ~(__u32)0 there is no direct item. The
+ existence of this field really grates
+ on me. Let's replace it with a macro
+ based on sd_size and our tail
+ suppression policy? */
+ } __attribute__ ((__packed__)) u;
+} __attribute__ ((__packed__));
+//
+// this is 40 bytes long
+//
+#define SD_SIZE (sizeof(struct stat_data))
+
+#define stat_data_v1(ih) (ih_version (ih) == ITEM_VERSION_1)
+
+
+/***************************************************************************/
+/* DIRECTORY STRUCTURE */
+/***************************************************************************/
+/*
+ Picture represents the structure of directory items
+ ________________________________________________
+ | Array of | | | | | |
+ | directory |N-1| N-2 | .... | 1st |0th|
+ | entry headers | | | | | |
+ |_______________|___|_____|________|_______|___|
+ <---- directory entries ------>
+
+ First directory item has k_offset component 1. We store "." and ".."
+ in one item, always, we never split "." and ".." into differing
+ items. This makes, among other things, the code for removing
+ directories simpler. */
+#define SD_OFFSET 0
+#define SD_UNIQUENESS 0
+#define DOT_OFFSET 1
+#define DOT_DOT_OFFSET 2
+#define DIRENTRY_UNIQUENESS 500
+
+/* */
+#define FIRST_ITEM_OFFSET 1
+
+/*
+ Q: How to get key of object pointed to by entry from entry?
+
+ A: Each directory entry has its header. This header has deh_dir_id and deh_objectid fields, those are key
+ of object, entry points to */
+
+/* NOT IMPLEMENTED:
+ Directory will someday contain stat data of object */
+
+
+
+/* per-entry header inside a directory item; all fields little-endian
+   on disk */
+struct reiserfs_de_head
+{
+  __u32 deh_offset;		/* third component of the directory entry key */
+  __u32 deh_dir_id;		/* objectid of the parent directory of the object, that is referenced
+					   by directory entry */
+  __u32 deh_objectid;		/* objectid of the object, that is referenced by directory entry */
+  __u16 deh_location;		/* offset of name in the whole item */
+  __u16 deh_state;		/* whether 1) entry contains stat data (for future), and 2) whether
+					   entry is hidden (unlinked) */
+} __attribute__ ((__packed__));
+#define DEH_SIZE sizeof(struct reiserfs_de_head)
+
+/* empty directory contains two entries "." and ".." and their headers */
+#define EMPTY_DIR_SIZE \
+(DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen ("..")))
+
+/* old format directories have this size when empty */
+#define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3)
+
+#define DEH_Statdata 0 /* not used now */
+#define DEH_Visible 2
+
+/* bitops which deals with unaligned addrs;
+ needed for alpha port. --zam */
+#ifdef __alpha__
+# define ADDR_UNALIGNED_BITS (5)
+#endif
+
+#ifdef ADDR_UNALIGNED_BITS
+
+# define aligned_address(addr) ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1)))
+# define unaligned_offset(addr) (((int)((long)(addr) & ((1 << ADDR_UNALIGNED_BITS) - 1))) << 3)
+
+# define set_bit_unaligned(nr, addr) set_bit((nr) + unaligned_offset(addr), aligned_address(addr))
+# define clear_bit_unaligned(nr, addr) clear_bit((nr) + unaligned_offset(addr), aligned_address(addr))
+# define test_bit_unaligned(nr, addr) test_bit((nr) + unaligned_offset(addr), aligned_address(addr))
+
+#else
+
+# define set_bit_unaligned(nr, addr) set_bit(nr, addr)
+# define clear_bit_unaligned(nr, addr) clear_bit(nr, addr)
+# define test_bit_unaligned(nr, addr) test_bit(nr, addr)
+
+#endif
+
+#define deh_dir_id(deh) (__le32_to_cpu ((deh)->deh_dir_id))
+#define deh_objectid(deh) (__le32_to_cpu ((deh)->deh_objectid))
+#define deh_offset(deh) (__le32_to_cpu ((deh)->deh_offset))
+
+
+#define mark_de_with_sd(deh) set_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
+#define mark_de_without_sd(deh) clear_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
+#define mark_de_visible(deh) set_bit_unaligned (DEH_Visible, &((deh)->deh_state))
+#define mark_de_hidden(deh) clear_bit_unaligned (DEH_Visible, &((deh)->deh_state))
+
+#define de_with_sd(deh) test_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
+#define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
+#define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
+
+/* compose directory item containing "." and ".." entries (entries are
+ not aligned to 4 byte boundary) */
+/* compose an old-format directory item containing "." and ".." entries
+   (entries are not aligned to a 4 byte boundary).  dirid/objid key the
+   new directory itself; par_dirid/par_objid key its parent (".."). */
+extern inline void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid,
+					   __u32 par_dirid, __u32 par_objid)
+{
+    struct reiserfs_de_head * deh;
+
+    memset (body, 0, EMPTY_DIR_SIZE_V1);
+    deh = (struct reiserfs_de_head *)body;
+    
+    /* direntry header of "." */
+    deh[0].deh_offset = cpu_to_le32 (DOT_OFFSET);
+    deh[0].deh_dir_id = cpu_to_le32 (dirid);
+    deh[0].deh_objectid = cpu_to_le32 (objid);
+    deh[0].deh_location = cpu_to_le16 (EMPTY_DIR_SIZE_V1 - strlen ("."));
+    deh[0].deh_state = 0;
+    mark_de_visible(&(deh[0]));
+  
+    /* direntry header of ".." */
+    deh[1].deh_offset = cpu_to_le32 (DOT_DOT_OFFSET);
+    /* key of ".." for the root directory */
+    deh[1].deh_dir_id = cpu_to_le32 (par_dirid);
+    deh[1].deh_objectid = cpu_to_le32 (par_objid);
+    deh[1].deh_location = cpu_to_le16 (le16_to_cpu (deh[0].deh_location) - strlen (".."));
+    deh[1].deh_state = 0;
+    mark_de_visible(&(deh[1]));
+
+    /* copy ".." and ".": deh_location was stored with cpu_to_le16
+       above, so convert it back before using it as a cpu-side offset
+       (the old code used the raw little-endian value, placing the
+       names at the wrong offset on big-endian hosts) */
+    memcpy (body + le16_to_cpu (deh[0].deh_location), ".", 1);
+    memcpy (body + le16_to_cpu (deh[1].deh_location), "..", 2);
+}
+
+/* compose directory item containing "." and ".." entries */
+/* compose a new-format directory item containing "." and ".." entries
+   (entry bodies rounded up to 4 byte boundaries).  dirid/objid key the
+   new directory itself; par_dirid/par_objid key its parent (".."). */
+extern inline void make_empty_dir_item (char * body, __u32 dirid, __u32 objid,
+					__u32 par_dirid, __u32 par_objid)
+{
+    struct reiserfs_de_head * deh;
+
+    memset (body, 0, EMPTY_DIR_SIZE);
+    deh = (struct reiserfs_de_head *)body;
+    
+    /* direntry header of "." */
+    deh[0].deh_offset = cpu_to_le32 (DOT_OFFSET);
+    deh[0].deh_dir_id = cpu_to_le32 (dirid);
+    deh[0].deh_objectid = cpu_to_le32 (objid);
+    deh[0].deh_location = cpu_to_le16 (EMPTY_DIR_SIZE - ROUND_UP (strlen (".")));
+    deh[0].deh_state = 0;
+    mark_de_visible(&(deh[0]));
+  
+    /* direntry header of ".." */
+    deh[1].deh_offset = cpu_to_le32 (DOT_DOT_OFFSET);
+    /* key of ".." for the root directory */
+    deh[1].deh_dir_id = cpu_to_le32 (par_dirid);
+    deh[1].deh_objectid = cpu_to_le32 (par_objid);
+    deh[1].deh_location = cpu_to_le16 (le16_to_cpu (deh[0].deh_location) - ROUND_UP (strlen ("..")));
+    deh[1].deh_state = 0;
+    mark_de_visible(&(deh[1]));
+
+    /* copy ".." and ".": deh_location was stored with cpu_to_le16
+       above, so convert it back before using it as a cpu-side offset
+       (the old code used the raw little-endian value, placing the
+       names at the wrong offset on big-endian hosts) */
+    memcpy (body + le16_to_cpu (deh[0].deh_location), ".", 1);
+    memcpy (body + le16_to_cpu (deh[1].deh_location), "..", 2);
+}
+
+
+/* array of the entry headers */
+ /* get item body */
+/* ih_item_location is little-endian on disk: go through the
+   ih_location() accessor instead of the raw field, so the item body
+   is found at the right offset on big-endian hosts too */
+#define B_I_PITEM(bh,ih) ( (bh)->b_data + ih_location (ih) )
+#define B_I_DEH(bh,ih) ((struct reiserfs_de_head *)(B_I_PITEM(bh,ih)))
+
+/* length of the directory entry in directory item. This define
+ calculates length of i-th directory entry using directory entry
+ locations from dir entry head. When it calculates length of 0-th
+ directory entry, it uses length of whole item in place of entry
+ location of the non-existent following entry in the calculation.
+ See picture above.*/
+/*
+#define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \
+((i) ? (((deh)-1)->deh_location - (deh)->deh_location) : ((ih)->ih_item_len) - (deh)->deh_location)
+*/
+/* Length in bytes of directory entry number pos_in_item: the span from
+   this entry's deh_location up to the previous entry's location, or up
+   to the end of the item for entry 0 (see the picture above). */
+extern inline int entry_length (struct buffer_head * bh, struct item_head * ih,
+				int pos_in_item)
+{
+    struct reiserfs_de_head * deh;
+
+    deh = B_I_DEH (bh, ih) + pos_in_item;
+    if (pos_in_item)
+	return (le16_to_cpu ((deh - 1)->deh_location) - le16_to_cpu (deh->deh_location));
+    return (le16_to_cpu (ih->ih_item_len) - le16_to_cpu (deh->deh_location));
+}
+
+
+
+/* number of entries in the directory item, depends on ENTRY_COUNT being at the start of directory dynamic data. */
+#define I_ENTRY_COUNT(ih) ((ih)->u.ih_entry_count)
+
+
+/* name by bh, ih and entry_num */
+#define B_I_E_NAME(bh,ih,entry_num) ((char *)(bh->b_data + ih->ih_item_location + (B_I_DEH(bh,ih)+(entry_num))->deh_location))
+
+// two entries per block (at least)
+//#define REISERFS_MAX_NAME_LEN(block_size)
+//((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
+
+// two entries per block (at least)
+#define REISERFS_MAX_NAME_LEN(block_size) 255
+
+
+
+
+/* this structure is used for operations on directory entries. It is
+ not a disk structure. */
+/* When reiserfs_find_entry or search_by_entry_key find directory
+ entry, they return filled reiserfs_dir_entry structure */
+/* in-core cursor describing one directory entry found in the tree;
+   filled in by reiserfs_find_entry / search_by_entry_key */
+struct reiserfs_dir_entry
+{
+  struct buffer_head * de_bh;		/* leaf buffer holding the directory item */
+  int de_item_num;			/* item position within that leaf */
+  struct item_head * de_ih;		/* header of the directory item */
+  int de_entry_num;			/* entry position within the item */
+  struct reiserfs_de_head * de_deh;	/* array of entry headers of the item */
+  int de_entrylen;			/* entry length (name + optional stat data) */
+  int de_namelen;			/* length of the name alone */
+  char * de_name;			/* pointer to the name inside the item */
+  char * de_gen_number_bit_string;
+
+  __u32 de_dir_id;			/* key of the object the entry points to */
+  __u32 de_objectid;
+
+  struct cpu_key de_entry_key;		/* key of the entry itself */
+};
+
+/* these defines are useful when a particular member of a reiserfs_dir_entry is needed */
+
+/* pointer to file name, stored in entry */
+#define B_I_DEH_ENTRY_FILE_NAME(bh,ih,deh) (B_I_PITEM (bh, ih) + (deh)->deh_location)
+
+/* length of name */
+#define I_DEH_N_ENTRY_FILE_NAME_LENGTH(ih,deh,entry_num) \
+(I_DEH_N_ENTRY_LENGTH (ih, deh, entry_num) - (de_with_sd (deh) ? SD_SIZE : 0))
+
+
+
+/* hash value occupies bits from 7 up to 30 */
+#define GET_HASH_VALUE(offset) ((offset) & 0x7fffff80LL)
+/* generation number occupies 7 bits starting from 0 up to 6 */
+#define GET_GENERATION_NUMBER(offset) ((offset) & 0x7fLL)
+#define MAX_GENERATION_NUMBER 127
+
+#define SET_GENERATION_NUMBER(offset,gen_number) (GET_HASH_VALUE(offset)|(gen_number))
+
+
+/*
+ * Picture represents an internal node of the reiserfs tree
+ * ______________________________________________________
+ * | | Array of | Array of | Free |
+ * |block | keys | pointers | space |
+ * | head | N | N+1 | |
+ * |______|_______________|___________________|___________|
+ */
+
+/***************************************************************************/
+/* DISK CHILD */
+/***************************************************************************/
+/* Disk child pointer: The pointer from an internal node of the tree
+ to a node that is on disk. */
+/* Disk child pointer: The pointer from an internal node of the tree
+   to a node that is on disk.  dc_block_number is little-endian on
+   disk; use B_N_CHILD_NUM / PUT_B_N_CHILD_NUM below. */
+struct disk_child {
+  __u32       dc_block_number;              /* Disk child's block number. */
+  __u16       dc_size;		            /* Disk child's used space.   */
+  __u16       dc_reserved;
+};
+
+#define DC_SIZE (sizeof(struct disk_child))
+
+/* Get disk child by buffer header and position in the tree node. */
+#define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\
+((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos)))
+
+/* Get disk child number by buffer header and position in the tree node. */
+#define B_N_CHILD_NUM(p_s_bh,n_pos) (le32_to_cpu (B_N_CHILD(p_s_bh,n_pos)->dc_block_number))
+#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) do { B_N_CHILD(p_s_bh,n_pos)->dc_block_number = cpu_to_le32(val); } while (0)
+
+ /* maximal value of field child_size in structure disk_child */
+ /* child size is the combined size of all items and their headers */
+#define MAX_CHILD_SIZE(bh) ((int)( (bh)->b_size - BLKH_SIZE ))
+
+/* amount of used space in buffer (not including block head) */
+#define B_CHILD_SIZE(cur) (MAX_CHILD_SIZE(cur)-(B_FREE_SPACE(cur)))
+
+/* max and min number of keys in internal node */
+#define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) )
+#define MIN_NR_KEY(bh) (MAX_NR_KEY(bh)/2)
+
+/***************************************************************************/
+/* PATH STRUCTURES AND DEFINES */
+/***************************************************************************/
+
+
+/* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the
+ key. It uses reiserfs_bread to try to find buffers in the cache given their block number. If it
+ does not find them in the cache it reads them from disk. For each node search_by_key finds using
+ reiserfs_bread it then uses bin_search to look through that node. bin_search will find the
+ position of the block_number of the next node if it is looking through an internal node. If it
+ is looking through a leaf node bin_search will find the position of the item which has key either
+ equal to given key, or which is the maximal key less than the given key. */
+
+/* one step of a root-to-leaf search path */
+struct path_element  {
+  struct buffer_head *	pe_buffer;    /* Pointer to the buffer at the path in the tree. */
+  int         		pe_position;  /* Position in the tree node which is placed in the */
+                                      /* buffer above.                                  */
+};
+
+#define MAX_HEIGHT 5 /* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */
+#define EXTENDED_MAX_HEIGHT 7 /* Must be equals MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
+#define FIRST_PATH_ELEMENT_OFFSET 2 /* Must be equal to at least 2. */
+
+#define ILLEGAL_PATH_ELEMENT_OFFSET 1 /* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
+#define MAX_FEB_SIZE 6 /* this MUST be MAX_HEIGHT + 1. See about FEB below */
+
+
+
+/* We need to keep track of who the ancestors of nodes are. When we
+ perform a search we record which nodes were visited while
+ descending the tree looking for the node we searched for. This list
+ of nodes is called the path. This information is used while
+ performing balancing. Note that this path information may become
+ invalid, and this means we must check it when using it to see if it
+ is still valid. You'll need to read search_by_key and the comments
+ in it, especially about decrement_counters_in_path(), to understand
+ this structure.
+
+Paths make the code so much harder to work with and debug.... An
+enormous number of bugs are due to them, and trying to write or modify
+code that uses them just makes my head hurt. They are based on an
+excessive effort to avoid disturbing the precious VFS code.:-( The
+gods only know how we are going to SMP the code that uses them.
+znodes are the way! */
+
+
+/* search path from the root down to a leaf; element 0..path_length are
+   valid, with FIRST_PATH_ELEMENT_OFFSET as the first real slot */
+struct path {
+  int                   path_length;                      	/* Length of the array above.   */
+  struct  path_element  path_elements[EXTENDED_MAX_HEIGHT];	/* Array of the path elements.  */
+  int			pos_in_item;				/* position inside the leaf item (see pos_in_item macro) */
+};
+
+#define pos_in_item(path) ((path)->pos_in_item)
+
+#define INITIALIZE_PATH(var) \
+struct path var = {ILLEGAL_PATH_ELEMENT_OFFSET, }
+
+/* Get path element by path and path position. */
+#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset))
+
+/* Get buffer header at the path by path and path position. */
+#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer)
+
+/* Get position in the element at the path by path and path position. */
+#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position)
+
+
+#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length))
+ /* you know, to the person who didn't
+ write this the macro name does not
+ at first suggest what it does.
+ Maybe POSITION_FROM_PATH_END? Or
+ maybe we should just focus on
+ dumping paths... -Hans */
+#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length))
+
+
+#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path))
+
+/* in do_balance leaf has h == 0 in contrast with path structure,
+ where root has level == 0. That is why we need these defines */
+#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */
+#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
+#define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
+#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
+
+#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h))
+
+#define get_bh(path) PATH_PLAST_BUFFER(path)
+#define get_ih(path) PATH_PITEM_HEAD(path)
+#define get_item_pos(path) PATH_LAST_POSITION(path)
+#define get_item(path) ((void *)B_N_PITEM(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION (path)))
+#define item_moved(ih,path) comp_items(ih, path)
+#define path_changed(ih,path) comp_items (ih, path)
+
+
+/***************************************************************************/
+/* MISC */
+/***************************************************************************/
+
+/* Size of pointer to the unformatted node. */
+#define UNFM_P_SIZE (sizeof(unp_t))
+
+// in in-core inode key is stored on le form
+#define INODE_PKEY(inode) ((struct key *)((inode)->u.reiserfs_i.i_key))
+//#define mark_tail_converted(inode) (atomic_set(&((inode)->u.reiserfs_i.i_converted),1))
+//#define unmark_tail_converted(inode) (atomic_set(&((inode)->u.reiserfs_i.i_converted), 0))
+//#define is_tail_converted(inode) (atomic_read(&((inode)->u.reiserfs_i.i_converted)))
+
+
+
+#define MAX_UL_INT 0xffffffff
+#define MAX_INT 0x7ffffff
+#define MAX_US_INT 0xffff
+
+///#define TOO_LONG_LENGTH (~0ULL)
+
+// reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
+#define U32_MAX (~(__u32)0)
+/* largest representable key offset for this inode: v1 keys carry a
+   32-bit offset, v2 keys a 60-bit one (64 bits minus the 4-bit type) */
+extern inline loff_t max_reiserfs_offset (struct inode * inode)
+{
+    if (inode_items_version (inode) == ITEM_VERSION_1)
+	return (loff_t)U32_MAX;
+
+    return (loff_t)((~(__u64)0) >> 4);
+}
+
+
+/*#define MAX_KEY_UNIQUENESS MAX_UL_INT*/
+#define MAX_KEY_OBJECTID MAX_UL_INT
+
+
+#define MAX_B_NUM MAX_UL_INT
+#define MAX_FC_NUM MAX_US_INT
+
+
+/* the purpose is to detect overflow of an unsigned short */
+#define REISERFS_LINK_MAX (MAX_US_INT - 1000)
+
+
+/* The following defines are used in reiserfs_insert_item and reiserfs_append_item */
+#define REISERFS_KERNEL_MEM 0 /* reiserfs kernel memory mode */
+#define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+
+#define fs_generation(s) ((s)->u.reiserfs_sb.s_generation_counter)
+#define get_generation(s) atomic_read (&fs_generation(s))
+#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+#define fs_changed(gen,s) (gen != get_generation (s))
+
+
+/***************************************************************************/
+/* FIXATE NODES */
+/***************************************************************************/
+
+//#define VI_TYPE_STAT_DATA 1
+//#define VI_TYPE_DIRECT 2
+//#define VI_TYPE_INDIRECT 4
+//#define VI_TYPE_DIRECTORY 8
+//#define VI_TYPE_FIRST_DIRECTORY_ITEM 16
+//#define VI_TYPE_INSERTED_DIRECTORY_ITEM 32
+
+#define VI_TYPE_LEFT_MERGEABLE 1
+#define VI_TYPE_RIGHT_MERGEABLE 2
+
+/* To make any changes in the tree we always first find node, that
+ contains item to be changed/deleted or place to insert a new
+ item. We call this node S. To do balancing we need to decide what
+ we will shift to left/right neighbor, or to a new node, where new
+ item will be etc. To make this analysis simpler we build virtual
+ node. Virtual node is an array of items, that will replace items of
+ node S. (For instance if we are going to delete an item, virtual
+ node does not contain it). Virtual node keeps information about
+ item sizes and types, mergeability of first and last items, sizes
+ of all entries in directory item. We use this array of items when
+ calculating what we can shift to neighbors and how many nodes we
+ have to have if we do not any shiftings, if we shift to left/right
+ neighbor or to both. */
+/* One element of a virtual_node: describes a single item (existing or
+   about to be created) of the node being balanced. */
+struct virtual_item
+{
+ int vi_index; // index into item_ops[]: selects the item_operations used for this item
+ unsigned short vi_type; // VI_TYPE_LEFT_MERGEABLE / VI_TYPE_RIGHT_MERGEABLE flags
+ unsigned short vi_item_len; /* length of item that it will have after balancing */
+ struct item_head * vi_ih; // item header of the described item
+ const char * vi_item; // body of item (old or new)
+ const void * vi_new_data; // 0 always except in paste mode
+ void * vi_uarea; // item specific area (per-item-type scratch space)
+};
+
+
+/* Array of virtual_items describing how node S will look after the
+   planned operation; built inside the kmalloced vn_buf of the
+   tree_balance (see tb_vn below). */
+struct virtual_node
+{
+ char * vn_free_ptr; /* this is a pointer to the free space in the buffer */
+ unsigned short vn_nr_item; /* number of items in virtual node */
+ short vn_size; /* size of node, that node would have if it had unlimited size and no balancing is performed */
+ short vn_mode; /* mode of balancing (paste, insert, delete, cut) */
+ short vn_affected_item_num; /* item the operation applies to */
+ short vn_pos_in_item; /* position within the affected item (paste/cut modes) */
+ struct item_head * vn_ins_ih; /* item header of inserted item, 0 for other modes */
+ const void * vn_data; /* NOTE(review): presumably the data being pasted/inserted — confirm against fix_nodes() */
+ struct virtual_item * vn_vi; /* array of items (including a new one, excluding item to be deleted) */
+};
+
+
+/***************************************************************************/
+/* TREE BALANCE */
+/***************************************************************************/
+
+/* This temporary structure is used in tree balance algorithms, and
+ constructed as we go to the extent that its various parts are
+ needed. It contains arrays of nodes that can potentially be
+ involved in the balancing of node S, and parameters that define how
+ each of the nodes must be balanced. Note that in these algorithms
+ for balancing the worst case is to need to balance the current node
+ S and the left and right neighbors and all of their parents plus
+ create a new node. We implement S1 balancing for the leaf nodes
+ and S0 balancing for the internal nodes (S1 and S0 are defined in
+ our papers.)*/
+
+#define MAX_FREE_BLOCK 7 /* size of the array of buffers to free at end of do_balance */
+
+/* maximum number of FEB blocknrs on a single level */
+#define MAX_AMOUNT_NEEDED 2
+
+/* someday somebody will prefix every field in this struct with tb_ */
+struct tree_balance
+{
+ int tb_mode; /* balancing mode: M_INSERT/M_PASTE/M_DELETE/M_CUT (see defines below) */
+ int need_balance_dirty; /* NOTE(review): presumably flags that balancing left buffers dirty — confirm */
+ struct super_block * tb_sb; /* filesystem being balanced */
+ struct reiserfs_transaction_handle *transaction_handle ; /* transaction this balance runs under */
+ struct path * tb_path; /* search path to the node S being balanced */
+ struct buffer_head * L[MAX_HEIGHT]; /* array of left neighbors of nodes in the path */
+ struct buffer_head * R[MAX_HEIGHT]; /* array of right neighbors of nodes in the path*/
+ struct buffer_head * FL[MAX_HEIGHT]; /* array of fathers of the left neighbors */
+ struct buffer_head * FR[MAX_HEIGHT]; /* array of fathers of the right neighbors */
+ struct buffer_head * CFL[MAX_HEIGHT]; /* array of common parents of center node and its left neighbor */
+ struct buffer_head * CFR[MAX_HEIGHT]; /* array of common parents of center node and its right neighbor */
+
+ struct buffer_head * FEB[MAX_FEB_SIZE]; /* array of empty buffers. Number of buffers in array equals
+ cur_blknum. */
+ struct buffer_head * used[MAX_FEB_SIZE]; /* FEB entries that have been consumed */
+ struct buffer_head * thrown[MAX_FEB_SIZE]; /* NOTE(review): presumably FEB entries given back — confirm */
+ int lnum[MAX_HEIGHT]; /* array of number of items which must be
+ shifted to the left in order to balance the
+ current node; for leaves includes item that
+ will be partially shifted; for internal
+ nodes, it is the number of child pointers
+ rather than items. It includes the new item
+ being created. The code sometimes subtracts
+ one to get the number of wholly shifted
+ items for other purposes. */
+ int rnum[MAX_HEIGHT]; /* substitute right for left in comment above */
+ int lkey[MAX_HEIGHT]; /* array indexed by height h mapping the key delimiting L[h] and
+ S[h] to its item number within the node CFL[h] */
+ int rkey[MAX_HEIGHT]; /* substitute r for l in comment above */
+ int insert_size[MAX_HEIGHT]; /* the number of bytes by we are trying to add or remove from
+ S[h]. A negative value means removing. */
+ int blknum[MAX_HEIGHT]; /* number of nodes that will replace node S[h] after
+ balancing on the level h of the tree. If 0 then S is
+ being deleted, if 1 then S is remaining and no new nodes
+ are being created, if 2 or 3 then 1 or 2 new nodes is
+ being created */
+
+ /* fields that are used only for balancing leaves of the tree */
+ int cur_blknum; /* number of empty blocks having been already allocated */
+ int s0num; /* number of items that fall into left most node when S[0] splits */
+ int s1num; /* number of items that fall into first new node when S[0] splits */
+ int s2num; /* number of items that fall into second new node when S[0] splits */
+ int lbytes; /* number of bytes which can flow to the left neighbor from the left */
+ /* most liquid item that cannot be shifted from S[0] entirely */
+ /* if -1 then nothing will be partially shifted */
+ int rbytes; /* number of bytes which will flow to the right neighbor from the right */
+ /* most liquid item that cannot be shifted from S[0] entirely */
+ /* if -1 then nothing will be partially shifted */
+ int s1bytes; /* number of bytes which flow to the first new node when S[0] splits */
+ /* note: if S[0] splits into 3 nodes, then items do not need to be cut */
+ int s2bytes; /* like s1bytes, for the second new node */
+ struct buffer_head * buf_to_free[MAX_FREE_BLOCK]; /* buffers which are to be freed after do_balance finishes by unfix_nodes */
+ char * vn_buf; /* kmalloced memory. Used to create
+ virtual node and keep map of
+ dirtied bitmap blocks */
+ int vn_buf_size; /* size of the vn_buf */
+ struct virtual_node * tb_vn; /* VN starts after bitmap of bitmap blocks */
+
+ int fs_gen; /* saved value of `reiserfs_generation' counter
+ see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
+} ;
+
+
+#if 0
+ /* when balancing we potentially affect a 3 node wide column of nodes
+ in the tree (the top of the column may be tapered). C is the nodes
+ at the center of this column, and L and R are the nodes to the
+ left and right. */
+ struct seal * L_path_seals[MAX_HEIGHT];
+ struct seal * C_path_seals[MAX_HEIGHT];
+ struct seal * R_path_seals[MAX_HEIGHT];
+ char L_path_lock_types[MAX_HEIGHT]; /* 'r', 'w', or 'n' for read, write, or none */
+ char C_path_lock_types[MAX_HEIGHT];
+ char R_path_lock_types[MAX_HEIGHT];
+
+
+ struct seal_list_elem * C_seal[MAX_HEIGHT]; /* array of seals on nodes in the path */
+ struct seal_list_elem * L_seal[MAX_HEIGHT]; /* array of seals on left neighbors of nodes in the path */
+ struct seal_list_elem * R_seal[MAX_HEIGHT]; /* array of seals on right neighbors of nodes in the path*/
+ struct seal_list_elem * FL_seal[MAX_HEIGHT]; /* array of seals on fathers of the left neighbors */
+ struct seal_list_elem * FR_seal[MAX_HEIGHT]; /* array of seals on fathers of the right neighbors */
+ struct seal_list_elem * CFL_seal[MAX_HEIGHT]; /* array of seals on common parents of center node and its left neighbor */
+ struct seal_list_elem * CFR_seal[MAX_HEIGHT]; /* array of seals on common parents of center node and its right neighbor */
+
+ struct char C_desired_lock_type[MAX_HEIGHT]; /* 'r', 'w', or 'n' for read, write, or none */
+ struct char L_desired_lock_type[MAX_HEIGHT];
+ struct char R_desired_lock_type[MAX_HEIGHT];
+ struct char FL_desired_lock_type[MAX_HEIGHT];
+ struct char FR_desired_lock_type[MAX_HEIGHT];
+ struct char CFL_desired_lock_type[MAX_HEIGHT];
+ struct char CFR_desired_lock_type[MAX_HEIGHT];
+#endif
+
+
+
+
+
+/* These are modes of balancing */
+
+/* When inserting an item. */
+#define M_INSERT 'i'
+/* When inserting into (directories only) or appending onto an already
+   existent item. */
+#define M_PASTE 'p'
+/* When deleting an item. */
+#define M_DELETE 'd'
+/* When truncating an item or removing an entry from a (directory) item. */
+#define M_CUT 'c'
+
+/* used when balancing on leaf level skipped (in reiserfsck) */
+#define M_INTERNAL 'n'
+
+/* When further balancing is not needed, then do_balance does not need
+ to be called. */
+#define M_SKIP_BALANCING 's'
+#define M_CONVERT 'v'
+
+/* modes of leaf_move_items */
+#define LEAF_FROM_S_TO_L 0
+#define LEAF_FROM_S_TO_R 1
+#define LEAF_FROM_R_TO_L 2
+#define LEAF_FROM_L_TO_R 3
+#define LEAF_FROM_S_TO_SNEW 4
+
+#define FIRST_TO_LAST 0
+#define LAST_TO_FIRST 1
+
+/* used in do_balance for passing parent of node information that has
+   been gotten from tb struct */
+struct buffer_info {
+ struct tree_balance * tb; /* the balance this buffer takes part in */
+ struct buffer_head * bi_bh; /* the buffer itself */
+ struct buffer_head * bi_parent; /* parent node of bi_bh in the tree */
+ int bi_position; /* NOTE(review): presumably the index of the child pointer to bi_bh within bi_parent — confirm against do_balance callers */
+};
+
+
+/* there are 4 types of items: stat data, directory item, indirect, direct.
++-------------------+------------+--------------+------------+
+| | k_offset | k_uniqueness | mergeable? |
++-------------------+------------+--------------+------------+
+| stat data | 0 | 0 | no |
++-------------------+------------+--------------+------------+
+| 1st directory item| DOT_OFFSET |DIRENTRY_UNIQUENESS| no |
+| non 1st directory | hash value | | yes |
+| item | | | |
++-------------------+------------+--------------+------------+
+| indirect item | offset + 1 |TYPE_INDIRECT | if this is not the first indirect item of the object
++-------------------+------------+--------------+------------+
+| direct item | offset + 1 |TYPE_DIRECT | if this is not the first direct item of the object
++-------------------+------------+--------------+------------+
+*/
+
+/* Method table implemented once per item type (stat data, directory,
+   indirect, direct); selected via item_ops[] by the op_* macros
+   below. */
+struct item_operations {
+ int (*bytes_number) (struct item_head * ih, int block_size);
+ void (*decrement_key) (struct cpu_key *);
+ int (*is_left_mergeable) (struct key * ih, unsigned long bsize);
+ void (*print_item) (struct item_head *, char * item);
+ void (*check_item) (struct item_head *, char * item);
+
+ /* the remaining hooks operate on virtual items during balancing */
+ int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
+ int is_affected, int insert_size);
+ int (*check_left) (struct virtual_item * vi, int free,
+ int start_skip, int end_skip);
+ int (*check_right) (struct virtual_item * vi, int free);
+ int (*part_size) (struct virtual_item * vi, int from, int to);
+ int (*unit_num) (struct virtual_item * vi);
+ void (*print_vi) (struct virtual_item * vi);
+};
+
+
+extern struct item_operations stat_data_ops, indirect_ops, direct_ops,
+ direntry_ops;
+extern struct item_operations * item_ops [4];
+
+#define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
+#define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
+#define op_print_item(ih,item) item_ops[le_ih_k_type (ih)]->print_item (ih, item)
+#define op_check_item(ih,item) item_ops[le_ih_k_type (ih)]->check_item (ih, item)
+#define op_create_vi(vn,vi,is_affected,insert_size) item_ops[le_ih_k_type ((vi)->vi_ih)]->create_vi (vn,vi,is_affected,insert_size)
+#define op_check_left(vi,free,start_skip,end_skip) item_ops[(vi)->vi_index]->check_left (vi, free, start_skip, end_skip)
+#define op_check_right(vi,free) item_ops[(vi)->vi_index]->check_right (vi, free)
+#define op_part_size(vi,from,to) item_ops[(vi)->vi_index]->part_size (vi, from, to)
+#define op_unit_num(vi) item_ops[(vi)->vi_index]->unit_num (vi)
+#define op_print_vi(vi) item_ops[(vi)->vi_index]->print_vi (vi)
+
+
+
+
+
+#define COMP_KEYS comp_keys
+#define COMP_SHORT_KEYS comp_short_keys
+#define keys_of_same_object comp_short_keys
+
+/*#define COMP_KEYS(p_s_key1, p_s_key2) comp_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))
+#define COMP_SHORT_KEYS(p_s_key1, p_s_key2) comp_short_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))*/
+
+
+/* number of blocks pointed to by the indirect item */
+#define I_UNFM_NUM(p_s_ih) ( (p_s_ih)->ih_item_len / UNFM_P_SIZE )
+
+/* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
+#define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - (ih)->u.ih_free_space : (size))
+
+/* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */
+
+
+/* get the item header */
+#define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
+
+/* get key */
+#define B_N_PDELIM_KEY(bh,item_num) ( (struct key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
+
+/* get the key */
+#define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
+
+/* get item body */
+#define B_N_PITEM(bh,item_num) ( (bh)->b_data + B_N_PITEM_HEAD((bh),(item_num))->ih_item_location)
+
+/* get the stat data by the buffer header and the item order */
+#define B_N_STAT_DATA(bh,nr) \
+( (struct stat_data *)((bh)->b_data+B_N_PITEM_HEAD((bh),(nr))->ih_item_location ) )
+
+ /* following defines use reiserfs buffer header and item header */
+
+/* get stat-data */
+#define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + (ih)->ih_item_location) )
+
+// this is 3976 for size==4096
+#define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE)
+
+/* indirect items consist of entries which contain blocknrs, pos
+ indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
+ blocknr contained by the entry pos points to */
+#define B_I_POS_UNFM_POINTER(bh,ih,pos) (*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)))
+#define PUT_B_I_POS_UNFM_POINTER(bh,ih,pos, val) do {*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)) = cpu_to_le32(val); } while (0)
+
+/* Reiserfs buffer cache statistics. */
+/* NOTE(review): this defines (not merely declares) the variable
+   `cache_stat` in a header; with REISERFS_CACHE_STAT enabled the header
+   must be included by exactly one translation unit or linking will see
+   duplicate definitions — confirm intended usage. */
+#ifdef REISERFS_CACHE_STAT
+ struct reiserfs_cache_stat
+ {
+ int nr_reiserfs_ll_r_block; /* Number of block reads. */
+ int nr_reiserfs_ll_w_block; /* Number of block writes. */
+ int nr_reiserfs_schedule; /* Number of locked buffers waits. */
+ unsigned long nr_reiserfs_bread; /* Number of calls to reiserfs_bread function */
+ unsigned long nr_returns; /* Number of breads of buffers that were hoped to contain a key but did not after bread completed
+ (usually due to object shifting while bread was executing.)
+ In the code this manifests as the number
+ of times that the repeat variable is nonzero in search_by_key.*/
+ unsigned long nr_fixed; /* number of calls of fix_nodes function */
+ unsigned long nr_failed; /* number of calls of fix_nodes in which schedule occurred while the function worked */
+ unsigned long nr_find1; /* How many times we access a child buffer using its direct pointer from an internal node.*/
+ unsigned long nr_find2; /* Number of times there is neither a direct pointer to
+ nor any entry in the child list pointing to the buffer. */
+ unsigned long nr_find3; /* When parent is locked (meaning that there are no direct pointers)
+ or parent is leaf and buffer to be found is an unformatted node. */
+ } cache_stat;
+#endif
+
+/* NOTE(review): presumably passed as the opaque argument through
+   iget4() down to reiserfs_read_inode2() to identify which object the
+   inode number refers to — confirm against super.c/inode.c */
+struct reiserfs_iget4_args {
+ __u32 objectid ; /* k_objectid of the sought object */
+} ;
+
+/***************************************************************************/
+/* FUNCTION DECLARATIONS */
+/***************************************************************************/
+
+/*#ifdef __KERNEL__*/
+
+/* journal.c see journal.c for all the comments here */
+
+#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit structs at 4k */
+
+
+/* first block written in a commit. On-disk layout: JOURNAL_TRANS_HALF
+   is sized so that desc and commit blocks each fit in 4k (see define
+   above). */
+struct reiserfs_journal_desc {
+ __u32 j_trans_id ; /* id of commit */
+ __u32 j_len ; /* length of commit. len +1 is the commit block */
+ __u32 j_mount_id ; /* mount id of this trans*/
+ __u32 j_realblock[JOURNAL_TRANS_HALF] ; /* real locations for each block */
+ char j_magic[12] ; /* identifying magic of a desc block */
+} ;
+
+/* last block written in a commit */
+struct reiserfs_journal_commit {
+ __u32 j_trans_id ; /* must match j_trans_id from the desc block */
+ __u32 j_len ; /* must match j_len from the desc block */
+ __u32 j_realblock[JOURNAL_TRANS_HALF] ; /* real locations for each block */
+ char j_digest[16] ; /* md5 sum of all the blocks involved, including desc and commit. not used, kill it */
+} ;
+
+/* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
+** last fully flushed transaction. fully flushed means all the log blocks and all the real blocks are on disk,
+** and this transaction does not need to be replayed.
+*/
+struct reiserfs_journal_header {
+ __u32 j_last_flush_trans_id ; /* id of last fully flushed transaction */
+ __u32 j_first_unflushed_offset ; /* offset in the log of where to start replay after a crash */
+ __u32 j_mount_id ; /* NOTE(review): presumably mount id of the last flushed transaction — confirm */
+} ;
+
+/* these are used to keep flush pages that contain converted direct items.
+** if the page is not flushed before the transaction that converted it
+** is committed, we risk losing data
+**
+** note, while a page is in this list, its counter is incremented.
+*/
+struct reiserfs_page_list {
+ struct reiserfs_page_list *next ; /* doubly linked list links */
+ struct reiserfs_page_list *prev ;
+ struct page *page ; /* the page holding the converted data */
+ unsigned long blocknr ; /* block number holding converted data */
+
+ /* if a transaction writer has the page locked the flush_page_list
+ ** function doesn't need to (and can't) get the lock while flushing
+ ** the page. do_not_lock needs to be set by anyone who calls journal_end
+ ** with a page lock held. They have to look in the inode and see
+ ** if the inode has the page they have locked in the flush list.
+ **
+ ** this sucks.
+ */
+ int do_not_lock ;
+} ;
+
+extern task_queue reiserfs_commit_thread_tq ;
+extern wait_queue_head_t reiserfs_commit_thread_wait ;
+
+/* biggest tunable defines are right here */
+#define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */
+#define JOURNAL_MAX_BATCH 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+#define JOURNAL_MAX_COMMIT_AGE 30
+#define JOURNAL_MAX_TRANS_AGE 30
+#define JOURNAL_PER_BALANCE_CNT 12 /* must be >= (5 + 2 * (MAX_HEIGHT-2) + 1) */
+
+/* both of these can be as low as 1, or as high as you want. The min is the
+** number of 4k bitmap nodes preallocated on mount. New nodes are allocated
+** as needed, and released when transactions are committed. On release, if
+** the current number of nodes is > max, the node is freed, otherwise,
+** it is put on a free list for faster use later.
+*/
+#define REISERFS_MIN_BITMAP_NODES 10
+#define REISERFS_MAX_BITMAP_NODES 100
+
+#define JBH_HASH_SHIFT 13 /* these are based on journal hash size of 8192 */
+#define JBH_HASH_MASK 8191
+
+/* After several hours of tedious analysis, the following hash
+ * function won. Do not mess with it... -DaveM
+ */
+#define _jhashfn(dev,block) \
+ ((((dev)<<(JBH_HASH_SHIFT - 6)) ^ ((dev)<<(JBH_HASH_SHIFT - 9))) ^ \
+ (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
+#define journal_hash(t,dev,block) ((t)[_jhashfn((dev),(block)) & JBH_HASH_MASK])
+
+/* finds n'th buffer with 0 being the start of this commit. Needs to go away, j_ap_blocks has changed
+** since I created this. One chunk of code in journal.c needs changing before deleting it
+*/
+#define JOURNAL_BUFFER(j,n) ((j)->j_ap_blocks[((j)->j_start + (n)) % JOURNAL_BLOCK_COUNT])
+
+void reiserfs_wait_on_write_block(struct super_block *s) ;
+void reiserfs_block_writes(struct reiserfs_transaction_handle *th) ;
+void reiserfs_allow_writes(struct super_block *s) ;
+void reiserfs_check_lock_depth(char *caller) ;
+void reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh, int wait) ;
+void reiserfs_restore_prepared_buffer(struct super_block *, struct buffer_head *bh) ;
+int journal_init(struct super_block *) ;
+int journal_release(struct reiserfs_transaction_handle*, struct super_block *) ;
+int journal_release_error(struct reiserfs_transaction_handle*, struct super_block *) ;
+int journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long) ;
+int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *, unsigned long) ;
+int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *, struct super_block *, struct buffer_head *bh) ;
+int journal_mark_freed(struct reiserfs_transaction_handle *, struct super_block *, unsigned long blocknr) ;
+int push_journal_writer(char *w) ;
+int pop_journal_writer(int windex) ;
+int journal_lock_dobalance(struct super_block *p_s_sb) ;
+int journal_unlock_dobalance(struct super_block *p_s_sb) ;
+int journal_transaction_should_end(struct reiserfs_transaction_handle *, int) ;
+int reiserfs_in_journal(struct super_block *p_s_sb, kdev_t dev, unsigned long bl, int size, int searchall, unsigned long *next) ;
+int journal_begin(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
+int journal_join(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
+struct super_block *reiserfs_get_super(kdev_t dev) ;
+void flush_async_commits(struct super_block *p_s_sb) ;
+
+int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) ;
+int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed) ;
+
+int buffer_journaled(struct buffer_head *bh) ;
+int mark_buffer_journal_new(struct buffer_head *bh) ;
+int reiserfs_sync_all_buffers(kdev_t dev, int wait) ;
+int reiserfs_sync_buffers(kdev_t dev, int wait) ;
+int reiserfs_add_page_to_flush_list(struct reiserfs_transaction_handle *,
+ struct inode *, struct buffer_head *) ;
+int reiserfs_remove_page_from_flush_list(struct reiserfs_transaction_handle *,
+ struct inode *) ;
+
+int reiserfs_allocate_list_bitmaps(struct super_block *s, struct reiserfs_list_bitmap *, int) ;
+
+ /* why is this kerplunked right here? */
+/* Returns 1 if bh is non-NULL and its BH_JPrepared state bit is set
+   (presumably set by reiserfs_prepare_for_journal() — confirm),
+   0 otherwise. NULL-safe. */
+static inline int reiserfs_buffer_prepared(struct buffer_head *bh) {
+ if (bh && test_bit(BH_JPrepared, &bh->b_state))
+ return 1 ;
+ else
+ return 0 ;
+}
+
+/* buffer was journaled, waiting to get to disk */
+/* NULL-safe test of the BH_JDirty_wait state bit; returns 0 for NULL */
+static inline int buffer_journal_dirty(struct buffer_head *bh) {
+ if (bh)
+ return test_bit(BH_JDirty_wait, &bh->b_state) ;
+ else
+ return 0 ;
+}
+/* Clear the BH_JDirty_wait bit. NULL-safe; always returns 0. */
+static inline int mark_buffer_notjournal_dirty(struct buffer_head *bh) {
+ if (bh)
+ clear_bit(BH_JDirty_wait, &bh->b_state) ;
+ return 0 ;
+}
+/* Clear the BH_JNew bit. NULL-safe; always returns 0. */
+static inline int mark_buffer_notjournal_new(struct buffer_head *bh) {
+ if (bh) {
+ clear_bit(BH_JNew, &bh->b_state) ;
+ }
+ return 0 ;
+}
+
+/* objectid.c */
+__u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th);
+void reiserfs_release_objectid (struct reiserfs_transaction_handle *th, __u32 objectid_to_release);
+int reiserfs_convert_objectid_map_v1(struct super_block *) ;
+
+/* stree.c */
+int B_IS_IN_TREE(struct buffer_head *);
+extern inline void copy_key (void * to, void * from);
+extern inline void copy_short_key (void * to, void * from);
+extern inline void copy_item_head(void * p_v_to, void * p_v_from);
+
+// first key is in cpu form, second - le
+extern inline int comp_keys (struct key * le_key, struct cpu_key * cpu_key);
+extern inline int comp_short_keys (struct key * le_key, struct cpu_key * cpu_key);
+extern inline void le_key2cpu_key (struct cpu_key * to, struct key * from);
+
+// both are cpu keys
+extern inline int comp_cpu_keys (struct cpu_key *, struct cpu_key *);
+extern inline int comp_short_cpu_keys (struct cpu_key *, struct cpu_key *);
+extern inline void cpu_key2cpu_key (struct cpu_key *, struct cpu_key *);
+
+// both are in le form
+extern inline int comp_le_keys (struct key *, struct key *);
+extern inline int comp_short_le_keys (struct key *, struct key *);
+
+//
+// get key version from on disk key - kludge
+//
+// An on-disk key carries no explicit version number. We read the field
+// that in a version-2 key holds the type; if it contains none of the
+// known v2 type codes, the key must be version 1.
+extern inline int le_key_version (struct key * key)
+{
+ int type;
+
+ type = le16_to_cpu (key->u.k_offset_v2.k_type);
+ if (type != TYPE_DIRECT && type != TYPE_INDIRECT && type != TYPE_DIRENTRY)
+ return ITEM_VERSION_1;
+
+ return ITEM_VERSION_2;
+
+}
+
+
+/* Raw copy of KEY_SIZE bytes from `from' to `to'; no byte-order
+   conversion is performed. */
+extern inline void copy_key (void * to, void * from)
+{
+ memcpy (to, from, KEY_SIZE);
+}
+
+
+int comp_items (struct item_head * p_s_ih, struct path * p_s_path);
+struct key * get_rkey (struct path * p_s_chk_path, struct super_block * p_s_sb);
+inline int bin_search (void * p_v_key, void * p_v_base, int p_n_num, int p_n_width, int * p_n_pos);
+int search_by_key (struct super_block *, struct cpu_key *, struct path *, int);
+#define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
+int search_for_position_by_key (struct super_block * p_s_sb, struct cpu_key * p_s_cpu_key, struct path * p_s_search_path);
+extern inline void decrement_bcount (struct buffer_head * p_s_bh);
+void decrement_counters_in_path (struct path * p_s_search_path);
+void pathrelse (struct path * p_s_search_path);
+int reiserfs_check_path(struct path *p) ;
+void pathrelse_and_restore (struct super_block *s, struct path * p_s_search_path);
+
+int reiserfs_insert_item (struct reiserfs_transaction_handle *th,
+ struct path * path,
+ struct cpu_key * key,
+ struct item_head * ih, const char * body);
+
+int reiserfs_paste_into_item (struct reiserfs_transaction_handle *th,
+ struct path * path,
+ struct cpu_key * key,
+ const char * body, int paste_size);
+
+int reiserfs_cut_from_item (struct reiserfs_transaction_handle *th,
+ struct path * path,
+ struct cpu_key * key,
+ struct inode * inode,
+ struct page *page,
+ loff_t new_file_size);
+
+int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
+ struct path * path,
+ struct cpu_key * key,
+ struct inode * inode,
+ struct buffer_head * p_s_un_bh);
+
+
+void reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * p_s_inode);
+void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
+ struct inode * p_s_inode, struct page *,
+ int update_timestamps);
+//
+//void lock_inode_to_convert (struct inode * p_s_inode);
+//void unlock_inode_after_convert (struct inode * p_s_inode);
+//void increment_i_read_sync_counter (struct inode * p_s_inode);
+//void decrement_i_read_sync_counter (struct inode * p_s_inode);
+
+
+#define block_size(inode) ((inode)->i_sb->s_blocksize)
+#define file_size(inode) ((inode)->i_size)
+#define tail_size(inode) (file_size (inode) & (block_size (inode) - 1))
+
+#define tail_has_to_be_packed(inode) (!dont_have_tails ((inode)->i_sb) &&\
+!STORE_TAIL_IN_UNFM(file_size (inode), tail_size(inode), block_size (inode)))
+
+/*
+int get_buffer_by_range (struct super_block * p_s_sb, struct key * p_s_range_begin, struct key * p_s_range_end,
+ struct buffer_head ** pp_s_buf, unsigned long * p_n_objectid);
+int get_buffers_from_range (struct super_block * p_s_sb, struct key * p_s_range_start, struct key * p_s_range_end,
+ struct buffer_head ** p_s_range_buffers,
+ int n_max_nr_buffers_to_return);
+*/
+
+#ifndef REISERFS_FSCK
+
+//inline int is_left_mergeable (struct item_head * ih, unsigned long bsize);
+
+#else
+
+int is_left_mergeable (struct super_block * s, struct path * path);
+int is_right_mergeable (struct super_block * s, struct path * path);
+int are_items_mergeable (struct item_head * left, struct item_head * right, int bsize);
+
+#endif
+void padd_item (char * item, int total_length, int length);
+
+
+/* inode.c */
+
+int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
+void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
+void make_cpu_key (struct cpu_key * cpu_key, const struct inode * inode, loff_t offset,
+ int type, int key_length);
+void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version,
+ loff_t offset, int type, int length, int entry_count);
+/*void store_key (struct key * key);
+void forget_key (struct key * key);*/
+int reiserfs_get_block (struct inode * inode, long block,
+ struct buffer_head * bh_result, int create);
+struct inode * reiserfs_iget (struct super_block * s, struct cpu_key * key);
+void reiserfs_read_inode (struct inode * inode) ;
+void reiserfs_read_inode2(struct inode * inode, void *p) ;
+void reiserfs_delete_inode (struct inode * inode);
+extern int reiserfs_notify_change(struct dentry * dentry, struct iattr * attr);
+void reiserfs_write_inode (struct inode * inode, int) ;
+
+/* we don't mark inodes dirty, we just log them */
+void reiserfs_dirty_inode (struct inode * inode) ;
+
+struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, const struct inode * dir, int mode,
+ const char * symname, int item_len,
+ struct dentry *dentry, struct inode *inode, int * err);
+int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode);
+void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode);
+int reiserfs_inode_setattr(struct dentry *, struct iattr * attr);
+
+/* namei.c */
+inline void set_de_name_and_namelen (struct reiserfs_dir_entry * de);
+int search_by_entry_key (struct super_block * sb, struct cpu_key * key, struct path * path,
+ struct reiserfs_dir_entry * de);
+struct dentry * reiserfs_lookup (struct inode * dir, struct dentry *dentry);
+int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode);
+int reiserfs_mknod (struct inode * dir_inode, struct dentry *dentry, int mode, int rdev);
+int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode);
+int reiserfs_rmdir (struct inode * dir, struct dentry *dentry);
+int reiserfs_unlink (struct inode * dir, struct dentry *dentry);
+int reiserfs_symlink (struct inode * dir, struct dentry *dentry, const char * symname);
+int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry);
+int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir, struct dentry *new_dentry);
+
+/* super.c */
+inline void reiserfs_mark_buffer_dirty (struct buffer_head * bh, int flag);
+inline void reiserfs_mark_buffer_clean (struct buffer_head * bh);
+void reiserfs_panic (struct super_block * s, const char * fmt, ...);
+void reiserfs_write_super (struct super_block * s);
+void reiserfs_put_super (struct super_block * s);
+int reiserfs_remount (struct super_block * s, int * flags, char * data);
+/*int read_super_block (struct super_block * s, int size);
+int read_bitmaps (struct super_block * s);
+int read_old_bitmaps (struct super_block * s);
+int read_old_super_block (struct super_block * s, int size);*/
+struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent);
+int reiserfs_statfs (struct super_block * s, struct statfs * buf);
+
+/* dir.c */
+extern struct inode_operations reiserfs_dir_inode_operations;
+extern struct file_operations reiserfs_dir_operations;
+
+/* tail_conversion.c */
+int direct2indirect (struct reiserfs_transaction_handle *, struct inode *, struct path *, struct buffer_head *, loff_t);
+int indirect2direct (struct reiserfs_transaction_handle *, struct inode *, struct page *, struct path *, struct cpu_key *, loff_t, char *);
+void reiserfs_unmap_buffer(struct buffer_head *) ;
+
+
+/* file.c */
+extern struct inode_operations reiserfs_file_inode_operations;
+extern struct file_operations reiserfs_file_operations;
+extern struct address_space_operations reiserfs_address_space_operations ;
+int get_new_buffer (struct reiserfs_transaction_handle *th, struct buffer_head *,
+ struct buffer_head **, struct path *);
+
+
+/* buffer2.c */
+struct buffer_head * reiserfs_getblk (kdev_t n_dev, int n_block, int n_size);
+void wait_buffer_until_released (struct buffer_head * bh);
+struct buffer_head * reiserfs_bread (kdev_t n_dev, int n_block, int n_size);
+
+
+/* fix_nodes.c */
+void * reiserfs_kmalloc (size_t size, int flags, struct super_block * s);
+void reiserfs_kfree (const void * vp, size_t size, struct super_block * s);
+int fix_nodes (int n_op_mode, struct tree_balance * p_s_tb, struct item_head * p_s_ins_ih, const void *);
+void unfix_nodes (struct tree_balance *);
+void free_buffers_in_tb (struct tree_balance * p_s_tb);
+
+
+/* prints.c */
+void reiserfs_panic (struct super_block * s, const char * fmt, ...);
+void reiserfs_warning (const char * fmt, ...);
+void reiserfs_debug (struct super_block *s, int level, const char * fmt, ...);
+void print_virtual_node (struct virtual_node * vn);
+void print_indirect_item (struct buffer_head * bh, int item_num);
+void store_print_tb (struct tree_balance * tb);
+void print_cur_tb (char * mes);
+void print_de (struct reiserfs_dir_entry * de);
+void print_bi (struct buffer_info * bi, char * mes);
+#define PRINT_LEAF_ITEMS 1 /* print all items */
+#define PRINT_DIRECTORY_ITEMS 2 /* print directory items */
+#define PRINT_DIRECT_ITEMS 4 /* print contents of direct items */
+void print_block (struct buffer_head * bh, ...);
+void print_path (struct tree_balance * tb, struct path * path);
+void print_bmap (struct super_block * s, int silent);
+void print_bmap_block (int i, char * data, int size, int silent);
+/*void print_super_block (struct super_block * s, char * mes);*/
+void print_objectid_map (struct super_block * s);
+void print_block_head (struct buffer_head * bh, char * mes);
+void check_leaf (struct buffer_head * bh);
+void check_internal (struct buffer_head * bh);
+void print_statistics (struct super_block * s);
+
+/* lbalance.c */
+int leaf_move_items (int shift_mode, struct tree_balance * tb, int mov_num, int mov_bytes, struct buffer_head * Snew);
+int leaf_shift_left (struct tree_balance * tb, int shift_num, int shift_bytes);
+int leaf_shift_right (struct tree_balance * tb, int shift_num, int shift_bytes);
+void leaf_delete_items (struct buffer_info * cur_bi, int last_first, int first, int del_num, int del_bytes);
+void leaf_insert_into_buf (struct buffer_info * bi, int before,
+ struct item_head * inserted_item_ih, const char * inserted_item_body, int zeros_number);
+void leaf_paste_in_buffer (struct buffer_info * bi, int pasted_item_num,
+ int pos_in_item, int paste_size, const char * body, int zeros_number);
+void leaf_cut_from_buffer (struct buffer_info * bi, int cut_item_num, int pos_in_item,
+ int cut_size);
+void leaf_paste_entries (struct buffer_head * bh, int item_num, int before,
+ int new_entry_count, struct reiserfs_de_head * new_dehs, const char * records, int paste_size);
+/* ibalance.c */
+int balance_internal (struct tree_balance * , int, int, struct item_head * ,
+ struct buffer_head **);
+
+/* do_balance.c */
+inline void do_balance_mark_leaf_dirty (struct tree_balance * tb,
+ struct buffer_head * bh, int flag);
+#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
+#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
+
+void do_balance (struct tree_balance * tb, struct item_head * ih,
+ const char * body, int flag);
+void reiserfs_invalidate_buffer (struct tree_balance * tb, struct buffer_head * bh);
+
+int get_left_neighbor_position (struct tree_balance * tb, int h);
+int get_right_neighbor_position (struct tree_balance * tb, int h);
+void replace_key (struct tree_balance * tb, struct buffer_head *, int, struct buffer_head *, int);
+void replace_lkey (struct tree_balance *, int, struct item_head *);
+void replace_rkey (struct tree_balance *, int, struct item_head *);
+void make_empty_node (struct buffer_info *);
+struct buffer_head * get_FEB (struct tree_balance *);
+
+/* bitmap.c */
+int is_reusable (struct super_block * s, unsigned long block, int bit_value);
+void reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long);
+int reiserfs_new_blocknrs (struct reiserfs_transaction_handle *th,
+ unsigned long * pblocknrs, unsigned long start_from, int amount_needed);
+int reiserfs_new_unf_blocknrs (struct reiserfs_transaction_handle *th,
+ unsigned long * pblocknr, unsigned long start_from);
+#ifdef REISERFS_PREALLOCATE
+int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th,
+ struct inode * inode,
+ unsigned long * pblocknr,
+ unsigned long start_from);
+
+void reiserfs_discard_prealloc (struct reiserfs_transaction_handle *th,
+ struct inode * inode);
+#endif
+
+/* hashes.c */
+__u32 keyed_hash (const char *msg, int len);
+__u32 yura_hash (const char *msg, int len);
+__u32 r5_hash (const char *msg, int len);
+
+/* version.c */
+char *reiserfs_get_version_string(void) ;
+
+/* the ext2 bit routines adjust for big or little endian as
+** appropriate for the arch, so in our laziness we use them rather
+** than using the bit routines they call more directly. These
+** routines must be used when changing on disk bitmaps. */
+#define reiserfs_test_and_set_le_bit ext2_set_bit
+#define reiserfs_test_and_clear_le_bit ext2_clear_bit
+#define reiserfs_test_le_bit ext2_test_bit
+#define reiserfs_find_next_zero_le_bit ext2_find_next_zero_bit
+
+
+//
+// this was totally copied from from linux's
+// find_first_zero_bit and changed a bit
+//
+
+#ifdef __i386__
+
+extern __inline__ int
+find_first_nonzero_bit(void * addr, unsigned size) {
+ int res;
+ int __d0;
+ void *__d1;
+
+
+ if (!size) {
+ return (0);
+ }
+ __asm__ __volatile__ (
+ "cld\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "movl -4(%%edi),%%eax\n\t"
+ "subl $4, %%edi\n\t"
+ "bsfl %%eax,%%eax\n\t"
+ "1:\tsubl %%edx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%eax"
+ :"=a" (res),
+ "=c"(__d0), "=D"(__d1)
+ :"1" ((size + 31) >> 5), "d" (addr), "2" (addr));
+ return (res);
+}
+
+#else /* __i386__ */
+
+extern __inline__ int find_next_nonzero_bit(void * addr, unsigned size, unsigned offset)
+{
+ unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
+ unsigned int result = offset & ~31UL;
+ unsigned int tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *p++;
+ /* set to zero first offset bits */
+ tmp &= ~(~0UL >> (32-offset));
+ if (size < 32)
+ goto found_first;
+ if (tmp != 0U)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size >= 32) {
+ if ((tmp = *p++) != 0U)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+found_middle:
+ return result + ffs(tmp);
+}
+
+#define find_first_nonzero_bit(addr,size) find_next_nonzero_bit((addr), (size), 0)
+
+#endif /* __i386__ */
+
+/* sometimes reiserfs_truncate may require to allocate few new blocks
+ to perform indirect2direct conversion. People probably used to
+ think, that truncate should work without problems on a filesystem
+ without free disk space. They may complain that they can not
+ truncate due to lack of free disk space. This spare space allows us
+ to not worry about it. 500 is probably too much, but it should be
+ absolutely safe */
+#define SPARE_SPACE 500
+
+extern inline unsigned long reiserfs_get_journal_block(struct super_block *s) {
+ return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_journal_block) ;
+}
+extern inline unsigned long reiserfs_get_journal_orig_size(struct super_block *s) {
+ return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_orig_journal_size) ;
+}
+
+/* prototypes from ioctl.c */
+int reiserfs_ioctl (struct inode * inode, struct file * filp,
+ unsigned int cmd, unsigned long arg);
+int reiserfs_unpack (struct inode * inode, struct file * filp);
+
+/* ioctl's command */
+#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
+
+#endif /* _LINUX_REISER_FS_H */
+
+
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
new file mode 100644
index 000000000..3a60b8667
--- /dev/null
+++ b/include/linux/reiserfs_fs_i.h
@@ -0,0 +1,63 @@
+#ifndef _REISER_FS_I
+#define _REISER_FS_I
+
+/* these are used to keep track of the pages that need
+** flushing before the current transaction can commit
+*/
+struct reiserfs_page_list ;
+
+struct reiserfs_inode_info {
+ __u32 i_key [4];/* key is still 4 32 bit integers */
+
+ /* this comment will be totally
+ cryptic to readers not familiar
+ with 3.5/3.6 format conversion, and
+ it does not consider that that 3.6
+ might not be the last version */
+ int i_version; // this says whether file is old or new
+
+ int i_pack_on_close ; // file might need tail packing on close
+
+ __u32 i_first_direct_byte; // offset of first byte stored in direct item.
+
+ /* pointer to the page that must be flushed before
+ ** the current transaction can commit.
+ **
+ ** this pointer is only used when the tail is converted back into
+ ** a direct item, or the file is deleted
+ */
+ struct reiserfs_page_list *i_converted_page ;
+
+ /* we save the id of the transaction when we did the direct->indirect
+ ** conversion. That allows us to flush the buffers to disk
+ ** without having to update this inode to zero out the converted
+ ** page variable
+ */
+ int i_conversion_trans_id ;
+
+ /* My guess is this contains the first
+ unused block of a sequence of
+ blocks plus the length of the
+ sequence, which I think is always
+ at least two at the time of the
+ preallocation. I really prefer
+ allocate on flush conceptually.....
+
+ You know, it really annoys me when
+ code is this badly commented that I
+ have to guess what it does.
+ Neither I nor anyone else has time
+ for guessing what your
+ datastructures mean. -Hans */
+ //For preallocation
+ int i_prealloc_block;
+ int i_prealloc_count;
+
+ /* I regret that you think the below
+ is a comment you should make.... -Hans */
+ //nopack-attribute
+ int nopack;
+};
+
+
+#endif
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
new file mode 100644
index 000000000..a6921dad0
--- /dev/null
+++ b/include/linux/reiserfs_fs_sb.h
@@ -0,0 +1,398 @@
+/* Copyright 1996-2000 Hans Reiser, see reiserfs/README for licensing
+ * and copyright details */
+
+#ifndef _LINUX_REISER_FS_SB
+#define _LINUX_REISER_FS_SB
+
+#ifdef __KERNEL__
+#include <linux/tqueue.h>
+#endif
+
+//
+// super block's field values
+//
+/*#define REISERFS_VERSION 0 undistributed bitmap */
+/*#define REISERFS_VERSION 1 distributed bitmap and resizer*/
+#define REISERFS_VERSION_2 2 /* distributed bitmap, resizer, 64-bit, etc*/
+#define UNSET_HASH 0 // read_super will guess about, what hash names
+ // in directories were sorted with
+#define TEA_HASH 1
+#define YURA_HASH 2
+#define R5_HASH 3
+#define DEFAULT_HASH R5_HASH
+
+/* this is the on disk super block */
+
+struct reiserfs_super_block
+{
+ __u32 s_block_count;
+ __u32 s_free_blocks; /* free blocks count */
+ __u32 s_root_block; /* root block number */
+ __u32 s_journal_block; /* journal block number */
+ __u32 s_journal_dev; /* journal device number */
+
+ /* Since journal size is currently a #define in a header file, if
+ ** someone creates a disk with a 16MB journal and moves it to a
+ ** system with 32MB journal default, they will overflow their journal
+ ** when they mount the disk. s_orig_journal_size, plus some checks
+ ** while mounting (inside journal_init) prevent that from happening
+ */
+
+ /* great comment Chris. Thanks. -Hans */
+
+ __u32 s_orig_journal_size;
+ __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
+ __u32 s_journal_block_count ; /* total size of the journal. can change over time */
+ __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
+ __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
+ __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
+ __u16 s_blocksize; /* block size */
+ __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
+ __u16 s_oid_cursize; /* current size of object id array */
+ __u16 s_state; /* valid or error */
+ char s_magic[12]; /* reiserfs magic string indicates that file system is reiserfs */
+ __u32 s_hash_function_code; /* indicate, what hash function is being use to sort names in a directory*/
+ __u16 s_tree_height; /* height of disk tree */
+ __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
+ __u16 s_version; /* I'd prefer it if this was a string,
+ something like "3.6.4", and maybe
+ 16 bytes long mostly unused. We
+ don't need to save bytes in the
+ superblock. -Hans */
+ __u16 s_reserved;
+ char s_unused[128] ; /* zero filled by mkreiserfs */
+} __attribute__ ((__packed__));
+
+#define SB_SIZE (sizeof(struct reiserfs_super_block))
+
+/* this is the super from 3.5.X, where X >= 10 */
+struct reiserfs_super_block_v1
+{
+ __u32 s_block_count; /* blocks count */
+ __u32 s_free_blocks; /* free blocks count */
+ __u32 s_root_block; /* root block number */
+ __u32 s_journal_block; /* journal block number */
+ __u32 s_journal_dev; /* journal device number */
+ __u32 s_orig_journal_size; /* size of the journal on FS creation. used to make sure they don't overflow it */
+ __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
+ __u32 s_journal_block_count ; /* total size of the journal. can change over time */
+ __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
+ __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
+ __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
+ __u16 s_blocksize; /* block size */
+ __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
+ __u16 s_oid_cursize; /* current size of object id array */
+ __u16 s_state; /* valid or error */
+ char s_magic[16]; /* reiserfs magic string indicates that file system is reiserfs */
+ __u16 s_tree_height; /* height of disk tree */
+ __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
+ __u32 s_reserved;
+} __attribute__ ((__packed__));
+
+#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
+
+/* LOGGING -- */
+
+/* These all interrelate for performance.
+**
+** If the journal block count is smaller than n transactions, you lose speed.
+** I don't know what n is yet, I'm guessing 8-16.
+**
+** typical transaction size depends on the application, how often fsync is
+** called, and how many metadata blocks you dirty in a 30 second period.
+** The more small files (<16k) you use, the larger your transactions will
+** be.
+**
+** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
+** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
+** to prevent wrapping before dirty meta blocks get to disk.
+**
+** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal
+** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping.
+**
+** The larger the batch max age, the better the speed, and the more meta data changes you'll lose after a crash.
+**
+*/
+
+/* don't mess with these for a while */
+ /* we have a node size define somewhere in reiserfs_fs.h. -Hans */
+#define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */
+#define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */
+#define JOURNAL_TRANS_MAX 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
+#define JOURNAL_HASH_SIZE 8192
+#define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */
+#define JOURNAL_LIST_COUNT 64
+
+/* these are bh_state bit flag offset numbers, for use in the buffer head */
+
+#define BH_JDirty 16 /* journal data needs to be written before buffer can be marked dirty */
+#define BH_JDirty_wait 18 /* commit is done, buffer marked dirty */
+#define BH_JNew 19 /* buffer allocated during this transaction, no need to write if freed during this trans too */
+
+/* ugly. metadata blocks must be prepared before they can be logged.
+** prepared means unlocked and cleaned. If the block is prepared, but not
+** logged for some reason, any bits cleared while preparing it must be
+** set again.
+*/
+#define BH_JPrepared 20 /* block has been prepared for the log */
+#define BH_JRestore_dirty 22 /* restore the dirty bit later */
+
+/* One of these for every block in every transaction
+** Each one is in two hash tables. First, a hash of the current transaction, and after journal_end, a
+** hash of all the in memory transactions.
+** next and prev are used by the current transaction (journal_hash).
+** hnext and hprev are used by journal_list_hash. If a block is in more than one transaction, the journal_list_hash
+** links it in multiple times. This allows flush_journal_list to remove just the cnode belonging
+** to a given transaction.
+*/
+struct reiserfs_journal_cnode {
+ struct buffer_head *bh ; /* real buffer head */
+ kdev_t dev ; /* dev of real buffer head */
+ unsigned long blocknr ; /* block number of real buffer head, == 0 when buffer on disk */
+ int state ;
+ struct reiserfs_journal_list *jlist ; /* journal list this cnode lives in */
+ struct reiserfs_journal_cnode *next ; /* next in transaction list */
+ struct reiserfs_journal_cnode *prev ; /* prev in transaction list */
+ struct reiserfs_journal_cnode *hprev ; /* prev in hash list */
+ struct reiserfs_journal_cnode *hnext ; /* next in hash list */
+};
+
+struct reiserfs_bitmap_node {
+ int id ;
+ char *data ;
+ struct list_head list ;
+} ;
+
+struct reiserfs_list_bitmap {
+ struct reiserfs_journal_list *journal_list ;
+ struct reiserfs_bitmap_node **bitmaps ;
+} ;
+
+/*
+** transaction handle which is passed around for all journal calls
+*/
+struct reiserfs_transaction_handle {
+ /* ifdef it. -Hans */
+ char *t_caller ; /* debugging use */
+ int t_blocks_logged ; /* number of blocks this writer has logged */
+ int t_blocks_allocated ; /* number of blocks this writer allocated */
+ unsigned long t_trans_id ; /* sanity check, equals the current trans id */
+ struct super_block *t_super ; /* super for this FS when journal_begin was
+ called. saves calls to reiserfs_get_super */
+
+} ;
+
+/*
+** one of these for each transaction. The most important part here is the j_realblock.
+** this list of cnodes is used to hash all the blocks in all the commits, to mark all the
+** real buffer heads dirty once all the commits hit the disk,
+** and to make sure every real block in a transaction is on disk before allowing the log area
+** to be overwritten */
+struct reiserfs_journal_list {
+ unsigned long j_start ;
+ unsigned long j_len ;
+ atomic_t j_nonzerolen ;
+ atomic_t j_commit_left ;
+ atomic_t j_flushing ;
+ atomic_t j_commit_flushing ;
+ atomic_t j_older_commits_done ; /* all commits older than this on disk*/
+ unsigned long j_trans_id ;
+ time_t j_timestamp ;
+ struct reiserfs_list_bitmap *j_list_bitmap ;
+ struct buffer_head *j_commit_bh ; /* commit buffer head */
+ struct reiserfs_journal_cnode *j_realblock ;
+ struct reiserfs_journal_cnode *j_freedlist ; /* list of buffers that were freed during this trans. free each of these on flush */
+ wait_queue_head_t j_commit_wait ; /* wait for all the commit blocks to be flushed */
+ wait_queue_head_t j_flush_wait ; /* wait for all the real blocks to be flushed */
+} ;
+
+struct reiserfs_page_list ; /* defined in reiserfs_fs.h */
+
+struct reiserfs_journal {
+ struct buffer_head ** j_ap_blocks ; /* journal blocks on disk */
+ struct reiserfs_journal_cnode *j_last ; /* newest journal block */
+ struct reiserfs_journal_cnode *j_first ; /* oldest journal block. start here for traverse */
+
+ int j_state ;
+ unsigned long j_trans_id ;
+ unsigned long j_mount_id ;
+ unsigned long j_start ; /* start of current waiting commit (index into j_ap_blocks) */
+  unsigned long j_len ;              /* length of current waiting commit */
+ unsigned long j_len_alloc ; /* number of buffers requested by journal_begin() */
+ atomic_t j_wcount ; /* count of writers for current commit */
+ unsigned long j_bcount ; /* batch count. allows turning X transactions into 1 */
+ unsigned long j_first_unflushed_offset ; /* first unflushed transactions offset */
+ unsigned long j_last_flush_trans_id ; /* last fully flushed journal timestamp */
+ struct buffer_head *j_header_bh ;
+
+ /* j_flush_pages must be flushed before the current transaction can
+ ** commit
+ */
+ struct reiserfs_page_list *j_flush_pages ;
+ time_t j_trans_start_time ; /* time this transaction started */
+ wait_queue_head_t j_wait ; /* wait journal_end to finish I/O */
+ atomic_t j_wlock ; /* lock for j_wait */
+ wait_queue_head_t j_join_wait ; /* wait for current transaction to finish before starting new one */
+ atomic_t j_jlock ; /* lock for j_join_wait */
+ int j_journal_list_index ; /* journal list number of the current trans */
+ int j_list_bitmap_index ; /* number of next list bitmap to use */
+ int j_must_wait ; /* no more journal begins allowed. MUST sleep on j_join_wait */
+ int j_next_full_flush ; /* next journal_end will flush all journal list */
+ int j_next_async_flush ; /* next journal_end will flush all async commits */
+
+ int j_cnode_used ; /* number of cnodes on the used list */
+ int j_cnode_free ; /* number of cnodes on the free list */
+
+ struct reiserfs_journal_cnode *j_cnode_free_list ;
+ struct reiserfs_journal_cnode *j_cnode_free_orig ; /* orig pointer returned from vmalloc */
+
+ int j_free_bitmap_nodes ;
+ int j_used_bitmap_nodes ;
+ struct list_head j_bitmap_nodes ;
+ struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS] ; /* array of bitmaps to record the deleted blocks */
+ struct reiserfs_journal_list j_journal_list[JOURNAL_LIST_COUNT] ; /* array of all the journal lists */
+ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for real buffer heads in current trans */
+ struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for all the real buffer heads in all
+ the transactions */
+};
+
+#define JOURNAL_DESC_MAGIC "ReIsErLB" /* ick. magic string to find desc blocks in the journal */
+
+
+typedef __u32 (*hashf_t) (const char *, int);
+
+/* reiserfs union of in-core super block data */
+struct reiserfs_sb_info
+{
+ struct buffer_head * s_sbh; /* Buffer containing the super block */
+ /* both the comment and the choice of
+ name are unclear for s_rs -Hans */
+ struct reiserfs_super_block * s_rs; /* Pointer to the super block in the buffer */
+ struct buffer_head ** s_ap_bitmap; /* array of buffers, holding block bitmap */
+ struct reiserfs_journal *s_journal ; /* pointer to journal information */
+ unsigned short s_mount_state; /* reiserfs state (valid, invalid) */
+
+ /* Comment? -Hans */
+ void (*end_io_handler)(struct buffer_head *, int);
+ hashf_t s_hash_function; /* pointer to function which is used
+ to sort names in directory. Set on
+ mount */
+ unsigned long s_mount_opt; /* reiserfs's mount options are set
+ here (currently - NOTAIL, NOLOG,
+ REPLAYONLY) */
+
+ /* Comment? -Hans */
+ wait_queue_head_t s_wait;
+ /* To be obsoleted soon by per buffer seals.. -Hans */
+ atomic_t s_generation_counter; // increased by one every time the
+ // tree gets re-balanced
+
+ /* session statistics */
+ int s_kmallocs;
+ int s_disk_reads;
+ int s_disk_writes;
+ int s_fix_nodes;
+ int s_do_balance;
+ int s_unneeded_left_neighbor;
+ int s_good_search_by_key_reada;
+ int s_bmaps;
+ int s_bmaps_without_search;
+ int s_direct2indirect;
+ int s_indirect2direct;
+};
+
+
+#define NOTAIL 0 /* -o notail: no tails will be created in a session */
+#define REPLAYONLY 3 /* replay journal and return 0. Use by fsck */
+#define REISERFS_NOLOG 4 /* -o nolog: turn journalling off */
+#define REISERFS_CONVERT 5 /* -o conv: causes conversion of old
+ format super block to the new
+ format. If not specified - old
+ partition will be dealt with in a
+ manner of 3.5.x */
+
+/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
+** is not required. If the normal autodetection code can't determine which
+** hash to use (because both hashes had the same value for a file)
+** use this option to force a specific hash. It won't allow you to override
+** the existing hash on the FS, so if you have a tea hash disk, and mount
+** with -o hash=rupasov, the mount will fail.
+*/
+#define FORCE_TEA_HASH 6 /* try to force tea hash on mount */
+#define FORCE_RUPASOV_HASH 7 /* try to force rupasov hash on mount */
+#define FORCE_R5_HASH  8      /* try to force r5 hash on mount */
+#define FORCE_HASH_DETECT 9 /* try to detect hash function on mount */
+
+
+/* used for testing experimental features, makes benchmarking new
+ features with and without more convenient, should never be used by
+ users in any code shipped to users (ideally) */
+
+#define REISERFS_NO_BORDER 11
+#define REISERFS_NO_UNHASHED_RELOCATION 12
+#define REISERFS_HASHED_RELOCATION 13
+#define REISERFS_TEST4 14
+
+#define REISERFS_TEST1 11
+#define REISERFS_TEST2 12
+#define REISERFS_TEST3 13
+#define REISERFS_TEST4 14
+
+#define reiserfs_r5_hash(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << FORCE_R5_HASH))
+#define reiserfs_rupasov_hash(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << FORCE_RUPASOV_HASH))
+#define reiserfs_tea_hash(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << FORCE_TEA_HASH))
+#define reiserfs_hash_detect(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << FORCE_HASH_DETECT))
+#define reiserfs_no_border(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NO_BORDER))
+#define reiserfs_no_unhashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION))
+#define reiserfs_hashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_HASHED_RELOCATION))
+#define reiserfs_test4(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_TEST4))
+
+#define dont_have_tails(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << NOTAIL))
+#define replay_only(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REPLAYONLY))
+#define reiserfs_dont_log(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NOLOG))
+#define old_format_only(s) ((SB_VERSION(s) != REISERFS_VERSION_2) && !((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_CONVERT)))
+
+
+void reiserfs_file_buffer (struct buffer_head * bh, int list);
+int reiserfs_is_super(struct super_block *s) ;
+int journal_mark_dirty(struct reiserfs_transaction_handle *, struct super_block *, struct buffer_head *bh) ;
+int flush_old_commits(struct super_block *s, int) ;
+int show_reiserfs_locks(void) ;
+int reiserfs_resize(struct super_block *, unsigned long) ;
+
+#define CARRY_ON 0
+#define SCHEDULE_OCCURRED 1
+
+
+#define SB_BUFFER_WITH_SB(s) ((s)->u.reiserfs_sb.s_sbh)
+#define SB_JOURNAL(s) ((s)->u.reiserfs_sb.s_journal)
+#define SB_JOURNAL_LIST(s) (SB_JOURNAL(s)->j_journal_list)
+#define SB_JOURNAL_LIST_INDEX(s) (SB_JOURNAL(s)->j_journal_list_index)
+#define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free)
+#define SB_AP_BITMAP(s) ((s)->u.reiserfs_sb.s_ap_bitmap)
+
+
+// on-disk super block fields converted to cpu form
+#define SB_DISK_SUPER_BLOCK(s) ((s)->u.reiserfs_sb.s_rs)
+#define SB_BLOCK_COUNT(s) le32_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_block_count))
+#define SB_FREE_BLOCKS(s) le32_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_free_blocks))
+#define SB_REISERFS_MAGIC(s) (SB_DISK_SUPER_BLOCK(s)->s_magic)
+#define SB_ROOT_BLOCK(s) le32_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_root_block))
+#define SB_TREE_HEIGHT(s) le16_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_tree_height))
+#define SB_REISERFS_STATE(s) le16_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_state))
+#define SB_VERSION(s) le16_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_version))
+#define SB_BMAP_NR(s) le16_to_cpu ((SB_DISK_SUPER_BLOCK(s)->s_bmap_nr))
+
+#define PUT_SB_BLOCK_COUNT(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0)
+#define PUT_SB_FREE_BLOCKS(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0)
+#define PUT_SB_ROOT_BLOCK(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0)
+#define PUT_SB_TREE_HEIGHT(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0)
+#define PUT_SB_REISERFS_STATE(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_state = cpu_to_le16(val); } while (0)
+#define PUT_SB_VERSION(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0)
+#define PUT_SB_BMAP_NR(s, val) do { SB_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0)
+
+#endif /* _LINUX_REISER_FS_SB */
+
+
+
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index fba9111da..df958b66a 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -8,7 +8,7 @@
* Copyright (C) 1999 Hewlett-Packard Co.
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
-#ifndef _LINUX_RTC_H
+#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25196fc53..a382b6736 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -219,13 +219,14 @@ struct mm_struct {
unsigned long rss, total_vm, locked_vm;
unsigned long def_flags;
unsigned long cpu_vm_mask;
- unsigned long swap_cnt; /* number of pages to swap on next pass */
unsigned long swap_address;
/* Architecture-specific MM context */
mm_context_t context;
};
+extern int mmlist_nr;
+
#define INIT_MM(name) \
{ \
mmap: &init_mmap, \
@@ -542,8 +543,8 @@ extern unsigned long prof_shift;
#define CURRENT_TIME (xtime.tv_sec)
-extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
-extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
+extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
@@ -552,12 +553,17 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));
-#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,0)
-#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE,0)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
+#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
+#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)  /* BUG FIX: 'nr' was a free identifier — the macro took only (x), so the expansion's 'nr' would fail to compile or silently capture a caller's local; parameter list now matches the sibling wake_up_sync_nr/wake_up_interruptible_nr macros above */
+asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e001de887..afb1d96b3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -107,7 +107,7 @@ extern wait_queue_head_t kreclaimd_wait;
extern int page_launder(int, int);
extern int free_shortage(void);
extern int inactive_shortage(void);
-extern void wakeup_kswapd(int);
+extern void wakeup_kswapd(void);
extern int try_to_free_pages(unsigned int gfp_mask);
/* linux/mm/page_io.c */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 80feae4c7..f24e4de6b 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -30,7 +30,7 @@ extern struct vt_struct {
wait_queue_head_t paste_wait;
} *vt_cons[MAX_NR_CONSOLES];
-void (*kd_mksound)(unsigned int hz, unsigned int ticks);
+extern void (*kd_mksound)(unsigned int hz, unsigned int ticks);
/* console.c */
diff --git a/include/net/dn.h b/include/net/dn.h
index 52c6ba44d..88a0bab3e 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -9,6 +9,8 @@ typedef unsigned short dn_address;
#define dn_ntohs(x) le16_to_cpu((unsigned short)(x))
#define dn_htons(x) cpu_to_le16((unsigned short)(x))
+#define DN_SK(sk) (&sk->protinfo.dn)
+
struct dn_scp /* Session Control Port */
{
unsigned char state;
@@ -44,9 +46,22 @@ struct dn_scp /* Session Control Port */
#define DN_SEND 2
#define DN_DONTSEND 1
#define DN_NOCHANGE 0
+ unsigned short flowrem_dat;
+ unsigned short flowrem_oth;
+ unsigned short flowloc_dat;
+ unsigned short flowloc_oth;
+ unsigned char services_rem;
+ unsigned char services_loc;
+ unsigned char info_rem;
+ unsigned char info_loc;
+
+ unsigned short segsize_rem;
+ unsigned short segsize_loc;
+
+ unsigned char nonagle;
+ unsigned char multi_ireq;
unsigned char accept_mode;
- unsigned short mss;
- unsigned long seg_size; /* Running total of current segment */
+ unsigned long seg_total; /* Running total of current segment */
struct optdata_dn conndata_in;
struct optdata_dn conndata_out;
@@ -80,7 +95,8 @@ struct dn_scp /* Session Control Port */
* multipliers.
*/
#define NSP_MIN_WINDOW 1
-#define NSP_MAX_WINDOW 512
+#define NSP_MAX_WINDOW (0x07fe)
+ unsigned long max_window;
unsigned long snd_window;
#define NSP_INITIAL_SRTT (HZ)
unsigned long nsp_srtt;
@@ -116,6 +132,7 @@ struct dn_scp /* Session Control Port */
struct timer_list delack_timer;
int delack_pending;
void (*delack_fxn)(struct sock *sk);
+
};
/*
@@ -128,7 +145,7 @@ struct dn_scp /* Session Control Port */
* segsize : Size of segment
* segnum : Number, for data, otherdata and linkservice
* xmit_count : Number of times we've transmitted this skb
- * stamp : Time stamp of first transmission, used in RTT calculations
+ * stamp : Time stamp of most recent transmission, used in RTT calculations
* iif: Input interface number
*
* As a general policy, this structure keeps all addresses in network
@@ -136,6 +153,7 @@ struct dn_scp /* Session Control Port */
* and src_port are in network order. All else is in host order.
*
*/
+#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
struct dn_skb_cb {
unsigned short dst;
unsigned short src;
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 8b628d2e5..3948c30a3 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -24,12 +24,12 @@ extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
unsigned short reason, int gfp);
extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
unsigned short reason);
-extern void dn_nsp_send_lnk(struct sock *sk, unsigned short flags);
+extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
extern void dn_nsp_output(struct sock *sk);
extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int oob);
+extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob);
extern unsigned long dn_nsp_persist(struct sock *sk);
extern int dn_nsp_xmit_timeout(struct sock *sk);
@@ -120,6 +120,7 @@ struct nsp_conn_init_msg
#define NSP_FC_NONE 0x00 /* Flow Control None */
#define NSP_FC_SRC 0x04 /* Seg Req. Count */
#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
+#define NSP_FC_MASK 0x0c /* FC type mask */
unsigned char info __attribute__((packed));
unsigned short segsize __attribute__((packed));
};
@@ -178,13 +179,13 @@ static __inline__ int before_or_equal(unsigned short seq1, unsigned short seq2)
static __inline__ void seq_add(unsigned short *seq, unsigned short off)
{
- *seq += off;
- *seq &= 0x0fff;
+ (*seq) += off;
+ (*seq) &= 0x0fff;
}
static __inline__ int seq_next(unsigned short seq1, unsigned short seq2)
{
- return (((seq2&0x0fff) - (seq1&0x0fff)) == 1);
+ return equal(seq1 + 1, seq2);
}
/*
diff --git a/include/net/ipx.h b/include/net/ipx.h
index bc023a662..ba788670f 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -73,6 +73,14 @@ typedef struct ipx_route {
struct ipx_route *ir_next;
} ipx_route;
+#ifdef __KERNEL__
+struct ipx_cb {
+ u8 ipx_tctrl;
+ u32 ipx_dest_net;
+ u32 ipx_source_net;
+ int last_hop_index;
+};
+#endif
#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
diff --git a/include/net/x25.h b/include/net/x25.h
index 257618d44..2e1e88426 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -217,6 +217,7 @@ extern void x25_start_t23timer(struct sock *);
extern void x25_stop_heartbeat(struct sock *);
extern void x25_stop_timer(struct sock *);
extern unsigned long x25_display_timer(struct sock *);
+extern void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */
extern void x25_register_sysctl(void);
diff --git a/ipc/shm.c b/ipc/shm.c
index b1c0b96c2..71e22cd58 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -121,6 +121,7 @@ static void shm_destroy (struct shmid_kernel *shp)
{
shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid (shp->id);
+ shmem_lock(shp->shm_file, 0);
fput (shp->shm_file);
kfree (shp);
}
@@ -467,10 +468,10 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
if(err)
goto out_unlock;
if(cmd==SHM_LOCK) {
- shp->shm_file->f_dentry->d_inode->u.shmem_i.locked = 1;
+ shmem_lock(shp->shm_file, 1);
shp->shm_flags |= SHM_LOCKED;
} else {
- shp->shm_file->f_dentry->d_inode->u.shmem_i.locked = 0;
+ shmem_lock(shp->shm_file, 0);
shp->shm_flags &= ~SHM_LOCKED;
}
shm_unlock(shmid);
diff --git a/kernel/context.c b/kernel/context.c
index 864a70131..6bfaaca92 100644
--- a/kernel/context.c
+++ b/kernel/context.c
@@ -148,7 +148,7 @@ void flush_scheduled_tasks(void)
int start_context_thread(void)
{
- kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES);
return 0;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 99c1f2317..e578a9644 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
mm->mmap_cache = NULL;
mm->map_count = 0;
mm->cpu_vm_mask = 0;
- mm->swap_cnt = 0;
mm->swap_address = 0;
pprev = &mm->mmap;
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
@@ -193,6 +192,7 @@ fail_nomem:
}
spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+int mmlist_nr;
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -246,6 +246,7 @@ void mmput(struct mm_struct *mm)
{
if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
list_del(&mm->mmlist);
+ mmlist_nr--;
spin_unlock(&mmlist_lock);
exit_mmap(mm);
mmdrop(mm);
@@ -326,6 +327,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
*/
spin_lock(&mmlist_lock);
list_add(&mm->mmlist, &oldmm->mmlist);
+ mmlist_nr++;
spin_unlock(&mmlist_lock);
if (retval)
@@ -445,7 +447,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
if (size > __FD_SETSIZE) {
newf->max_fdset = 0;
write_lock(&newf->file_lock);
- error = expand_fdset(newf, size);
+ error = expand_fdset(newf, size-1);
write_unlock(&newf->file_lock);
if (error)
goto out_release;
@@ -464,7 +466,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
read_unlock(&oldf->file_lock);
newf->max_fds = 0;
write_lock(&newf->file_lock);
- error = expand_fd_array(newf, open_files);
+ error = expand_fd_array(newf, open_files-1);
write_unlock(&newf->file_lock);
if (error)
goto out_release;
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 8afe07cca..f36261500 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -159,6 +159,7 @@ EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(__d_path);
EXPORT_SYMBOL(mark_buffer_dirty);
+EXPORT_SYMBOL(set_buffer_async_io); /* for reiserfs_writepage */
EXPORT_SYMBOL(__mark_buffer_dirty);
EXPORT_SYMBOL(__mark_inode_dirty);
EXPORT_SYMBOL(get_empty_filp);
@@ -394,6 +395,7 @@ EXPORT_SYMBOL(unmap_kiobuf);
EXPORT_SYMBOL(lock_kiovec);
EXPORT_SYMBOL(unlock_kiovec);
EXPORT_SYMBOL(brw_kiovec);
+EXPORT_SYMBOL(kiobuf_wait_for_io);
/* dma handling */
EXPORT_SYMBOL(request_dma);
diff --git a/kernel/sched.c b/kernel/sched.c
index bc2dcfa70..ec1f463d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -690,19 +690,15 @@ scheduling_in_interrupt:
}
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
- unsigned int wq_mode, const int sync)
+ int nr_exclusive, const int sync)
{
struct list_head *tmp, *head;
- struct task_struct *p, *best_exclusive;
+ struct task_struct *p;
unsigned long flags;
- int best_cpu, irq;
if (!q)
goto out;
- best_cpu = smp_processor_id();
- irq = in_interrupt();
- best_exclusive = NULL;
wq_write_lock_irqsave(&q->lock, flags);
#if WAITQUEUE_DEBUG
@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
#if WAITQUEUE_DEBUG
curr->__waker = (long)__builtin_return_address(0);
#endif
- /*
- * If waking up from an interrupt context then
- * prefer processes which are affine to this
- * CPU.
- */
- if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
- if (!best_exclusive)
- best_exclusive = p;
- if (p->processor == best_cpu) {
- best_exclusive = p;
- break;
- }
- } else {
- if (sync)
- wake_up_process_synchronous(p);
- else
- wake_up_process(p);
- if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)
- break;
- }
+ if (sync)
+ wake_up_process_synchronous(p);
+ else
+ wake_up_process(p);
+ if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+ break;
}
}
- if (best_exclusive) {
- if (sync)
- wake_up_process_synchronous(best_exclusive);
- else
- wake_up_process(best_exclusive);
- }
wq_write_unlock_irqrestore(&q->lock, flags);
out:
return;
}
-void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, wq_mode, 0);
+ __wake_up_common(q, mode, nr, 0);
}
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, wq_mode, 1);
+ __wake_up_common(q, mode, nr, 1);
}
#define SLEEP_ON_VAR \
diff --git a/mm/filemap.c b/mm/filemap.c
index dedd7911e..4c89ad3e9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,7 +143,8 @@ void __set_page_dirty(struct page *page)
list_add(&page->list, &mapping->dirty_pages);
spin_unlock(&pagecache_lock);
- mark_inode_dirty_pages(mapping->host);
+ if (mapping->host)
+ mark_inode_dirty_pages(mapping->host);
}
/**
@@ -306,7 +307,7 @@ inside:
*/
age_page_up(page);
if (inactive_shortage() > inactive_target / 2 && free_shortage())
- wakeup_kswapd(0);
+ wakeup_kswapd();
not_found:
return page;
}
@@ -974,10 +975,6 @@ static void generic_file_readahead(int reada_ok,
* accessed sequentially.
*/
if (ahead) {
- if (reada_ok == 2) {
- run_task_queue(&tq_disk);
- }
-
filp->f_ralen += ahead;
filp->f_rawin += filp->f_ralen;
filp->f_raend = raend + ahead + 1;
@@ -1835,7 +1832,8 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
n->vm_end = end;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
- get_file(n->vm_file);
+ if (n->vm_file)
+ get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
@@ -1861,7 +1859,8 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
- get_file(n->vm_file);
+ if (n->vm_file)
+ get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
@@ -1893,7 +1892,8 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
left->vm_raend = 0;
right->vm_raend = 0;
- atomic_add(2, &vma->vm_file->f_count);
+ if (vma->vm_file)
+ atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
diff --git a/mm/memory.c b/mm/memory.c
index 6f1f318a3..7fc8de5eb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -207,7 +207,8 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
src_pte = pte_offset(src_pmd, address);
dst_pte = pte_offset(dst_pmd, address);
-
+
+ spin_lock(&src->page_table_lock);
do {
pte_t pte = *src_pte;
struct page *ptepage;
@@ -240,10 +241,11 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
cont_copy_pte_range: set_pte(dst_pte, pte);
cont_copy_pte_range_noset: address += PAGE_SIZE;
if (address >= end)
- goto out;
+ goto out_unlock;
src_pte++;
dst_pte++;
} while ((unsigned long)src_pte & PTE_TABLE_MASK);
+ spin_unlock(&src->page_table_lock);
cont_copy_pmd_range: src_pmd++;
dst_pmd++;
@@ -252,6 +254,10 @@ cont_copy_pmd_range: src_pmd++;
out:
return 0;
+out_unlock:
+ spin_unlock(&src->page_table_lock);
+ return 0;
+
nomem:
return -ENOMEM;
}
@@ -939,7 +945,6 @@ void vmtruncate(struct inode * inode, loff_t offset)
if (inode->i_size < offset)
goto do_expand;
inode->i_size = offset;
- truncate_inode_pages(mapping, offset);
spin_lock(&mapping->i_shared_lock);
if (!mapping->i_mmap && !mapping->i_mmap_shared)
goto out_unlock;
@@ -954,8 +959,7 @@ void vmtruncate(struct inode * inode, loff_t offset)
out_unlock:
spin_unlock(&mapping->i_shared_lock);
- /* this should go into ->truncate */
- inode->i_size = offset;
+ truncate_inode_pages(mapping, offset);
if (inode->i_op && inode->i_op->truncate)
inode->i_op->truncate(inode);
return;
diff --git a/mm/mmap.c b/mm/mmap.c
index e5b3a989e..e1faba3c7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -883,6 +883,8 @@ void exit_mmap(struct mm_struct * mm)
mm->rss = 0;
mm->total_vm = 0;
mm->locked_vm = 0;
+
+ flush_cache_mm(mm);
while (mpnt) {
struct vm_area_struct * next = mpnt->vm_next;
unsigned long start = mpnt->vm_start;
@@ -895,13 +897,13 @@ void exit_mmap(struct mm_struct * mm)
}
mm->map_count--;
remove_shared_vm_struct(mpnt);
- flush_cache_range(mm, start, end);
zap_page_range(mm, start, size);
if (mpnt->vm_file)
fput(mpnt->vm_file);
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = next;
}
+ flush_tlb_mm(mm);
/* This is just debugging */
if (mm->map_count)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b67aa4913..09ac27284 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/slab.h>
int nr_swap_pages;
int nr_active_pages;
@@ -303,7 +304,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
* an inactive page shortage, wake up kswapd.
*/
if (inactive_shortage() > inactive_target / 2 && free_shortage())
- wakeup_kswapd(0);
+ wakeup_kswapd();
/*
* If we are about to get low on free pages and cleaning
* the inactive_dirty pages would fix the situation,
@@ -379,7 +380,7 @@ try_again:
* - if we don't have __GFP_IO set, kswapd may be
* able to free some memory we can't free ourselves
*/
- wakeup_kswapd(0);
+ wakeup_kswapd();
if (gfp_mask & __GFP_WAIT) {
__set_current_state(TASK_RUNNING);
current->policy |= SCHED_YIELD;
@@ -404,7 +405,7 @@ try_again:
* - we're doing a higher-order allocation
* --> move pages to the free list until we succeed
* - we're /really/ tight on memory
- * --> wait on the kswapd waitqueue until memory is freed
+ * --> try to free pages ourselves with page_launder
*/
if (!(current->flags & PF_MEMALLOC)) {
/*
@@ -443,36 +444,20 @@ try_again:
/*
* When we arrive here, we are really tight on memory.
*
- * We wake up kswapd and sleep until kswapd wakes us
- * up again. After that we loop back to the start.
- *
- * We have to do this because something else might eat
- * the memory kswapd frees for us and we need to be
- * reliable. Note that we don't loop back for higher
- * order allocations since it is possible that kswapd
- * simply cannot free a large enough contiguous area
- * of memory *ever*.
+ * We try to free pages ourselves by:
+ * - shrinking the i/d caches.
+ * - reclaiming unused memory from the slab caches.
+ * - swapping/syncing pages to disk (done by page_launder)
+ * - moving clean pages from the inactive dirty list to
+ * the inactive clean list. (done by page_launder)
*/
- if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) {
- wakeup_kswapd(1);
+ if (gfp_mask & __GFP_WAIT) {
memory_pressure++;
- if (!order)
- goto try_again;
- /*
- * If __GFP_IO isn't set, we can't wait on kswapd because
- * kswapd just might need some IO locks /we/ are holding ...
- *
- * SUBTLE: The scheduling point above makes sure that
- * kswapd does get the chance to free memory we can't
- * free ourselves...
- */
- } else if (gfp_mask & __GFP_WAIT) {
try_to_free_pages(gfp_mask);
- memory_pressure++;
+ wakeup_bdflush(0);
if (!order)
goto try_again;
}
-
}
/*
@@ -554,14 +539,8 @@ void __free_pages(struct page *page, unsigned long order)
void free_pages(unsigned long addr, unsigned long order)
{
- struct page *fpage;
-
-#ifdef CONFIG_DISCONTIGMEM
- if (addr == 0) return;
-#endif
- fpage = virt_to_page(addr);
- if (VALID_PAGE(fpage))
- __free_pages(fpage, order);
+ if (addr != 0)
+ __free_pages(virt_to_page(addr), order);
}
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index a81a74659..00426ca27 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -117,11 +117,43 @@ shmem_truncate_part (swp_entry_t * dir, unsigned long size,
return 0;
}
+/*
+ * shmem_recalc_inode - recalculate the size of an inode
+ *
+ * @inode: inode to recalc
+ *
+ * We have to calculate the free blocks since the mm can drop pages
+ * behind our back
+ *
+ * But we know that normally
+ * inodes->i_blocks == inode->i_mapping->nrpages + info->swapped
+ *
+ * So the mm freed
+ * inodes->i_blocks - (inode->i_mapping->nrpages + info->swapped)
+ *
+ * It has to be called with the spinlock held.
+ */
+
+static void shmem_recalc_inode(struct inode * inode)
+{
+ unsigned long freed;
+
+ freed = inode->i_blocks -
+ (inode->i_mapping->nrpages + inode->u.shmem_i.swapped);
+ if (freed){
+ struct shmem_sb_info * info = &inode->i_sb->u.shmem_sb;
+ inode->i_blocks -= freed;
+ spin_lock (&info->stat_lock);
+ info->free_blocks += freed;
+ spin_unlock (&info->stat_lock);
+ }
+}
+
static void shmem_truncate (struct inode * inode)
{
int clear_base;
unsigned long start;
- unsigned long mmfreed, freed = 0;
+ unsigned long freed = 0;
swp_entry_t **base, **ptr;
struct shmem_inode_info * info = &inode->u.shmem_i;
@@ -154,26 +186,9 @@ static void shmem_truncate (struct inode * inode)
info->i_indirect = 0;
out:
-
- /*
- * We have to calculate the free blocks since we do not know
- * how many pages the mm discarded
- *
- * But we know that normally
- * inodes->i_blocks == inode->i_mapping->nrpages + info->swapped
- *
- * So the mm freed
- * inodes->i_blocks - (inode->i_mapping->nrpages + info->swapped)
- */
-
- mmfreed = inode->i_blocks - (inode->i_mapping->nrpages + info->swapped);
info->swapped -= freed;
- inode->i_blocks -= freed + mmfreed;
+ shmem_recalc_inode(inode);
spin_unlock (&info->lock);
-
- spin_lock (&inode->i_sb->u.shmem_sb.stat_lock);
- inode->i_sb->u.shmem_sb.free_blocks += freed + mmfreed;
- spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);
}
static void shmem_delete_inode(struct inode * inode)
@@ -201,13 +216,15 @@ static int shmem_writepage(struct page * page)
swp_entry_t *entry, swap;
info = &page->mapping->host->u.shmem_i;
- if (info->locked)
- return 1;
swap = __get_swap_page(2);
- if (!swap.val)
- return 1;
+ if (!swap.val) {
+ set_page_dirty(page);
+ UnlockPage(page);
+ return -ENOMEM;
+ }
spin_lock(&info->lock);
+ shmem_recalc_inode(page->mapping->host);
entry = shmem_swp_entry (info, page->index);
if (!entry) /* this had been allocted on page allocation */
BUG();
@@ -269,6 +286,9 @@ struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, i
entry = shmem_swp_entry (info, idx);
if (!entry)
goto oom;
+ spin_lock (&info->lock);
+ shmem_recalc_inode(inode);
+ spin_unlock (&info->lock);
if (entry->val) {
unsigned long flags;
@@ -310,6 +330,8 @@ struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, i
}
/* We have the page */
SetPageUptodate (page);
+ if (info->locked)
+ page_cache_get(page);
cached_page:
UnlockPage (page);
@@ -374,8 +396,7 @@ struct inode *shmem_get_inode(struct super_block *sb, int mode, int dev)
inode->i_fop = &shmem_dir_operations;
break;
case S_IFLNK:
- inode->i_op = &page_symlink_inode_operations;
- break;
+ BUG();
}
spin_lock (&shmem_ilock);
list_add (&inode->u.shmem_i.list, &shmem_inodes);
@@ -401,6 +422,32 @@ static int shmem_statfs(struct super_block *sb, struct statfs *buf)
return 0;
}
+void shmem_lock(struct file * file, int lock)
+{
+ struct inode * inode = file->f_dentry->d_inode;
+ struct shmem_inode_info * info = &inode->u.shmem_i;
+ struct page * page;
+ unsigned long idx, size;
+
+ if (info->locked == lock)
+ return;
+ down(&inode->i_sem);
+ info->locked = lock;
+ size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ for (idx = 0; idx < size; idx++) {
+ page = find_lock_page(inode->i_mapping, idx);
+ if (!page)
+ continue;
+ if (!lock) {
+ /* release the extra count and our reference */
+ page_cache_release(page);
+ page_cache_release(page);
+ }
+ UnlockPage(page);
+ }
+ up(&inode->i_sem);
+}
+
/*
* Lookup the data. This is trivial - if the dentry didn't already
* exist, we know it is negative.
@@ -528,19 +575,6 @@ static int shmem_rename(struct inode * old_dir, struct dentry *old_dentry, struc
return error;
}
-static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
-{
- int error;
-
- error = shmem_mknod(dir, dentry, S_IFLNK | S_IRWXUGO, 0);
- if (!error) {
- int l = strlen(symname)+1;
- struct inode *inode = dentry->d_inode;
- error = block_symlink(inode, symname, l);
- }
- return error;
-}
-
static int shmem_mmap(struct file * file, struct vm_area_struct * vma)
{
struct vm_operations_struct * ops;
@@ -677,7 +711,6 @@ static struct inode_operations shmem_dir_inode_operations = {
lookup: shmem_lookup,
link: shmem_link,
unlink: shmem_unlink,
- symlink: shmem_symlink,
mkdir: shmem_mkdir,
rmdir: shmem_rmdir,
mknod: shmem_mknod,
diff --git a/mm/slab.c b/mm/slab.c
index b3bd852d1..f6f8be1db 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1702,7 +1702,7 @@ static void enable_all_cpucaches (void)
* kmem_cache_reap - Reclaim memory from caches.
* @gfp_mask: the type of memory required.
*
- * Called from try_to_free_page().
+ * Called from do_try_to_free_pages() and __alloc_pages()
*/
void kmem_cache_reap (int gfp_mask)
{
diff --git a/mm/swap.c b/mm/swap.c
index 693773ccd..b1a6640bc 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -30,8 +30,7 @@
* start background swapping if we fall below freepages.high free
* pages, and we begin intensive swapping below freepages.low.
*
- * Actual initialization is done in mm/page_alloc.c or
- * arch/sparc(64)/mm/init.c.
+ * Actual initialization is done in mm/page_alloc.c
*/
freepages_t freepages = {
0, /* freepages.min */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 62ce5f1ff..93edab662 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -9,6 +9,7 @@
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
+#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index afa5261c1..f41c53328 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -35,45 +35,21 @@
* using a process that no longer actually exists (it might
* have died while we slept).
*/
-static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
+static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
{
pte_t pte;
swp_entry_t entry;
- struct page * page;
- int onlist;
-
- pte = *page_table;
- if (!pte_present(pte))
- goto out_failed;
- page = pte_page(pte);
- if ((!VALID_PAGE(page)) || PageReserved(page))
- goto out_failed;
-
- if (!mm->swap_cnt)
- return 1;
-
- mm->swap_cnt--;
- onlist = PageActive(page);
/* Don't look at this pte if it's been accessed recently. */
if (ptep_test_and_clear_young(page_table)) {
- age_page_up(page);
- goto out_failed;
+ page->age += PAGE_AGE_ADV;
+ if (page->age > PAGE_AGE_MAX)
+ page->age = PAGE_AGE_MAX;
+ return;
}
- if (!onlist)
- /* The page is still mapped, so it can't be freeable... */
- age_page_down_ageonly(page);
-
- /*
- * If the page is in active use by us, or if the page
- * is in active use by others, don't unmap it or
- * (worse) start unneeded IO.
- */
- if (page->age > 0)
- goto out_failed;
if (TryLockPage(page))
- goto out_failed;
+ return;
/* From this point on, the odds are that we're going to
* nuke this pte, so read and clear the pte. This hook
@@ -87,9 +63,6 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
* Is the page already in the swap cache? If so, then
* we can just drop our reference to it without doing
* any IO - it's already up-to-date on disk.
- *
- * Return 0, as we didn't actually free any real
- * memory, and we should just continue our scan.
*/
if (PageSwapCache(page)) {
entry.val = page->index;
@@ -99,12 +72,12 @@ set_swap_pte:
swap_duplicate(entry);
set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
- UnlockPage(page);
mm->rss--;
- deactivate_page(page);
+ if (!page->age)
+ deactivate_page(page);
+ UnlockPage(page);
page_cache_release(page);
-out_failed:
- return 0;
+ return;
}
/*
@@ -153,34 +126,20 @@ out_failed:
out_unlock_restore:
set_pte(page_table, pte);
UnlockPage(page);
- return 0;
+ return;
}
-/*
- * A new implementation of swap_out(). We do not swap complete processes,
- * but only a small number of blocks, before we continue with the next
- * process. The number of blocks actually swapped is determined on the
- * number of page faults, that this process actually had in the last time,
- * so we won't swap heavily used processes all the time ...
- *
- * Note: the priority argument is a hint on much CPU to waste with the
- * swap block search, not a hint, of how much blocks to swap with
- * each process.
- *
- * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
- */
-
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
{
pte_t * pte;
unsigned long pmd_end;
if (pmd_none(*dir))
- return 0;
+ return count;
if (pmd_bad(*dir)) {
pmd_ERROR(*dir);
pmd_clear(dir);
- return 0;
+ return count;
}
pte = pte_offset(dir, address);
@@ -190,28 +149,33 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
end = pmd_end;
do {
- int result;
- mm->swap_address = address + PAGE_SIZE;
- result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
- if (result)
- return result;
+ if (pte_present(*pte)) {
+ struct page *page = pte_page(*pte);
+
+ if (VALID_PAGE(page) && !PageReserved(page)) {
+ try_to_swap_out(mm, vma, address, pte, page);
+ if (!--count)
+ break;
+ }
+ }
address += PAGE_SIZE;
pte++;
} while (address && (address < end));
- return 0;
+ mm->swap_address = address + PAGE_SIZE;
+ return count;
}
-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
{
pmd_t * pmd;
unsigned long pgd_end;
if (pgd_none(*dir))
- return 0;
+ return count;
if (pgd_bad(*dir)) {
pgd_ERROR(*dir);
pgd_clear(dir);
- return 0;
+ return count;
}
pmd = pmd_offset(dir, address);
@@ -221,23 +185,23 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
end = pgd_end;
do {
- int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask);
- if (result)
- return result;
+ count = swap_out_pmd(mm, vma, pmd, address, end, count);
+ if (!count)
+ break;
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address && (address < end));
- return 0;
+ return count;
}
-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int gfp_mask)
+static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
{
pgd_t *pgdir;
unsigned long end;
/* Don't swap out areas which are locked down */
if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
- return 0;
+ return count;
pgdir = pgd_offset(mm, address);
@@ -245,18 +209,17 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsi
if (address >= end)
BUG();
do {
- int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask);
- if (result)
- return result;
+ count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+ if (!count)
+ break;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
pgdir++;
} while (address && (address < end));
- return 0;
+ return count;
}
-static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
+static int swap_out_mm(struct mm_struct * mm, int count)
{
- int result = 0;
unsigned long address;
struct vm_area_struct* vma;
@@ -276,8 +239,8 @@ static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
address = vma->vm_start;
for (;;) {
- result = swap_out_vma(mm, vma, address, gfp_mask);
- if (result)
+ count = swap_out_vma(mm, vma, address, count);
+ if (!count)
goto out_unlock;
vma = vma->vm_next;
if (!vma)
@@ -287,94 +250,63 @@ static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
}
/* Reset to 0 when we reach the end of address space */
mm->swap_address = 0;
- mm->swap_cnt = 0;
out_unlock:
spin_unlock(&mm->page_table_lock);
- return result;
+ return !count;
}
/*
- * Select the task with maximal swap_cnt and try to swap out a page.
* N.B. This function returns only 0 or 1. Return values != 1 from
* the lower level routines result in continued processing.
*/
#define SWAP_SHIFT 5
#define SWAP_MIN 8
+static inline int swap_amount(struct mm_struct *mm)
+{
+ int nr = mm->rss >> SWAP_SHIFT;
+ return nr < SWAP_MIN ? SWAP_MIN : nr;
+}
+
static int swap_out(unsigned int priority, int gfp_mask)
{
int counter;
- int __ret = 0;
-
- /*
- * We make one or two passes through the task list, indexed by
- * assign = {0, 1}:
- * Pass 1: select the swappable task with maximal RSS that has
- * not yet been swapped out.
- * Pass 2: re-assign rss swap_cnt values, then select as above.
- *
- * With this approach, there's no need to remember the last task
- * swapped out. If the swap-out fails, we clear swap_cnt so the
- * task won't be selected again until all others have been tried.
- *
- * Think of swap_cnt as a "shadow rss" - it tells us which process
- * we want to page out (always try largest first).
- */
- counter = (nr_threads << SWAP_SHIFT) >> priority;
- if (counter < 1)
- counter = 1;
+ int retval = 0;
+ struct mm_struct *mm = current->mm;
- for (; counter >= 0; counter--) {
+ /* Always start by trying to penalize the process that is allocating memory */
+ if (mm)
+ retval = swap_out_mm(mm, swap_amount(mm));
+
+ /* Then, look at the other mm's */
+ counter = mmlist_nr >> priority;
+ do {
struct list_head *p;
- unsigned long max_cnt = 0;
- struct mm_struct *best = NULL;
- int assign = 0;
- int found_task = 0;
- select:
+
spin_lock(&mmlist_lock);
p = init_mm.mmlist.next;
- for (; p != &init_mm.mmlist; p = p->next) {
- struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist);
- if (mm->rss <= 0)
- continue;
- found_task++;
- /* Refresh swap_cnt? */
- if (assign == 1) {
- mm->swap_cnt = (mm->rss >> SWAP_SHIFT);
- if (mm->swap_cnt < SWAP_MIN)
- mm->swap_cnt = SWAP_MIN;
- }
- if (mm->swap_cnt > max_cnt) {
- max_cnt = mm->swap_cnt;
- best = mm;
- }
- }
+ if (p == &init_mm.mmlist)
+ goto empty;
+
+ /* Move it to the back of the queue.. */
+ list_del(p);
+ list_add_tail(p, &init_mm.mmlist);
+ mm = list_entry(p, struct mm_struct, mmlist);
- /* Make sure it doesn't disappear */
- if (best)
- atomic_inc(&best->mm_users);
+ /* Make sure the mm doesn't disappear when we drop the lock.. */
+ atomic_inc(&mm->mm_users);
spin_unlock(&mmlist_lock);
- /*
- * We have dropped the tasklist_lock, but we
- * know that "mm" still exists: we are running
- * with the big kernel lock, and exit_mm()
- * cannot race with us.
- */
- if (!best) {
- if (!assign && found_task > 0) {
- assign = 1;
- goto select;
- }
- break;
- } else {
- __ret = swap_out_mm(best, gfp_mask);
- mmput(best);
- break;
- }
- }
- return __ret;
+ /* Walk about 6% of the address space each time */
+ retval |= swap_out_mm(mm, swap_amount(mm));
+ mmput(mm);
+ } while (--counter >= 0);
+ return retval;
+
+empty:
+ spin_unlock(&mmlist_lock);
+ return 0;
}
@@ -540,7 +472,6 @@ dirty_page_rescan:
*/
if (PageDirty(page)) {
int (*writepage)(struct page *) = page->mapping->a_ops->writepage;
- int result;
if (!writepage)
goto page_active;
@@ -558,16 +489,12 @@ dirty_page_rescan:
page_cache_get(page);
spin_unlock(&pagemap_lru_lock);
- result = writepage(page);
+ writepage(page);
page_cache_release(page);
/* And re-start the thing.. */
spin_lock(&pagemap_lru_lock);
- if (result != 1)
- continue;
- /* writepage refused to do anything */
- set_page_dirty(page);
- goto page_active;
+ continue;
}
/*
@@ -808,6 +735,9 @@ int free_shortage(void)
int inactive_shortage(void)
{
int shortage = 0;
+ pg_data_t *pgdat = pgdat_list;
+
+ /* Is the inactive dirty list too small? */
shortage += freepages.high;
shortage += inactive_target;
@@ -818,7 +748,27 @@ int inactive_shortage(void)
if (shortage > 0)
return shortage;
- return 0;
+ /* If not, do we have enough per-zone pages on the inactive list? */
+
+ shortage = 0;
+
+ do {
+ int i;
+ for(i = 0; i < MAX_NR_ZONES; i++) {
+ int zone_shortage;
+ zone_t *zone = pgdat->node_zones+ i;
+
+ zone_shortage = zone->pages_high;
+ zone_shortage -= zone->inactive_dirty_pages;
+ zone_shortage -= zone->inactive_clean_pages;
+ zone_shortage -= zone->free_pages;
+ if (zone_shortage > 0)
+ shortage += zone_shortage;
+ }
+ pgdat = pgdat->node_next;
+ } while (pgdat);
+
+ return shortage;
}
/*
@@ -833,72 +783,35 @@ int inactive_shortage(void)
* really care about latency. In that case we don't try
* to free too many pages.
*/
+#define DEF_PRIORITY (6)
static int refill_inactive(unsigned int gfp_mask, int user)
{
- int priority, count, start_count, made_progress;
+ int count, start_count, maxtry;
count = inactive_shortage() + free_shortage();
if (user)
count = (1 << page_cluster);
start_count = count;
- /* Always trim SLAB caches when memory gets low. */
- kmem_cache_reap(gfp_mask);
-
- priority = 6;
+ maxtry = 6;
do {
- made_progress = 0;
-
if (current->need_resched) {
__set_current_state(TASK_RUNNING);
schedule();
}
- while (refill_inactive_scan(priority, 1)) {
- made_progress = 1;
- if (--count <= 0)
- goto done;
- }
-
- /*
- * don't be too light against the d/i cache since
- * refill_inactive() almost never fail when there's
- * really plenty of memory free.
- */
- shrink_dcache_memory(priority, gfp_mask);
- shrink_icache_memory(priority, gfp_mask);
-
- /*
- * Then, try to page stuff out..
- */
- while (swap_out(priority, gfp_mask)) {
- made_progress = 1;
+ while (refill_inactive_scan(DEF_PRIORITY, 1)) {
if (--count <= 0)
goto done;
}
- /*
- * If we either have enough free memory, or if
- * page_launder() will be able to make enough
- * free memory, then stop.
- */
- if (!inactive_shortage() || !free_shortage())
- goto done;
+ /* If refill_inactive_scan failed, try to page stuff out.. */
+ swap_out(DEF_PRIORITY, gfp_mask);
- /*
- * Only switch to a lower "priority" if we
- * didn't make any useful progress in the
- * last loop.
- */
- if (!made_progress)
- priority--;
- } while (priority >= 0);
-
- /* Always end on a refill_inactive.., may sleep... */
- while (refill_inactive_scan(0, 1)) {
- if (--count <= 0)
- goto done;
- }
+ if (--maxtry <= 0)
+ return 0;
+
+ } while (inactive_shortage());
done:
return (count < start_count);
@@ -922,20 +835,29 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
/*
* If needed, we move pages from the active list
- * to the inactive list. We also "eat" pages from
- * the inode and dentry cache whenever we do this.
+ * to the inactive list.
*/
- if (free_shortage() || inactive_shortage()) {
- shrink_dcache_memory(6, gfp_mask);
- shrink_icache_memory(6, gfp_mask);
+ if (inactive_shortage())
ret += refill_inactive(gfp_mask, user);
+
+ /*
+ * Delete pages from the inode and dentry caches and
+ * reclaim unused slab cache if memory is low.
+ */
+ if (free_shortage()) {
+ shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
+ shrink_icache_memory(DEF_PRIORITY, gfp_mask);
} else {
/*
- * Reclaim unused slab cache memory.
+ * Illogical, but true. At least for now.
+ *
+ * If we're _not_ under shortage any more, we
+ * reap the caches. Why? Because a noticeable
+ * part of the caches are the buffer-heads,
+ * which we'll want to keep if under shortage.
*/
kmem_cache_reap(gfp_mask);
- ret = 1;
- }
+ }
return ret;
}
@@ -988,13 +910,8 @@ int kswapd(void *unused)
static int recalc = 0;
/* If needed, try to free some memory. */
- if (inactive_shortage() || free_shortage()) {
- int wait = 0;
- /* Do we need to do some synchronous flushing? */
- if (waitqueue_active(&kswapd_done))
- wait = 1;
- do_try_to_free_pages(GFP_KSWAPD, wait);
- }
+ if (inactive_shortage() || free_shortage())
+ do_try_to_free_pages(GFP_KSWAPD, 0);
/*
* Do some (very minimal) background scanning. This
@@ -1002,7 +919,7 @@ int kswapd(void *unused)
* every minute. This clears old referenced bits
* and moves unused pages to the inactive list.
*/
- refill_inactive_scan(6, 0);
+ refill_inactive_scan(DEF_PRIORITY, 0);
/* Once a second, recalculate some VM stats. */
if (time_after(jiffies, recalc + HZ)) {
@@ -1010,11 +927,6 @@ int kswapd(void *unused)
recalculate_vm_stats();
}
- /*
- * Wake up everybody waiting for free memory
- * and unplug the disk queue.
- */
- wake_up_all(&kswapd_done);
run_task_queue(&tq_disk);
/*
@@ -1045,33 +957,10 @@ int kswapd(void *unused)
}
}
-void wakeup_kswapd(int block)
+void wakeup_kswapd(void)
{
- DECLARE_WAITQUEUE(wait, current);
-
- if (current == kswapd_task)
- return;
-
- if (!block) {
- if (waitqueue_active(&kswapd_wait))
- wake_up(&kswapd_wait);
- return;
- }
-
- /*
- * Kswapd could wake us up before we get a chance
- * to sleep, so we have to be very careful here to
- * prevent SMP races...
- */
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&kswapd_done, &wait);
-
- if (waitqueue_active(&kswapd_wait))
- wake_up(&kswapd_wait);
- schedule();
-
- remove_wait_queue(&kswapd_done, &wait);
- __set_current_state(TASK_RUNNING);
+ if (current != kswapd_task)
+ wake_up_process(kswapd_task);
}
/*
@@ -1096,7 +985,7 @@ DECLARE_WAIT_QUEUE_HEAD(kreclaimd_wait);
/*
* Kreclaimd will move pages from the inactive_clean list to the
* free list, in order to keep atomic allocations possible under
- * all circumstances. Even when kswapd is blocked on IO.
+ * all circumstances.
*/
int kreclaimd(void *unused)
{
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index c67e3fdd0..ce5fc6d0d 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -25,6 +25,7 @@
* Jaume Grau - flush caches on AARP_PROBE
* Rob Newberry - Added proxy AARP and AARP proc fs,
* moved probing from DDP module.
+ * Arnaldo C. Melo - don't mangle rx packets
*
*/
@@ -58,16 +59,12 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
-
int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
int sysctl_aarp_tick_time = AARP_TICK_TIME;
int sysctl_aarp_retransmit_limit = AARP_RETRANSMIT_LIMIT;
int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
-/*
- * Lists of aarp entries
- */
-
+/* Lists of aarp entries */
struct aarp_entry {
/* These first two are only used for unresolved entries */
unsigned long last_sent; /* Last time we xmitted the aarp request */
@@ -81,22 +78,16 @@ struct aarp_entry {
struct aarp_entry *next; /* Next entry in chain */
};
-/*
- * Hashed list of resolved, unresolved and proxy entries
- */
-
+/* Hashed list of resolved, unresolved and proxy entries */
static struct aarp_entry *resolved[AARP_HASH_SIZE];
static struct aarp_entry *unresolved[AARP_HASH_SIZE];
static struct aarp_entry *proxies[AARP_HASH_SIZE];
-static int unresolved_count = 0;
+static int unresolved_count;
/* One lock protects it all. */
static spinlock_t aarp_lock = SPIN_LOCK_UNLOCKED;
-/*
- * Used to walk the list and purge/kick entries.
- */
-
+/* Used to walk the list and purge/kick entries. */
static struct timer_list aarp_timer;
/*
@@ -108,7 +99,7 @@ static void __aarp_expire(struct aarp_entry *a)
{
struct sk_buff *skb;
- while ((skb=skb_dequeue(&a->packet_queue)) != NULL)
+ while ((skb = skb_dequeue(&a->packet_queue)) != NULL)
kfree_skb(skb);
kfree(a);
@@ -125,33 +116,29 @@ static void __aarp_send_query(struct aarp_entry *a)
static char aarp_eth_multicast[ETH_ALEN] =
{ 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF };
struct net_device *dev = a->dev;
- int len = dev->hard_header_len + sizeof(struct elapaarp) + aarp_dl->header_length;
+ int len = dev->hard_header_len + sizeof(struct elapaarp) +
+ aarp_dl->header_length;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
- struct elapaarp *eah;
struct at_addr *sat = atalk_find_dev_addr(dev);
+ struct elapaarp *eah;
- if (skb == NULL)
+ if (!skb)
return;
- if (sat == NULL) {
+ if (!sat) {
kfree_skb(skb);
return;
}
- /*
- * Set up the buffer.
- */
-
+ /* Set up the buffer */
skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
- eah = (struct elapaarp *)skb_put(skb, sizeof(struct elapaarp));
+ eah = (struct elapaarp *)skb_put(skb,
+ sizeof(struct elapaarp));
skb->protocol = htons(ETH_P_ATALK);
skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
- /*
- * Set up the ARP.
- */
-
+ /* Set up the ARP */
eah->hw_type = htons(AARP_HW_TYPE_ETHERNET);
eah->pa_type = htons(ETH_P_ATALK);
eah->hw_len = ETH_ALEN;
@@ -170,52 +157,37 @@ static void __aarp_send_query(struct aarp_entry *a)
eah->pa_dst_net = a->target_addr.s_net;
eah->pa_dst_node= a->target_addr.s_node;
- /*
- * Add ELAP headers and set target to the AARP multicast.
- */
-
+ /* Add ELAP headers and set target to the AARP multicast */
aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast);
- /*
- * Send it.
- */
-
+ /* Send it */
dev_queue_xmit(skb);
-
- /*
- * Update the sending count
- */
-
+ /* Update the sending count */
a->xmit_count++;
}
-/* This runs under aarp_lock and in softint context, so only
- * atomic memory allocations can be used.
- */
+/* This runs under aarp_lock and in softint context, so only atomic memory
+ * allocations can be used. */
static void aarp_send_reply(struct net_device *dev, struct at_addr *us,
struct at_addr *them, unsigned char *sha)
{
- int len = dev->hard_header_len + sizeof(struct elapaarp) + aarp_dl->header_length;
+ int len = dev->hard_header_len + sizeof(struct elapaarp) +
+ aarp_dl->header_length;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
struct elapaarp *eah;
- if (skb == NULL)
+ if (!skb)
return;
- /*
- * Set up the buffer.
- */
-
+ /* Set up the buffer */
skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
- eah = (struct elapaarp *)skb_put(skb, sizeof(struct elapaarp));
+ eah = (struct elapaarp *)skb_put(skb,
+ sizeof(struct elapaarp));
skb->protocol = htons(ETH_P_ATALK);
skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
- /*
- * Set up the ARP.
- */
-
+ /* Set up the ARP */
eah->hw_type = htons(AARP_HW_TYPE_ETHERNET);
eah->pa_type = htons(ETH_P_ATALK);
eah->hw_len = ETH_ALEN;
@@ -228,7 +200,7 @@ static void aarp_send_reply(struct net_device *dev, struct at_addr *us,
eah->pa_src_net = us->s_net;
eah->pa_src_node= us->s_node;
- if (sha == NULL)
+ if (!sha)
memset(eah->hw_dst, '\0', ETH_ALEN);
else
memcpy(eah->hw_dst, sha, ETH_ALEN);
@@ -237,47 +209,38 @@ static void aarp_send_reply(struct net_device *dev, struct at_addr *us,
eah->pa_dst_net = them->s_net;
eah->pa_dst_node= them->s_node;
- /*
- * Add ELAP headers and set target to the AARP multicast.
- */
-
+ /* Add ELAP headers and set target to the AARP multicast */
aarp_dl->datalink_header(aarp_dl, skb, sha);
-
- /*
- * Send it.
- */
+ /* Send it */
dev_queue_xmit(skb);
}
/*
- * Send probe frames. Called from aarp_probe_network and aarp_proxy_probe_network.
+ * Send probe frames. Called from aarp_probe_network and
+ * aarp_proxy_probe_network.
*/
void aarp_send_probe(struct net_device *dev, struct at_addr *us)
{
- int len = dev->hard_header_len + sizeof(struct elapaarp) + aarp_dl->header_length;
+ int len = dev->hard_header_len + sizeof(struct elapaarp) +
+ aarp_dl->header_length;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
- struct elapaarp *eah;
static char aarp_eth_multicast[ETH_ALEN] =
{ 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF };
+ struct elapaarp *eah;
- if (skb == NULL)
+ if (!skb)
return;
- /*
- * Set up the buffer.
- */
-
+ /* Set up the buffer */
skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
- eah = (struct elapaarp *)skb_put(skb, sizeof(struct elapaarp));
+ eah = (struct elapaarp *)skb_put(skb,
+ sizeof(struct elapaarp));
skb->protocol = htons(ETH_P_ATALK);
skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
- /*
- * Set up the ARP.
- */
-
+ /* Set up the ARP */
eah->hw_type = htons(AARP_HW_TYPE_ETHERNET);
eah->pa_type = htons(ETH_P_ATALK);
eah->hw_len = ETH_ALEN;
@@ -296,15 +259,9 @@ void aarp_send_probe(struct net_device *dev, struct at_addr *us)
eah->pa_dst_net = us->s_net;
eah->pa_dst_node= us->s_node;
- /*
- * Add ELAP headers and set target to the AARP multicast.
- */
-
+ /* Add ELAP headers and set target to the AARP multicast */
aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast);
-
- /*
- * Send it.
- */
+ /* Send it */
dev_queue_xmit(skb);
}
@@ -318,16 +275,14 @@ static void __aarp_expire_timer(struct aarp_entry **n)
{
struct aarp_entry *t;
- while ((*n) != NULL) {
+ while (*n)
/* Expired ? */
- if(time_after(jiffies, (*n)->expires_at)) {
+ if (time_after(jiffies, (*n)->expires_at)) {
t = *n;
*n = (*n)->next;
__aarp_expire(t);
- } else {
+ } else
n = &((*n)->next);
- }
- }
}
/*
@@ -340,10 +295,8 @@ static void __aarp_kick(struct aarp_entry **n)
{
struct aarp_entry *t;
- while ((*n) != NULL) {
- /* Expired - if this will be the 11th transmit, we delete
- * instead.
- */
+ while (*n)
+ /* Expired: if this will be the 11th tx, we delete instead. */
if ((*n)->xmit_count >= sysctl_aarp_retransmit_limit) {
t = *n;
*n = (*n)->next;
@@ -352,7 +305,6 @@ static void __aarp_kick(struct aarp_entry **n)
__aarp_send_query(*n);
n = &((*n)->next);
}
- }
}
/*
@@ -366,21 +318,16 @@ static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev)
{
struct aarp_entry *t;
- while ((*n) != NULL) {
+ while (*n)
if ((*n)->dev == dev) {
t = *n;
*n = (*n)->next;
__aarp_expire(t);
- } else {
+ } else
n = &((*n)->next);
- }
- }
}
-/*
- * Handle the timer event
- */
-
+/* Handle the timer event */
static void aarp_expire_timeout(unsigned long unused)
{
int ct;
@@ -395,17 +342,14 @@ static void aarp_expire_timeout(unsigned long unused)
}
spin_unlock_bh(&aarp_lock);
-
mod_timer(&aarp_timer, jiffies +
- (unresolved_count ? sysctl_aarp_tick_time:
+ (unresolved_count ? sysctl_aarp_tick_time :
sysctl_aarp_expiry_time));
}
-/*
- * Network device notifier chain handler.
- */
-
-static int aarp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
+/* Network device notifier chain handler. */
+static int aarp_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
int ct;
@@ -432,11 +376,8 @@ static struct aarp_entry *aarp_alloc(void)
{
struct aarp_entry *a = kmalloc(sizeof(struct aarp_entry), GFP_ATOMIC);
- if (a == NULL)
- return NULL;
-
- skb_queue_head_init(&a->packet_queue);
-
+ if (a)
+ skb_queue_head_init(&a->packet_queue);
return a;
}
@@ -464,10 +405,8 @@ static struct aarp_entry *__aarp_find_entry(struct aarp_entry *list,
/* Called from the DDP code, and thus must be exported. */
void aarp_proxy_remove(struct net_device *dev, struct at_addr *sa)
{
+ int hash = sa->s_node % (AARP_HASH_SIZE - 1);
struct aarp_entry *a;
- int hash;
-
- hash = sa->s_node % (AARP_HASH_SIZE-1);
spin_lock_bh(&aarp_lock);
@@ -479,23 +418,15 @@ void aarp_proxy_remove(struct net_device *dev, struct at_addr *sa)
}
/* This must run under aarp_lock. */
-static struct at_addr *__aarp_proxy_find(struct net_device *dev, struct at_addr *sa)
+static struct at_addr *__aarp_proxy_find(struct net_device *dev,
+ struct at_addr *sa)
{
- struct at_addr *retval;
- struct aarp_entry *a;
- int hash;
+ int hash = sa->s_node % (AARP_HASH_SIZE - 1);
+ struct aarp_entry *a = __aarp_find_entry(proxies[hash], dev, sa);
- hash = sa->s_node % (AARP_HASH_SIZE-1);
-
- retval = NULL;
- a = __aarp_find_entry(proxies[hash], dev, sa);
- if (a != NULL)
- retval = sa;
-
- return retval;
+ return a ? sa : NULL;
}
-
/*
* Probe a Phase 1 device or a device that requires its Net:Node to
* be set via an ioctl.
@@ -506,13 +437,13 @@ void aarp_send_probe_phase1(struct atalk_iface *iface)
struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
sa->sat_addr.s_node = iface->address.s_node;
- sa->sat_addr.s_net = ntohs(iface->address.s_net);
+ sa->sat_addr.s_net = ntohs(iface->address.s_net);
/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
(void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
- if ((iface->address.s_net != htons(sa->sat_addr.s_net)) ||
- (iface->address.s_node != sa->sat_addr.s_node))
+ if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
+ iface->address.s_node != sa->sat_addr.s_node)
iface->status |= ATIF_PROBE_FAIL;
iface->address.s_net = htons(sa->sat_addr.s_net);
@@ -523,17 +454,16 @@ void aarp_send_probe_phase1(struct atalk_iface *iface)
void aarp_probe_network(struct atalk_iface *atif)
{
- if(atif->dev->type == ARPHRD_LOCALTLK || atif->dev->type == ARPHRD_PPP) {
+ if (atif->dev->type == ARPHRD_LOCALTLK ||
+ atif->dev->type == ARPHRD_PPP)
aarp_send_probe_phase1(atif);
- } else {
+ else {
unsigned int count;
for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) {
aarp_send_probe(atif->dev, &atif->address);
- /*
- * Defer 1/10th
- */
+ /* Defer 1/10th */
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ/10);
@@ -545,9 +475,9 @@ void aarp_probe_network(struct atalk_iface *atif)
int aarp_proxy_probe_network(struct atalk_iface *atif, struct at_addr *sa)
{
- struct aarp_entry *entry;
+ int hash, retval = 1;
+ struct aarp_entry *entry;
unsigned int count;
- int hash, retval;
/*
* we don't currently support LocalTalk or PPP for proxy AARP;
@@ -564,7 +494,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct at_addr *sa)
* we need this one to hang around even if it's in use
*/
entry = aarp_alloc();
- if (entry == NULL)
+ if (!entry)
return -ENOMEM;
entry->expires_at = -1;
@@ -582,56 +512,38 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct at_addr *sa)
for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) {
aarp_send_probe(atif->dev, sa);
- /*
- * Defer 1/10th
- */
+ /* Defer 1/10th */
current->state = TASK_INTERRUPTIBLE;
-
spin_unlock_bh(&aarp_lock);
-
schedule_timeout(HZ/10);
-
spin_lock_bh(&aarp_lock);
if (entry->status & ATIF_PROBE_FAIL)
break;
}
- retval = 1;
-
if (entry->status & ATIF_PROBE_FAIL) {
- /* free the entry */
- entry->expires_at = jiffies - 1;
-
- /* return network full */
- retval = -EADDRINUSE;
- } else {
- /* clear the probing flag */
+ entry->expires_at = jiffies - 1; /* free the entry */
+ retval = -EADDRINUSE; /* return network full */
+ } else /* clear the probing flag */
entry->status &= ~ATIF_PROBE;
- }
spin_unlock_bh(&aarp_lock);
-
return retval;
}
-
-/*
- * Send a DDP frame
- */
-int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr)
+/* Send a DDP frame */
+int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb,
+ struct at_addr *sa, void *hwaddr)
{
- static char ddp_eth_multicast[ETH_ALEN] = { 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF };
+ static char ddp_eth_multicast[ETH_ALEN] =
+ { 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF };
int hash;
struct aarp_entry *a;
skb->nh.raw = skb->data;
- /*
- * Check for LocalTalk first
- */
-
-
+ /* Check for LocalTalk first */
if (dev->type == ARPHRD_LOCALTLK) {
struct at_addr *at = atalk_find_dev_addr(dev);
struct ddpehdr *ddp = (struct ddpehdr *)skb->data;
@@ -644,8 +556,8 @@ int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa
* (zero matches anything)
*/
- if( ( ddp->deh_snet==0 || at->s_net==ddp->deh_snet) &&
- ( ddp->deh_dnet==0 || at->s_net==ddp->deh_dnet) ) {
+ if ((!ddp->deh_snet || at->s_net == ddp->deh_snet) &&
+ (!ddp->deh_dnet || at->s_net == ddp->deh_dnet)) {
skb_pull(skb, sizeof(struct ddpehdr) - 4);
/*
@@ -665,104 +577,58 @@ int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa
skb->data[0] = sa->s_node;
skb->data[1] = at->s_node;
skb->data[2] = ft;
-
- if (skb->sk)
- skb->priority = skb->sk->priority;
skb->dev = dev;
- dev_queue_xmit(skb);
- return 1;
+ goto sendit;
}
- /*
- * On a PPP link we neither compress nor aarp.
- */
+ /* On a PPP link we neither compress nor aarp. */
if (dev->type == ARPHRD_PPP) {
skb->protocol = htons(ETH_P_PPPTALK);
- if (skb->sk)
- skb->priority = skb->sk->priority;
skb->dev = dev;
- dev_queue_xmit(skb);
- return 1;
+ goto sendit;
}
- /*
- * Non ELAP we cannot do.
- */
-
+ /* Non ELAP we cannot do. */
if (dev->type != ARPHRD_ETHER)
return -1;
skb->dev = dev;
skb->protocol = htons(ETH_P_ATALK);
-
hash = sa->s_node % (AARP_HASH_SIZE - 1);
- /*
- * Do we have a resolved entry ?
- */
-
+ /* Do we have a resolved entry? */
if (sa->s_node == ATADDR_BCAST) {
ddp_dl->datalink_header(ddp_dl, skb, ddp_eth_multicast);
-
- if (skb->sk)
- skb->priority = skb->sk->priority;
- dev_queue_xmit(skb);
- return 1;
+ goto sendit;
}
spin_lock_bh(&aarp_lock);
-
a = __aarp_find_entry(resolved[hash], dev, sa);
- if (a != NULL) {
- /*
- * Return 1 and fill in the address
- */
-
+ if (a) { /* Return 1 and fill in the address */
a->expires_at = jiffies + (sysctl_aarp_expiry_time * 10);
ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr);
- if(skb->sk)
- skb->priority = skb->sk->priority;
- dev_queue_xmit(skb);
-
spin_unlock_bh(&aarp_lock);
- return 1;
+ goto sendit;
}
- /*
- * Do we have an unresolved entry: This is the less common path
- */
-
+ /* Do we have an unresolved entry: This is the less common path */
a = __aarp_find_entry(unresolved[hash], dev, sa);
- if (a != NULL) {
- /*
- * Queue onto the unresolved queue
- */
-
+ if (a) { /* Queue onto the unresolved queue */
skb_queue_tail(&a->packet_queue, skb);
-
spin_unlock_bh(&aarp_lock);
return 0;
}
- /*
- * Allocate a new entry
- */
-
+ /* Allocate a new entry */
a = aarp_alloc();
- if (a == NULL) {
- /*
- * Whoops slipped... good job it's an unreliable
- * protocol 8)
- */
+ if (!a) {
+ /* Whoops slipped... good job it's an unreliable protocol 8) */
spin_unlock_bh(&aarp_lock);
return -1;
}
- /*
- * Set up the queue
- */
-
+ /* Set up the queue */
skb_queue_tail(&a->packet_queue, skb);
a->expires_at = jiffies + sysctl_aarp_resolve_time;
a->dev = dev;
@@ -772,10 +638,7 @@ int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa
unresolved[hash] = a;
unresolved_count++;
- /*
- * Send an initial request for the address
- */
-
+ /* Send an initial request for the address */
__aarp_send_query(a);
/*
@@ -786,18 +649,16 @@ int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa
if (unresolved_count == 1)
mod_timer(&aarp_timer, jiffies + sysctl_aarp_tick_time);
-
- /*
- * Now finally, it is safe to drop the lock.
- */
-
+ /* Now finally, it is safe to drop the lock. */
spin_unlock_bh(&aarp_lock);
- /*
- * Tell the ddp layer we have taken over for this frame.
- */
-
+ /* Tell the ddp layer we have taken over for this frame. */
return 0;
+
+sendit: if (skb->sk)
+ skb->priority = skb->sk->priority;
+ dev_queue_xmit(skb);
+ return 1;
}
/*
@@ -806,170 +667,112 @@ int aarp_send_ddp(struct net_device *dev,struct sk_buff *skb, struct at_addr *sa
*
* Must run under aarp_lock.
*/
-static void __aarp_resolved(struct aarp_entry **list, struct aarp_entry *a, int hash)
+static void __aarp_resolved(struct aarp_entry **list, struct aarp_entry *a,
+ int hash)
{
struct sk_buff *skb;
- while (*list != NULL) {
+ while (*list)
if (*list == a) {
unresolved_count--;
-
*list = a->next;
- /*
- * Move into the resolved list
- */
-
+ /* Move into the resolved list */
a->next = resolved[hash];
resolved[hash] = a;
- /*
- * Kick frames off
- */
-
+ /* Kick frames off */
while ((skb = skb_dequeue(&a->packet_queue)) != NULL) {
- a->expires_at = jiffies + (sysctl_aarp_expiry_time*10);
+ a->expires_at = jiffies +
+ sysctl_aarp_expiry_time * 10;
ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr);
if (skb->sk)
skb->priority = skb->sk->priority;
dev_queue_xmit(skb);
}
- } else {
+ } else
list = &((*list)->next);
- }
- }
}
/*
* This is called by the SNAP driver whenever we see an AARP SNAP
* frame. We currently only support Ethernet.
*/
-static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
+static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
{
- struct elapaarp *ea=(struct elapaarp *)skb->h.raw;
+ struct elapaarp *ea = (struct elapaarp *)skb->h.raw;
+ int hash, ret = 0;
+ __u16 function;
struct aarp_entry *a;
struct at_addr sa, *ma, da;
- int hash;
struct atalk_iface *ifa;
- /*
- * We only do Ethernet SNAP AARP.
- */
-
- if (dev->type != ARPHRD_ETHER) {
- kfree_skb(skb);
- return 0;
- }
-
- /*
- * Frame size ok ?
- */
-
- if (!skb_pull(skb, sizeof(*ea))) {
- kfree_skb(skb);
- return 0;
- }
-
- ea->function = ntohs(ea->function);
+ /* We only do Ethernet SNAP AARP. */
+ if (dev->type != ARPHRD_ETHER)
+ goto out0;
- /*
- * Sanity check fields.
- */
+ /* Frame size ok? */
+ if (!skb_pull(skb, sizeof(*ea)))
+ goto out0;
- if (ea->function < AARP_REQUEST ||
- ea->function > AARP_PROBE ||
- ea->hw_len != ETH_ALEN ||
- ea->pa_len != AARP_PA_ALEN ||
- ea->pa_src_zero != 0 ||
- ea->pa_dst_zero != 0) {
- kfree_skb(skb);
- return 0;
- }
+ function = ntohs(ea->function);
- /*
- * Looks good.
- */
+ /* Sanity check fields. */
+ if (function < AARP_REQUEST || function > AARP_PROBE ||
+ ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN ||
+ ea->pa_src_zero || ea->pa_dst_zero)
+ goto out0;
+ /* Looks good. */
hash = ea->pa_src_node % (AARP_HASH_SIZE - 1);
- /*
- * Build an address.
- */
-
+ /* Build an address. */
sa.s_node = ea->pa_src_node;
sa.s_net = ea->pa_src_net;
- /*
- * Process the packet.
- * Check for replies of me.
- */
-
+ /* Process the packet. Check for replies of me. */
ifa = atalk_find_dev(dev);
- if (ifa == NULL) {
- kfree_skb(skb);
- return 1;
- }
-
- if (ifa->status & ATIF_PROBE) {
- if (ifa->address.s_node == ea->pa_dst_node &&
- ifa->address.s_net == ea->pa_dst_net) {
- /*
- * Fail the probe (in use)
- */
-
- ifa->status |= ATIF_PROBE_FAIL;
- kfree_skb(skb);
- return 1;
- }
+ if (!ifa)
+ goto out1;
+
+ if (ifa->status & ATIF_PROBE &&
+ ifa->address.s_node == ea->pa_dst_node &&
+ ifa->address.s_net == ea->pa_dst_net) {
+ ifa->status |= ATIF_PROBE_FAIL; /* Fail the probe (in use) */
+ goto out1;
}
- /*
- * Check for replies of proxy AARP entries
- */
-
+ /* Check for replies of proxy AARP entries */
da.s_node = ea->pa_dst_node;
da.s_net = ea->pa_dst_net;
spin_lock_bh(&aarp_lock);
-
a = __aarp_find_entry(proxies[hash], dev, &da);
- if (a != NULL) {
- if (a->status & ATIF_PROBE) {
- a->status |= ATIF_PROBE_FAIL;
-
- spin_unlock_bh(&aarp_lock);
-
- /*
- * we do not respond to probe or request packets for
- * this address while we are probing this address
- */
- kfree_skb(skb);
-
- return 1;
- }
+ if (a && a->status & ATIF_PROBE) {
+ a->status |= ATIF_PROBE_FAIL;
+ /*
+ * we do not respond to probe or request packets for
+ * this address while we are probing this address
+ */
+ goto unlock;
}
- switch (ea->function) {
+ switch (function) {
case AARP_REPLY:
- if (unresolved_count == 0) /* Speed up */
+ if (!unresolved_count) /* Speed up */
break;
- /*
- * Find the entry.
- */
-
- if ((a = __aarp_find_entry(unresolved[hash],dev,&sa)) == NULL ||
- (dev != a->dev))
+ /* Find the entry. */
+ a = __aarp_find_entry(unresolved[hash],dev,&sa);
+ if (!a || dev != a->dev)
break;
- /*
- * We can fill one in - this is good.
- */
-
+ /* We can fill one in - this is good. */
memcpy(a->hwaddr,ea->hw_src,ETH_ALEN);
__aarp_resolved(&unresolved[hash],a,hash);
- if (unresolved_count == 0)
+ if (!unresolved_count)
mod_timer(&aarp_timer,
jiffies + sysctl_aarp_expiry_time);
break;
@@ -977,49 +780,45 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t
case AARP_REQUEST:
case AARP_PROBE:
/*
- * If it is my address set ma to my address and reply. We can treat probe and
- * request the same. Probe simply means we shouldn't cache the querying host,
- * as in a probe they are proposing an address not using one.
+ * If it is my address set ma to my address and
+ * reply. We can treat probe and request the
+ * same. Probe simply means we shouldn't cache
+ * the querying host, as in a probe they are
+ * proposing an address not using one.
*
- * Support for proxy-AARP added. We check if the address is one
- * of our proxies before we toss the packet out.
+ * Support for proxy-AARP added. We check if the
+ * address is one of our proxies before we toss
+ * the packet out.
*/
sa.s_node = ea->pa_dst_node;
sa.s_net = ea->pa_dst_net;
- /*
- * See if we have a matching proxy.
- */
+ /* See if we have a matching proxy. */
ma = __aarp_proxy_find(dev, &sa);
- if (!ma) {
+ if (!ma)
ma = &ifa->address;
- } else {
- /*
- * We need to make a copy of the entry.
- */
+ else { /* We need to make a copy of the entry. */
da.s_node = sa.s_node;
da.s_net = da.s_net;
ma = &da;
}
- if (ea->function == AARP_PROBE) {
+ if (function == AARP_PROBE) {
/* A probe implies someone trying to get an
* address. So as a precaution flush any
- * entries we have for this address.
- */
+ * entries we have for this address. */
struct aarp_entry *a = __aarp_find_entry(
- resolved[sa.s_node%(AARP_HASH_SIZE-1)],
- skb->dev,
- &sa);
+ resolved[sa.s_node%(AARP_HASH_SIZE-1)],
+ skb->dev, &sa);
/* Make it expire next tick - that avoids us
- * getting into a probe/flush/learn/probe/flush/learn
- * cycle during probing of a slow to respond host addr.
- */
- if (a != NULL)
- {
+ * getting into a probe/flush/learn/probe/
+ * flush/learn cycle during probing of a slow
+ * to respond host addr. */
+ if (a) {
a->expires_at = jiffies - 1;
- mod_timer(&aarp_timer, jiffies + sysctl_aarp_tick_time);
+ mod_timer(&aarp_timer, jiffies +
+ sysctl_aarp_tick_time);
}
}
@@ -1032,18 +831,16 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t
sa.s_node = ea->pa_src_node;
sa.s_net = ea->pa_src_net;
- /*
- * aarp_my_address has found the address to use for us.
- */
-
+ /* aarp_my_address has found the address to use for us.
+ */
aarp_send_reply(dev, ma, &sa, ea->hw_src);
break;
- };
-
- spin_unlock_bh(&aarp_lock);
+ }
- kfree_skb(skb);
- return 1;
+unlock: spin_unlock_bh(&aarp_lock);
+out1: ret = 1;
+out0: kfree_skb(skb);
+ return ret;
}
static struct notifier_block aarp_notifier = {
@@ -1054,10 +851,10 @@ static struct notifier_block aarp_notifier = {
static char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
-
void __init aarp_proto_init(void)
{
- if ((aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv)) == NULL)
+ aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
+ if (!aarp_dl)
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
init_timer(&aarp_timer);
aarp_timer.function = aarp_expire_timeout;
@@ -1067,9 +864,7 @@ void __init aarp_proto_init(void)
register_netdevice_notifier(&aarp_notifier);
}
-/*
- * Remove the AARP entries associated with a device.
- */
+/* Remove the AARP entries associated with a device. */
void aarp_device_down(struct net_device *dev)
{
int ct;
@@ -1085,14 +880,12 @@ void aarp_device_down(struct net_device *dev)
spin_unlock_bh(&aarp_lock);
}
-/*
- * Called from proc fs
- */
+/* Called from proc fs */
static int aarp_get_info(char *buffer, char **start, off_t offset, int length)
{
/* we should dump all our AARP entries */
- struct aarp_entry *entry;
- int len, ct;
+ struct aarp_entry *entry;
+ int len, ct;
len = sprintf(buffer,
"%-10.10s ""%-10.10s""%-18.18s""%12.12s""%12.12s"" xmit_count status\n",
@@ -1171,25 +964,19 @@ static int aarp_get_info(char *buffer, char **start, off_t offset, int length)
}
spin_unlock_bh(&aarp_lock);
-
return len;
}
#ifdef MODULE
-/*
- * General module cleanup. Called from cleanup_module() in ddp.c.
- */
+/* General module cleanup. Called from cleanup_module() in ddp.c. */
void aarp_cleanup_module(void)
{
del_timer(&aarp_timer);
unregister_netdevice_notifier(&aarp_notifier);
unregister_snap_client(aarp_snap_id);
}
-
#endif /* MODULE */
-
#ifdef CONFIG_PROC_FS
-
void aarp_register_proc_fs(void)
{
proc_net_create("aarp", 0, aarp_get_info);
@@ -1199,7 +986,5 @@ void aarp_unregister_proc_fs(void)
{
proc_net_remove("aarp");
}
-
#endif
-
#endif /* CONFIG_ATALK || CONFIG_ATALK_MODULE */
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index e7784dabd..0e514faae 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -29,15 +29,17 @@
* driver file. (ipddp.c & ipddp.h)
* Jay Schulist : Made work as module with
* AppleTalk drivers, cleaned it.
- * Rob Newberry : Added proxy AARP and AARP proc fs,
- * moved probing to AARP module.
+ * Rob Newberry : Added proxy AARP and AARP
+ * procfs, moved probing to AARP
+ * module.
* Adrian Sun/
* Michael Zuelsdorff : fix for net.0 packets. don't
* allow illegal ether/tokentalk
* port assignment. we lose a
* valid localtalk port as a
* result.
- *
+ * Arnaldo C. de Melo : Cleanup, in preparation for
+ * shared skb support 8)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -84,19 +86,19 @@
#include <linux/stat.h>
#include <linux/init.h>
-
#ifdef CONFIG_PROC_FS
extern void aarp_register_proc_fs(void);
extern void aarp_unregister_proc_fs(void);
#endif
+extern void aarp_cleanup_module(void);
+
extern void aarp_probe_network(struct atalk_iface *atif);
-extern int aarp_proxy_probe_network(struct atalk_iface *atif, struct at_addr *sa);
+extern int aarp_proxy_probe_network(struct atalk_iface *atif,
+ struct at_addr *sa);
extern void aarp_proxy_remove(struct net_device *dev, struct at_addr *sa);
-
#undef APPLETALK_DEBUG
-
#ifdef APPLETALK_DEBUG
#define DPRINT(x) print(x)
#else
@@ -119,13 +121,14 @@ static struct proto_ops atalk_dgram_ops;
* *
\**************************************************************************/
-static struct sock *atalk_sockets = NULL;
+static struct sock *atalk_sockets;
static spinlock_t atalk_sockets_lock = SPIN_LOCK_UNLOCKED;
extern inline void atalk_insert_socket(struct sock *sk)
{
spin_lock_bh(&atalk_sockets_lock);
- if ((sk->next = atalk_sockets) != NULL)
+ sk->next = atalk_sockets;
+ if (sk->next)
atalk_sockets->pprev = &sk->next;
atalk_sockets = sk;
sk->pprev = &atalk_sockets;
@@ -135,7 +138,7 @@ extern inline void atalk_insert_socket(struct sock *sk)
extern inline void atalk_remove_socket(struct sock *sk)
{
spin_lock_bh(&atalk_sockets_lock);
- if (sk->pprev != NULL) {
+ if (sk->pprev) {
if (sk->next)
sk->next->pprev = sk->pprev;
*sk->pprev = sk->next;
@@ -144,12 +147,13 @@ extern inline void atalk_remove_socket(struct sock *sk)
spin_unlock_bh(&atalk_sockets_lock);
}
-static struct sock *atalk_search_socket(struct sockaddr_at *to, struct atalk_iface *atif)
+static struct sock *atalk_search_socket(struct sockaddr_at *to,
+ struct atalk_iface *atif)
{
struct sock *s;
spin_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s != NULL; s = s->next) {
+ for (s = atalk_sockets; s; s = s->next) {
if (to->sat_port != s->protinfo.af_at.src_port)
continue;
@@ -174,7 +178,6 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to, struct atalk_ifa
}
}
spin_unlock_bh(&atalk_sockets_lock);
-
return s;
}
@@ -185,29 +188,28 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to, struct atalk_ifa
*
* This entire operation must execute atomically.
*/
-static struct sock *atalk_find_or_insert_socket(struct sock *sk, struct sockaddr_at *sat)
+static struct sock *atalk_find_or_insert_socket(struct sock *sk,
+ struct sockaddr_at *sat)
{
struct sock *s;
spin_lock_bh(&atalk_sockets_lock);
-
- for (s = atalk_sockets; s != NULL; s = s->next) {
+ for (s = atalk_sockets; s; s = s->next)
if (s->protinfo.af_at.src_net == sat->sat_addr.s_net &&
s->protinfo.af_at.src_node == sat->sat_addr.s_node &&
s->protinfo.af_at.src_port == sat->sat_port)
break;
- }
if (!s) {
/* Wheee, it's free, assign and insert. */
- if ((sk->next = atalk_sockets) != NULL)
+ sk->next = atalk_sockets;
+ if (sk->next)
atalk_sockets->pprev = &sk->next;
atalk_sockets = sk;
sk->pprev = &atalk_sockets;
}
spin_unlock_bh(&atalk_sockets_lock);
-
return s;
}
@@ -215,9 +217,8 @@ static void atalk_destroy_timer(unsigned long data)
{
struct sock *sk = (struct sock *) data;
- if (atomic_read(&sk->wmem_alloc) == 0 &&
- atomic_read(&sk->rmem_alloc) == 0 &&
- sk->dead) {
+ if (!atomic_read(&sk->wmem_alloc) &&
+ !atomic_read(&sk->rmem_alloc) && sk->dead) {
sock_put(sk);
MOD_DEC_USE_COUNT;
} else {
@@ -228,16 +229,11 @@ static void atalk_destroy_timer(unsigned long data)
extern inline void atalk_destroy_socket(struct sock *sk)
{
- struct sk_buff *skb;
-
atalk_remove_socket(sk);
+ skb_queue_purge(&sk->receive_queue);
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL)
- kfree_skb(skb);
-
- if (atomic_read(&sk->wmem_alloc) == 0 &&
- atomic_read(&sk->rmem_alloc) == 0 &&
- sk->dead) {
+ if (!atomic_read(&sk->wmem_alloc) &&
+ !atomic_read(&sk->rmem_alloc) && sk->dead) {
sock_put(sk);
MOD_DEC_USE_COUNT;
} else {
@@ -249,37 +245,31 @@ extern inline void atalk_destroy_socket(struct sock *sk)
}
}
-/*
- * Called from proc fs
- */
+/* Called from proc fs */
static int atalk_get_info(char *buffer, char **start, off_t offset, int length)
{
- struct sock *s;
- int len = 0;
off_t pos = 0;
off_t begin = 0;
-
- /*
- * Output the AppleTalk data for the /proc filesystem.
- */
-
- len += sprintf(buffer,"Type local_addr remote_addr tx_queue rx_queue st uid\n");
+ int len = sprintf(buffer, "Type local_addr remote_addr tx_queue "
+ "rx_queue st uid\n");
+ struct sock *s;
+ /* Output the AppleTalk data for the /proc filesystem */
spin_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s != NULL; s = s->next) {
- len += sprintf(buffer+len,"%02X ", s->type);
- len += sprintf(buffer+len,"%04X:%02X:%02X ",
+ for (s = atalk_sockets; s; s = s->next) {
+ len += sprintf(buffer + len,"%02X ", s->type);
+ len += sprintf(buffer + len,"%04X:%02X:%02X ",
ntohs(s->protinfo.af_at.src_net),
s->protinfo.af_at.src_node,
s->protinfo.af_at.src_port);
- len += sprintf(buffer+len,"%04X:%02X:%02X ",
+ len += sprintf(buffer + len,"%04X:%02X:%02X ",
ntohs(s->protinfo.af_at.dest_net),
s->protinfo.af_at.dest_node,
s->protinfo.af_at.dest_port);
- len += sprintf(buffer+len,"%08X:%08X ",
+ len += sprintf(buffer + len,"%08X:%08X ",
atomic_read(&s->wmem_alloc),
atomic_read(&s->rmem_alloc));
- len += sprintf(buffer+len,"%02X %d\n", s->state,
+ len += sprintf(buffer + len,"%02X %d\n", s->state,
SOCK_INODE(s->socket)->i_uid);
/* Are we still dumping unwanted data then discard the record */
@@ -295,8 +285,8 @@ static int atalk_get_info(char *buffer, char **start, off_t offset, int length)
spin_unlock_bh(&atalk_sockets_lock);
/* The data in question runs from begin to begin+len */
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin); /* Remove unwanted header data from length */
+ *start = buffer + offset - begin; /* Start of wanted data */
+ len -= offset - begin; /* Remove unwanted header data from length */
if (len > length)
len = length; /* Remove unwanted tail data from length */
@@ -310,18 +300,16 @@ static int atalk_get_info(char *buffer, char **start, off_t offset, int length)
\**************************************************************************/
/* Anti-deadlock ordering is router_lock --> iface_lock -DaveM */
-static struct atalk_route *atalk_router_list = NULL;
+static struct atalk_route *atalk_router_list;
static rwlock_t atalk_router_lock = RW_LOCK_UNLOCKED;
-static struct atalk_iface *atalk_iface_list = NULL;
+static struct atalk_iface *atalk_iface_list;
static spinlock_t atalk_iface_lock = SPIN_LOCK_UNLOCKED;
-static struct atalk_route atrtr_default; /* For probing devices or in a routerless network */
-
-/*
- * AppleTalk interface control
- */
+/* For probing devices or in a routerless network */
+static struct atalk_route atrtr_default;
+/* AppleTalk interface control */
/*
* Drop a device. Doesn't drop any of its routes - that is the caller's
* problem. Called when we down the interface or delete the address.
@@ -344,12 +332,12 @@ static void atif_drop_device(struct net_device *dev)
spin_unlock_bh(&atalk_iface_lock);
}
-static struct atalk_iface *atif_add_device(struct net_device *dev, struct at_addr *sa)
+static struct atalk_iface *atif_add_device(struct net_device *dev,
+ struct at_addr *sa)
{
- struct atalk_iface *iface = (struct atalk_iface *)
- kmalloc(sizeof(*iface), GFP_KERNEL);
+ struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL);
- if (iface == NULL)
+ if (!iface)
return NULL;
iface->dev = dev;
@@ -363,51 +351,37 @@ static struct atalk_iface *atif_add_device(struct net_device *dev, struct at_add
spin_unlock_bh(&atalk_iface_lock);
MOD_INC_USE_COUNT;
-
return iface;
}
-
-/*
- * Perform phase 2 AARP probing on our tentative address.
- */
+/* Perform phase 2 AARP probing on our tentative address */
static int atif_probe_device(struct atalk_iface *atif)
{
- int netrange = ntohs(atif->nets.nr_lastnet) - ntohs(atif->nets.nr_firstnet) + 1;
+ int netrange = ntohs(atif->nets.nr_lastnet) -
+ ntohs(atif->nets.nr_firstnet) + 1;
int probe_net = ntohs(atif->address.s_net);
int probe_node = atif->address.s_node;
int netct, nodect;
- /*
- * Offset the network we start probing with.
- */
-
+ /* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
- if (!netrange)
- probe_net = ntohs(atif->nets.nr_firstnet);
- else
- probe_net = ntohs(atif->nets.nr_firstnet) + (jiffies % netrange);
+ probe_net = ntohs(atif->nets.nr_firstnet);
+ if (netrange)
+ probe_net += jiffies % netrange;
}
-
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
- /*
- * Scan the networks.
- */
+ /* Scan the networks */
atif->status |= ATIF_PROBE;
for (netct = 0; netct <= netrange; netct++) {
- /*
- * Sweep the available nodes from a given start.
- */
-
+ /* Sweep the available nodes from a given start */
atif->address.s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
atif->address.s_node = ((nodect+probe_node) & 0xFF);
- if (atif->address.s_node > 0 && atif->address.s_node < 254) {
- /*
- * Probe a proposed address.
- */
+ if (atif->address.s_node > 0 &&
+ atif->address.s_node < 254) {
+ /* Probe a proposed address */
aarp_probe_network(atif);
if (!(atif->status & ATIF_PROBE_FAIL)) {
@@ -427,58 +401,45 @@ static int atif_probe_device(struct atalk_iface *atif)
}
-/*
- * Perform AARP probing for a proxy address
- */
-static int atif_proxy_probe_device(struct atalk_iface *atif, struct at_addr* proxy_addr)
+/* Perform AARP probing for a proxy address */
+static int atif_proxy_probe_device(struct atalk_iface *atif,
+ struct at_addr* proxy_addr)
{
- int netrange = ntohs(atif->nets.nr_lastnet) - ntohs(atif->nets.nr_firstnet) + 1;
- int probe_net = ntohs(atif->address.s_net); /* we probe the interface's network */
+ int netrange = ntohs(atif->nets.nr_lastnet) -
+ ntohs(atif->nets.nr_firstnet) + 1;
+ /* we probe the interface's network */
+ int probe_net = ntohs(atif->address.s_net);
int probe_node = ATADDR_ANYNODE; /* we'll take anything */
int netct, nodect;
- /*
- * Offset the network we start probing with.
- */
-
+ /* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
- if (!netrange)
- probe_net = ntohs(atif->nets.nr_firstnet);
- else
- probe_net = ntohs(atif->nets.nr_firstnet) + (jiffies % netrange);
+ probe_net = ntohs(atif->nets.nr_firstnet);
+ if (netrange)
+ probe_net += jiffies % netrange;
}
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
- /*
- * Scan the networks.
- */
-
+ /* Scan the networks */
for (netct = 0; netct <= netrange; netct++) {
- /*
- * Sweep the available nodes from a given start.
- */
-
+ /* Sweep the available nodes from a given start */
proxy_addr->s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
proxy_addr->s_node = ((nodect + probe_node) & 0xFF);
- if ((proxy_addr->s_node > 0) && (proxy_addr->s_node < 254)) {
- /*
- * Tell AARP to probe a proposed address.
- */
- int probe_result = aarp_proxy_probe_network(atif,
- proxy_addr);
-
- if (probe_result == 0)
- return 0;
-
- if (probe_result != -EADDRINUSE)
- return probe_result;
+ if (proxy_addr->s_node > 0 &&
+ proxy_addr->s_node < 254) {
+ /* Tell AARP to probe a proposed address */
+ int ret = aarp_proxy_probe_network(atif,
+ proxy_addr);
+
+ if (ret != -EADDRINUSE)
+ return ret;
}
}
probe_net++;
- if(probe_net > ntohs(atif->nets.nr_lastnet))
+ if (probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
@@ -489,25 +450,21 @@ static int atif_proxy_probe_device(struct atalk_iface *atif, struct at_addr* pro
struct at_addr *atalk_find_dev_addr(struct net_device *dev)
{
struct atalk_iface *iface = dev->atalk_ptr;
-
- if(iface)
- return &iface->address;
-
- return NULL;
+ return iface ? &iface->address : NULL;
}
static struct at_addr *atalk_find_primary(void)
{
+ struct atalk_iface *fiface = NULL;
struct at_addr *retval;
struct atalk_iface *iface;
- struct atalk_iface *fiface = NULL;
/*
* Return a point-to-point interface only if
* there is no non-ptp interface available.
*/
spin_lock_bh(&atalk_iface_lock);
- for (iface = atalk_iface_list; iface != NULL; iface = iface->next) {
+ for (iface = atalk_iface_list; iface; iface = iface->next) {
if (!fiface && !(iface->dev->flags & IFF_LOOPBACK))
fiface = iface;
if (!(iface->dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))) {
@@ -516,16 +473,13 @@ static struct at_addr *atalk_find_primary(void)
}
}
- if (fiface) {
+ if (fiface)
retval = &fiface->address;
- } else if (atalk_iface_list != NULL) {
+ else if (atalk_iface_list)
retval = &atalk_iface_list->address;
- } else {
+ else
retval = NULL;
- }
-out:
- spin_unlock_bh(&atalk_iface_lock);
-
+out: spin_unlock_bh(&atalk_iface_lock);
return retval;
}
@@ -535,9 +489,9 @@ out:
*/
static struct atalk_iface *atalk_find_anynet(int node, struct net_device *dev)
{
- struct atalk_iface *iface=dev->atalk_ptr;
+ struct atalk_iface *iface = dev->atalk_ptr;
- if (iface==NULL || (iface->status & ATIF_PROBE))
+ if (!iface || iface->status & ATIF_PROBE)
return NULL;
if (node == ATADDR_BCAST ||
@@ -548,15 +502,13 @@ static struct atalk_iface *atalk_find_anynet(int node, struct net_device *dev)
return NULL;
}
-/*
- * Find a match for a specific network:node pair
- */
+/* Find a match for a specific network:node pair */
static struct atalk_iface *atalk_find_interface(int net, int node)
{
struct atalk_iface *iface;
spin_lock_bh(&atalk_iface_lock);
- for (iface = atalk_iface_list; iface != NULL; iface = iface->next) {
+ for (iface = atalk_iface_list; iface; iface = iface->next) {
if ((node == ATADDR_BCAST ||
node == ATADDR_ANYNODE ||
iface->address.s_node == node) &&
@@ -565,13 +517,12 @@ static struct atalk_iface *atalk_find_interface(int net, int node)
break;
/* XXXX.0 -- net.0 returns the iface associated with net */
- if ((node == ATADDR_ANYNODE) && (net != ATADDR_ANYNET) &&
- (ntohs(iface->nets.nr_firstnet) <= ntohs(net)) &&
- (ntohs(net) <= ntohs(iface->nets.nr_lastnet)))
+ if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET &&
+ ntohs(iface->nets.nr_firstnet) <= ntohs(net) &&
+ ntohs(net) <= ntohs(iface->nets.nr_lastnet))
break;
}
spin_unlock_bh(&atalk_iface_lock);
-
return iface;
}
@@ -588,11 +539,11 @@ static struct atalk_route *atrtr_find(struct at_addr *target)
* host route, because some host routes might overlap
* network routes
*/
- struct atalk_route *r;
struct atalk_route *net_route = NULL;
+ struct atalk_route *r;
read_lock_bh(&atalk_router_lock);
- for (r = atalk_router_list; r != NULL; r = r->next) {
+ for (r = atalk_router_list; r; r = r->next) {
if (!(r->flags & RTF_UP))
continue;
@@ -604,13 +555,12 @@ static struct atalk_route *atrtr_find(struct at_addr *target)
*/
if (r->target.s_node == target->s_node)
goto out;
- } else {
+ } else
/*
* this route will work if there isn't a
* direct host route, so cache it
*/
net_route = r;
- }
}
}
@@ -618,19 +568,13 @@ static struct atalk_route *atrtr_find(struct at_addr *target)
* if we found a network route but not a direct host
* route, then return it
*/
- if (net_route != NULL) {
+ if (net_route)
r = net_route;
- } else if (atrtr_default.dev) {
+ else if (atrtr_default.dev)
r = &atrtr_default;
- } else {
- /*
- * No route can be found.
- */
+ else /* No route can be found */
r = NULL;
- }
-
-out:
- read_unlock_bh(&atalk_router_lock);
+out: read_unlock_bh(&atalk_router_lock);
return r;
}
@@ -642,16 +586,10 @@ out:
struct net_device *atrtr_get_dev(struct at_addr *sa)
{
struct atalk_route *atr = atrtr_find(sa);
-
- if (atr == NULL)
- return NULL;
- else
- return atr->dev;
+ return atr ? atr->dev : NULL;
}
-/*
- * Set up a default router.
- */
+/* Set up a default router */
static void atrtr_set_default(struct net_device *dev)
{
atrtr_default.dev = dev;
@@ -678,20 +616,16 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
* operations.
*/
- /*
- * Validate the request
- */
+ /* Validate the request */
if (ta->sat_family != AF_APPLETALK)
return -EINVAL;
- if (devhint == NULL && ga->sat_family != AF_APPLETALK)
+ if (!devhint && ga->sat_family != AF_APPLETALK)
return -EINVAL;
- /*
- * Now walk the routing table and make our decisions.
- */
+ /* Now walk the routing table and make our decisions */
write_lock_bh(&atalk_router_lock);
- for (rt = atalk_router_list; rt != NULL; rt = rt->next) {
+ for (rt = atalk_router_list; rt; rt = rt->next) {
if (r->rt_flags != rt->flags)
continue;
@@ -703,65 +637,59 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
}
}
- if(devhint == NULL) {
+ if (!devhint) {
riface = NULL;
spin_lock_bh(&atalk_iface_lock);
for (iface = atalk_iface_list; iface; iface = iface->next) {
- if (riface == NULL &&
- ntohs(ga->sat_addr.s_net) >= ntohs(iface->nets.nr_firstnet) &&
- ntohs(ga->sat_addr.s_net) <= ntohs(iface->nets.nr_lastnet))
+ if (!riface &&
+ ntohs(ga->sat_addr.s_net) >=
+ ntohs(iface->nets.nr_firstnet) &&
+ ntohs(ga->sat_addr.s_net) <=
+ ntohs(iface->nets.nr_lastnet))
riface = iface;
- if (ga->sat_addr.s_net == iface->address.s_net &&
+ if (ga->sat_addr.s_net == iface->address.s_net &&
ga->sat_addr.s_node == iface->address.s_node)
riface = iface;
}
spin_unlock_bh(&atalk_iface_lock);
retval = -ENETUNREACH;
- if (riface == NULL)
+ if (!riface)
goto out;
devhint = riface->dev;
}
- if (rt == NULL) {
- rt = (struct atalk_route *)
- kmalloc(sizeof(struct atalk_route), GFP_ATOMIC);
+ if (!rt) {
+ rt = kmalloc(sizeof(struct atalk_route), GFP_ATOMIC);
retval = -ENOBUFS;
- if (rt == NULL)
+ if (!rt)
goto out;
rt->next = atalk_router_list;
atalk_router_list = rt;
}
- /*
- * Fill in the routing entry.
- */
+ /* Fill in the routing entry */
rt->target = ta->sat_addr;
rt->dev = devhint;
rt->flags = r->rt_flags;
rt->gateway = ga->sat_addr;
retval = 0;
-
-out:
- write_unlock_bh(&atalk_router_lock);
-
+out: write_unlock_bh(&atalk_router_lock);
return retval;
}
-/*
- * Delete a route. Find it and discard it.
- */
+/* Delete a route. Find it and discard it */
static int atrtr_delete(struct at_addr * addr)
{
struct atalk_route **r = &atalk_router_list;
- struct atalk_route *tmp;
int retval = 0;
+ struct atalk_route *tmp;
write_lock_bh(&atalk_router_lock);
while ((tmp = *r) != NULL) {
@@ -775,8 +703,7 @@ static int atrtr_delete(struct at_addr * addr)
r = &tmp->next;
}
retval = -ENOENT;
-out:
- write_unlock_bh(&atalk_router_lock);
+out: write_unlock_bh(&atalk_router_lock);
return retval;
}
@@ -794,9 +721,8 @@ void atrtr_device_down(struct net_device *dev)
if (tmp->dev == dev) {
*r = tmp->next;
kfree(tmp);
- } else {
+ } else
r = &tmp->next;
- }
}
write_unlock_bh(&atalk_router_lock);
@@ -804,9 +730,7 @@ void atrtr_device_down(struct net_device *dev)
atrtr_set_default(NULL);
}
-/*
- * Actually down the interface.
- */
+/* Actually down the interface */
static inline void atalk_dev_down(struct net_device *dev)
{
atrtr_device_down(dev); /* Remove all routes for the device */
@@ -818,27 +742,22 @@ static inline void atalk_dev_down(struct net_device *dev)
* A device event has occurred. Watch for devices going down and
* delete our use of them (iface and route).
*/
-static int ddp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int ddp_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
- if (event == NETDEV_DOWN) {
+ if (event == NETDEV_DOWN)
/* Discard any use of this */
atalk_dev_down((struct net_device *) ptr);
- }
return NOTIFY_DONE;
}
-/*
- * ioctl calls. Shouldn't even need touching.
- */
-
-/*
- * Device configuration ioctl calls.
- */
+/* ioctl calls. Shouldn't even need touching */
+/* Device configuration ioctl calls */
int atif_ioctl(int cmd, void *arg)
{
- struct ifreq atreq;
static char aarp_mcast[6] = {0x09, 0x00, 0x00, 0xFF, 0xFF, 0xFF};
+ struct ifreq atreq;
struct netrange *nr;
struct sockaddr_at *sa;
struct net_device *dev;
@@ -846,12 +765,13 @@ int atif_ioctl(int cmd, void *arg)
int ct;
int limit;
struct rtentry rtdef;
- int add_route;
+ int add_route;
if (copy_from_user(&atreq, arg, sizeof(atreq)))
return -EFAULT;
- if ((dev = __dev_get_by_name(atreq.ifr_name)) == NULL)
+ dev = __dev_get_by_name(atreq.ifr_name);
+ if (!dev)
return -ENODEV;
sa = (struct sockaddr_at*) &atreq.ifr_addr;
@@ -870,16 +790,19 @@ int atif_ioctl(int cmd, void *arg)
return -EPROTONOSUPPORT;
nr = (struct netrange *) &sa->sat_zero[0];
-
add_route = 1;
/*
- * if this is a point-to-point iface, and we already have an
- * iface for this AppleTalk address, then we should not add a route
+ * if this is a point-to-point iface, and we already
+ * have an iface for this AppleTalk address, then we
+ * should not add a route
*/
if ((dev->flags & IFF_POINTOPOINT) &&
- atalk_find_interface(sa->sat_addr.s_net, sa->sat_addr.s_node)) {
- printk(KERN_DEBUG "AppleTalk: point-to-point interface added with existing address\n");
+ atalk_find_interface(sa->sat_addr.s_net,
+ sa->sat_addr.s_node)) {
+ printk(KERN_DEBUG "AppleTalk: point-to-point "
+ "interface added with "
+ "existing address\n");
add_route = 0;
}
@@ -889,13 +812,11 @@ int atif_ioctl(int cmd, void *arg)
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
- if (sa->sat_addr.s_node == ATADDR_BCAST ||
+ if (sa->sat_addr.s_node == ATADDR_BCAST ||
sa->sat_addr.s_node == 254)
return -EINVAL;
if (atif) {
- /*
- * Already setting address.
- */
+ /* Already setting address */
if (atif->status & ATIF_PROBE)
return -EBUSY;
@@ -904,7 +825,7 @@ int atif_ioctl(int cmd, void *arg)
atrtr_device_down(dev); /* Flush old routes */
} else {
atif = atif_add_device(dev, &sa->sat_addr);
- if (atif == NULL)
+ if (!atif)
return -ENOMEM;
}
atif->nets = *nr;
@@ -921,10 +842,7 @@ int atif_ioctl(int cmd, void *arg)
return -EADDRINUSE;
}
- /*
- * Hey it worked - add the direct routes.
- */
-
+ /* Hey it worked - add the direct routes */
sa = (struct sockaddr_at *) &rtdef.rt_gateway;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_net = atif->address.s_net;
@@ -933,13 +851,11 @@ int atif_ioctl(int cmd, void *arg)
rtdef.rt_flags = RTF_UP;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_node = ATADDR_ANYNODE;
- if ((dev->flags & IFF_LOOPBACK) ||
- (dev->flags & IFF_POINTOPOINT))
+ if (dev->flags & IFF_LOOPBACK ||
+ dev->flags & IFF_POINTOPOINT)
rtdef.rt_flags |= RTF_HOST;
- /*
- * Routerless initial state.
- */
+ /* Routerless initial state */
if (nr->nr_firstnet == htons(0) &&
nr->nr_lastnet == htons(0xFFFE)) {
sa->sat_addr.s_net = atif->address.s_net;
@@ -948,37 +864,35 @@ int atif_ioctl(int cmd, void *arg)
} else {
limit = ntohs(nr->nr_lastnet);
if (limit - ntohs(nr->nr_firstnet) > 4096) {
- printk(KERN_WARNING "Too many routes/iface.\n");
+ printk(KERN_WARNING "Too many routes/"
+ "iface.\n");
return -EINVAL;
}
- if (add_route) {
- for(ct = ntohs(nr->nr_firstnet);ct <= limit; ct++) {
+ if (add_route)
+ for (ct = ntohs(nr->nr_firstnet);
+ ct <= limit; ct++) {
sa->sat_addr.s_net = htons(ct);
atrtr_create(&rtdef, dev);
}
- }
}
dev_mc_add(dev, aarp_mcast, 6, 1);
return 0;
case SIOCGIFADDR:
- if (atif == NULL)
+ if (!atif)
return -EADDRNOTAVAIL;
- ((struct sockaddr_at *)(&atreq.ifr_addr))->sat_family =
- AF_APPLETALK;
- ((struct sockaddr_at *)(&atreq.ifr_addr))->sat_addr =
- atif->address;
+
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr = atif->address;
break;
case SIOCGIFBRDADDR:
- if (atif == NULL)
+ if (!atif)
return -EADDRNOTAVAIL;
- ((struct sockaddr_at *)(&atreq.ifr_addr))->sat_family =
- AF_APPLETALK;
- ((struct sockaddr_at *)(&atreq.ifr_addr))->sat_addr.s_net =
- atif->address.s_net;
- ((struct sockaddr_at *)(&atreq.ifr_addr))->sat_addr.s_node =
- ATADDR_BCAST;
+
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr.s_net = atif->address.s_net;
+ sa->sat_addr.s_node = ATADDR_BCAST;
break;
case SIOCATALKDIFADDR:
@@ -995,7 +909,7 @@ int atif_ioctl(int cmd, void *arg)
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
- if (atif == NULL)
+ if (!atif)
return -EADDRNOTAVAIL;
/*
@@ -1007,9 +921,10 @@ int atif_ioctl(int cmd, void *arg)
/*
* atif points to the current interface on this network;
- * we aren't concerned about its current status (at least for now),
- * but it has all the settings about the network we're going
- * to probe. consequently, it must exist.
+ * we aren't concerned about its current status (at
+ * least for now), but it has all the settings about
+ * the network we're going to probe. Consequently, it
+ * must exist.
*/
if (!atif)
return -EADDRNOTAVAIL;
@@ -1034,10 +949,10 @@ int atif_ioctl(int cmd, void *arg)
return -EADDRINUSE;
/*
- * We now have an address on the local network, and the AARP
- * code will defend it for us until we take it down.
- * We don't set up any routes right now, because ATCP will
- * install them manually via SIOCADDRT.
+ * We now have an address on the local network, and
+ * the AARP code will defend it for us until we take it
+ * down. We don't set up any routes right now, because
+ * ATCP will install them manually via SIOCADDRT.
*/
break;
@@ -1046,30 +961,22 @@ int atif_ioctl(int cmd, void *arg)
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
- if (atif == NULL)
+ if (!atif)
return -EADDRNOTAVAIL;
- /*
- * give to aarp module to remove proxy entry
- */
+ /* give to aarp module to remove proxy entry */
aarp_proxy_remove(atif->dev, &(sa->sat_addr));
-
return 0;
- };
-
- if (copy_to_user(arg, &atreq, sizeof(atreq)))
- return -EFAULT;
+ }
- return 0;
+ return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
}
-/*
- * Routing ioctl() calls
- */
+/* Routing ioctl() calls */
static int atrtr_ioctl(unsigned int cmd, void *arg)
{
- struct rtentry rt;
struct net_device *dev = NULL;
+ struct rtentry rt;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
@@ -1078,37 +985,38 @@ static int atrtr_ioctl(unsigned int cmd, void *arg)
case SIOCDELRT:
if (rt.rt_dst.sa_family != AF_APPLETALK)
return -EINVAL;
- return atrtr_delete(&((struct sockaddr_at *)&rt.rt_dst)->sat_addr);
+ return atrtr_delete(&((struct sockaddr_at *)
+ &rt.rt_dst)->sat_addr);
case SIOCADDRT:
- /* FIX ME: the name of the device is still in user space, isn't it? */
- if (rt.rt_dev != NULL) {
- if ((dev = __dev_get_by_name(rt.rt_dev)) == NULL)
+ /* FIXME: the name of the device is still in user
+ * space, isn't it? */
+ if (rt.rt_dev) {
+ dev = __dev_get_by_name(rt.rt_dev);
+ if (!dev)
return -ENODEV;
}
return atrtr_create(&rt, dev);
-
- default:
- return -EINVAL;
- };
+ }
+ return -EINVAL;
}
/* Called from proc fs - just make it print the ifaces neatly */
-
-static int atalk_if_get_info(char *buffer, char **start, off_t offset, int length)
+static int atalk_if_get_info(char *buffer, char **start, off_t offset,
+ int length)
{
- struct atalk_iface *iface;
- int len = 0;
off_t pos = 0;
off_t begin = 0;
-
- len += sprintf(buffer,"Interface Address Networks Status\n");
+ struct atalk_iface *iface;
+ int len = sprintf(buffer, "Interface Address "
+ "Networks Status\n");
spin_lock_bh(&atalk_iface_lock);
- for (iface = atalk_iface_list; iface != NULL; iface = iface->next) {
+ for (iface = atalk_iface_list; iface; iface = iface->next) {
len += sprintf(buffer+len,"%-16s %04X:%02X %04X-%04X %d\n",
iface->dev->name, ntohs(iface->address.s_net),
- iface->address.s_node, ntohs(iface->nets.nr_firstnet),
+ iface->address.s_node,
+ ntohs(iface->nets.nr_firstnet),
ntohs(iface->nets.nr_lastnet), iface->status);
pos = begin + len;
if (pos < offset) {
@@ -1124,33 +1032,32 @@ static int atalk_if_get_info(char *buffer, char **start, off_t offset, int lengt
len -= (offset - begin);
if (len > length)
len = length;
-
- return (len);
+ return len;
}
/* Called from proc fs - just make it print the routes neatly */
-
-static int atalk_rt_get_info(char *buffer, char **start, off_t offset, int length)
+static int atalk_rt_get_info(char *buffer, char **start, off_t offset,
+ int length)
{
- struct atalk_route *rt;
- int len = 0;
off_t pos = 0;
off_t begin = 0;
+ int len = sprintf(buffer, "Target Router Flags Dev\n");
+ struct atalk_route *rt;
- len += sprintf(buffer,"Target Router Flags Dev\n");
if (atrtr_default.dev) {
rt = &atrtr_default;
- len += sprintf(buffer+len,"Default %04X:%02X %-4d %s\n",
+ len += sprintf(buffer + len,"Default %04X:%02X %-4d %s\n",
ntohs(rt->gateway.s_net), rt->gateway.s_node,
rt->flags, rt->dev->name);
}
read_lock_bh(&atalk_router_lock);
- for (rt = atalk_router_list; rt != NULL; rt = rt->next) {
- len += sprintf(buffer+len,"%04X:%02X %04X:%02X %-4d %s\n",
+ for (rt = atalk_router_list; rt; rt = rt->next) {
+ len += sprintf(buffer + len,
+ "%04X:%02X %04X:%02X %-4d %s\n",
ntohs(rt->target.s_net), rt->target.s_node,
- ntohs(rt->gateway.s_net), rt->gateway.s_node, rt->flags,
- rt->dev->name);
+ ntohs(rt->gateway.s_net), rt->gateway.s_node,
+ rt->flags, rt->dev->name);
pos = begin + len;
if (pos < offset) {
len = 0;
@@ -1165,7 +1072,6 @@ static int atalk_rt_get_info(char *buffer, char **start, off_t offset, int lengt
len -= (offset - begin);
if (len > length)
len = length;
-
return len;
}
@@ -1198,11 +1104,8 @@ unsigned short atalk_checksum(struct ddpehdr *ddp, int len)
}
data++;
}
-
- if (sum)
- return htons((unsigned short) sum);
-
- return 0xFFFF; /* Use 0xFFFF for 0. 0 itself means none */
+ /* Use 0xFFFF for 0. 0 itself means none */
+ return sum ? htons((unsigned short) sum) : 0xFFFF;
}
/*
@@ -1211,10 +1114,9 @@ unsigned short atalk_checksum(struct ddpehdr *ddp, int len)
*/
static int atalk_create(struct socket *sock, int protocol)
{
- struct sock *sk;
+ struct sock *sk = sk_alloc(PF_APPLETALK, GFP_KERNEL, 1);
- sk = sk_alloc(PF_APPLETALK, GFP_KERNEL, 1);
- if (sk == NULL)
+ if (!sk)
return -ENOMEM;
switch (sock->type) {
@@ -1230,36 +1132,32 @@ static int atalk_create(struct socket *sock, int protocol)
case SOCK_STREAM:
/*
- * TO DO: if you want to implement ADSP, here's the place to start
+ * TODO: if you want to implement ADSP, here's the
+ * place to start
*/
/*
sock->ops = &atalk_stream_ops;
break;
*/
default:
- sk_free((void *) sk);
+ sk_free(sk);
return -ESOCKTNOSUPPORT;
- };
+ }
MOD_INC_USE_COUNT;
-
sock_init_data(sock, sk);
-
sk->destruct = NULL;
/* Checksums on by default */
sk->zapped = 1;
-
return 0;
}
-/*
- * Free a socket. No work needed
- */
+/* Free a socket. No work needed */
static int atalk_release(struct socket *sock)
{
- struct sock *sk=sock->sk;
+ struct sock *sk = sock->sk;
- if (sk == NULL)
+ if (!sk)
return 0;
if (!sk->dead)
@@ -1268,7 +1166,6 @@ static int atalk_release(struct socket *sock)
sk->dead = 1;
sock->sk = NULL;
atalk_destroy_socket(sk);
-
return 0;
}
@@ -1289,21 +1186,21 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
for (sat->sat_port = ATPORT_RESERVED;
sat->sat_port < ATPORT_LAST;
sat->sat_port++) {
- for (s = atalk_sockets; s != NULL; s = s->next) {
+ for (s = atalk_sockets; s; s = s->next) {
if (s->protinfo.af_at.src_net == sat->sat_addr.s_net &&
- s->protinfo.af_at.src_node == sat->sat_addr.s_node &&
+ s->protinfo.af_at.src_node ==
+ sat->sat_addr.s_node &&
s->protinfo.af_at.src_port == sat->sat_port)
goto try_next_port;
}
/* Wheee, it's free, assign and insert. */
- if ((sk->next = atalk_sockets) != NULL)
+ sk->next = atalk_sockets;
+ if (sk->next)
atalk_sockets->pprev = &sk->next;
atalk_sockets = sk;
sk->pprev = &atalk_sockets;
-
sk->protinfo.af_at.src_port = sat->sat_port;
-
retval = 0;
goto out;
@@ -1312,61 +1209,53 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
}
retval = -EBUSY;
-out:
- spin_unlock_bh(&atalk_sockets_lock);
-
+out: spin_unlock_bh(&atalk_sockets_lock);
return retval;
}
static int atalk_autobind(struct sock *sk)
{
- struct at_addr *ap = atalk_find_primary();
struct sockaddr_at sat;
int n;
+ struct at_addr *ap = atalk_find_primary();
- if (ap == NULL || ap->s_net == htons(ATADDR_ANYNET))
+ if (!ap || ap->s_net == htons(ATADDR_ANYNET))
return -EADDRNOTAVAIL;
sk->protinfo.af_at.src_net = sat.sat_addr.s_net = ap->s_net;
sk->protinfo.af_at.src_node = sat.sat_addr.s_node = ap->s_node;
- if ((n = atalk_pick_and_bind_port(sk, &sat)) < 0)
+ n = atalk_pick_and_bind_port(sk, &sat);
+ if (n < 0)
return n;
sk->zapped = 0;
-
return 0;
}
-/*
- * Set the address 'our end' of the connection.
- */
+/* Set the address 'our end' of the connection */
static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sock *sk;
struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
+ struct sock *sk = sock->sk;
- sk = sock->sk;
-
- if(sk->zapped == 0)
- return -EINVAL;
-
- if(addr_len != sizeof(struct sockaddr_at))
+ if (!sk->zapped || addr_len != sizeof(struct sockaddr_at))
return -EINVAL;
- if(addr->sat_family != AF_APPLETALK)
+ if (addr->sat_family != AF_APPLETALK)
return -EAFNOSUPPORT;
- if(addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
+ if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
struct at_addr *ap = atalk_find_primary();
- if(ap == NULL)
+ if (!ap)
return -EADDRNOTAVAIL;
sk->protinfo.af_at.src_net = addr->sat_addr.s_net = ap->s_net;
sk->protinfo.af_at.src_node = addr->sat_addr.s_node= ap->s_node;
} else {
- if (atalk_find_interface(addr->sat_addr.s_net, addr->sat_addr.s_node) == NULL)
+ if (!atalk_find_interface(addr->sat_addr.s_net,
+ addr->sat_addr.s_node))
return -EADDRNOTAVAIL;
sk->protinfo.af_at.src_net = addr->sat_addr.s_net;
@@ -1381,18 +1270,15 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
} else {
sk->protinfo.af_at.src_port = addr->sat_port;
- if (atalk_find_or_insert_socket(sk, addr) != NULL)
+ if (atalk_find_or_insert_socket(sk, addr))
return -EADDRINUSE;
}
sk->zapped = 0;
-
return 0;
}
-/*
- * Set the address we talk to.
- */
+/* Set the address we talk to */
static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
@@ -1412,19 +1298,20 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr->sat_addr.s_node == ATADDR_BCAST && !sk->broadcast) {
#if 1
- printk(KERN_WARNING "%s is broken and did not set SO_BROADCAST. It will break when 2.2 is released.\n",
+ printk(KERN_WARNING "%s is broken and did not set "
+ "SO_BROADCAST. It will break when 2.2 is "
+ "released.\n",
current->comm);
#else
return -EACCES;
#endif
}
- if (sk->zapped) {
+ if (sk->zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
- }
- if (atrtr_get_dev(&addr->sat_addr) == NULL)
+ if (!atrtr_get_dev(&addr->sat_addr))
return -ENETUNREACH;
sk->protinfo.af_at.dest_port = addr->sat_port;
@@ -1433,7 +1320,6 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
sk->state = TCP_ESTABLISHED;
-
return 0;
}
@@ -1446,13 +1332,11 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_at sat;
- struct sock *sk;
+ struct sock *sk = sock->sk;
- sk = sock->sk;
- if (sk->zapped) {
+ if (sk->zapped)
if (atalk_autobind(sk) < 0)
return -ENOBUFS;
- }
*uaddr_len = sizeof(struct sockaddr_at);
@@ -1471,7 +1355,6 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
sat.sat_family = AF_APPLETALK;
memcpy(uaddr, &sat, sizeof(sat));
-
return 0;
}
@@ -1482,20 +1365,19 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
* extracted. PPP should probably pass frames marked as for this layer.
* [ie ARPHRD_ETHERTALK]
*/
-static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
+static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
{
- struct sock *sock;
struct ddpehdr *ddp = (void *) skb->h.raw;
+ struct sock *sock;
struct atalk_iface *atif;
struct sockaddr_at tosat;
int origlen;
struct ddpebits ddphv;
/* Size check */
- if (skb->len < sizeof(*ddp)) {
- kfree_skb(skb);
- return 0;
- }
+ if (skb->len < sizeof(*ddp))
+ goto freeit;
/*
* Fix up the length field [Ok this is horrible but otherwise
@@ -1506,15 +1388,10 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
* happens to be safe BUT.. (Its safe as user space will not
* run until we put it back)
*/
-
*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
- /*
- * Trim buffer in case of stray trailing data
- */
-
+ /* Trim buffer in case of stray trailing data */
origlen = skb->len;
-
skb_trim(skb, min(skb->len, ddphv.deh_len));
/*
@@ -1522,32 +1399,26 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
* (Otherwise we'll detonate most spectacularly
* in the middle of recvmsg()).
*/
- if (skb->len < sizeof(*ddp)) {
- kfree_skb(skb);
- return 0;
- }
+ if (skb->len < sizeof(*ddp))
+ goto freeit;
/*
* Any checksums. Note we don't do htons() on this == is assumed to be
* valid for net byte orders all over the networking code...
*/
- if (ddp->deh_sum && atalk_checksum(ddp, ddphv.deh_len) != ddp->deh_sum) {
+ if (ddp->deh_sum &&
+ atalk_checksum(ddp, ddphv.deh_len) != ddp->deh_sum)
/* Not a valid AppleTalk frame - dustbin time */
- kfree_skb(skb);
- return 0;
- }
+ goto freeit;
/* Check the packet is aimed at us */
-
- if (ddp->deh_dnet == 0) /* Net 0 is 'this network' */
+ if (!ddp->deh_dnet) /* Net 0 is 'this network' */
atif = atalk_find_anynet(ddp->deh_dnode, dev);
else
atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode);
- /*
- * Not ours, so we route the packet via the correct AppleTalk interface.
- */
- if (atif == NULL) {
+ /* Not ours, so we route the packet via the correct AppleTalk iface */
+ if (!atif) {
struct atalk_route *rt;
struct at_addr ta;
@@ -1555,16 +1426,16 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
* Don't route multicast, etc., packets, or packets
* sent to "this network"
*/
- if (skb->pkt_type != PACKET_HOST || ddp->deh_dnet == 0) {
- /*
- * FIX ME:
- * Can it ever happen that a packet is from a PPP iface and needs to be broadcast onto the default network?
- */
+ if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) {
+ /* FIXME:
+ * Can it ever happen that a packet is from a PPP
+ * iface and needs to be broadcast onto the default
+ * network? */
if (dev->type == ARPHRD_PPP)
- printk(KERN_DEBUG "AppleTalk: didn't forward broadcast packet received from PPP iface\n");
-
- kfree_skb(skb);
- return 0;
+ printk(KERN_DEBUG "AppleTalk: didn't forward "
+ "broadcast packet received "
+ "from PPP iface\n");
+ goto freeit;
}
ta.s_net = ddp->deh_dnet;
@@ -1572,10 +1443,8 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
/* Route the packet */
rt = atrtr_find(&ta);
- if (rt == NULL || ddphv.deh_hops == DDP_MAXHOPS) {
- kfree_skb(skb);
- return 0;
- }
+ if (!rt || ddphv.deh_hops == DDP_MAXHOPS)
+ goto freeit;
ddphv.deh_hops++;
/*
@@ -1606,40 +1475,33 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
*
* Note. ddp-> becomes invalid at the realloc.
*/
- if (skb_headroom(skb) < 22)
- {
- struct sk_buff *newskb;
+ if (skb_headroom(skb) < 22) {
/* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */
- newskb = skb_realloc_headroom(skb, 32);
+ struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
kfree_skb(skb);
- if (!newskb)
- return 0;
- skb = newskb;
- }
- else
+ if (!nskb)
+ goto out;
+ skb = nskb;
+ } else
skb = skb_unshare(skb, GFP_ATOMIC);
/*
* If the buffer didn't vanish into the lack of
* space bitbucket we can send it.
*/
- if (skb) {
- if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == -1)
- kfree_skb(skb);
- }
-
- return 0;
+ if (skb && aarp_send_ddp(rt->dev, skb, &ta, NULL) == -1)
+ goto freeit;
+ goto out;
}
#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
- /*
- * Check if IP-over-DDP
- */
+ /* Check if IP-over-DDP */
if (skb->data[12] == 22) {
- struct net_device *dev;
+ struct net_device *dev = __dev_get_by_name("ipddp0");
+ struct net_device_stats *stats;
/* This needs to be able to handle ipddp"N" devices */
- if ((dev = __dev_get_by_name("ipddp0")) == NULL)
+ if (!dev)
return -ENODEV;
skb->protocol = htons(ETH_P_IP);
@@ -1647,14 +1509,13 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
skb->dev = dev;
skb->h.raw = skb->data;
- ((struct net_device_stats *)dev->priv)->rx_packets++;
- ((struct net_device_stats *)dev->priv)->rx_bytes += skb->len + 13;
+ stats = dev->priv;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len + 13;
netif_rx(skb); /* Send the SKB up to a higher place. */
-
- return 0;
+ goto out;
}
#endif
-
/*
* Which socket - atalk_search_socket() looks for a *full match*
* of the <net,node,port> tuple.
@@ -1664,23 +1525,17 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
tosat.sat_port = ddp->deh_dport;
sock = atalk_search_socket(&tosat, atif);
-
- if (sock == NULL) {
- /* But not one of our sockets */
- kfree_skb(skb);
- return 0;
- }
+ if (!sock) /* But not one of our sockets */
+ goto freeit;
- /*
- * Queue packet (standard)
- */
-
+ /* Queue packet (standard) */
skb->sk = sock;
if (sock_queue_rcv_skb(sock, skb) < 0)
- kfree_skb(skb);
-
- return 0;
+ goto freeit;
+ goto out;
+freeit: kfree_skb(skb);
+out: return 0;
}
/*
@@ -1688,21 +1543,18 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
* Caller must provide enough headroom on the packet to pull the short
* header and append a long one.
*/
-static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
+static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
{
struct ddpehdr *ddp;
struct at_addr *ap;
- /*
- * Expand any short form frames.
- */
+ /* Expand any short form frames */
if (skb->mac.raw[2] == 1) {
- /*
- * Find our address.
- */
+ /* Find our address */
ap = atalk_find_dev_addr(dev);
- if (ap == NULL || skb->len < sizeof(struct ddpshdr)) {
+ if (!ap || skb->len < sizeof(struct ddpshdr)) {
kfree_skb(skb);
return 0;
}
@@ -1715,9 +1567,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
skb_push(skb, sizeof(*ddp) - 4);
ddp = (struct ddpehdr *)skb->data;
- /*
- * Now fill in the long header.
- */
+ /* Now fill in the long header */
/*
* These two first. The mac overlays the new source/dest
@@ -1737,7 +1587,6 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
ddp->deh_len = skb->len;
ddp->deh_hops = DDP_MAXHOPS; /* Non routable, so force a drop
if we slip up later */
-
/* Mend the byte order */
*((__u16 *)ddp) = htons(*((__u16 *)ddp));
}
@@ -1746,19 +1595,20 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_
return atalk_rcv(skb, dev, pt);
}
-static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len, struct scm_cookie *scm)
+static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len,
+ struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
struct sockaddr_at *usat = (struct sockaddr_at *)msg->msg_name;
+ int flags = msg->msg_flags;
+ int loopback = 0;
struct sockaddr_at local_satalk, gsat;
struct sk_buff *skb;
struct net_device *dev;
struct ddpehdr *ddp;
int size;
struct atalk_route *rt;
- int loopback = 0;
int err;
- int flags = msg->msg_flags;
if (flags & ~MSG_DONTWAIT)
return -EINVAL;
@@ -1767,19 +1617,18 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len, struc
return -EMSGSIZE;
if (usat) {
- if(sk->zapped) {
+ if (sk->zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
- }
- if (msg->msg_namelen < sizeof(*usat))
- return -EINVAL;
- if (usat->sat_family != AF_APPLETALK)
+ if (msg->msg_namelen < sizeof(*usat) ||
+ usat->sat_family != AF_APPLETALK)
return -EINVAL;
/* netatalk doesn't implement this check */
if (usat->sat_addr.s_node == ATADDR_BCAST && !sk->broadcast) {
- printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as it will break before 2.2\n");
+ printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as "
+ "it will break before 2.2\n");
#if 0
return -EPERM;
#endif
@@ -1795,15 +1644,14 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len, struc
}
/* Build a packet */
-
SOCK_DEBUG(sk, "SK %p: Got address.\n", sk);
/* For headers */
size = sizeof(struct ddpehdr) + len + ddp_dl->header_length;
- if (usat->sat_addr.s_net != 0 || usat->sat_addr.s_node == ATADDR_ANYNODE) {
+ if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) {
rt = atrtr_find(&usat->sat_addr);
- if (rt == NULL)
+ if (!rt)
return -ENETUNREACH;
dev = rt->dev;
@@ -1814,24 +1662,23 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len, struc
at_hint.s_net = sk->protinfo.af_at.src_net;
rt = atrtr_find(&at_hint);
- if (rt == NULL)
+ if (!rt)
return -ENETUNREACH;
dev = rt->dev;
}
- SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name);
+ SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
+ sk, size, dev->name);
size += dev->hard_header_len;
-
skb = sock_alloc_send_skb(sk, size, 0, (flags & MSG_DONTWAIT), &err);
- if (skb == NULL)
+ if (!skb)
return err;
skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
-
skb->dev = dev;
SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
@@ -1871,28 +1718,30 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, int len, struc
* Loopback broadcast packets to non gateway targets (ie routes
* to group we are in)
*/
- if (ddp->deh_dnode == ATADDR_BCAST) {
- if ((!(rt->flags&RTF_GATEWAY)) && (!(dev->flags&IFF_LOOPBACK))) {
- struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL);
- if (skb2) {
- loopback = 1;
- SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
- if (aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL) == -1)
- kfree_skb(skb2);
+ if (ddp->deh_dnode == ATADDR_BCAST &&
+ !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) {
+ struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL);
+
+ if (skb2) {
+ loopback = 1;
+ SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
+ if (aarp_send_ddp(dev, skb2,
+ &usat->sat_addr, NULL) == -1)
+ kfree_skb(skb2);
/* else queued/sent above in the aarp queue */
- }
}
}
- if ((dev->flags & IFF_LOOPBACK) || loopback) {
+ if (dev->flags & IFF_LOOPBACK || loopback) {
SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);
/* loop back */
skb_orphan(skb);
ddp_dl->datalink_header(ddp_dl, skb, dev->dev_addr);
skb->mac.raw = skb->data;
- skb->h.raw = skb->data + ddp_dl->header_length + dev->hard_header_len;
- skb_pull(skb,dev->hard_header_len);
- skb_pull(skb,ddp_dl->header_length);
+ skb->h.raw = skb->data + ddp_dl->header_length +
+ dev->hard_header_len;
+ skb_pull(skb, dev->hard_header_len);
+ skb_pull(skb, ddp_dl->header_length);
atalk_rcv(skb, dev, NULL);
} else {
SOCK_DEBUG(sk, "SK %p: send out.\n", sk);
@@ -1915,15 +1764,15 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, int size,
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
- struct ddpehdr *ddp = NULL;
- struct ddpebits ddphv;
+ struct ddpehdr *ddp = NULL;
int copied = 0;
- struct sk_buff *skb;
int err = 0;
+ struct ddpebits ddphv;
+ struct sk_buff *skb;
- skb = skb_recv_datagram(sk, (flags & ~MSG_DONTWAIT),
- (flags & MSG_DONTWAIT), &err);
- if (skb == NULL)
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
return err;
ddp = (struct ddpehdr *)(skb->h.raw);
@@ -1943,7 +1792,8 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, int size,
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
- err = skb_copy_datagram_iovec(skb, sizeof(*ddp), msg->msg_iov, copied);
+ err = skb_copy_datagram_iovec(skb, sizeof(*ddp),
+ msg->msg_iov, copied);
}
if (!err) {
@@ -1957,7 +1807,6 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, int size,
}
skb_free_datagram(sk, skb); /* Free the datagram. */
-
return err ? err : copied;
}
@@ -1967,50 +1816,40 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, int size,
*/
static int atalk_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
{
- long amount=0;
- struct sock *sk=sock->sk;
+ long amount = 0;
+ struct sock *sk = sock->sk;
- switch(cmd)
- {
- /*
- * Protocol layer
- */
+ switch (cmd) {
+ /* Protocol layer */
case TIOCOUTQ:
amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
- if(amount < 0)
+ if (amount < 0)
amount = 0;
break;
-
case TIOCINQ:
{
- struct sk_buff *skb;
- /* These two are safe on a single CPU system as only user tasks fiddle here */
- if((skb = skb_peek(&sk->receive_queue)) != NULL)
+ /* These two are safe on a single CPU system as only
+ * user tasks fiddle here */
+ struct sk_buff *skb = skb_peek(&sk->receive_queue);
+
+ if (skb)
amount = skb->len-sizeof(struct ddpehdr);
break;
}
-
case SIOCGSTAMP:
- if(sk)
- {
- if(sk->stamp.tv_sec == 0)
- return -ENOENT;
- return (copy_to_user((void *)arg,&sk->stamp,sizeof(struct timeval)) ? -EFAULT : 0);
- }
- return (-EINVAL);
-
- /*
- * Routing
- */
+ if (!sk)
+ return -EINVAL;
+ if (!sk->stamp.tv_sec)
+ return -ENOENT;
+ return copy_to_user((void *)arg, &sk->stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
+ /* Routing */
case SIOCADDRT:
case SIOCDELRT:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return (atrtr_ioctl(cmd,(void *)arg));
-
- /*
- * Interface
- */
+ return atrtr_ioctl(cmd, (void *)arg);
+ /* Interface */
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
@@ -2018,11 +1857,8 @@ static int atalk_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
case SIOCDIFADDR:
case SIOCSARP: /* proxy AARP */
case SIOCDARP: /* proxy AARP */
- return (atif_ioctl(cmd,(void *)arg));
-
- /*
- * Physical layer ioctl calls
- */
+ return atif_ioctl(cmd, (void *)arg);
+ /* Physical layer ioctl calls */
case SIOCSIFLINK:
case SIOCGIFHWADDR:
case SIOCSIFHWADDR:
@@ -2035,8 +1871,7 @@ static int atalk_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
case SIOCGIFCOUNT:
case SIOCGIFINDEX:
case SIOCGIFNAME:
- return ((dev_ioctl(cmd,(void *) arg)));
-
+ return dev_ioctl(cmd,(void *) arg);
case SIOCSIFMETRIC:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
@@ -2045,16 +1880,15 @@ static int atalk_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
case SIOCSIFMEM:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
- return (-EINVAL);
-
+ return -EINVAL;
default:
- return (-EINVAL);
+ return -EINVAL;
}
- return (put_user(amount, (int *)arg));
+ return put_user(amount, (int *)arg);
}
-static struct net_proto_family atalk_family_ops=
+static struct net_proto_family atalk_family_ops =
{
PF_APPLETALK,
atalk_create
@@ -2111,19 +1945,17 @@ struct packet_type ppptalk_packet_type=
static char ddp_snap_id[] = {0x08, 0x00, 0x07, 0x80, 0x9B};
-/*
- * Export symbols for use by drivers when AppleTalk is a module.
- */
+/* Export symbols for use by drivers when AppleTalk is a module */
EXPORT_SYMBOL(aarp_send_ddp);
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
/* Called by proto.c on kernel start up */
-
static int __init atalk_init(void)
{
(void) sock_register(&atalk_family_ops);
- if((ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv)) == NULL)
+ ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
+ if (!ddp_dl)
printk(KERN_CRIT "Unable to register DDP with SNAP.\n");
ltalk_packet_type.type = htons(ETH_P_LOCALTALK);
@@ -2135,19 +1967,16 @@ static int __init atalk_init(void)
register_netdevice_notifier(&ddp_notifier);
aarp_proto_init();
-#ifdef CONFIG_PROC_FS
proc_net_create("appletalk", 0, atalk_get_info);
proc_net_create("atalk_route", 0, atalk_rt_get_info);
proc_net_create("atalk_iface", 0, atalk_if_get_info);
-
+#ifdef CONFIG_PROC_FS
aarp_register_proc_fs();
#endif /* CONFIG_PROC_FS */
-
#ifdef CONFIG_SYSCTL
atalk_register_sysctl();
#endif /* CONFIG_SYSCTL */
-
- printk(KERN_INFO "NET4: AppleTalk 0.18 for Linux NET4.0\n");
+ printk(KERN_INFO "NET4: AppleTalk 0.18a for Linux NET4.0\n");
return 0;
}
module_init(atalk_init);
@@ -2165,32 +1994,24 @@ module_init(atalk_init);
* Ergo, before the AppleTalk module can be removed, all AppleTalk
* sockets be closed from user space.
*/
-
static void __exit atalk_exit(void)
{
#ifdef CONFIG_SYSCTL
atalk_unregister_sysctl();
#endif /* CONFIG_SYSCTL */
-
-#ifdef CONFIG_PROC_FS
proc_net_remove("appletalk");
proc_net_remove("atalk_route");
proc_net_remove("atalk_iface");
-
+#ifdef CONFIG_PROC_FS
aarp_unregister_proc_fs();
#endif /* CONFIG_PROC_FS */
-
aarp_cleanup_module(); /* General aarp clean-up. */
-
unregister_netdevice_notifier(&ddp_notifier);
dev_remove_pack(&ltalk_packet_type);
dev_remove_pack(&ppptalk_packet_type);
unregister_snap_client(ddp_snap_id);
sock_unregister(PF_APPLETALK);
-
- return;
}
module_exit(atalk_exit);
#endif /* MODULE */
-
#endif /* CONFIG_ATALK || CONFIG_ATALK_MODULE */
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 51be9f077..ec0dc2671 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -51,6 +51,11 @@ static unsigned char bridge_ula_lec[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
#define DPRINTK(format,args...)
#endif
+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+ unsigned char *addr);
+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
+
+
#define DUMP_PACKETS 0 /* 0 = None,
* 1 = 30 first bytes
* 2 = Whole packet
@@ -853,8 +858,11 @@ static void __exit lane_module_cleanup(void)
if (dev_lec[i] != NULL) {
priv = (struct lec_priv *)dev_lec[i]->priv;
#if defined(CONFIG_TR)
- unregister_trdev(dev_lec[i]);
+ if (priv->is_trdev)
+ unregister_trdev(dev_lec[i]);
+ else
#endif
+ unregister_netdev(dev_lec[i]);
kfree(dev_lec[i]);
dev_lec[i] = NULL;
}
diff --git a/net/atm/lec.h b/net/atm/lec.h
index f40a37fa3..5dd0494e9 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -16,9 +16,9 @@
#if defined (CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#include <linux/if_bridge.h>
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+extern struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
+extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
#define LEC_HEADER_LEN 16
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 940d69be0..8fcfe3fe4 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -4,7 +4,7 @@
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
- * $Id: br_private.h,v 1.3 2000/05/05 02:17:17 davem Exp $
+ * $Id: br_private.h,v 1.4 2001/01/19 04:51:48 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -112,8 +112,8 @@ struct net_bridge
int gc_interval;
};
-struct notifier_block br_device_notifier;
-unsigned char bridge_ula[6];
+extern struct notifier_block br_device_notifier;
+extern unsigned char bridge_ula[6];
/* br.c */
void br_dec_use_count(void);
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index f4bb62818..b12e1d5ec 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -442,7 +442,6 @@ static void nf_queue(struct sk_buff *skb,
}
}
-/* We have BR_NETPROTO_LOCK here */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
@@ -452,6 +451,9 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
unsigned int verdict;
int ret = 0;
+ /* We may already have this, but read-locks nest anyway */
+ br_read_lock_bh(BR_NETPROTO_LOCK);
+
#ifdef CONFIG_NETFILTER_DEBUG
if (skb->nf_debug & (1 << hook)) {
printk("nf_hook: hook %i already set.\n", hook);
@@ -479,6 +481,7 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
break;
}
+ br_read_unlock_bh(BR_NETPROTO_LOCK);
return ret;
}
diff --git a/net/decnet/Makefile b/net/decnet/Makefile
index ac7a1a462..11f7c8b08 100644
--- a/net/decnet/Makefile
+++ b/net/decnet/Makefile
@@ -11,5 +11,3 @@ obj-y += sysctl_net_decnet.o
include $(TOPDIR)/Rules.make
-tar:
- tar -cvf /dev/f1 .
diff --git a/net/decnet/TODO b/net/decnet/TODO
index 1607d6d1b..c8ea8178d 100644
--- a/net/decnet/TODO
+++ b/net/decnet/TODO
@@ -43,15 +43,15 @@ Steve's quick list of things that need finishing off:
o Hello messages should be generated for each primary address on each
interface.
- o Add more information into /proc/net/decnet and finalise the format to
- allow DECnet support in netstat.
-
- o Make sure that returned connect messages are generated when they should
- be, and that the correct error messages are sent too.
-
o Add the routing message grabbing netfilter module [written, tested,
awaiting merge]
o Add perfect socket hashing - an idea suggested by Paul Koning [part written,
awaiting debugging and merge]
+ o Add session control message flow control
+
+ o Add NSP message flow control
+
+ o DECnet sendpages() function
+
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index bc51b636d..b930878c2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -35,6 +35,7 @@
* Arnaldo C. Melo: use capable, not suser
* Steve Whitehouse: Removed unused code. Fix to use sk->allocation
* when required.
+ * Patrick Caulfield: /proc/net/decnet now has object name/number
*/
@@ -128,8 +129,6 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
-#define MAX(a,b) ((a)>(b)?(a):(b))
-
static void dn_keepalive(struct sock *sk);
/*
@@ -141,15 +140,15 @@ unsigned char decnet_ether_address[ETH_ALEN] = { 0xAA, 0x00, 0x04, 0x00, 0x00, 0
static struct proto_ops dn_proto_ops;
rwlock_t dn_hash_lock = RW_LOCK_UNLOCKED;
-static struct sock *dn_sklist = NULL;
-static struct sock *dn_wild_sk = NULL;
+static struct sock *dn_sklist;
+static struct sock *dn_wild_sk;
static int __dn_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen, int flags);
static struct sock **dn_find_list(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->addr.sdn_flags & SDF_WILD)
return dn_wild_sk ? NULL : &dn_wild_sk;
@@ -159,7 +158,7 @@ static struct sock **dn_find_list(struct sock *sk)
static unsigned short port_alloc(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
static unsigned short port = 0x2000;
if (port == 0)
@@ -177,12 +176,17 @@ static unsigned short port = 0x2000;
*/
static int dn_hash_sock(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sock **skp;
int rv = -EUSERS;
- write_lock_bh(&dn_hash_lock);
+ if (sk->next)
+ BUG();
+ if (sk->pprev)
+ BUG();
+ write_lock_bh(&dn_hash_lock);
+
if (!scp->addrloc && !port_alloc(sk))
goto out;
@@ -327,7 +331,7 @@ struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
read_lock(&dn_hash_lock);
for(sk = dn_sklist; sk != NULL; sk = sk->next) {
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (sk->state != TCP_LISTEN)
continue;
if (scp->addr.sdn_objnum) {
@@ -355,13 +359,13 @@ struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
struct sock *dn_find_by_skb(struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct sock *sk;
struct dn_scp *scp;
read_lock(&dn_hash_lock);
for(sk = dn_sklist; sk != NULL; sk = sk->next) {
- scp = &sk->protinfo.dn;
+ scp = DN_SK(sk);
if (cb->src != dn_saddr2dn(&scp->peer))
continue;
if (cb->dst_port != scp->addrloc)
@@ -383,7 +387,7 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
static void dn_destruct(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
skb_queue_purge(&scp->data_xmit_queue);
skb_queue_purge(&scp->other_xmit_queue);
@@ -394,25 +398,26 @@ static void dn_destruct(struct sock *sk)
MOD_DEC_USE_COUNT;
}
-struct sock *dn_alloc_sock(struct socket *sock, int flags)
+struct sock *dn_alloc_sock(struct socket *sock, int gfp)
{
struct sock *sk;
struct dn_scp *scp;
- if ((sk = sk_alloc(PF_DECnet, flags, 1)) == NULL)
+ if ((sk = sk_alloc(PF_DECnet, gfp, 1)) == NULL)
goto no_sock;
if (sock) {
sock->ops = &dn_proto_ops;
}
sock_init_data(sock,sk);
- scp = &sk->protinfo.dn;
+ scp = DN_SK(sk);
sk->backlog_rcv = dn_nsp_backlog_rcv;
sk->destruct = dn_destruct;
sk->no_check = 1;
sk->family = PF_DECnet;
sk->protocol = 0;
+ sk->allocation = gfp;
/* Initialization of DECnet Session Control Port */
scp->state = DN_O; /* Open */
@@ -424,13 +429,25 @@ struct sock *dn_alloc_sock(struct socket *sock, int flags)
scp->ackrcv_oth = 0; /* Last oth data ack rec*/
scp->flowrem_sw = DN_SEND;
scp->flowloc_sw = DN_SEND;
+ scp->flowrem_dat = 0;
+ scp->flowrem_oth = 1;
+ scp->flowloc_dat = 0;
+ scp->flowloc_oth = 1;
+ scp->services_rem = 0;
+ scp->services_loc = 1 | NSP_FC_NONE;
+ scp->info_rem = 0;
+ scp->info_loc = 0x03; /* NSP version 4.1 */
+ scp->segsize_rem = 230; /* Default: Updated by remote segsize */
+ scp->segsize_loc = 1450; /* Best guess for ethernet */
+ scp->nonagle = 0;
+ scp->multi_ireq = 1;
scp->accept_mode = ACC_IMMED;
scp->addr.sdn_family = AF_DECnet;
scp->peer.sdn_family = AF_DECnet;
scp->accessdata.acc_accl = 5;
memcpy(scp->accessdata.acc_acc, "LINUX", 5);
- scp->mss = 1460;
+ scp->max_window = NSP_MAX_WINDOW;
scp->snd_window = NSP_MIN_WINDOW;
scp->nsp_srtt = NSP_INITIAL_SRTT;
scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
@@ -464,7 +481,7 @@ no_sock:
*/
static void dn_keepalive(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
/*
* By checking the other_data transmit queue is empty
@@ -472,7 +489,7 @@ static void dn_keepalive(struct sock *sk)
* many of these keepalive frames.
*/
if (skb_queue_len(&scp->other_xmit_queue) == 0)
- dn_nsp_send_lnk(sk, DN_NOCHANGE);
+ dn_nsp_send_link(sk, DN_NOCHANGE, 0);
}
@@ -485,7 +502,7 @@ static void dn_keepalive(struct sock *sk)
*/
int dn_destroy_timer(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
scp->persist = dn_nsp_persist(sk);
@@ -527,7 +544,7 @@ int dn_destroy_timer(struct sock *sk)
static void dn_destroy_sock(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
scp->nsp_rxtshift = 0; /* reset back off */
@@ -674,7 +691,7 @@ dn_release(struct socket *sock)
static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
struct net_device *dev;
int rv;
@@ -722,11 +739,8 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
memcpy(&scp->addr, saddr, addr_len);
sk->zapped = 0;
- if ((rv = dn_hash_sock(sk)) == 0)
- goto out;
-
- sk->zapped = 1;
-out:
+ if ((rv = dn_hash_sock(sk)) != 0)
+ sk->zapped = 1;
return rv;
}
@@ -735,7 +749,7 @@ out:
static int dn_auto_bind(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
sk->zapped = 0;
@@ -769,7 +783,7 @@ static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len,
{
struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int err = -EISCONN;
lock_sock(sk);
@@ -788,7 +802,7 @@ static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len,
}
err = -EINVAL;
- if (sk->protinfo.dn.state != DN_O)
+ if (DN_SK(sk)->state != DN_O)
goto out;
if (addr_len != sizeof(struct sockaddr_dn))
@@ -812,7 +826,7 @@ static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len,
sk->state = TCP_SYN_SENT;
sock->state = SS_CONNECTING;
- sk->protinfo.dn.state = DN_CI;
+ DN_SK(sk)->state = DN_CI;
dn_nsp_send_conninit(sk, NSP_CI);
@@ -853,7 +867,7 @@ out:
return err;
}
-static int dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
+static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
{
unsigned char *ptr = skb->data;
@@ -870,10 +884,9 @@ static int dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
- return 0;
}
-static int dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
+static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
{
unsigned char *ptr = skb->data;
@@ -882,7 +895,6 @@ static int dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
memcpy(opt->opt_data, ptr, opt->opt_optl);
skb_pull(skb, opt->opt_optl + 1);
- return 0;
}
@@ -910,7 +922,7 @@ static int dn_wait_accept(struct socket *sock, int flags)
return -ERESTARTSYS; /* But of course you don't! */
}
- if ((sk->protinfo.dn.state != DN_RUN) && (sk->protinfo.dn.state != DN_DRC)) {
+ if ((DN_SK(sk)->state != DN_RUN) && (DN_SK(sk)->state != DN_DRC)) {
sock->state = SS_UNCONNECTED;
return sock_error(sk);
}
@@ -937,7 +949,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
}
- if (sk->protinfo.dn.state != DN_O) {
+ if (DN_SK(sk)->state != DN_O) {
release_sock(sk);
return -EINVAL;
}
@@ -967,7 +979,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
}
} while (skb == NULL);
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
if ((newsk = dn_alloc_sock(newsock, sk->allocation)) == NULL) {
release_sock(sk);
@@ -980,50 +992,52 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
dst_release(xchg(&newsk->dst_cache, skb->dst));
skb->dst = NULL;
- newsk->protinfo.dn.state = DN_CR;
- newsk->protinfo.dn.addrrem = cb->src_port;
- newsk->protinfo.dn.mss = cb->segsize;
- newsk->protinfo.dn.accept_mode = sk->protinfo.dn.accept_mode;
+ DN_SK(newsk)->state = DN_CR;
+ DN_SK(newsk)->addrrem = cb->src_port;
+ DN_SK(newsk)->services_rem = cb->services;
+ DN_SK(newsk)->info_rem = cb->info;
+ DN_SK(newsk)->segsize_rem = cb->segsize;
+ DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
- if (newsk->protinfo.dn.mss < 230)
- newsk->protinfo.dn.mss = 230;
+ if (DN_SK(newsk)->segsize_rem < 230)
+ DN_SK(newsk)->segsize_rem = 230;
newsk->state = TCP_LISTEN;
newsk->zapped = 0;
- memcpy(&newsk->protinfo.dn.addr, &sk->protinfo.dn.addr, sizeof(struct sockaddr_dn));
+ memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
/*
* If we are listening on a wild socket, we don't want
* the newly created socket on the wrong hash queue.
*/
- newsk->protinfo.dn.addr.sdn_flags &= ~SDF_WILD;
+ DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
- skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &newsk->protinfo.dn.addr, &type));
- skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &newsk->protinfo.dn.peer, &type));
- *(dn_address *)newsk->protinfo.dn.peer.sdn_add.a_addr = cb->src;
- *(dn_address *)newsk->protinfo.dn.addr.sdn_add.a_addr = cb->dst;
+ skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
+ skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
+ *(dn_address *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
+ *(dn_address *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
menuver = *skb->data;
skb_pull(skb, 1);
if (menuver & DN_MENUVER_ACC)
- dn_access_copy(skb, &newsk->protinfo.dn.accessdata);
+ dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
if (menuver & DN_MENUVER_USR)
- dn_user_copy(skb, &newsk->protinfo.dn.conndata_in);
+ dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
if (menuver & DN_MENUVER_PRX)
- newsk->protinfo.dn.peer.sdn_flags |= SDF_PROXY;
+ DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
if (menuver & DN_MENUVER_UIC)
- newsk->protinfo.dn.peer.sdn_flags |= SDF_UICPROXY;
+ DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
kfree_skb(skb);
- memcpy(&newsk->protinfo.dn.conndata_out, &sk->protinfo.dn.conndata_out,
+ memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
sizeof(struct optdata_dn));
- memcpy(&newsk->protinfo.dn.discdata_out, &sk->protinfo.dn.discdata_out,
+ memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
sizeof(struct optdata_dn));
lock_sock(newsk);
@@ -1031,9 +1045,13 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
dn_send_conn_ack(newsk);
- if (newsk->protinfo.dn.accept_mode == ACC_IMMED) {
- newsk->protinfo.dn.state = DN_CC;
- dn_send_conn_conf(newsk, newsk->allocation);
+ /*
+ * Here we use sk->allocation since although the conn conf is
+ * for the newsk, the context is the old socket.
+ */
+ if (DN_SK(newsk)->accept_mode == ACC_IMMED) {
+ DN_SK(newsk)->state = DN_CC;
+ dn_send_conn_conf(newsk, sk->allocation);
err = dn_wait_accept(newsock, flags);
}
@@ -1046,7 +1064,7 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
{
struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
*uaddr_len = sizeof(struct sockaddr_dn);
@@ -1070,7 +1088,7 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int mask = datagram_poll(file, sock, wait);
if (skb_queue_len(&scp->other_receive_queue))
@@ -1082,100 +1100,32 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int err = -EOPNOTSUPP;
unsigned long amount = 0;
struct sk_buff *skb;
+ int val;
-#if 0
- struct dn_naddr dnaddr;
-#endif
switch(cmd)
{
case SIOCGIFADDR:
case SIOCSIFADDR:
return dn_dev_ioctl(cmd, (void *)arg);
+ case SIOCATMARK:
+ lock_sock(sk);
+ val = (skb_queue_len(&scp->other_receive_queue) != 0);
+ if (scp->state != DN_RUN)
+ val = -ENOTCONN;
+ release_sock(sk);
+ return val;
+
#ifdef CONFIG_DECNET_ROUTER
case SIOCADDRT:
case SIOCDELRT:
return dn_fib_ioctl(sock, cmd, arg);
#endif /* CONFIG_DECNET_ROUTER */
-#if 0
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
-
- if ((err = copy_from_user(devname, ioarg->devname, 5)) != 0)
- break;
- if ((err = copy_from_user(addr, ioarg->exec_addr, 6)) != 0)
- break;
- if ((dev = dev_get(devname)) == NULL) {
- err = -ENODEV;
- break;
- }
- if (dev->dn_ptr == NULL) {
- err = -ENODEV;
- break;
- }
-
- dn_dev_devices_off();
-
- decnet_default_device = dev;
- memcpy(decnet_ether_address, addr, ETH_ALEN);
- decnet_address = dn_htons(dn_eth2dn(decnet_ether_address));
-
- dn_dev_devices_on();
-
- break;
-
- case SIOCGIFADDR:
- if (decnet_default_device)
- strcpy(devname, decnet_default_device->name);
- else
- memset(devname, 0, 6);
-
- if ((err = copy_to_user(ioarg->devname, devname, 5)) != 0)
- break;
-
- if ((err = copy_to_user(ioarg->exec_addr, decnet_ether_address, 6)) != 0)
- break;
-
- break;
-#endif
-
-#if 0
- case SIOCSNETADDR:
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
-
- if ((err = copy_from_user(&dnaddr, (void *)arg, sizeof(struct dn_naddr))) != 0)
- break;
-
- if (dnaddr.a_len != ETH_ALEN) {
- err = -EINVAL;
- break;
- }
-
- dn_dev_devices_off();
-
- memcpy(decnet_ether_address, dnaddr.a_addr, ETH_ALEN);
- decnet_address = dn_htons(dn_eth2dn(decnet_ether_address));
-
- dn_dev_devices_on();
- break;
-
- case SIOCGNETADDR:
- dnaddr.a_len = ETH_ALEN;
- memcpy(dnaddr.a_addr, decnet_ether_address, ETH_ALEN);
-
- if ((err = copy_to_user((void *)arg, &dnaddr, sizeof(struct dn_naddr))) != 0)
- break;
-
- break;
-#endif
case OSIOCSNETADDR:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
@@ -1237,7 +1187,7 @@ static int dn_listen(struct socket *sock, int backlog)
if (sk->zapped)
goto out;
- if ((sk->protinfo.dn.state != DN_O) || (sk->state == TCP_LISTEN))
+ if ((DN_SK(sk)->state != DN_O) || (sk->state == TCP_LISTEN))
goto out;
sk->max_ack_backlog = backlog;
@@ -1255,7 +1205,7 @@ out:
static int dn_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int err = -ENOTCONN;
lock_sock(sk);
@@ -1300,14 +1250,27 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char *optv
static int __dn_setsockopt(struct socket *sock, int level,int optname, char *optval, int optlen, int flags)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
- struct optdata_dn opt;
- struct accessdata_dn acc;
+ struct dn_scp *scp = DN_SK(sk);
+ union {
+ struct optdata_dn opt;
+ struct accessdata_dn acc;
+ int mode;
+ unsigned long win;
+ int val;
+ unsigned char services;
+ unsigned char info;
+ } u;
int err;
if (optlen && !optval)
return -EINVAL;
+ if (optlen > sizeof(u))
+ return -EINVAL;
+
+ if (copy_from_user(&u, optval, optlen))
+ return -EFAULT;
+
switch(optname) {
case DSO_CONDATA:
if (sock->state == SS_CONNECTED)
@@ -1318,29 +1281,23 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
if (optlen != sizeof(struct optdata_dn))
return -EINVAL;
- if (copy_from_user(&opt, optval, optlen))
- return -EFAULT;
-
- if (opt.opt_optl > 16)
+ if (u.opt.opt_optl > 16)
return -EINVAL;
- memcpy(&scp->conndata_out, &opt, sizeof(struct optdata_dn));
+ memcpy(&scp->conndata_out, &u.opt, optlen);
break;
case DSO_DISDATA:
- if (sock->state != SS_CONNECTED && sk->protinfo.dn.accept_mode == ACC_IMMED)
+ if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
return -ENOTCONN;
if (optlen != sizeof(struct optdata_dn))
return -EINVAL;
- if (copy_from_user(&opt, optval, sizeof(struct optdata_dn)))
- return -EFAULT;
-
- if (opt.opt_optl > 16)
+ if (u.opt.opt_optl > 16)
return -EINVAL;
- memcpy(&scp->discdata_out, &opt, sizeof(struct optdata_dn));
+ memcpy(&scp->discdata_out, &u.opt, optlen);
break;
case DSO_CONACCESS:
@@ -1352,15 +1309,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
if (optlen != sizeof(struct accessdata_dn))
return -EINVAL;
- if (copy_from_user(&acc, optval, sizeof(struct accessdata_dn)))
- return -EFAULT;
-
- if ((acc.acc_accl > DN_MAXACCL) ||
- (acc.acc_passl > DN_MAXACCL) ||
- (acc.acc_userl > DN_MAXACCL))
+ if ((u.acc.acc_accl > DN_MAXACCL) ||
+ (u.acc.acc_passl > DN_MAXACCL) ||
+ (u.acc.acc_userl > DN_MAXACCL))
return -EINVAL;
- memcpy(&scp->accessdata, &acc, sizeof(struct accessdata_dn));
+ memcpy(&scp->accessdata, &u.acc, optlen);
break;
case DSO_ACCEPTMODE:
@@ -1372,16 +1326,10 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
if (optlen != sizeof(int))
return -EINVAL;
- {
- int mode;
-
- if (get_user(mode, optval))
- return -EFAULT;
- if ((mode != ACC_IMMED) && (mode != ACC_DEFER))
- return -EINVAL;
+ if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
+ return -EINVAL;
- scp->accept_mode = (unsigned char)mode;
- }
+ scp->accept_mode = (unsigned char)u.mode;
break;
case DSO_CONACCEPT:
@@ -1411,8 +1359,55 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
case DSO_LINKINFO:
case DSO_STREAM:
case DSO_SEQPACKET:
-
return -ENOPROTOOPT;
+
+ case DSO_MAXWINDOW:
+ if (optlen != sizeof(unsigned long))
+ return -EINVAL;
+ if (u.win > NSP_MAX_WINDOW)
+ u.win = NSP_MAX_WINDOW;
+ if (u.win == 0)
+ return -EINVAL;
+ scp->max_window = u.win;
+ if (scp->snd_window > u.win)
+ scp->snd_window = u.win;
+ break;
+
+ case DSO_NODELAY:
+ if (optlen != sizeof(int))
+ return -EINVAL;
+ if (scp->nonagle == 2)
+ return -EINVAL;
+ scp->nonagle = (u.val == 0) ? 0 : 1;
+ /* if (scp->nonagle == 1) { Push pending frames } */
+ break;
+
+ case DSO_CORK:
+ if (optlen != sizeof(int))
+ return -EINVAL;
+ if (scp->nonagle == 1)
+ return -EINVAL;
+ scp->nonagle = (u.val == 0) ? 0 : 2;
+ /* if (scp->nonagle == 0) { Push pending frames } */
+ break;
+
+ case DSO_SERVICES:
+ if (optlen != sizeof(unsigned char))
+ return -EINVAL;
+ if ((u.services & ~NSP_FC_MASK) != 0x01)
+ return -EINVAL;
+ if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
+ return -EINVAL;
+ scp->services_loc = u.services;
+ break;
+
+ case DSO_INFO:
+ if (optlen != sizeof(unsigned char))
+ return -EINVAL;
+ if (u.info & 0xfc)
+ return -EINVAL;
+ scp->info_loc = u.info;
+ break;
}
return 0;
@@ -1433,44 +1428,40 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char *optv
static int __dn_getsockopt(struct socket *sock, int level,int optname, char *optval,int *optlen, int flags)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct linkinfo_dn link;
- int mode = scp->accept_mode;
+ int r_len = *optlen;
+ void *r_data = NULL;
+ int val;
switch(optname) {
case DSO_CONDATA:
- if (*optlen != sizeof(struct optdata_dn))
- return -EINVAL;
-
- if (copy_to_user(optval, &scp->conndata_in, sizeof(struct optdata_dn)))
- return -EFAULT;
+ if (r_len > sizeof(struct optdata_dn))
+ r_len = sizeof(struct optdata_dn);
+ r_data = &scp->conndata_in;
break;
case DSO_DISDATA:
- if (*optlen != sizeof(struct optdata_dn))
- return -EINVAL;
-
- if (copy_to_user(optval, &scp->discdata_in, sizeof(struct optdata_dn)))
- return -EFAULT;
-
+ if (r_len > sizeof(struct optdata_dn))
+ r_len = sizeof(struct optdata_dn);
+ r_data = &scp->discdata_in;
break;
case DSO_CONACCESS:
- if (*optlen != sizeof(struct accessdata_dn))
- return -EINVAL;
-
- if (copy_to_user(optval, &scp->accessdata, sizeof(struct accessdata_dn)))
- return -EFAULT;
+ if (r_len > sizeof(struct accessdata_dn))
+ r_len = sizeof(struct accessdata_dn);
+ r_data = &scp->accessdata;
break;
case DSO_ACCEPTMODE:
- if (put_user(mode, optval))
- return -EFAULT;
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->accept_mode;
break;
case DSO_LINKINFO:
- if (*optlen != sizeof(struct linkinfo_dn))
- return -EINVAL;
+ if (r_len > sizeof(struct linkinfo_dn))
+ r_len = sizeof(struct linkinfo_dn);
switch(sock->state) {
case SS_CONNECTING:
@@ -1486,10 +1477,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char *opt
link.idn_linkstate = LL_INACTIVE;
}
- link.idn_segsize = scp->mss;
-
- if (copy_to_user(optval, &link, sizeof(struct linkinfo_dn)))
- return -EFAULT;
+ link.idn_segsize = scp->segsize_rem;
+ r_data = &link;
break;
default:
@@ -1508,6 +1497,45 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char *opt
case DSO_CONACCEPT:
case DSO_CONREJECT:
return -ENOPROTOOPT;
+
+ case DSO_MAXWINDOW:
+ if (r_len > sizeof(unsigned long))
+ r_len = sizeof(unsigned long);
+ r_data = &scp->max_window;
+ break;
+
+ case DSO_NODELAY:
+ if (r_len > sizeof(int))
+ r_len = sizeof(int);
+ val = (scp->nonagle == 1);
+ r_data = &val;
+ break;
+
+ case DSO_CORK:
+ if (r_len > sizeof(int))
+ r_len = sizeof(int);
+ val = (scp->nonagle == 2);
+ r_data = &val;
+ break;
+
+ case DSO_SERVICES:
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->services_rem;
+ break;
+
+ case DSO_INFO:
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->info_rem;
+ break;
+ }
+
+ if (r_data) {
+ if (copy_to_user(optval, r_data, r_len))
+ return -EFAULT;
+ if (put_user(r_len, optlen))
+ return -EFAULT;
}
return 0;
@@ -1520,7 +1548,7 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char *opt
*/
static int dn_wait_run(struct sock *sk, int flags)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int err = 0;
switch(scp->state) {
@@ -1572,7 +1600,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
return skb_queue_len(q) ? 1 : 0;
while(skb != (struct sk_buff *)q) {
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
len += skb->len;
if (cb->nsp_flags & 0x40) {
@@ -1599,7 +1627,7 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, int size,
int flags, struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sk_buff_head *queue = &sk->receive_queue;
int target = size > 1 ? 1 : 0;
int copied = 0;
@@ -1681,7 +1709,7 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, int size,
for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
int chunk = skb->len;
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
if ((chunk + copied) > size)
chunk = size - copied;
@@ -1693,7 +1721,7 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, int size,
copied += chunk;
if (!(flags & MSG_PEEK))
- skb->len -= chunk;
+ skb_pull(skb, chunk);
eor = cb->nsp_flags & 0x40;
nskb = skb->next;
@@ -1707,7 +1735,7 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, int size,
*/
if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
scp->flowloc_sw = DN_SEND;
- dn_nsp_send_lnk(sk, DN_SEND);
+ dn_nsp_send_link(sk, DN_SEND, 0);
}
}
@@ -1727,6 +1755,7 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, int size,
rv = copied;
+
if (eor && (sk->type == SOCK_SEQPACKET))
msg->msg_flags |= MSG_EOR;
@@ -1745,16 +1774,31 @@ out:
}
+static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
+{
+ unsigned char fctype = scp->services_rem & NSP_FC_MASK;
+ if (skb_queue_len(queue) >= scp->snd_window)
+ return 1;
+ if (fctype != NSP_FC_NONE) {
+ if (flags & MSG_OOB) {
+ if (scp->flowrem_oth == 0)
+ return 1;
+ } else {
+ if (scp->flowrem_dat == 0)
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
- struct dn_scp *scp = &sk->protinfo.dn;
- int mss = scp->mss;
- int mtu = 230 - 11; /* maximum value thats always safe */
+ struct dn_scp *scp = DN_SK(sk);
+ int mss;
struct sk_buff_head *queue = &scp->data_xmit_queue;
int flags = msg->msg_flags;
- unsigned short numseg = 0;
int err = 0;
int sent = 0;
int addr_len = msg->msg_namelen;
@@ -1765,6 +1809,7 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
unsigned char *ptr;
unsigned short ack;
int len;
+ unsigned char fctype;
if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR))
return -EOPNOTSUPP;
@@ -1801,16 +1846,19 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
if ((flags & MSG_TRYHARD) && sk->dst_cache)
dst_negative_advice(&sk->dst_cache);
+ mss = scp->segsize_rem;
+ fctype = scp->services_rem & NSP_FC_MASK;
+
if (sk->dst_cache && sk->dst_cache->neighbour) {
struct dn_neigh *dn = (struct dn_neigh *)sk->dst_cache->neighbour;
- if (dn->blksize > 230)
- mtu = dn->blksize - 11;
+ if (dn->blksize < (mss + 11))
+ mss = dn->blksize - 11;
}
/*
* The only difference between SEQPACKET & STREAM sockets under DECnet
- * AFAIK is that SEQPACKET sockets set the MSG_EOR flag for the last
- * session control message segment.
+ * is that SEQPACKET sockets set the MSG_EOR flag for the last
+ * session control message segment.
*/
if (flags & MSG_OOB) {
@@ -1822,9 +1870,6 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
}
}
- if (mss < mtu)
- mtu = mss;
-
scp->persist_fxn = dn_nsp_xmit_timeout;
while(sent < size) {
@@ -1842,14 +1887,14 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
*/
len = size - sent;
- if (len > mtu)
- len = mtu;
+ if (len > mss)
+ len = mss;
/*
* Wait for queue size to go down below the window
* size.
*/
- if (skb_queue_len(queue) >= scp->snd_window) {
+ if (dn_queue_too_long(scp, queue, flags)) {
if (flags & MSG_DONTWAIT) {
err = -EWOULDBLOCK;
goto out;
@@ -1857,7 +1902,7 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
SOCK_SLEEP_PRE(sk)
- if (skb_queue_len(queue) >= scp->snd_window)
+ if (dn_queue_too_long(scp, queue, flags))
schedule();
SOCK_SLEEP_POST(sk)
@@ -1876,7 +1921,7 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
if (!skb)
continue;
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
ptr = skb_put(skb, 9);
@@ -1886,26 +1931,34 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
}
if (flags & MSG_OOB) {
- cb->segnum = scp->numoth++;
- scp->numoth &= 0x0fff;
+ cb->segnum = scp->numoth;
+ seq_add(&scp->numoth, 1);
msgflg = 0x30;
- ack = scp->ackxmt_oth | 0x8000;
+ ack = (scp->numoth_rcv & 0x0FFF) | 0x8000;
+ scp->ackxmt_oth = scp->numoth_rcv;
+ if (fctype != NSP_FC_NONE)
+ scp->flowrem_oth--;
} else {
- cb->segnum = scp->numdat++;
- scp->numdat &= 0x0fff;
+ cb->segnum = scp->numdat;
+ seq_add(&scp->numdat, 1);
msgflg = 0x00;
if (sock->type == SOCK_STREAM)
msgflg = 0x60;
- if (scp->seg_size == 0)
+ if (scp->seg_total == 0)
msgflg |= 0x20;
- scp->seg_size += len;
+ scp->seg_total += len;
if (((sent + len) == size) && (flags & MSG_EOR)) {
msgflg |= 0x40;
- scp->seg_size = 0;
+ scp->seg_total = 0;
+ if (fctype == NSP_FC_SCMC)
+ scp->flowrem_dat--;
}
- ack = scp->ackxmt_dat | 0x8000;
+ ack = (scp->numdat_rcv & 0x0FFF) | 0x8000;
+ scp->ackxmt_dat = scp->numdat_rcv;
+ if (fctype == NSP_FC_SRC)
+ scp->flowrem_dat--;
}
*ptr++ = msgflg;
@@ -1918,8 +1971,7 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, int size,
*(__u16 *)ptr = dn_htons(cb->segnum);
sent += len;
- dn_nsp_queue_xmit(sk, skb, flags & MSG_OOB);
- numseg++;
+ dn_nsp_queue_xmit(sk, skb, sk->allocation, flags & MSG_OOB);
skb = NULL;
scp->persist = dn_nsp_persist(sk);
@@ -1955,21 +2007,38 @@ static int dn_device_event(struct notifier_block *this, unsigned long event,
}
static struct notifier_block dn_dev_notifier = {
- dn_device_event,
- 0
+ notifier_call: dn_device_event,
};
extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *);
-static struct packet_type dn_dix_packet_type =
-{
- __constant_htons(ETH_P_DNA_RT),
- NULL, /* All devices */
- dn_route_rcv,
- (void*)1,
- NULL,
+static struct packet_type dn_dix_packet_type = {
+ type: __constant_htons(ETH_P_DNA_RT),
+ dev: NULL, /* All devices */
+ func: dn_route_rcv,
+ data: (void*)1,
};
+#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
+
+static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
+{
+ int i;
+
+ switch (dn_ntohs(dn->sdn_objnamel)) {
+ case 0:
+ sprintf(buf, "%d", dn->sdn_objnum);
+ break;
+ default:
+ for (i = 0; i < dn_ntohs(dn->sdn_objnamel); i++) {
+ buf[i] = dn->sdn_objname[i];
+ if (IS_NOT_PRINTABLE(buf[i]))
+ buf[i] = '.';
+ }
+ buf[i] = 0;
+ }
+}
+
static int dn_get_info(char *buffer, char **start, off_t offset, int length)
{
struct sock *sk;
@@ -1979,15 +2048,20 @@ static int dn_get_info(char *buffer, char **start, off_t offset, int length)
off_t begin = 0;
char buf1[DN_ASCBUF_LEN];
char buf2[DN_ASCBUF_LEN];
+ char local_object[DN_MAXOBJL+3];
+ char remote_object[DN_MAXOBJL+3];
- len += sprintf(buffer + len, "Local Remote\n");
+ len += sprintf(buffer + len, "Local Remote\n");
read_lock(&dn_hash_lock);
for(sk = dn_sklist; sk != NULL; sk = sk->next) {
- scp = &sk->protinfo.dn;
+ scp = DN_SK(sk);
+
+ dn_printable_object(&scp->addr, local_object);
+ dn_printable_object(&scp->peer, remote_object);
len += sprintf(buffer + len,
- "%6s/%04X %04d:%04d %04d:%04d %01d %6s/%04X %04d:%04d %04d:%04d %01d %4s %s\n",
+ "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->addr)), buf1),
scp->addrloc,
scp->numdat,
@@ -1995,6 +2069,7 @@ static int dn_get_info(char *buffer, char **start, off_t offset, int length)
scp->ackxmt_dat,
scp->ackxmt_oth,
scp->flowloc_sw,
+ local_object,
dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->peer)), buf2),
scp->addrrem,
scp->numdat_rcv,
@@ -2002,6 +2077,7 @@ static int dn_get_info(char *buffer, char **start, off_t offset, int length)
scp->ackrcv_dat,
scp->ackrcv_oth,
scp->flowrem_sw,
+ remote_object,
dn_state2asc(scp->state),
((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
@@ -2026,8 +2102,8 @@ static int dn_get_info(char *buffer, char **start, off_t offset, int length)
static struct net_proto_family dn_family_ops = {
- AF_DECnet,
- dn_create
+ family: AF_DECnet,
+ create: dn_create,
};
static struct proto_ops dn_proto_ops = {
@@ -2067,6 +2143,7 @@ MODULE_PARM(addr, "2i");
MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
#endif
+static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.4.0-test12s (C) 1995-2000 Linux DECnet Project Team\n";
static int __init decnet_init(void)
{
@@ -2085,7 +2162,7 @@ static int __init decnet_init(void)
dn_dn2eth(decnet_ether_address, dn_ntohs(decnet_address));
#endif
- printk(KERN_INFO "NET4: DECnet for Linux: V.2.4.0-test10s (C) 1995-2000 Linux DECnet Project Team\n");
+ printk(banner);
sock_register(&dn_family_ops);
dev_add_pack(&dn_dix_packet_type);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d8f91ac38..056eaa043 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -52,7 +52,7 @@ static unsigned char dn_eco_version[3] = {0x02,0x00,0x00};
extern struct neigh_table dn_neigh_table;
-struct net_device *decnet_default_device = NULL;
+struct net_device *decnet_default_device;
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
@@ -1286,9 +1286,7 @@ void __exit dn_dev_cleanup(void)
}
#endif /* CONFIG_SYSCTL */
-#ifdef CONFIG_PROC_FS
proc_net_remove("decnet_dev");
-#endif /* CONFIG_PROC_FS */
dn_dev_devices_off();
}
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index aff2dc05d..f82e26406 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -55,7 +55,7 @@ extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
#endif /* CONFIG_RTNETLINK */
-static struct dn_fib_info *dn_fib_info_list = NULL;
+static struct dn_fib_info *dn_fib_info_list;
static rwlock_t dn_fib_info_lock = RW_LOCK_UNLOCKED;
int dn_fib_info_cnt;
@@ -641,15 +641,11 @@ static int decnet_rt_get_info(char *buffer, char **start, off_t offset, int leng
return 0;
}
-
#endif /* CONFIG_PROC_FS */
-
void __exit dn_fib_cleanup(void)
{
-#ifdef CONFIG_PROC_FS
proc_net_remove("decnet_route");
-#endif
dn_fib_table_cleanup();
dn_fib_rules_cleanup();
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 00c27cdec..f25f37275 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -18,6 +18,8 @@
* forwarding now stands a good chance of
* working.
* Steve Whitehouse : Fixed neighbour states (for now anyway).
+ * Steve Whitehouse : Made error_report functions dummies. This
+ * is not the right place to return skbs.
*
*/
@@ -52,81 +54,66 @@ static int dn_phase3_output(struct sk_buff *);
* For talking to broadcast devices: Ethernet & PPP
*/
static struct neigh_ops dn_long_ops = {
- AF_DECnet,
- NULL,
- NULL,
- dn_long_error_report,
- dn_long_output,
- dn_long_output,
- dev_queue_xmit,
- dev_queue_xmit
+ family: AF_DECnet,
+ error_report: dn_long_error_report,
+ output: dn_long_output,
+ connected_output: dn_long_output,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit,
};
/*
* For talking to pointopoint and multidrop devices: DDCMP and X.25
*/
static struct neigh_ops dn_short_ops = {
- AF_DECnet,
- NULL,
- NULL,
- dn_short_error_report,
- dn_short_output,
- dn_short_output,
- dev_queue_xmit,
- dev_queue_xmit
+ family: AF_DECnet,
+ error_report: dn_short_error_report,
+ output: dn_short_output,
+ connected_output: dn_short_output,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit,
};
/*
* For talking to DECnet phase III nodes
*/
static struct neigh_ops dn_phase3_ops = {
- AF_DECnet,
- NULL,
- NULL,
- dn_short_error_report, /* Can use short version here */
- dn_phase3_output,
- dn_phase3_output,
- dev_queue_xmit,
- dev_queue_xmit
+ family: AF_DECnet,
+ error_report: dn_short_error_report, /* Can use short version here */
+ output: dn_phase3_output,
+ connected_output: dn_phase3_output,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit
};
struct neigh_table dn_neigh_table = {
- NULL,
- PF_DECnet,
- sizeof(struct dn_neigh),
- sizeof(dn_address),
- dn_neigh_hash,
- dn_neigh_construct,
- NULL, /* pconstructor */
- NULL, /* pdestructor */
- NULL, /* proxyredo */
- "dn_neigh_cache",
- {
- NULL,
- NULL,
- &dn_neigh_table,
- 0,
- NULL,
- NULL,
- 30 * HZ, /* base_reachable_time */
- 1 * HZ, /* retrans_time */
- 60 * HZ, /* gc_staletime */
- 30 * HZ, /* reachable_time */
- 5 * HZ, /* delay_probe_time */
- 3, /* queue_len */
- 0, /* ucast_probes */
- 0, /* app_probes */
- 0, /* mcast_probes */
- 0, /* anycast_delay */
- 0, /* proxy_delay */
- 0, /* proxy_qlen */
- 1 * HZ, /* locktime */
+ family: PF_DECnet,
+ entry_size: sizeof(struct dn_neigh),
+ key_len: sizeof(dn_address),
+ hash: dn_neigh_hash,
+ constructor: dn_neigh_construct,
+ id: "dn_neigh_cache",
+ parms: {
+ tbl: &dn_neigh_table,
+ entries: 0,
+ base_reachable_time: 30 * HZ,
+ retrans_time: 1 * HZ,
+ gc_staletime: 60 * HZ,
+ reachable_time: 30 * HZ,
+ delay_probe_time: 5 * HZ,
+ queue_len: 3,
+ ucast_probes: 0,
+ app_probes: 0,
+ mcast_probes: 0,
+ anycast_delay: 0,
+ proxy_delay: 0,
+ proxy_qlen: 0,
+ locktime: 1 * HZ,
},
- 30 * HZ, /* gc_interval */
- 128, /* gc_thresh1 */
- 512, /* gc_thresh2 */
- 1024, /* gc_thresh3 */
-
+ gc_interval: 30 * HZ,
+ gc_thresh1: 128,
+ gc_thresh2: 512,
+ gc_thresh3: 1024,
};
static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
@@ -180,66 +167,15 @@ static int dn_neigh_construct(struct neighbour *neigh)
static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
- unsigned char *ptr;
-
printk(KERN_DEBUG "dn_long_error_report: called\n");
-
- if (!(cb->rt_flags & DN_RT_F_RQR)) {
- kfree_skb(skb);
- return;
- }
-
- skb_push(skb, skb->data - skb->nh.raw);
- ptr = skb->data;
-
- *(unsigned short *)ptr = dn_htons(skb->len - 2);
- ptr += 2;
-
- if (*ptr & DN_RT_F_PF) {
- char padlen = (*ptr & ~DN_RT_F_PF);
- ptr += padlen;
- }
-
- *ptr++ |= (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
-
- ptr += 2;
- dn_dn2eth(ptr, dn_ntohs(cb->src));
- ptr += 8;
- dn_dn2eth(ptr, dn_ntohs(cb->dst));
- ptr += 6;
- *ptr = 0;
-
- skb->dst->neighbour->ops->queue_xmit(skb);
+ kfree_skb(skb);
}
static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
- unsigned char *ptr;
-
printk(KERN_DEBUG "dn_short_error_report: called\n");
-
- if (!(cb->rt_flags & DN_RT_F_RQR)) {
- kfree_skb(skb);
- return;
- }
-
- skb_push(skb, skb->data - skb->nh.raw);
- ptr = skb->data;
-
- *(unsigned short *)ptr = dn_htons(skb->len - 2);
- ptr += 2;
- *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
-
- *(dn_address *)ptr = cb->src;
- ptr += 2;
- *(dn_address *)ptr = cb->dst;
- ptr += 2;
- *ptr = 0;
-
- skb->dst->neighbour->ops->queue_xmit(skb);
+ kfree_skb(skb);
}
static int dn_neigh_output_packet(struct sk_buff *skb)
@@ -266,7 +202,7 @@ static int dn_long_output(struct sk_buff *skb)
int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
unsigned char *data;
struct dn_long_packet *lp;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
if (skb_headroom(skb) < headroom) {
@@ -312,7 +248,7 @@ static int dn_short_output(struct sk_buff *skb)
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
struct dn_short_packet *sp;
unsigned char *data;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
if (skb_headroom(skb) < headroom) {
@@ -355,7 +291,7 @@ static int dn_phase3_output(struct sk_buff *skb)
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
struct dn_short_packet *sp;
unsigned char *data;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
@@ -659,8 +595,6 @@ void __init dn_neigh_init(void)
void __exit dn_neigh_cleanup(void)
{
-#ifdef CONFIG_PROC_FS
proc_net_remove("decnet_neigh");
-#endif /* CONFIG_PROC_FS */
neigh_table_clear(&dn_neigh_table);
}
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 361729458..11a4e8237 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -25,6 +25,9 @@
* Steve Whitehouse:
* Patrick Caulfield: Checking conninits for correctness & sending of error
* responses.
+ * Steve Whitehouse: Added backlog congestion level return codes.
+ * Patrick Caulfield:
+ * Steve Whitehouse: Added flow control support (outbound)
*/
/******************************************************************************
@@ -79,7 +82,7 @@ static void dn_log_martian(struct sk_buff *skb, const char *msg)
{
if (decnet_log_martians && net_ratelimit()) {
char *devname = skb->dev ? skb->dev->name : "???";
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", msg, devname, cb->src, cb->dst, cb->src_port, cb->dst_port);
}
}
@@ -91,7 +94,7 @@ static void dn_log_martian(struct sk_buff *skb, const char *msg)
*/
static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
unsigned short type = ((ack >> 12) & 0x0003);
int wakeup = 0;
@@ -212,7 +215,7 @@ static struct {
*/
static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data;
struct sockaddr_dn dstaddr;
struct sockaddr_dn srcaddr;
@@ -331,33 +334,26 @@ static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct dn_scp *scp = DN_SK(sk);
+ unsigned char *ptr;
- if (skb->len < 3)
+ if (skb->len < 4)
goto out;
- cb->services = *skb->data;
- cb->info = *(skb->data+1);
- skb_pull(skb, 2);
- cb->segsize = dn_ntohs(*(__u16 *)skb->data);
- skb_pull(skb, 2);
-
- /*
- * FIXME: Check out services and info fields to check that
- * we can talk to this kind of node.
- */
+ ptr = skb->data;
+ cb->services = *ptr++;
+ cb->info = *ptr++;
+ cb->segsize = dn_ntohs(*(__u16 *)ptr);
if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
scp->persist = 0;
scp->addrrem = cb->src_port;
sk->state = TCP_ESTABLISHED;
scp->state = DN_RUN;
-
- if (scp->mss > cb->segsize)
- scp->mss = cb->segsize;
- if (scp->mss < 230)
- scp->mss = 230;
+ scp->services_rem = cb->services;
+ scp->info_rem = cb->info;
+ scp->segsize_rem = cb->segsize;
if (skb->len > 0) {
unsigned char dlen = *skb->data;
@@ -366,7 +362,7 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
memcpy(scp->conndata_in.opt_data, skb->data + 1, dlen);
}
}
- dn_nsp_send_lnk(sk, DN_NOCHANGE);
+ dn_nsp_send_link(sk, DN_NOCHANGE, 0);
if (!sk->dead)
sk->state_change(sk);
}
@@ -377,7 +373,7 @@ out:
static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->state == DN_CI) {
scp->state = DN_CD;
@@ -389,8 +385,8 @@ static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_scp *scp = DN_SK(sk);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned short reason;
if (skb->len < 2)
@@ -448,7 +444,7 @@ out:
*/
static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
unsigned short reason;
if (skb->len != 2)
@@ -492,38 +488,65 @@ out:
static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_scp *scp = DN_SK(sk);
unsigned short segnum;
unsigned char lsflags;
char fcval;
+ int wake_up = 0;
+ char *ptr = skb->data;
+ unsigned char fctype = scp->services_rem & NSP_FC_MASK;
if (skb->len != 4)
goto out;
- cb->segnum = segnum = dn_ntohs(*(__u16 *)skb->data);
- skb_pull(skb, 2);
- lsflags = *(unsigned char *)skb->data;
- skb_pull(skb, 1);
- fcval = *(char *)skb->data;
+ segnum = dn_ntohs(*(__u16 *)ptr);
+ ptr += 2;
+ lsflags = *(unsigned char *)ptr++;
+ fcval = *ptr;
- if (lsflags & 0xf0)
+ /*
+ * Here we ignore erroneous packets which really
+ * should cause a connection abort. It is not critical
+ * for now though.
+ */
+ if (lsflags & 0xf8)
goto out;
- if (((sk->protinfo.dn.numoth_rcv + 1) & 0x0FFF) == (segnum & 0x0FFF)) {
- sk->protinfo.dn.numoth_rcv += 1;
- switch(lsflags & 0x03) {
- case 0x00:
- break;
- case 0x01:
- sk->protinfo.dn.flowrem_sw = DN_DONTSEND;
- break;
- case 0x02:
- sk->protinfo.dn.flowrem_sw = DN_SEND;
+ if (seq_next(scp->numoth_rcv, segnum)) {
+ seq_add(&scp->numoth_rcv, 1);
+ switch(lsflags & 0x04) { /* FCVAL INT */
+ case 0x00: /* Normal Request */
+ switch(lsflags & 0x03) { /* FCVAL MOD */
+ case 0x00: /* Request count */
+ if (fcval < 0) {
+ unsigned char p_fcval = -fcval;
+ if ((scp->flowrem_dat > p_fcval) &&
+ (fctype == NSP_FC_SCMC)) {
+ scp->flowrem_dat -= p_fcval;
+ }
+ } else if (fcval > 0) {
+ scp->flowrem_dat += fcval;
+ wake_up = 1;
+ }
+ break;
+ case 0x01: /* Stop outgoing data */
+ scp->flowrem_sw = DN_DONTSEND;
+ break;
+ case 0x02: /* Ok to start again */
+ scp->flowrem_sw = DN_SEND;
dn_nsp_output(sk);
- if (!sk->dead)
- sk->state_change(sk);
+ wake_up = 1;
+ }
+ break;
+ case 0x04: /* Interrupt Request */
+ if (fcval > 0) {
+ scp->flowrem_oth += fcval;
+ wake_up = 1;
+ }
+ break;
}
-
+ if (wake_up && !sk->dead)
+ sk->state_change(sk);
}
dn_nsp_send_oth_ack(sk);
@@ -582,9 +605,9 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
unsigned short segnum;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
int queued = 0;
if (skb->len < 2)
@@ -593,10 +616,10 @@ static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
cb->segnum = segnum = dn_ntohs(*(__u16 *)skb->data);
skb_pull(skb, 2);
- if (((sk->protinfo.dn.numoth_rcv + 1) & 0x0fff) == (segnum & 0x0fff)) {
+ if (seq_next(scp->numoth_rcv, segnum)) {
if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
- sk->protinfo.dn.numoth_rcv++;
+ seq_add(&scp->numoth_rcv, 1);
scp->other_report = 0;
queued = 1;
}
@@ -612,8 +635,8 @@ static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
{
int queued = 0;
unsigned short segnum;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct dn_scp *scp = DN_SK(sk);
if (skb->len < 2)
goto out;
@@ -621,17 +644,15 @@ static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
cb->segnum = segnum = dn_ntohs(*(__u16 *)skb->data);
skb_pull(skb, 2);
- if (((sk->protinfo.dn.numdat_rcv + 1) & 0x0FFF) ==
- (segnum & 0x0FFF)) {
-
+ if (seq_next(scp->numdat_rcv, segnum)) {
if (dn_queue_skb(sk, skb, SIGIO, &sk->receive_queue) == 0) {
- sk->protinfo.dn.numdat_rcv++;
+ seq_add(&scp->numdat_rcv, 1);
queued = 1;
}
if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
scp->flowloc_sw = DN_DONTSEND;
- dn_nsp_send_lnk(sk, DN_DONTSEND);
+ dn_nsp_send_link(sk, DN_DONTSEND, 0);
}
}
@@ -648,7 +669,7 @@ out:
*/
static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->state == DN_CI) {
scp->state = DN_NC;
@@ -660,28 +681,37 @@ static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
-static void dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
+static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ int ret = NET_RX_DROP;
+
+ /* Must not reply to returned packets */
+ if (cb->rt_flags & DN_RT_F_RTS)
+ goto out;
if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
switch(cb->nsp_flags & 0x70) {
case 0x10:
case 0x60: /* (Retransmitted) Connect Init */
dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
+ ret = NET_RX_SUCCESS;
break;
case 0x20: /* Connect Confirm */
dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
+ ret = NET_RX_SUCCESS;
break;
}
}
+out:
kfree_skb(skb);
+ return ret;
}
static int dn_nsp_rx_packet(struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct sock *sk = NULL;
unsigned char *ptr = (unsigned char *)skb->data;
unsigned short reason = NSP_REASON_NL;
@@ -754,14 +784,19 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
sk = dn_find_by_skb(skb);
got_it:
if (sk != NULL) {
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int ret;
/* Reset backoff */
scp->nsp_rxtshift = 0;
bh_lock_sock(sk);
- ret = 0;
+ ret = NET_RX_SUCCESS;
+ if (decnet_debug_level & 8)
+ printk(KERN_DEBUG "NSP: 0x%02x 0x%02x 0x%04x 0x%04x %d\n",
+ (int)cb->rt_flags, (int)cb->nsp_flags,
+ (int)cb->src_port, (int)cb->dst_port,
+ (int)sk->lock.users);
if (sk->lock.users == 0)
ret = dn_nsp_backlog_rcv(sk, skb);
else
@@ -772,12 +807,11 @@ got_it:
return ret;
}
- dn_nsp_no_socket(skb, reason);
- return 1;
+ return dn_nsp_no_socket(skb, reason);
free_out:
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
int dn_nsp_rx(struct sk_buff *skb)
@@ -792,12 +826,12 @@ int dn_nsp_rx(struct sk_buff *skb)
*/
int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct dn_scp *scp = &sk->protinfo.dn;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_scp *scp = DN_SK(sk);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
if (cb->rt_flags & DN_RT_F_RTS) {
dn_returned_conn_init(sk, skb);
- return 0;
+ return NET_RX_SUCCESS;
}
/*
@@ -875,6 +909,6 @@ free_out:
}
}
- return 0;
+ return NET_RX_SUCCESS;
}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 6965cbf42..5e8482ce0 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -20,6 +20,7 @@
* Steve Whitehouse: New output state machine
* Paul Koning: Connect Confirm message fix.
* Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets.
+ * Steve Whitehouse: dn_nsp_output() and friends needed a spring clean
*/
/******************************************************************************
@@ -165,7 +166,7 @@ struct sk_buff *dn_alloc_send_skb(struct sock *sk, int *size, int noblock, int *
*/
unsigned long dn_nsp_persist(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
@@ -188,7 +189,7 @@ unsigned long dn_nsp_persist(struct sock *sk)
*/
static void dn_nsp_rtt(struct sock *sk, long rtt)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
long srtt = (long)scp->nsp_srtt;
long rttvar = (long)scp->nsp_rttvar;
long delta;
@@ -223,65 +224,64 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
/* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */
}
-/*
- * Walk the queues, otherdata/linkservice first. Send as many
- * frames as the window allows, increment send counts on all
- * skbs which are sent. Reduce the window if we are retransmitting
- * frames.
+/**
+ * dn_nsp_clone_and_send - Send a data packet by cloning it
+ * @skb: The packet to clone and transmit
+ * @gfp: memory allocation flag
+ *
+ * Clone a queued data or other data packet and transmit it.
+ *
+ * Returns: The number of times the packet has been sent previously
*/
-void dn_nsp_output(struct sock *sk)
+static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp)
{
- struct dn_scp *scp = &sk->protinfo.dn;
- unsigned long win = scp->snd_window;
- struct sk_buff *skb, *skb2, *list;
- struct dn_skb_cb *cb;
- int reduce_win = 0;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct sk_buff *skb2;
+ int ret = 0;
- /* printk(KERN_DEBUG "dn_nsp_output: ping\n"); */
+ if ((skb2 = skb_clone(skb, gfp)) != NULL) {
+ ret = cb->xmit_count;
+ cb->xmit_count++;
+ cb->stamp = jiffies;
+ skb2->sk = skb->sk;
+ dn_nsp_send(skb2);
+ }
+
+ return ret;
+}
+
+/**
+ * dn_nsp_output - Try and send something from socket queues
+ * @sk: The socket whose queues are to be investigated
+ * @gfp: The memory allocation flags
+ *
+ * Try and send the packet on the end of the data and other data queues.
+ * Other data gets priority over data, and if we retransmit a packet we
+ * reduce the window by dividing it in two.
+ *
+ */
+void dn_nsp_output(struct sock *sk)
+{
+ struct dn_scp *scp = DN_SK(sk);
+ struct sk_buff *skb;
+ unsigned reduce_win = 0;
/*
* First we check for otherdata/linkservice messages
*/
- skb = scp->other_xmit_queue.next;
- list = (struct sk_buff *)&scp->other_xmit_queue;
- while(win && (skb != list)) {
- if ((skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- cb = (struct dn_skb_cb *)skb;
- if (cb->xmit_count > 0)
- reduce_win = 1;
- else
- cb->stamp = jiffies;
- cb->xmit_count++;
- skb2->sk = sk;
- dn_nsp_send(skb2);
- }
- skb = skb->next;
- win--;
- }
+ if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
+ reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
/*
* If we may not send any data, we don't.
- * Should this apply to otherdata as well ? - SJW
+ * If we are still trying to get some other data down the
+ * channel, we don't try and send any data.
*/
- if (scp->flowrem_sw != DN_SEND)
+ if (reduce_win || (scp->flowrem_sw != DN_SEND))
goto recalc_window;
- skb = scp->data_xmit_queue.next;
- list = (struct sk_buff *)&scp->data_xmit_queue;
- while(win && (skb != list)) {
- if ((skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- cb = (struct dn_skb_cb *)skb;
- if (cb->xmit_count > 0)
- reduce_win = 1;
- else
- cb->stamp = jiffies;
- cb->xmit_count++;
- skb2->sk = sk;
- dn_nsp_send(skb2);
- }
- skb = skb->next;
- win--;
- }
+ if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
+ reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
/*
* If we've sent any frame more than once, we cut the
@@ -290,7 +290,6 @@ void dn_nsp_output(struct sock *sk)
*/
recalc_window:
if (reduce_win) {
- /* printk(KERN_DEBUG "Window reduction %ld\n", scp->snd_window); */
scp->snd_window >>= 1;
if (scp->snd_window < NSP_MIN_WINDOW)
scp->snd_window = NSP_MIN_WINDOW;
@@ -299,7 +298,7 @@ recalc_window:
int dn_nsp_xmit_timeout(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
dn_nsp_output(sk);
@@ -309,14 +308,60 @@ int dn_nsp_xmit_timeout(struct sock *sk)
return 0;
}
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int oth)
+static inline unsigned char *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
+{
+ unsigned char *ptr = skb_push(skb, len);
+
+ if (len < 5)
+ BUG();
+
+ *ptr++ = msgflag;
+ *((unsigned short *)ptr) = scp->addrrem;
+ ptr += 2;
+ *((unsigned short *)ptr) = scp->addrloc;
+ ptr += 2;
+ return ptr;
+}
+
+static unsigned short *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
+{
+ struct dn_scp *scp = DN_SK(sk);
+ unsigned short acknum = scp->numdat_rcv & 0x0FFF;
+ unsigned short ackcrs = scp->numoth_rcv & 0x0FFF;
+ unsigned short *ptr;
+
+ if (hlen < 9)
+ BUG();
+
+ scp->ackxmt_dat = acknum;
+ scp->ackxmt_oth = ackcrs;
+ acknum |= 0x8000;
+ ackcrs |= 0x8000;
+
+ /* If this is an "other data/ack" message, swap acknum and ackcrs */
+ if (other) {
+ unsigned short tmp = acknum;
+ acknum = ackcrs;
+ ackcrs = tmp;
+ }
+
+ /* Set "cross subchannel" bit in ackcrs */
+ ackcrs |= 0x2000;
+
+ ptr = (unsigned short *)dn_mk_common_header(scp, skb, msgflag, hlen);
+
+ *ptr++ = dn_htons(acknum);
+ *ptr++ = dn_htons(ackcrs);
+
+ return ptr;
+}
+
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth)
{
- struct dn_scp *scp = &sk->protinfo.dn;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_scp *scp = DN_SK(sk);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
- struct sk_buff *skb2;
- if (t < HZ) t = HZ;
/*
* Slow start: If we have been idle for more than
* one RTT, then reset window to min size.
@@ -336,20 +381,17 @@ void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int oth)
if (scp->flowrem_sw != DN_SEND)
return;
- if ((skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- cb->stamp = jiffies;
- cb->xmit_count++;
- skb2->sk = sk;
- dn_nsp_send(skb2);
- }
+ dn_nsp_clone_and_send(skb, gfp);
}
+
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb2, *list, *ack = NULL;
int wakeup = 0;
+ int try_retrans = 0;
unsigned long reftime = cb->stamp;
unsigned long pkttime;
unsigned short xmit_count;
@@ -358,7 +400,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
skb2 = q->next;
list = (struct sk_buff *)q;
while(list != skb2) {
- struct dn_skb_cb *cb2 = (struct dn_skb_cb *)skb2->cb;
+ struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);
if (before_or_equal(cb2->segnum, acknum))
ack = skb2;
@@ -372,27 +414,50 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
/* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */
+ /* Does _last_ packet acked have xmit_count > 1 */
+ try_retrans = 0;
+ /* Remember to wake up the sending process */
wakeup = 1;
+ /* Keep various statistics */
pkttime = cb2->stamp;
xmit_count = cb2->xmit_count;
segnum = cb2->segnum;
+ /* Remove and drop ack'ed packet */
skb_unlink(ack);
kfree_skb(ack);
ack = NULL;
+
+ /*
+ * We don't expect to see acknowledgements for packets we
+ * haven't sent yet.
+ */
+ if (xmit_count == 0)
+ BUG();
+ /*
+ * If the packet has only been sent once, we can use it
+ * to calculate the RTT and also open the window a little
+ * further.
+ */
if (xmit_count == 1) {
if (equal(segnum, acknum))
dn_nsp_rtt(sk, (long)(pkttime - reftime));
- if (scp->snd_window < NSP_MAX_WINDOW)
+ if (scp->snd_window < scp->max_window)
scp->snd_window++;
}
+
+ /*
+ * Packet has been sent more than once. If this is the last
+ * packet to be acknowledged then we want to send the next
+ * packet in the send queue again (assumes the remote host does
+ * go-back-N error control).
+ */
+ if (xmit_count > 1)
+ try_retrans = 1;
}
-#if 0 /* Turned off due to possible interference in socket shutdown */
- if ((skb_queue_len(&scp->data_xmit_queue) == 0) &&
- (skb_queue_len(&scp->other_xmit_queue) == 0))
- scp->persist = 0;
-#endif
+ if (try_retrans)
+ dn_nsp_output(sk);
return wakeup;
}
@@ -400,47 +465,31 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
void dn_nsp_send_data_ack(struct sock *sk)
{
struct sk_buff *skb = NULL;
- struct nsp_data_ack_msg *msg;
- if ((skb = dn_alloc_skb(sk, 200, GFP_ATOMIC)) == NULL)
+ if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
return;
-
- msg = (struct nsp_data_ack_msg *)skb_put(skb,sizeof(*msg));
-
- msg->msgflg = 0x04; /* data ack message */
- msg->dstaddr = sk->protinfo.dn.addrrem;
- msg->srcaddr = sk->protinfo.dn.addrloc;
- msg->acknum = dn_htons((sk->protinfo.dn.numdat_rcv & 0x0FFF) | 0x8000);
-
- sk->protinfo.dn.ackxmt_dat = sk->protinfo.dn.numdat_rcv;
+ skb_reserve(skb, 9);
+ dn_mk_ack_header(sk, skb, 0x04, 9, 0);
dn_nsp_send(skb);
}
void dn_nsp_send_oth_ack(struct sock *sk)
{
struct sk_buff *skb = NULL;
- struct nsp_data_ack_msg *msg;
- if ((skb = dn_alloc_skb(sk, 200, GFP_ATOMIC)) == NULL)
+ if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
return;
-
- msg = (struct nsp_data_ack_msg *)skb_put(skb,sizeof(*msg));
-
- msg->msgflg = 0x14; /* oth ack message */
- msg->dstaddr = sk->protinfo.dn.addrrem;
- msg->srcaddr = sk->protinfo.dn.addrloc;
- msg->acknum = dn_htons((sk->protinfo.dn.numoth_rcv & 0x0FFF) | 0x8000);
-
- sk->protinfo.dn.ackxmt_oth = sk->protinfo.dn.numoth_rcv;
+ skb_reserve(skb, 9);
+ dn_mk_ack_header(sk, skb, 0x14, 9, 1);
dn_nsp_send(skb);
}
void dn_send_conn_ack (struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb = NULL;
struct nsp_conn_ack_msg *msg;
@@ -456,7 +505,7 @@ void dn_send_conn_ack (struct sock *sk)
void dn_nsp_delayed_ack(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->ackxmt_oth != scp->numoth_rcv)
dn_nsp_send_oth_ack(sk);
@@ -467,7 +516,7 @@ void dn_nsp_delayed_ack(struct sock *sk)
static int dn_nsp_retrans_conn_conf(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->state == DN_CC)
dn_send_conn_conf(sk, GFP_ATOMIC);
@@ -477,7 +526,7 @@ static int dn_nsp_retrans_conn_conf(struct sock *sk)
void dn_send_conn_conf(struct sock *sk, int gfp)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb = NULL;
struct nsp_conn_init_msg *msg;
unsigned char len = scp->conndata_out.opt_optl;
@@ -489,9 +538,9 @@ void dn_send_conn_conf(struct sock *sk, int gfp)
msg->msgflg = 0x28;
msg->dstaddr = scp->addrrem;
msg->srcaddr = scp->addrloc;
- msg->services = 0x01;
- msg->info = 0x03;
- msg->segsize = dn_htons(0x05B3);
+ msg->services = scp->services_loc;
+ msg->info = scp->info_loc;
+ msg->segsize = dn_htons(scp->segsize_loc);
*skb_put(skb,1) = len;
@@ -551,7 +600,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
unsigned short reason, int gfp)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
int ddl = 0;
if (msgflg == NSP_DISCINIT)
@@ -568,7 +617,7 @@ void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
unsigned short reason)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
int ddl = 0;
int gfp = GFP_ATOMIC;
@@ -577,38 +626,35 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
}
-void dn_nsp_send_lnk(struct sock *sk, unsigned short flgs)
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
{
- struct dn_scp *scp = &sk->protinfo.dn;
- struct sk_buff *skb = NULL;
- struct nsp_data_seg_msg *msg;
- struct nsp_data_opt_msg *msg1;
- struct dn_skb_cb *cb;
+ struct dn_scp *scp = DN_SK(sk);
+ struct sk_buff *skb;
+ unsigned short *segnum;
+ unsigned char *ptr;
+ int gfp = GFP_ATOMIC;
- if ((skb = dn_alloc_skb(sk, 80, GFP_ATOMIC)) == NULL)
+ if ((skb = dn_alloc_skb(sk, 13, gfp)) == NULL)
return;
- cb = (struct dn_skb_cb *)skb->cb;
- msg = (struct nsp_data_seg_msg *)skb_put(skb, sizeof(*msg));
- msg->msgflg = 0x10; /* Link svc message */
- msg->dstaddr = scp->addrrem;
- msg->srcaddr = scp->addrloc;
+ skb_reserve(skb, 13);
+ segnum = dn_mk_ack_header(sk, skb, 0x10, 13, 1);
+ *segnum = dn_htons(scp->numoth);
+ DN_SKB_CB(skb)->segnum = scp->numoth;
+ seq_add(&scp->numoth, 1);
+ ptr = (unsigned char *)(segnum + 1);
+ *ptr++ = lsflags;
+ *ptr = fcval;
- msg1 = (struct nsp_data_opt_msg *)skb_put(skb, sizeof(*msg1));
- msg1->acknum = dn_htons((scp->ackxmt_oth & 0x0FFF) | 0x8000);
- msg1->segnum = dn_htons(cb->segnum = (scp->numoth++ & 0x0FFF));
- msg1->lsflgs = flgs;
-
- dn_nsp_queue_xmit(sk, skb, 1);
+ dn_nsp_queue_xmit(sk, skb, gfp, 1);
scp->persist = dn_nsp_persist(sk);
scp->persist_fxn = dn_nsp_xmit_timeout;
-
}
static int dn_nsp_retrans_conninit(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->state == DN_CI)
dn_nsp_send_conninit(sk, NSP_RCI);
@@ -618,7 +664,7 @@ static int dn_nsp_retrans_conninit(struct sock *sk)
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb = NULL;
struct nsp_conn_init_msg *msg;
unsigned char aux;
@@ -629,16 +675,16 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
if ((skb = dn_alloc_skb(sk, 200, (msgflg == NSP_CI) ? sk->allocation : GFP_ATOMIC)) == NULL)
return;
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
msg = (struct nsp_conn_init_msg *)skb_put(skb,sizeof(*msg));
msg->msgflg = msgflg;
msg->dstaddr = 0x0000; /* Remote Node will assign it*/
- msg->srcaddr = sk->protinfo.dn.addrloc;
- msg->services = 1 | NSP_FC_NONE; /* Requested flow control */
- msg->info = 0x03; /* Version Number */
- msg->segsize = dn_htons(1459); /* Max segment size */
+ msg->srcaddr = scp->addrloc;
+ msg->services = scp->services_loc; /* Requested flow control */
+ msg->info = scp->info_loc; /* Version Number */
+ msg->segsize = dn_htons(scp->segsize_loc); /* Max segment size */
if (scp->peer.sdn_objnum)
type = 0;
@@ -674,8 +720,8 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
if (aux > 0)
memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux);
- sk->protinfo.dn.persist = dn_nsp_persist(sk);
- sk->protinfo.dn.persist_fxn = dn_nsp_retrans_conninit;
+ scp->persist = dn_nsp_persist(sk);
+ scp->persist_fxn = dn_nsp_retrans_conninit;
cb->rt_flags = DN_RT_F_RQR;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 70646fc11..b40c601b6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -33,6 +33,9 @@
* Steve Whitehouse : Real SMP at last :-) Also new netfilter
* stuff. Look out raw sockets your days
* are numbered!
+ * Steve Whitehouse : Added return-to-sender functions. Added
+ * backlog congestion level return codes.
+ *
*/
/******************************************************************************
@@ -109,17 +112,16 @@ static struct timer_list dn_rt_flush_timer = { function: dn_run_flush };
int decnet_dst_gc_interval = 2;
static struct dst_ops dn_dst_ops = {
- PF_DECnet,
- __constant_htons(ETH_P_DNA_RT),
- 128,
- dn_dst_gc,
- dn_dst_check,
- dn_dst_reroute,
- NULL,
- dn_dst_negative_advice,
- dn_dst_link_failure,
- sizeof(struct dn_route),
- ATOMIC_INIT(0)
+ family: PF_DECnet,
+ protocol: __constant_htons(ETH_P_DNA_RT),
+ gc_thresh: 128,
+ gc: dn_dst_gc,
+ check: dn_dst_check,
+ reroute: dn_dst_reroute,
+ negative_advice: dn_dst_negative_advice,
+ link_failure: dn_dst_link_failure,
+ entry_size: sizeof(struct dn_route),
+ entries: ATOMIC_INIT(0),
};
static __inline__ unsigned dn_hash(unsigned short src, unsigned short dst)
@@ -294,21 +296,131 @@ void dn_rt_cache_flush(int delay)
spin_unlock_bh(&dn_rt_flush_lock);
}
+/**
+ * dn_return_short - Return a short packet to its sender
+ * @skb: The packet to return
+ *
+ */
+static int dn_return_short(struct sk_buff *skb)
+{
+ struct dn_skb_cb *cb;
+ unsigned char *ptr;
+ dn_address *src;
+ dn_address *dst;
+ dn_address tmp;
+
+ /* Add back headers */
+ skb_push(skb, skb->data - skb->nh.raw);
+
+ if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+
+ cb = DN_SKB_CB(skb);
+ /* Skip packet length and point to flags */
+ ptr = skb->data + 2;
+ *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
+
+ dst = (dn_address *)ptr;
+ ptr += 2;
+ src = (dn_address *)ptr;
+ ptr += 2;
+ *ptr = 0; /* Zero hop count */
+
+ /* Swap source and destination */
+ tmp = *src;
+ *src = *dst;
+ *dst = tmp;
+
+ skb->pkt_type = PACKET_OUTGOING;
+ dn_rt_finish_output(skb, NULL);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * dn_return_long - Return a long packet to its sender
+ * @skb: The long format packet to return
+ *
+ */
+static int dn_return_long(struct sk_buff *skb)
+{
+ struct dn_skb_cb *cb;
+ unsigned char *ptr;
+ unsigned char *src_addr, *dst_addr;
+ unsigned char tmp[ETH_ALEN];
+
+ /* Add back all headers */
+ skb_push(skb, skb->data - skb->nh.raw);
+
+ if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+ cb = DN_SKB_CB(skb);
+ /* Ignore packet length and point to flags */
+ ptr = skb->data + 2;
+
+ /* Skip padding */
+ if (*ptr & DN_RT_F_PF) {
+ char padlen = (*ptr & ~DN_RT_F_PF);
+ ptr += padlen;
+ }
+
+ *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
+ ptr += 2;
+ dst_addr = ptr;
+ ptr += 8;
+ src_addr = ptr;
+ ptr += 6;
+ *ptr = 0; /* Zero hop count */
+
+ /* Swap source and destination */
+ memcpy(tmp, src_addr, ETH_ALEN);
+ memcpy(src_addr, dst_addr, ETH_ALEN);
+ memcpy(dst_addr, tmp, ETH_ALEN);
+
+ skb->pkt_type = PACKET_OUTGOING;
+ dn_rt_finish_output(skb, tmp);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * dn_route_rx_packet - Try and find a route for an incoming packet
+ * @skb: The packet to find a route for
+ *
+ * Returns: result of input function if route is found, error code otherwise
+ */
static int dn_route_rx_packet(struct sk_buff *skb)
{
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
int err;
if ((err = dn_route_input(skb)) == 0)
return skb->dst->input(skb);
+ if (decnet_debug_level & 4) {
+ char *devname = skb->dev ? skb->dev->name : "???";
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ printk(KERN_DEBUG
+ "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
+ (int)cb->rt_flags, devname, skb->len, cb->src, cb->dst,
+ err, skb->pkt_type);
+ }
+
+ if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
+ switch(cb->rt_flags & DN_RT_PKT_MSK) {
+ case DN_RT_PKT_SHORT:
+ return dn_return_short(skb);
+ case DN_RT_PKT_LONG:
+ return dn_return_long(skb);
+ }
+ }
+
kfree_skb(skb);
- return err;
+ return NET_RX_DROP;
}
static int dn_route_rx_long(struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned char *ptr = skb->data;
if (skb->len < 21) /* 20 for long header, 1 for shortest nsp */
@@ -339,14 +451,14 @@ static int dn_route_rx_long(struct sk_buff *skb)
drop_it:
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
static int dn_route_rx_short(struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned char *ptr = skb->data;
if (skb->len < 6) /* 5 for short header + 1 for shortest nsp */
@@ -365,29 +477,33 @@ static int dn_route_rx_short(struct sk_buff *skb)
drop_it:
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
static int dn_route_discard(struct sk_buff *skb)
{
+ /*
+ * I know we drop the packet here, but that's considered success in
+ * this case
+ */
kfree_skb(skb);
- return 0;
+ return NET_RX_SUCCESS;
}
static int dn_route_ptp_hello(struct sk_buff *skb)
{
dn_dev_hello(skb);
dn_neigh_pointopoint_hello(skb);
- return 0;
+ return NET_RX_SUCCESS;
}
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
struct dn_skb_cb *cb;
unsigned char flags = 0;
- int padlen = 0;
__u16 len = dn_ntohs(*(__u16 *)skb->data);
struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
+ unsigned char padlen = 0;
if (dn == NULL)
goto dump_it;
@@ -404,7 +520,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
flags = *skb->data;
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
cb->stamp = jiffies;
cb->iif = dev->ifindex;
@@ -448,20 +564,16 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
switch(flags & DN_RT_CNTL_MSK) {
case DN_RT_PKT_HELO:
- NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
- goto out;
+ return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
case DN_RT_PKT_L1RT:
case DN_RT_PKT_L2RT:
- NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
- goto out;
+ return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
case DN_RT_PKT_ERTH:
- NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
- goto out;
+ return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
case DN_RT_PKT_EEDH:
- NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
- goto out;
+ return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
}
} else {
if (dn->parms.state != DN_DEV_S_RU)
@@ -480,7 +592,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
dump_it:
kfree_skb(skb);
out:
- return 0;
+ return NET_RX_DROP;
}
static int dn_output(struct sk_buff *skb)
@@ -488,7 +600,7 @@ static int dn_output(struct sk_buff *skb)
struct dst_entry *dst = skb->dst;
struct dn_route *rt = (struct dn_route *)dst;
struct net_device *dev = dst->dev;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct neighbour *neigh;
int err = -EINVAL;
@@ -524,7 +636,7 @@ error:
#ifdef CONFIG_DECNET_ROUTER
static int dn_forward(struct sk_buff *skb)
{
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct dst_entry *dst = skb->dst;
struct net_device *dev = skb->dev;
struct neighbour *neigh;
@@ -536,7 +648,7 @@ static int dn_forward(struct sk_buff *skb)
/*
* Hop count exceeded.
*/
- err = 0;
+ err = NET_RX_DROP;
if (++cb->hops > 30)
goto drop;
@@ -573,7 +685,7 @@ drop:
static int dn_blackhole(struct sk_buff *skb)
{
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
/*
@@ -583,7 +695,7 @@ static int dn_blackhole(struct sk_buff *skb)
static int dn_rt_bug(struct sk_buff *skb)
{
if (net_ratelimit()) {
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
cb->src, cb->dst);
@@ -591,7 +703,7 @@ static int dn_rt_bug(struct sk_buff *skb)
kfree_skb(skb);
- return -EINVAL;
+ return NET_RX_BAD;
}
static int dn_route_output_slow(struct dst_entry **pprt, dn_address dst, dn_address src, int flags)
@@ -732,7 +844,7 @@ int dn_route_output(struct dst_entry **pprt, dn_address dst, dn_address src, int
static int dn_route_input_slow(struct sk_buff *skb)
{
struct dn_route *rt = NULL;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct net_device *dev = skb->dev;
struct dn_dev *dn_db;
struct neighbour *neigh = NULL;
@@ -880,7 +992,7 @@ add_entry:
int dn_route_input(struct sk_buff *skb)
{
struct dn_route *rt;
- struct dn_skb_cb *cb = (struct dn_skb_cb *)skb->cb;
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned hash = dn_hash(cb->src, cb->dst);
if (skb->dst)
@@ -964,7 +1076,7 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
if (skb == NULL)
return -ENOBUFS;
skb->mac.raw = skb->data;
- cb = (struct dn_skb_cb *)skb->cb;
+ cb = DN_SKB_CB(skb);
if (rta[RTA_SRC-1])
memcpy(&src, RTA_DATA(rta[RTA_SRC-1]), 2);
@@ -1185,8 +1297,6 @@ void __exit dn_route_cleanup(void)
del_timer(&dn_route_timer);
dn_run_flush(0);
-#ifdef CONFIG_PROC_FS
proc_net_remove("decnet_cache");
-#endif /* CONFIG_PROC_FS */
}
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 133591f0b..32adfecba 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -57,7 +57,12 @@ struct dn_fib_rule
int r_dead;
};
-static struct dn_fib_rule default_rule = { NULL, ATOMIC_INIT(2), 0x7fff, DN_DEFAULT_TABLE, RTN_UNICAST };
+static struct dn_fib_rule default_rule = {
+ r_clntref: ATOMIC_INIT(2),
+ r_preference: 0x7fff,
+ r_table: DN_DEFAULT_TABLE,
+ r_action: RTN_UNICAST
+};
static struct dn_fib_rule *dn_fib_rules = &default_rule;
static rwlock_t dn_fib_rules_lock = RW_LOCK_UNLOCKED;
@@ -291,9 +296,7 @@ static int dn_fib_rules_event(struct notifier_block *this, unsigned long event,
static struct notifier_block dn_fib_rules_notifier = {
- dn_fib_rules_event,
- NULL,
- 0
+ notifier_call: dn_fib_rules_event,
};
#ifdef CONFIG_RTNETLINK
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index ce7a3ac06..bc21acb4b 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -78,7 +78,7 @@ static rwlock_t dn_fib_tables_lock = RW_LOCK_UNLOCKED;
static struct dn_fib_table *dn_fib_tables[DN_NUM_TABLES + 1];
static kmem_cache_t *dn_hash_kmem;
-static int dn_fib_hash_zombies = 0;
+static int dn_fib_hash_zombies;
static __inline__ dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index bbba58b02..41a4aa602 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -52,7 +52,7 @@ void dn_stop_slow_timer(struct sock *sk)
static void dn_slow_timer(unsigned long arg)
{
struct sock *sk = (struct sock *)arg;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
sock_hold(sk);
bh_lock_sock(sk);
@@ -112,7 +112,7 @@ out:
static void dn_fast_timer(unsigned long arg)
{
struct sock *sk = (struct sock *)arg;
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
bh_lock_sock(sk);
if (sk->lock.users != 0) {
@@ -131,7 +131,7 @@ out:
void dn_start_fast_timer(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (!scp->delack_pending) {
scp->delack_pending = 1;
@@ -145,7 +145,7 @@ void dn_start_fast_timer(struct sock *sk)
void dn_stop_fast_timer(struct sock *sk)
{
- struct dn_scp *scp = &sk->protinfo.dn;
+ struct dn_scp *scp = DN_SK(sk);
if (scp->delack_pending) {
scp->delack_pending = 0;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1d8002bdd..81fae9233 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -504,8 +504,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
im->timer.function=&igmp_timer_expire;
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
im->reporter = 0;
- im->loaded = 0;
#endif
+ im->loaded = 0;
write_lock_bh(&in_dev->lock);
im->next=in_dev->mc_list;
in_dev->mc_list=im;
diff --git a/net/ipv4/netfilter/Config.in b/net/ipv4/netfilter/Config.in
index 406d2ea3d..5887658fb 100644
--- a/net/ipv4/netfilter/Config.in
+++ b/net/ipv4/netfilter/Config.in
@@ -37,11 +37,20 @@ if [ "$CONFIG_IP_NF_IPTABLES" != "n" ]; then
fi
if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then
- dep_tristate ' Full NAT' CONFIG_IP_NF_NAT $CONFIG_IP_NF_IPTABLES
+ dep_tristate ' Full NAT' CONFIG_IP_NF_NAT $CONFIG_IP_NF_IPTABLES $CONFIG_IP_NF_CONNTRACK
if [ "$CONFIG_IP_NF_NAT" != "n" ]; then
define_bool CONFIG_IP_NF_NAT_NEEDED y
dep_tristate ' MASQUERADE target support' CONFIG_IP_NF_TARGET_MASQUERADE $CONFIG_IP_NF_NAT
dep_tristate ' REDIRECT target support' CONFIG_IP_NF_TARGET_REDIRECT $CONFIG_IP_NF_NAT
+ # If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
+ # or $CONFIG_IP_NF_FTP (m or y), whichever is weaker. Argh.
+ if [ "$CONFIG_IP_NF_FTP" = "m" ]; then
+ define_tristate CONFIG_IP_NF_NAT_FTP m
+ else
+ if [ "$CONFIG_IP_NF_FTP" = "y" ]; then
+ define_tristate CONFIG_IP_NF_NAT_FTP $CONFIG_IP_NF_NAT
+ fi
+ fi
fi
fi
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 995860767..c40caa75e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
# NAT helpers
-obj-$(CONFIG_IP_NF_FTP) += ip_nat_ftp.o
+obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 2e4dd82ee..bc7e64c8b 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -882,10 +882,15 @@ ip_ct_gather_frags(struct sk_buff *skb)
#ifdef CONFIG_NETFILTER_DEBUG
unsigned int olddebug = skb->nf_debug;
#endif
- if (sk) sock_hold(sk);
+ if (sk) {
+ sock_hold(sk);
+ skb_orphan(skb);
+ }
+
local_bh_disable();
skb = ip_defrag(skb);
- local_bh_enable();
+ local_bh_enable();
+
if (!skb) {
if (sk) sock_put(sk);
return skb;
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 9ba62dc84..cc19e1f0b 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -438,8 +438,27 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
conntrack));
ret = 1;
goto clear_fulls;
+ } else if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) {
+ /* Try implicit source NAT; protocol
+ may be able to play with ports to
+ make it unique. */
+ struct ip_nat_range r
+ = { IP_NAT_RANGE_MAP_IPS,
+ tuple->src.ip, tuple->src.ip,
+ { 0 }, { 0 } };
+ DEBUGP("Trying implicit mapping\n");
+ if (proto->unique_tuple(tuple, &r,
+ IP_NAT_MANIP_SRC,
+ conntrack)) {
+ /* Must be unique. */
+ IP_NF_ASSERT(!ip_nat_used_tuple
+ (tuple, conntrack));
+ ret = 1;
+ goto clear_fulls;
+ }
}
- DEBUGP("Protocol can't get unique tuple.\n");
+ DEBUGP("Protocol can't get unique tuple %u.\n",
+ hooknum);
}
/* Eliminate that from range, and try again. */
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 99164a7a0..f2a19702d 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -1,5 +1,6 @@
/* Masquerade. Simple mapping which alters range to a local IP address
(depending on route). */
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/timer.h>
@@ -68,6 +69,7 @@ masquerade_target(struct sk_buff **pskb,
struct ip_nat_multi_range newrange;
u_int32_t newsrc;
struct rtable *rt;
+ struct rt_key key;
IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
@@ -82,10 +84,14 @@ masquerade_target(struct sk_buff **pskb,
mr = targinfo;
- if (ip_route_output(&rt, (*pskb)->nh.iph->daddr,
- 0,
- RT_TOS((*pskb)->nh.iph->tos)|RTO_CONN,
- out->ifindex) != 0) {
+ key.dst = (*pskb)->nh.iph->daddr;
+ key.src = 0; /* Unknown: that's what we're trying to establish */
+ key.tos = RT_TOS((*pskb)->nh.iph->tos)|RTO_CONN;
+ key.oif = out->ifindex;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ key.fwmark = (*pskb)->nfmark;
+#endif
+ if (ip_route_output_key(&rt, &key) != 0) {
/* Shouldn't happen */
printk("MASQUERADE: No route: Rusty's brain broke!\n");
return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 9c1088e76..cc5ffbc4a 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -167,27 +167,9 @@ static unsigned int reject(struct sk_buff **pskb,
case IPT_ICMP_HOST_PROHIBITED:
icmp_send(*pskb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
break;
- case IPT_ICMP_ECHOREPLY: {
- struct icmphdr *icmph = (struct icmphdr *)
- ((u_int32_t *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl);
- unsigned int datalen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
-
- /* Not non-head frags, or truncated */
- if (((ntohs((*pskb)->nh.iph->frag_off) & IP_OFFSET) == 0)
- && datalen >= 4) {
- /* Usually I don't like cut & pasting code,
- but dammit, my party is starting in 45
- mins! --RR */
- struct icmp_bxm icmp_param;
-
- icmp_param.icmph=*icmph;
- icmp_param.icmph.type=ICMP_ECHOREPLY;
- icmp_param.data_ptr=(icmph+1);
- icmp_param.data_len=datalen;
- icmp_reply(&icmp_param, *pskb);
- }
- }
- break;
+ case IPT_ICMP_ECHOREPLY:
+ printk("REJECT: ECHOREPLY no longer supported.\n");
+ break;
case IPT_TCP_RESET:
send_reset(*pskb, hooknum == NF_IP_LOCAL_IN);
break;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index c52ada64e..60d4698fb 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -53,7 +53,7 @@ static struct
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
- { { { { sizeof(struct ipt_standard_target), "" } }, { } },
+ { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_OUT */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
@@ -61,7 +61,7 @@ static struct
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
- { { { { sizeof(struct ipt_standard_target), "" } }, { } },
+ { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } }
},
/* ERROR */
@@ -70,7 +70,7 @@ static struct
sizeof(struct ipt_entry),
sizeof(struct ipt_error),
0, { 0, 0 }, { } },
- { { { { sizeof(struct ipt_error_target), IPT_ERROR_TARGET } },
+ { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
{ } },
"ERROR"
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b370fcdf9..6660e0f72 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -954,7 +954,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
*/
skb = sk->write_queue.prev;
if (tp->send_head &&
- (mss_now - skb->len) > 0) {
+ (mss_now > skb->len)) {
copy = skb->len;
if (skb_tailroom(skb) > 0) {
int last_byte_was_odd = (copy % 4);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e3eab087..a3f83272b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1705,7 +1705,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
if ((__s32)when < (__s32)tp->rttvar)
when = tp->rttvar;
- tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
+ tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX));
}
}
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index aec5db879..fcc9bc058 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IP6_NF_MATCH_MARK) += ip6t_mark.o
obj-$(CONFIG_IP6_NF_MATCH_MAC) += ip6t_mac.o
obj-$(CONFIG_IP6_NF_MATCH_MULTIPORT) += ip6t_multiport.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
+obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
include $(TOPDIR)/Rules.make
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 57430f29f..659bb3a1e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/icmp.h>
+#include <linux/icmpv6.h>
#include <net/ip.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
@@ -1642,7 +1642,7 @@ udp_checkentry(const char *tablename,
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
-icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
+icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
u_int8_t type, u_int8_t code,
int invert)
{
@@ -1651,7 +1651,7 @@ icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
}
static int
-icmp_match(const struct sk_buff *skb,
+icmp6_match(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const void *matchinfo,
@@ -1660,7 +1660,7 @@ icmp_match(const struct sk_buff *skb,
u_int16_t datalen,
int *hotdrop)
{
- const struct icmphdr *icmp = hdr;
+ const struct icmp6hdr *icmp = hdr;
const struct ip6t_icmp *icmpinfo = matchinfo;
if (offset == 0 && datalen < 2) {
@@ -1673,16 +1673,16 @@ icmp_match(const struct sk_buff *skb,
/* Must not be a fragment. */
return !offset
- && icmp_type_code_match(icmpinfo->type,
+ && icmp6_type_code_match(icmpinfo->type,
icmpinfo->code[0],
icmpinfo->code[1],
- icmp->type, icmp->code,
+ icmp->icmp6_type, icmp->icmp6_code,
!!(icmpinfo->invflags&IP6T_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
-icmp_checkentry(const char *tablename,
+icmp6_checkentry(const char *tablename,
const struct ip6t_ip6 *ipv6,
void *matchinfo,
unsigned int matchsize,
@@ -1691,7 +1691,7 @@ icmp_checkentry(const char *tablename,
const struct ip6t_icmp *icmpinfo = matchinfo;
/* Must specify proto == ICMP, and no unknown invflags */
- return ipv6->proto == IPPROTO_ICMP
+ return ipv6->proto == IPPROTO_ICMPV6
&& !(ipv6->invflags & IP6T_INV_PROTO)
&& matchsize == IP6T_ALIGN(sizeof(struct ip6t_icmp))
&& !(icmpinfo->invflags & ~IP6T_ICMP_INV);
@@ -1711,8 +1711,8 @@ static struct ip6t_match tcp_matchstruct
= { { NULL, NULL }, "tcp", &tcp_match, &tcp_checkentry, NULL };
static struct ip6t_match udp_matchstruct
= { { NULL, NULL }, "udp", &udp_match, &udp_checkentry, NULL };
-static struct ip6t_match icmp_matchstruct
-= { { NULL, NULL }, "icmp", &icmp_match, &icmp_checkentry, NULL };
+static struct ip6t_match icmp6_matchstruct
+= { { NULL, NULL }, "icmp6", &icmp6_match, &icmp6_checkentry, NULL };
#ifdef CONFIG_PROC_FS
static inline int print_name(const struct ip6t_table *t,
@@ -1761,7 +1761,7 @@ static int __init init(void)
list_append(&ip6t_target, &ip6t_error_target);
list_append(&ip6t_match, &tcp_matchstruct);
list_append(&ip6t_match, &udp_matchstruct);
- list_append(&ip6t_match, &icmp_matchstruct);
+ list_append(&ip6t_match, &icmp6_matchstruct);
up(&ip6t_mutex);
/* Register setsockopt */
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
index dd8bb3226..08df336e8 100644
--- a/net/ipv6/netfilter/ip6t_MARK.c
+++ b/net/ipv6/netfilter/ip6t_MARK.c
@@ -4,8 +4,8 @@
#include <linux/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_MARK.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_MARK.h>
static unsigned int
target(struct sk_buff **pskb,
@@ -15,26 +15,26 @@ target(struct sk_buff **pskb,
const void *targinfo,
void *userinfo)
{
- const struct ipt_mark_target_info *markinfo = targinfo;
+ const struct ip6t_mark_target_info *markinfo = targinfo;
if((*pskb)->nfmark != markinfo->mark) {
(*pskb)->nfmark = markinfo->mark;
(*pskb)->nfcache |= NFC_ALTERED;
}
- return IPT_CONTINUE;
+ return IP6T_CONTINUE;
}
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const struct ip6t_entry *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) {
+ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_mark_target_info))) {
printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
targinfosize,
- IPT_ALIGN(sizeof(struct ipt_mark_target_info)));
+ IP6T_ALIGN(sizeof(struct ip6t_mark_target_info)));
return 0;
}
@@ -46,12 +46,13 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_mark_reg
+static struct ip6t_target ip6t_mark_reg
= { { NULL, NULL }, "MARK", target, checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
- if (ipt_register_target(&ipt_mark_reg))
+ printk(KERN_DEBUG "registreing ipv6 mark target\n");
+ if (ip6t_register_target(&ip6t_mark_reg))
return -EINVAL;
return 0;
@@ -59,7 +60,7 @@ static int __init init(void)
static void __exit fini(void)
{
- ipt_unregister_target(&ipt_mark_reg);
+ ip6t_unregister_target(&ip6t_mark_reg);
}
module_init(init);
diff --git a/net/ipv6/netfilter/ip6t_mark.c b/net/ipv6/netfilter/ip6t_mark.c
index babe202a4..9a78b1ca2 100644
--- a/net/ipv6/netfilter/ip6t_mark.c
+++ b/net/ipv6/netfilter/ip6t_mark.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ipt_mark.h>
+#include <linux/netfilter_ipv6/ip6t_mark.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
static int
@@ -15,7 +15,7 @@ match(const struct sk_buff *skb,
u_int16_t datalen,
int *hotdrop)
{
- const struct ipt_mark_info *info = matchinfo;
+ const struct ip6t_mark_info *info = matchinfo;
return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
}
@@ -27,7 +27,7 @@ checkentry(const char *tablename,
unsigned int matchsize,
unsigned int hook_mask)
{
- if (matchsize != IP6T_ALIGN(sizeof(struct ipt_mark_info)))
+ if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_mark_info)))
return 0;
return 1;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
new file mode 100644
index 000000000..612c292c4
--- /dev/null
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -0,0 +1,189 @@
+/*
+ * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6
+ *
+ * Copyright (C) 2000 by Harald Welte <laforge@gnumonks.org>
+ */
+#include <linux/module.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+#define MANGLE_VALID_HOOKS ((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT))
+
+#if 1
+#define DEBUGP(x, args...) printk(KERN_DEBUG x, ## args)
+#else
+#define DEBUGP(x, args...)
+#endif
+
+/* Standard entry. */
+struct ip6t_standard
+{
+ struct ip6t_entry entry;
+ struct ip6t_standard_target target;
+};
+
+struct ip6t_error_target
+{
+ struct ip6t_entry_target target;
+ char errorname[IP6T_FUNCTION_MAXNAMELEN];
+};
+
+struct ip6t_error
+{
+ struct ip6t_entry entry;
+ struct ip6t_error_target target;
+};
+
+static struct
+{
+ struct ip6t_replace repl;
+ struct ip6t_standard entries[2];
+ struct ip6t_error term;
+} initial_table __initdata
+= { { "mangle", MANGLE_VALID_HOOKS, 3,
+ sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
+ { [NF_IP6_PRE_ROUTING] 0,
+ [NF_IP6_LOCAL_OUT] sizeof(struct ip6t_standard) },
+ { [NF_IP6_PRE_ROUTING] 0,
+ [NF_IP6_LOCAL_OUT] sizeof(struct ip6t_standard) },
+ 0, NULL, { } },
+ {
+ /* PRE_ROUTING */
+ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+ 0,
+ sizeof(struct ip6t_entry),
+ sizeof(struct ip6t_standard),
+ 0, { 0, 0 }, { } },
+ { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
+ -NF_ACCEPT - 1 } },
+ /* LOCAL_OUT */
+ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+ 0,
+ sizeof(struct ip6t_entry),
+ sizeof(struct ip6t_standard),
+ 0, { 0, 0 }, { } },
+ { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
+ -NF_ACCEPT - 1 } }
+ },
+ /* ERROR */
+ { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+ 0,
+ sizeof(struct ip6t_entry),
+ sizeof(struct ip6t_error),
+ 0, { 0, 0 }, { } },
+ { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
+ { } },
+ "ERROR"
+ }
+ }
+};
+
+static struct ip6t_table packet_mangler
+= { { NULL, NULL }, "mangle", &initial_table.repl,
+ MANGLE_VALID_HOOKS, RW_LOCK_UNLOCKED, NULL };
+
+/* The work comes in here from netfilter.c. */
+static unsigned int
+ip6t_hook(unsigned int hook,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ip6t_do_table(pskb, hook, in, out, &packet_mangler, NULL);
+}
+
+static unsigned int
+ip6t_local_out_hook(unsigned int hook,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+
+ unsigned long nfmark;
+ unsigned int ret;
+ struct in6_addr saddr, daddr;
+ u_int8_t hop_limit;
+ u_int32_t flowlabel;
+
+#if 0
+ /* root is playing with raw sockets. */
+ if ((*pskb)->len < sizeof(struct iphdr)
+ || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+ if (net_ratelimit())
+ printk("ip6t_hook: happy cracking.\n");
+ return NF_ACCEPT;
+ }
+#endif
+
+ /* save source/dest address, nfmark, hoplimit, flowlabel, priority, */
+ memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr));
+ memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr));
+ nfmark = (*pskb)->nfmark;
+ hop_limit = (*pskb)->nh.ipv6h->hop_limit;
+
+ /* flowlabel and prio (includes version, which shouldn't change either) */
+ flowlabel = (u_int32_t) (*pskb)->nh.ipv6h;
+
+ ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler, NULL);
+
+ if (ret != NF_DROP && ret != NF_STOLEN
+ && (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr))
+ || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr))
+ || (*pskb)->nfmark != nfmark
+ || (*pskb)->nh.ipv6h->hop_limit != hop_limit)) {
+
+ /* something which could affect routing has changed */
+
+ DEBUGP("ip6table_mangle: we'd need to re-route a packet\n");
+ }
+
+ return ret;
+}
+
+static struct nf_hook_ops ip6t_ops[]
+= { { { NULL, NULL }, ip6t_hook, PF_INET6, NF_IP6_PRE_ROUTING, NF_IP6_PRI_MANGLE },
+ { { NULL, NULL }, ip6t_local_out_hook, PF_INET6, NF_IP6_LOCAL_OUT,
+ NF_IP6_PRI_MANGLE }
+};
+
+static int __init init(void)
+{
+ int ret;
+
+ /* Register table */
+ ret = ip6t_register_table(&packet_mangler);
+ if (ret < 0)
+ return ret;
+
+ /* Register hooks */
+ ret = nf_register_hook(&ip6t_ops[0]);
+ if (ret < 0)
+ goto cleanup_table;
+
+ ret = nf_register_hook(&ip6t_ops[1]);
+ if (ret < 0)
+ goto cleanup_hook0;
+
+ return ret;
+
+ cleanup_hook0:
+ nf_unregister_hook(&ip6t_ops[0]);
+ cleanup_table:
+ ip6t_unregister_table(&packet_mangler);
+
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(ip6t_ops)/sizeof(struct nf_hook_ops); i++)
+ nf_unregister_hook(&ip6t_ops[i]);
+
+ ip6t_unregister_table(&packet_mangler);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 26cc63348..fb73651bf 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -61,6 +61,9 @@
* suggestions and guidance.
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>,
* November, 2000
+ * Revision 043: Shared SKBs, don't mangle packets, some cleanups
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br>,
+ * December, 2000
*
* Protect the module by a MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT
* pair. Also, now usage count is managed this way
@@ -140,28 +143,23 @@ static ipx_interface *ipx_internal_net;
atomic_t ipx_sock_nr;
#endif
-static int ipxcfg_set_auto_create(char val)
+static void ipxcfg_set_auto_create(char val)
{
- if(ipxcfg_auto_create_interfaces != val)
- {
- if(val)
+ if (ipxcfg_auto_create_interfaces != val) {
+ if (val)
MOD_INC_USE_COUNT;
else
MOD_DEC_USE_COUNT;
ipxcfg_auto_create_interfaces = val;
}
-
- return (0);
}
-static int ipxcfg_set_auto_select(char val)
+static void ipxcfg_set_auto_select(char val)
{
ipxcfg_auto_select_primary = val;
- if(val && (ipx_primary_net == NULL))
+ if (val && !ipx_primary_net)
ipx_primary_net = ipx_interfaces;
-
- return (0);
}
static int ipxcfg_get_config_data(ipx_config_data *arg)
@@ -171,7 +169,7 @@ static int ipxcfg_get_config_data(ipx_config_data *arg)
vals.ipxcfg_auto_create_interfaces = ipxcfg_auto_create_interfaces;
vals.ipxcfg_auto_select_primary = ipxcfg_auto_select_primary;
- return (copy_to_user(arg, &vals, sizeof(vals)) ? -EFAULT : 0);
+ return copy_to_user(arg, &vals, sizeof(vals)) ? -EFAULT : 0;
}
/**************************************************************************\
@@ -213,22 +211,19 @@ void ipx_remove_socket(struct sock *sk)
/* Determine interface with which socket is associated */
intrfc = sk->protinfo.af_ipx.intrfc;
- if(intrfc == NULL)
+ if (!intrfc)
return;
ipxitf_hold(intrfc);
spin_lock_bh(&intrfc->if_sklist_lock);
s = intrfc->if_sklist;
- if(s == sk)
- {
+ if (s == sk) {
intrfc->if_sklist = s->next;
goto out;
}
- while(s && s->next)
- {
- if(s->next == sk)
- {
+ while (s && s->next) {
+ if (s->next == sk) {
s->next = sk->next;
goto out;
}
@@ -264,7 +259,7 @@ static ipx_route * ipxrtr_lookup(__u32);
static void ipxitf_clear_primary_net(void)
{
- if(ipxcfg_auto_select_primary && (ipx_interfaces != NULL))
+ if (ipxcfg_auto_select_primary && ipx_interfaces)
ipx_primary_net = ipx_interfaces;
else
ipx_primary_net = NULL;
@@ -273,14 +268,14 @@ static void ipxitf_clear_primary_net(void)
static ipx_interface *__ipxitf_find_using_phys(struct net_device *dev,
unsigned short datalink)
{
- ipx_interface *i;
+ ipx_interface *i;
- for(i = ipx_interfaces;
- i && ((i->if_dev != dev) || (i->if_dlink_type != datalink));
+ for (i = ipx_interfaces;
+ i && (i->if_dev != dev || i->if_dlink_type != datalink);
i = i->if_next)
;
- return (i);
+ return i;
}
static ipx_interface *ipxitf_find_using_phys(struct net_device *dev,
@@ -298,12 +293,12 @@ static ipx_interface *ipxitf_find_using_phys(struct net_device *dev,
static ipx_interface *ipxitf_find_using_net(__u32 net)
{
- ipx_interface *i;
+ ipx_interface *i;
spin_lock_bh(&ipx_interfaces_lock);
- if(net)
- for(i = ipx_interfaces; i && (i->if_netnum != net);
- i = i->if_next)
+ if (net)
+ for (i = ipx_interfaces; i && i->if_netnum != net;
+ i = i->if_next)
;
else
i = ipx_primary_net;
@@ -311,7 +306,7 @@ static ipx_interface *ipxitf_find_using_net(__u32 net)
ipxitf_hold(i);
spin_unlock_bh(&ipx_interfaces_lock);
- return (i);
+ return i;
}
/* Sockets are bound to a particular IPX interface. */
@@ -324,11 +319,10 @@ static void ipxitf_insert_socket(ipx_interface *intrfc, struct sock *sk)
spin_lock_bh(&intrfc->if_sklist_lock);
sk->protinfo.af_ipx.intrfc = intrfc;
sk->next = NULL;
- if(intrfc->if_sklist == NULL)
+ if (!intrfc->if_sklist)
intrfc->if_sklist = sk;
- else
- {
- for (s = intrfc->if_sklist; s->next != NULL; s = s->next)
+ else {
+ for (s = intrfc->if_sklist; s->next; s = s->next)
;
s->next = sk;
}
@@ -337,19 +331,21 @@ static void ipxitf_insert_socket(ipx_interface *intrfc, struct sock *sk)
}
/* caller must hold intrfc->if_sklist_lock */
-static struct sock *__ipxitf_find_socket(ipx_interface *intrfc, unsigned short port)
+static struct sock *__ipxitf_find_socket(ipx_interface *intrfc,
+ unsigned short port)
{
struct sock *s;
- for(s = intrfc->if_sklist;
- (s != NULL) && (s->protinfo.af_ipx.port != port);
- s = s->next)
+ for (s = intrfc->if_sklist;
+ s && s->protinfo.af_ipx.port != port;
+ s = s->next)
;
return s;
}
/* caller must hold a reference to intrfc */
-static struct sock *ipxitf_find_socket(ipx_interface *intrfc, unsigned short port)
+static struct sock *ipxitf_find_socket(ipx_interface *intrfc,
+ unsigned short port)
{
struct sock *s;
@@ -359,11 +355,10 @@ static struct sock *ipxitf_find_socket(ipx_interface *intrfc, unsigned short por
sock_hold(s);
spin_unlock_bh(&intrfc->if_sklist_lock);
- return (s);
+ return s;
}
#ifdef CONFIG_IPX_INTERN
-
static struct sock *ipxitf_find_internal_socket(ipx_interface *intrfc,
unsigned char *node, unsigned short port)
{
@@ -373,19 +368,16 @@ static struct sock *ipxitf_find_internal_socket(ipx_interface *intrfc,
spin_lock_bh(&intrfc->if_sklist_lock);
s = intrfc->if_sklist;
- while(s != NULL)
- {
- if((s->protinfo.af_ipx.port == port)
- && (memcmp(node, s->protinfo.af_ipx.node, IPX_NODE_LEN) == 0))
- {
+ while (s) {
+ if (s->protinfo.af_ipx.port == port &&
+ !memcmp(node, s->protinfo.af_ipx.node, IPX_NODE_LEN))
break;
- }
s = s->next;
}
spin_unlock_bh(&intrfc->if_sklist_lock);
ipxitf_put(intrfc);
- return (s);
+ return s;
}
#endif
@@ -401,8 +393,7 @@ static void __ipxitf_down(ipx_interface *intrfc)
spin_lock_bh(&intrfc->if_sklist_lock);
/* error sockets */
- for(s = intrfc->if_sklist; s != NULL; )
- {
+ for (s = intrfc->if_sklist; s; ) {
s->err = ENOLINK;
s->error_report(s);
s->protinfo.af_ipx.intrfc = NULL;
@@ -416,30 +407,27 @@ static void __ipxitf_down(ipx_interface *intrfc)
spin_unlock_bh(&intrfc->if_sklist_lock);
/* remove this interface from list */
- if(intrfc == ipx_interfaces)
+ if (intrfc == ipx_interfaces)
ipx_interfaces = intrfc->if_next;
- else
- {
- for(i = ipx_interfaces;
- (i != NULL) && (i->if_next != intrfc);
- i = i->if_next)
+ else {
+ for (i = ipx_interfaces;
+ i && i->if_next != intrfc;
+ i = i->if_next)
;
- if((i != NULL) && (i->if_next == intrfc))
+ if (i && i->if_next == intrfc)
i->if_next = intrfc->if_next;
}
/* remove this interface from *special* networks */
- if(intrfc == ipx_primary_net)
+ if (intrfc == ipx_primary_net)
ipxitf_clear_primary_net();
- if(intrfc == ipx_internal_net)
+ if (intrfc == ipx_internal_net)
ipx_internal_net = NULL;
if (intrfc->if_dev)
dev_put(intrfc->if_dev);
kfree(intrfc);
MOD_DEC_USE_COUNT;
-
- return;
}
static void ipxitf_down(ipx_interface *intrfc)
@@ -449,36 +437,31 @@ static void ipxitf_down(ipx_interface *intrfc)
spin_unlock_bh(&ipx_interfaces_lock);
}
-static int ipxitf_device_event(struct notifier_block *notifier, unsigned long event, void *ptr)
+static int ipxitf_device_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
ipx_interface *i, *tmp;
- if(event != NETDEV_DOWN)
+ if (event != NETDEV_DOWN)
return NOTIFY_DONE;
spin_lock_bh(&ipx_interfaces_lock);
- for(i = ipx_interfaces; i != NULL;)
- {
+ for (i = ipx_interfaces; i;) {
tmp = i->if_next;
- if(i->if_dev == dev)
+ if (i->if_dev == dev)
__ipxitf_put(i);
i = tmp;
}
spin_unlock_bh(&ipx_interfaces_lock);
-
- return (NOTIFY_DONE);
+ return NOTIFY_DONE;
}
-static int ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb)
+static void ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb)
{
- int retval;
-
- if((retval = sock_queue_rcv_skb(sock, skb)) < 0)
+ if (sock_queue_rcv_skb(sock, skb) < 0)
kfree_skb(skb);
-
- return (retval);
}
/*
@@ -500,57 +483,50 @@ static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int c
spin_lock_bh(&intrfc->if_sklist_lock);
s = intrfc->if_sklist;
- while(s != NULL)
- {
- if((s->protinfo.af_ipx.port == ipx->ipx_dest.sock)
- && (is_broadcast
- || (memcmp(ipx->ipx_dest.node, s->protinfo.af_ipx.node,
- IPX_NODE_LEN) == 0)))
- {
+ while (s) {
+ if (s->protinfo.af_ipx.port == ipx->ipx_dest.sock &&
+ (is_broadcast || !memcmp(ipx->ipx_dest.node,
+ s->protinfo.af_ipx.node,
+ IPX_NODE_LEN))) {
/* We found a socket to which to send */
struct sk_buff *skb1;
- if(copy != 0)
- {
+ if (copy) {
skb1 = skb_clone(skb, GFP_ATOMIC);
ret = -ENOMEM;
- if (skb1 == NULL)
+ if (!skb1)
goto out;
- }
- else
- {
+ } else {
skb1 = skb;
copy = 1; /* skb may only be used once */
}
ipxitf_def_skb_handler(s, skb1);
/* On an external interface, one socket can listen */
- if(intrfc != ipx_internal_net)
+ if (intrfc != ipx_internal_net)
break;
}
s = s->next;
}
/* skb was solely for us, and we did not make a copy, so free it. */
- if(copy == 0)
+ if (!copy)
kfree_skb(skb);
ret = 0;
out: spin_unlock_bh(&intrfc->if_sklist_lock);
return ret;
}
-
#else
-
-static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy)
+static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb,
+ int copy)
{
struct ipxhdr *ipx = skb->nh.ipxh;
struct sock *sock1 = NULL, *sock2 = NULL;
struct sk_buff *skb1 = NULL, *skb2 = NULL;
int ret;
- if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451)
- {
+ if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451) {
/*
* The packet's target is a NCP connection handler. We want to
* hand it to the correct socket directly within the kernel,
@@ -562,48 +538,34 @@ static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int c
* VERY fast as well.
*/
int connection = 0;
-
- if (*((char*)(ipx+1)) == 0x22 && *((char*)(ipx+1)+1) == 0x22)
- {
- /*
- * The packet is a NCP request
- */
- connection = ( ((int) *((char*)(ipx+1)+5)) << 8 )
- | (int) *((char*)(ipx+1)+3);
- }
- else if (*((char*)(ipx+1))== 0x77 && *((char*)(ipx+1)+1) == 0x77)
- {
- /*
- * The packet is a BURST packet
- */
- connection = ( ((int) *((char*)(ipx+1)+9)) << 8 )
- | (int) *((char*)(ipx+1)+8);
- }
-
- if (connection)
- {
- /*
- * Now we have to look for a special NCP connection handling
- * socket. Only these sockets have ipx_ncp_conn != 0, set
- * by SIOCIPXNCPCONN.
- */
+ u8 *ncphdr = (u8 *)(ipx + 1);
+
+ if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22)
+ /* The packet is a NCP request */
+ connection = (((int) *(ncphdr + 5)) << 8) |
+ (int) *(ncphdr+3);
+ else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77)
+ /* The packet is a BURST packet */
+ connection = (((int) *(ncphdr+9)) << 8) |
+ (int) *(ncphdr+8);
+
+ if (connection) {
+ /* Now we have to look for a special NCP connection
+ * handling socket. Only these sockets have
+ * ipx_ncp_conn != 0, set by SIOCIPXNCPCONN. */
spin_lock_bh(&intrfc->if_sklist_lock);
- for (sock1=intrfc->if_sklist;
- (sock1 != NULL) &&
- (sock1->protinfo.af_ipx.ipx_ncp_conn != connection);
- sock1=sock1->next);
+ for (sock1 = intrfc->if_sklist;
+ sock1 &&
+ sock1->protinfo.af_ipx.ipx_ncp_conn != connection;
+ sock1 = sock1->next);
if (sock1)
sock_hold(sock1);
spin_unlock_bh(&intrfc->if_sklist_lock);
}
}
- if (sock1 == NULL)
- {
- /* No special socket found, forward the packet the
- * normal way.
- */
+ if (!sock1)
+ /* No special socket found, forward the packet the normal way */
sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock);
- }
/*
* We need to check if there is a primary net and if
@@ -613,10 +575,8 @@ static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int c
* 0x456(Diagnostic).
*/
- if(ipx_primary_net && (intrfc != ipx_primary_net))
- {
- switch(ntohs(ipx->ipx_dest.sock))
- {
+ if (ipx_primary_net && intrfc != ipx_primary_net) {
+ switch (ntohs(ipx->ipx_dest.sock)) {
case 0x452:
case 0x453:
case 0x456:
@@ -637,11 +597,10 @@ static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int c
/*
* If there is nothing to do return. The kfree will cancel any charging.
*/
- if(sock1 == NULL && sock2 == NULL)
- {
- if(!copy)
+ if (!sock1 && !sock2) {
+ if (!copy)
kfree_skb(skb);
- return (0);
+ return 0;
}
/*
@@ -652,30 +611,30 @@ static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int c
* copies, we do as much as is possible.
*/
- if(copy)
+ if (copy)
skb1 = skb_clone(skb, GFP_ATOMIC);
else
skb1 = skb;
ret = -ENOMEM;
- if(skb1 == NULL)
+ if (!skb1)
goto out;
/* Do we need 2 SKBs? */
- if(sock1 && sock2)
+ if (sock1 && sock2)
skb2 = skb_clone(skb1, GFP_ATOMIC);
else
skb2 = skb1;
- if(sock1)
- (void) ipxitf_def_skb_handler(sock1, skb1);
+ if (sock1)
+ ipxitf_def_skb_handler(sock1, skb1);
ret = -ENOMEM;
- if(skb2 == NULL)
+ if (!skb2)
goto out;
- if(sock2)
- (void) ipxitf_def_skb_handler(sock2, skb2);
+ if (sock2)
+ ipxitf_def_skb_handler(sock2, skb2);
ret = 0;
out: if (sock1)
@@ -686,7 +645,8 @@ out: if (sock1)
}
#endif /* CONFIG_IPX_INTERN */
-static struct sk_buff *ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buff *skb)
+static struct sk_buff *ipxitf_adjust_skbuff(ipx_interface *intrfc,
+ struct sk_buff *skb)
{
struct sk_buff *skb2;
int in_offset = skb->h.raw - skb->head;
@@ -694,21 +654,21 @@ static struct sk_buff *ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buf
int len;
/* Hopefully, most cases */
- if(in_offset >= out_offset)
- return (skb);
+ if (in_offset >= out_offset)
+ return skb;
/* Need new SKB */
len = skb->len + out_offset;
skb2 = alloc_skb(len, GFP_ATOMIC);
- if(skb2 != NULL)
- {
+ if (skb2) {
skb_reserve(skb2, out_offset);
skb2->nh.raw =
skb2->h.raw = skb_put(skb2,skb->len);
memcpy(skb2->h.raw, skb->h.raw, skb->len);
+ memcpy(skb2->cb, skb->cb, sizeof(skb->cb));
}
kfree_skb(skb);
- return (skb2);
+ return skb2;
}
/* caller must hold a reference to intrfc */
@@ -716,6 +676,7 @@ static struct sk_buff *ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buf
static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node)
{
struct ipxhdr *ipx = skb->nh.ipxh;
+ struct ipx_cb *cb = (struct ipx_cb *) skb->cb;
struct net_device *dev = intrfc->if_dev;
struct datalink_proto *dl = intrfc->if_dlink;
char dest_node[IPX_NODE_LEN];
@@ -727,7 +688,7 @@ static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node)
* packet to avoid unnecessary copies.
*/
- if((dl == NULL) || (dev == NULL) || (dev->flags & IFF_LOOPBACK))
+ if (!dl || !dev || dev->flags & IFF_LOOPBACK)
send_to_wire = 0; /* No non looped */
/*
@@ -737,30 +698,27 @@ static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node)
* up clones.
*/
- if(ipx->ipx_dest.net == intrfc->if_netnum)
- {
+ if (cb->ipx_dest_net == intrfc->if_netnum) {
/*
* To our own node, loop and free the original.
* The internal net will receive on all node address.
*/
- if((intrfc == ipx_internal_net)
- || memcmp(intrfc->if_node, node, IPX_NODE_LEN) == 0)
- {
+ if (intrfc == ipx_internal_net ||
+ !memcmp(intrfc->if_node, node, IPX_NODE_LEN)) {
/* Don't charge sender */
skb_orphan(skb);
/* Will charge receiver */
- return (ipxitf_demux_socket(intrfc, skb, 0));
+ return ipxitf_demux_socket(intrfc, skb, 0);
}
/* Broadcast, loop and possibly keep to send on. */
- if(memcmp(ipx_broadcast_node, node, IPX_NODE_LEN) == 0)
- {
- if(!send_to_wire)
+ if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) {
+ if (!send_to_wire)
skb_orphan(skb);
ipxitf_demux_socket(intrfc, skb, send_to_wire);
- if(!send_to_wire)
- return (0);
+ if (!send_to_wire)
+ return 0;
}
}
@@ -769,36 +727,45 @@ static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node)
* We are still charging the sender. Which is right - the driver
* free will handle this fairly.
*/
- if(ipx->ipx_source.net != intrfc->if_netnum)
- {
+ if (cb->ipx_source_net != intrfc->if_netnum) {
/*
* Unshare the buffer before modifying the count in
* case its a flood or tcpdump
*/
skb = skb_unshare(skb, GFP_ATOMIC);
- if(!skb)
- return (0);
- if(++(ipx->ipx_tctrl) > ipxcfg_max_hops)
+ if (!skb)
+ return 0;
+ if (++(cb->ipx_tctrl) > ipxcfg_max_hops)
send_to_wire = 0;
}
- if(!send_to_wire)
- {
+ if (!send_to_wire) {
kfree_skb(skb);
- return (0);
+ return 0;
}
/* Determine the appropriate hardware address */
addr_len = dev->addr_len;
- if(memcmp(ipx_broadcast_node, node, IPX_NODE_LEN) == 0)
+ if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN))
memcpy(dest_node, dev->broadcast, addr_len);
else
memcpy(dest_node, &(node[IPX_NODE_LEN-addr_len]), addr_len);
/* Make any compensation for differing physical/data link size */
skb = ipxitf_adjust_skbuff(intrfc, skb);
- if(skb == NULL)
- return (0);
+ if (!skb)
+ return 0;
+
+ ipx->ipx_tctrl = cb->ipx_tctrl;
+ ipx->ipx_dest.net = cb->ipx_dest_net;
+ ipx->ipx_source.net = cb->ipx_source_net;
+ /* see if we need to include the netnum in the route list */
+ if (cb->last_hop_index >= 0) {
+ u32 *last_hop = (u32 *)(((u8 *) skb->data) +
+ sizeof(struct ipxhdr) + cb->last_hop_index *
+ sizeof(u32));
+ *last_hop = intrfc->if_netnum;
+ }
/* set up data link and physical headers */
skb->dev = dev;
@@ -807,15 +774,14 @@ static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node)
/* Send it out */
dev_queue_xmit(skb);
-
- return (0);
+ return 0;
}
static int ipxrtr_add_route(__u32, ipx_interface *, unsigned char *);
static int ipxitf_add_local_route(ipx_interface *intrfc)
{
- return (ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL));
+ return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL);
}
static const char * ipx_frame_name(unsigned short);
@@ -824,29 +790,26 @@ static const char * ipx_device_name(ipx_interface *);
static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb)
{
struct ipxhdr *ipx = skb->nh.ipxh;
- ipx_interface *i;
+ struct ipx_cb *cb = (struct ipx_cb *) skb->cb;
int ret = 0;
ipxitf_hold(intrfc);
/* See if we should update our network number */
- if(!intrfc->if_netnum /* net number of intrfc not known yet (== 0) */
- && (ipx->ipx_source.net == ipx->ipx_dest.net) /* intra packet */
- && ipx->ipx_source.net) /* source net number of packet != 0 */
- {
+ if (!intrfc->if_netnum && /* net number of intrfc not known yet */
+ cb->ipx_source_net == cb->ipx_dest_net && /* intra packet */
+ cb->ipx_source_net) {
+ ipx_interface *i = ipxitf_find_using_net(cb->ipx_source_net);
/* NB: NetWare servers lie about their hop count so we
* dropped the test based on it. This is the best way
* to determine this is a 0 hop count packet.
*/
- if((i=ipxitf_find_using_net(ipx->ipx_source.net)) == NULL)
- {
- intrfc->if_netnum = ipx->ipx_source.net;
- (void) ipxitf_add_local_route(intrfc);
- }
- else
- {
+ if (!i) {
+ intrfc->if_netnum = cb->ipx_source_net;
+ ipxitf_add_local_route(intrfc);
+ } else {
printk(KERN_WARNING "IPX: Network number collision %lx\n %s %s and %s %s\n",
- (long unsigned int) htonl(ipx->ipx_source.net),
+ (long unsigned int) htonl(cb->ipx_source_net),
ipx_device_name(i),
ipx_frame_name(i->if_dlink_type),
ipx_device_name(intrfc),
@@ -854,75 +817,63 @@ static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb)
ipxitf_put(i);
}
}
+
+ cb->last_hop_index = -1;
- if(ipx->ipx_type == IPX_TYPE_PPROP
- && ipx->ipx_tctrl < 8
- && skb->pkt_type != PACKET_OTHERHOST
- /* header + 8 network numbers */
- && ntohs(ipx->ipx_pktsize) >= sizeof(struct ipxhdr) + 8 * 4)
- {
+ if (ipx->ipx_type == IPX_TYPE_PPROP && cb->ipx_tctrl < 8 &&
+ skb->pkt_type != PACKET_OTHERHOST &&
+ /* header + 8 network numbers */
+ ntohs(ipx->ipx_pktsize) >= sizeof(struct ipxhdr) + 8 * 4) {
int i;
ipx_interface *ifcs;
struct sk_buff *skb2;
- __u32 *l;
- char *c;
-
- c = (char *) skb->data;
- c += sizeof(struct ipxhdr);
- l = (__u32 *) c;
-
- i = 0;
+ char *c = ((char *) skb->data) + sizeof(struct ipxhdr);
+ u32 *l = (u32 *) c;
/* Dump packet if already seen this net */
- for( ; i < ipx->ipx_tctrl; i++)
- if(*l++ == intrfc->if_netnum)
+ for (i = 0; i < cb->ipx_tctrl; i++)
+ if (*l++ == intrfc->if_netnum)
break;
- if(i == ipx->ipx_tctrl)
- {
+ if (i == cb->ipx_tctrl) {
/* < 8 hops && input itfc not in list */
- *l = intrfc->if_netnum; /* insert recvd netnum into list */
- ipx->ipx_tctrl++;
+ /* insert recvd netnum into list */
+ cb->last_hop_index = i;
+ cb->ipx_tctrl++;
/* xmit on all other interfaces... */
spin_lock_bh(&ipx_interfaces_lock);
- for(ifcs = ipx_interfaces; ifcs != NULL; ifcs = ifcs->if_next)
- {
+ for (ifcs = ipx_interfaces; ifcs;
+ ifcs = ifcs->if_next) {
/* Except unconfigured interfaces */
- if(ifcs->if_netnum == 0)
+ if (!ifcs->if_netnum)
continue;
/* That aren't in the list */
l = (__u32 *) c;
- for(i = 0; i <= ipx->ipx_tctrl; i++)
- if(ifcs->if_netnum == *l++)
+ for (i = 0; i <= cb->ipx_tctrl; i++)
+ if (ifcs->if_netnum == *l++)
break;
- if(i - 1 == ipx->ipx_tctrl)
- {
- ipx->ipx_dest.net = ifcs->if_netnum;
+ if (i - 1 == cb->ipx_tctrl) {
+ cb->ipx_dest_net = ifcs->if_netnum;
skb2=skb_clone(skb, GFP_ATOMIC);
if (skb2)
ipxrtr_route_skb(skb2);
}
}
spin_unlock_bh(&ipx_interfaces_lock);
-
- /* Reset network number in packet */
- ipx->ipx_dest.net = intrfc->if_netnum;
}
}
- if(!ipx->ipx_dest.net)
- ipx->ipx_dest.net = intrfc->if_netnum;
- if(!ipx->ipx_source.net)
- ipx->ipx_source.net = intrfc->if_netnum;
+ if (!cb->ipx_dest_net)
+ cb->ipx_dest_net = intrfc->if_netnum;
+ if (!cb->ipx_source_net)
+ cb->ipx_source_net = intrfc->if_netnum;
- if(intrfc->if_netnum != ipx->ipx_dest.net)
- {
+ if (intrfc->if_netnum != cb->ipx_dest_net) {
/* We only route point-to-point packets. */
- if(skb->pkt_type == PACKET_HOST)
- {
+ if (skb->pkt_type == PACKET_HOST) {
skb=skb_unshare(skb, GFP_ATOMIC);
- if(skb)
+ if (skb)
ret = ipxrtr_route_skb(skb);
goto out_intrfc;
}
@@ -931,9 +882,8 @@ static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb)
}
/* see if we should keep it */
- if((memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) == 0)
- || (memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN) == 0))
- {
+ if (!memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) ||
+ !memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN)) {
ret = ipxitf_demux_socket(intrfc, skb, 0);
goto out_intrfc;
}
@@ -952,20 +902,17 @@ static void ipxitf_insert(ipx_interface *intrfc)
intrfc->if_next = NULL;
spin_lock_bh(&ipx_interfaces_lock);
- if(ipx_interfaces == NULL)
+ if (!ipx_interfaces)
ipx_interfaces = intrfc;
- else
- {
- for(i = ipx_interfaces; i->if_next != NULL; i = i->if_next)
+ else {
+ for (i = ipx_interfaces; i->if_next; i = i->if_next)
;
i->if_next = intrfc;
}
spin_unlock_bh(&ipx_interfaces_lock);
- if(ipxcfg_auto_select_primary && (ipx_primary_net == NULL))
+ if (ipxcfg_auto_select_primary && !ipx_primary_net)
ipx_primary_net = intrfc;
-
- return;
}
static int ipxitf_create_internal(ipx_interface_definition *idef)
@@ -974,21 +921,21 @@ static int ipxitf_create_internal(ipx_interface_definition *idef)
int ret;
/* Only one primary network allowed */
- if(ipx_primary_net != NULL)
- return (-EEXIST);
+ if (ipx_primary_net)
+ return -EEXIST;
/* Must have a valid network number */
- if(!idef->ipx_network)
- return (-EADDRNOTAVAIL);
+ if (!idef->ipx_network)
+ return -EADDRNOTAVAIL;
intrfc = ipxitf_find_using_net(idef->ipx_network);
- if(intrfc != NULL) {
+ if (intrfc) {
ipxitf_put(intrfc);
- return (-EADDRINUSE);
+ return -EADDRINUSE;
}
- intrfc = (ipx_interface *)kmalloc(sizeof(ipx_interface),GFP_ATOMIC);
- if(intrfc == NULL)
- return (-EAGAIN);
+ intrfc = kmalloc(sizeof(ipx_interface),GFP_ATOMIC);
+ if (!intrfc)
+ return -EAGAIN;
intrfc->if_dev = NULL;
intrfc->if_netnum = idef->ipx_network;
intrfc->if_dlink_type = 0;
@@ -998,8 +945,7 @@ static int ipxitf_create_internal(ipx_interface_definition *idef)
intrfc->if_ipx_offset = 0;
intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET;
memcpy((char *)&(intrfc->if_node), idef->ipx_node, IPX_NODE_LEN);
- ipx_internal_net = intrfc;
- ipx_primary_net = intrfc;
+ ipx_internal_net = ipx_primary_net = intrfc;
spin_lock_init(&intrfc->if_sklist_lock);
atomic_set(&intrfc->refcnt, 1);
MOD_INC_USE_COUNT;
@@ -1013,22 +959,21 @@ static int ipxitf_create_internal(ipx_interface_definition *idef)
static int ipx_map_frame_type(unsigned char type)
{
- switch(type)
- {
+ switch (type) {
case IPX_FRAME_ETHERII:
- return (htons(ETH_P_IPX));
+ return htons(ETH_P_IPX);
case IPX_FRAME_8022:
- return (htons(ETH_P_802_2));
+ return htons(ETH_P_802_2);
case IPX_FRAME_SNAP:
- return (htons(ETH_P_SNAP));
+ return htons(ETH_P_SNAP);
case IPX_FRAME_8023:
- return (htons(ETH_P_802_3));
+ return htons(ETH_P_802_3);
}
- return (0);
+ return 0;
}
static int ipxitf_create(ipx_interface_definition *idef)
@@ -1039,29 +984,29 @@ static int ipxitf_create(ipx_interface_definition *idef)
ipx_interface *intrfc;
int err;
- if(idef->ipx_special == IPX_INTERNAL)
- return (ipxitf_create_internal(idef));
+ if (idef->ipx_special == IPX_INTERNAL)
+ return ipxitf_create_internal(idef);
- if((idef->ipx_special == IPX_PRIMARY) && (ipx_primary_net != NULL))
- return (-EEXIST);
+ if (idef->ipx_special == IPX_PRIMARY && ipx_primary_net)
+ return -EEXIST;
intrfc = ipxitf_find_using_net(idef->ipx_network);
- if(idef->ipx_network && intrfc != NULL) {
+ if (idef->ipx_network && intrfc) {
ipxitf_put(intrfc);
- return (-EADDRINUSE);
+ return -EADDRINUSE;
}
if (intrfc)
ipxitf_put(intrfc);
dev = dev_get_by_name(idef->ipx_device);
- if(dev == NULL)
- return (-ENODEV);
+ if (!dev)
+ return -ENODEV;
- switch(idef->ipx_dlink_type)
- {
+ switch (idef->ipx_dlink_type) {
case IPX_FRAME_TR_8022:
- printk("IPX frame type 802.2TR is obsolete. Use 802.2 instead.\n");
+ printk(KERN_WARNING "IPX frame type 802.2TR is "
+ "obsolete Use 802.2 instead.\n");
/* fall through */
case IPX_FRAME_8022:
@@ -1070,14 +1015,14 @@ static int ipxitf_create(ipx_interface_definition *idef)
break;
case IPX_FRAME_ETHERII:
- if (dev->type != ARPHRD_IEEE802)
- {
+ if (dev->type != ARPHRD_IEEE802) {
dlink_type = htons(ETH_P_IPX);
datalink = pEII_datalink;
break;
- }
- else
- printk("IPX frame type EtherII over token-ring is obsolete. Use SNAP instead.\n");
+ } else
+ printk(KERN_WARNING "IPX frame type EtherII "
+ "over token-ring is obsolete. Use SNAP "
+ "instead.\n");
/* fall through */
case IPX_FRAME_SNAP:
@@ -1096,24 +1041,24 @@ static int ipxitf_create(ipx_interface_definition *idef)
}
err = -ENETDOWN;
- if(!(dev->flags & IFF_UP))
+ if (!(dev->flags & IFF_UP))
goto out_dev;
/* Check addresses are suitable */
err = -EINVAL;
- if(dev->addr_len > IPX_NODE_LEN)
+ if (dev->addr_len > IPX_NODE_LEN)
goto out_dev;
err = -EPROTONOSUPPORT;
- if(datalink == NULL)
+ if (!datalink)
goto out_dev;
- if((intrfc = ipxitf_find_using_phys(dev, dlink_type)) == NULL)
- {
+ intrfc = ipxitf_find_using_phys(dev, dlink_type);
+ if (!intrfc) {
/* Ok now create */
- intrfc = (ipx_interface *)kmalloc(sizeof(ipx_interface),GFP_ATOMIC);
+ intrfc = kmalloc(sizeof(ipx_interface), GFP_ATOMIC);
err = -EAGAIN;
- if(intrfc == NULL)
+ if (!intrfc)
goto out_dev;
intrfc->if_dev = dev;
intrfc->if_netnum = idef->ipx_network;
@@ -1122,17 +1067,16 @@ static int ipxitf_create(ipx_interface_definition *idef)
intrfc->if_sklist = NULL;
intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET;
/* Setup primary if necessary */
- if((idef->ipx_special == IPX_PRIMARY))
+ if ((idef->ipx_special == IPX_PRIMARY))
ipx_primary_net = intrfc;
intrfc->if_internal = 0;
intrfc->if_ipx_offset = dev->hard_header_len + datalink->header_length;
- if(memcmp(idef->ipx_node, "\000\000\000\000\000\000", IPX_NODE_LEN) == 0)
- {
+ if (!memcmp(idef->ipx_node, "\000\000\000\000\000\000",
+ IPX_NODE_LEN)) {
memset(intrfc->if_node, 0, IPX_NODE_LEN);
memcpy((char *)&(intrfc->if_node[IPX_NODE_LEN-dev->addr_len]),
dev->dev_addr, dev->addr_len);
- }
- else
+ } else
memcpy(intrfc->if_node, idef->ipx_node, IPX_NODE_LEN);
spin_lock_init(&intrfc->if_sklist_lock);
atomic_set(&intrfc->refcnt, 1);
@@ -1144,7 +1088,7 @@ static int ipxitf_create(ipx_interface_definition *idef)
/* If the network number is known, add a route */
err = 0;
- if(!intrfc->if_netnum)
+ if (!intrfc->if_netnum)
goto out_intrfc;
err = ipxitf_add_local_route(intrfc);
@@ -1164,10 +1108,8 @@ static int ipxitf_delete(ipx_interface_definition *idef)
int ret = 0;
spin_lock_bh(&ipx_interfaces_lock);
- if(idef->ipx_special == IPX_INTERNAL)
- {
- if(ipx_internal_net != NULL)
- {
+ if (idef->ipx_special == IPX_INTERNAL) {
+ if (ipx_internal_net) {
__ipxitf_put(ipx_internal_net);
goto out;
}
@@ -1176,19 +1118,19 @@ static int ipxitf_delete(ipx_interface_definition *idef)
}
dlink_type = ipx_map_frame_type(idef->ipx_dlink_type);
- if(dlink_type == 0) {
+ if (!dlink_type) {
ret = -EPROTONOSUPPORT;
goto out;
}
dev = __dev_get_by_name(idef->ipx_device);
- if(dev == NULL) {
+ if (!dev) {
ret = -ENODEV;
goto out;
}
intrfc = __ipxitf_find_using_phys(dev, dlink_type);
- if(intrfc != NULL)
+ if (intrfc)
__ipxitf_put(intrfc);
else
ret = -EINVAL;
@@ -1198,13 +1140,12 @@ out: spin_unlock_bh(&ipx_interfaces_lock);
}
static ipx_interface *ipxitf_auto_create(struct net_device *dev,
- unsigned short dlink_type)
+ unsigned short dlink_type)
{
struct datalink_proto *datalink = NULL;
ipx_interface *intrfc;
- switch(htons(dlink_type))
- {
+ switch (htons(dlink_type)) {
case ETH_P_IPX:
datalink = pEII_datalink;
break;
@@ -1222,19 +1163,18 @@ static ipx_interface *ipxitf_auto_create(struct net_device *dev,
break;
default:
- return (NULL);
+ return NULL;
}
- if(dev == NULL)
- return (NULL);
+ if (!dev)
+ return NULL;
/* Check addresses are suitable */
- if(dev->addr_len>IPX_NODE_LEN)
- return (NULL);
+ if (dev->addr_len > IPX_NODE_LEN)
+ return NULL;
- intrfc = (ipx_interface *)kmalloc(sizeof(ipx_interface),GFP_ATOMIC);
- if(intrfc != NULL)
- {
+ intrfc = kmalloc(sizeof(ipx_interface), GFP_ATOMIC);
+ if (intrfc) {
intrfc->if_dev = dev;
intrfc->if_netnum = 0;
intrfc->if_dlink_type = dlink_type;
@@ -1253,27 +1193,25 @@ static ipx_interface *ipxitf_auto_create(struct net_device *dev,
ipxitf_insert(intrfc);
}
- return (intrfc);
+ return intrfc;
}
static int ipxitf_ioctl(unsigned int cmd, void *arg)
{
struct ifreq ifr;
- int err, val;
+ int err = 0, val;
- switch(cmd)
- {
- case SIOCSIFADDR:
- {
+ switch (cmd) {
+ case SIOCSIFADDR: {
struct sockaddr_ipx *sipx;
ipx_interface_definition f;
- if(copy_from_user(&ifr, arg, sizeof(ifr)))
- return (-EFAULT);
+ if (copy_from_user(&ifr, arg, sizeof(ifr)))
+ return -EFAULT;
sipx = (struct sockaddr_ipx *)&ifr.ifr_addr;
- if(sipx->sipx_family != AF_IPX)
- return (-EINVAL);
+ if (sipx->sipx_family != AF_IPX)
+ return -EINVAL;
f.ipx_network = sipx->sipx_network;
memcpy(f.ipx_device,ifr.ifr_name,sizeof(f.ipx_device));
@@ -1281,10 +1219,10 @@ static int ipxitf_ioctl(unsigned int cmd, void *arg)
f.ipx_dlink_type = sipx->sipx_type;
f.ipx_special = sipx->sipx_special;
- if(sipx->sipx_action == IPX_DLTITF)
- return (ipxitf_delete(&f));
+ if (sipx->sipx_action == IPX_DLTITF)
+ return ipxitf_delete(&f);
else
- return (ipxitf_create(&f));
+ return ipxitf_create(&f);
}
case SIOCGIFADDR:
@@ -1293,50 +1231,46 @@ static int ipxitf_ioctl(unsigned int cmd, void *arg)
ipx_interface *ipxif;
struct net_device *dev;
- if(copy_from_user(&ifr, arg, sizeof(ifr)))
- return (-EFAULT);
+ if (copy_from_user(&ifr, arg, sizeof(ifr)))
+ return -EFAULT;
sipx = (struct sockaddr_ipx *)&ifr.ifr_addr;
dev = __dev_get_by_name(ifr.ifr_name);
- if(!dev)
- return (-ENODEV);
+ if (!dev)
+ return -ENODEV;
ipxif = ipxitf_find_using_phys(dev, ipx_map_frame_type(sipx->sipx_type));
- if(ipxif == NULL)
- return (-EADDRNOTAVAIL);
+ if (!ipxif)
+ return -EADDRNOTAVAIL;
sipx->sipx_family = AF_IPX;
sipx->sipx_network = ipxif->if_netnum;
- memcpy(sipx->sipx_node, ipxif->if_node, sizeof(sipx->sipx_node));
- err = -EFAULT;
- if(!copy_to_user(arg, &ifr, sizeof(ifr)))
- err = 0;
+ memcpy(sipx->sipx_node, ipxif->if_node,
+ sizeof(sipx->sipx_node));
+ if (copy_to_user(arg, &ifr, sizeof(ifr)))
+ err = -EFAULT;
ipxitf_put(ipxif);
- return (err);
+ return err;
}
case SIOCAIPXITFCRT:
- {
- err = get_user(val, (unsigned char *) arg);
- if(err)
- return (err);
-
- return (ipxcfg_set_auto_create(val));
- }
+ if (get_user(val, (unsigned char *) arg))
+ return -EFAULT;
+ ipxcfg_set_auto_create(val);
+ break;
case SIOCAIPXPRISLT:
- {
- err = get_user(val, (unsigned char *) arg);
- if(err)
- return (err);
-
- return (ipxcfg_set_auto_select(val));
- }
+ if (get_user(val, (unsigned char *) arg))
+ return -EFAULT;
+ ipxcfg_set_auto_select(val);
+ break;
default:
- return (-EINVAL);
+ return -EINVAL;
}
+
+ return 0;
}
/**************************************************************************\
@@ -1350,11 +1284,11 @@ static ipx_route *ipxrtr_lookup(__u32 net)
ipx_route *r;
read_lock_bh(&ipx_routes_lock);
- for(r = ipx_routes; (r != NULL) && (r->ir_net != net); r = r->ir_next)
+ for (r = ipx_routes; r && r->ir_net != net; r = r->ir_next)
;
read_unlock_bh(&ipx_routes_lock);
- return (r);
+ return r;
}
/* caller must hold a reference to intrfc */
@@ -1365,34 +1299,30 @@ static int ipxrtr_add_route(__u32 network, ipx_interface *intrfc, unsigned char
/* Get a route structure; either existing or create */
rt = ipxrtr_lookup(network);
- if(rt == NULL)
- {
- rt = (ipx_route *)kmalloc(sizeof(ipx_route),GFP_ATOMIC);
- if(rt == NULL)
- return (-EAGAIN);
+ if (!rt) {
+ rt = kmalloc(sizeof(ipx_route),GFP_ATOMIC);
+ if (!rt)
+ return -EAGAIN;
write_lock_bh(&ipx_routes_lock);
rt->ir_next = ipx_routes;
ipx_routes = rt;
write_unlock_bh(&ipx_routes_lock);
}
- else if(intrfc == ipx_internal_net)
- return (-EEXIST);
+ else if (intrfc == ipx_internal_net)
+ return -EEXIST;
rt->ir_net = network;
rt->ir_intrfc = intrfc;
- if(node == NULL)
- {
+ if (!node) {
memset(rt->ir_router_node, '\0', IPX_NODE_LEN);
rt->ir_routed = 0;
- }
- else
- {
+ } else {
memcpy(rt->ir_router_node, node, IPX_NODE_LEN);
rt->ir_routed = 1;
}
- return (0);
+ return 0;
}
static void ipxrtr_del_routes(ipx_interface *intrfc)
@@ -1400,14 +1330,11 @@ static void ipxrtr_del_routes(ipx_interface *intrfc)
ipx_route **r, *tmp;
write_lock_bh(&ipx_routes_lock);
- for(r = &ipx_routes; (tmp = *r) != NULL;)
- {
- if(tmp->ir_intrfc == intrfc)
- {
+ for (r = &ipx_routes; (tmp = *r) != NULL;) {
+ if (tmp->ir_intrfc == intrfc) {
*r = tmp->ir_next;
kfree(tmp);
- }
- else
+ } else
r = &(tmp->ir_next);
}
write_unlock_bh(&ipx_routes_lock);
@@ -1420,8 +1347,8 @@ static int ipxrtr_create(ipx_route_definition *rd)
/* Find the appropriate interface */
intrfc = ipxitf_find_using_net(rd->ipx_router_network);
- if(intrfc == NULL)
- return (-ENETUNREACH);
+ if (!intrfc)
+ return -ENETUNREACH;
ret = ipxrtr_add_route(rd->ipx_network, intrfc, rd->ipx_router_node);
ipxitf_put(intrfc);
return ret;
@@ -1434,13 +1361,11 @@ static int ipxrtr_delete(long net)
int err;
write_lock_bh(&ipx_routes_lock);
- for(r = &ipx_routes; (tmp = *r) != NULL;)
- {
- if(tmp->ir_net == net)
- {
+ for (r = &ipx_routes; (tmp = *r) != NULL;) {
+ if (tmp->ir_net == net) {
/* Directly connected; can't lose route */
err = -EPERM;
- if(!(tmp->ir_routed))
+ if (!tmp->ir_routed)
goto out;
*r = tmp->ir_next;
@@ -1461,45 +1386,38 @@ out: write_unlock_bh(&ipx_routes_lock);
*/
/* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */
+/* This functions should *not* mess with packet contents */
-static __u16 ipx_set_checksum(struct ipxhdr *packet,int length)
+static __u16 ipx_cksum(struct ipxhdr *packet,int length)
{
/*
* NOTE: sum is a net byte order quantity, which optimizes the
* loop. This only works on big and little endian machines. (I
* don't know of a machine that isn't.)
*/
-
- __u32 sum = 0;
-
- /* Pointer to second word - We skip the checksum field */
- __u16 *p = (__u16 *)&packet->ipx_pktsize;
-
- /* Number of complete words */
- __u32 i = length >> 1;
- char hops = packet->ipx_tctrl;
-
- /* Hop count excluded from checksum calc */
- packet->ipx_tctrl = 0;
-
- /* Loop through all complete words except the checksum field */
- while(--i)
+ /* start at ipx_dest - We skip the checksum field and start with
+ * ipx_type before the loop, not considering ipx_tctrl in the calc */
+ __u16 *p = (__u16 *)&packet->ipx_dest;
+ __u32 i = (length >> 1) - 1; /* Number of complete words */
+ __u32 sum = packet->ipx_type << sizeof(packet->ipx_tctrl);
+
+ /* Loop through all complete words except the checksum field,
+ * ipx_type (accounted above) and ipx_tctrl (not used in the cksum) */
+ while (--i)
sum += *p++;
/* Add on the last part word if it exists */
- if(packet->ipx_pktsize & htons(1))
+ if (packet->ipx_pktsize & htons(1))
sum += ntohs(0xff00) & *p;
- packet->ipx_tctrl = hops;
-
/* Do final fixup */
sum = (sum & 0xffff) + (sum >> 16);
/* It's a pity there's no concept of carry in C */
- if(sum >= 0x10000)
+ if (sum >= 0x10000)
sum++;
- return (~sum);
+ return ~sum;
}
/*
@@ -1510,80 +1428,76 @@ static int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, stru
struct sk_buff *skb;
ipx_interface *intrfc;
struct ipxhdr *ipx;
+ struct ipx_cb *cb;
int size;
int ipx_offset;
ipx_route *rt = NULL;
int err;
/* Find the appropriate interface on which to send packet */
- if(!usipx->sipx_network && (ipx_primary_net != NULL))
- {
+ if (!usipx->sipx_network && ipx_primary_net) {
usipx->sipx_network = ipx_primary_net->if_netnum;
intrfc = ipx_primary_net;
- }
- else
- {
+ } else {
rt = ipxrtr_lookup(usipx->sipx_network);
- if(rt == NULL)
- return (-ENETUNREACH);
+ if (!rt)
+ return -ENETUNREACH;
intrfc = rt->ir_intrfc;
}
ipxitf_hold(intrfc);
ipx_offset = intrfc->if_ipx_offset;
- size = sizeof(struct ipxhdr) + len;
- size += ipx_offset;
+ size = sizeof(struct ipxhdr) + len + ipx_offset;
skb = sock_alloc_send_skb(sk, size, 0, noblock, &err);
- if(skb == NULL)
+ if (!skb)
goto out;
skb_reserve(skb,ipx_offset);
skb->sk = sk;
+ cb = (struct ipx_cb *) skb->cb;
/* Fill in IPX header */
ipx = (struct ipxhdr *)skb_put(skb, sizeof(struct ipxhdr));
ipx->ipx_pktsize= htons(len + sizeof(struct ipxhdr));
- ipx->ipx_tctrl = 0;
+ cb->ipx_tctrl = 0;
ipx->ipx_type = usipx->sipx_type;
skb->h.raw = (void *)skb->nh.ipxh = ipx;
- ipx->ipx_source.net = sk->protinfo.af_ipx.intrfc->if_netnum;
+ cb->last_hop_index = -1;
#ifdef CONFIG_IPX_INTERN
+ cb->ipx_source_net = sk->protinfo.af_ipx.intrfc->if_netnum;
memcpy(ipx->ipx_source.node, sk->protinfo.af_ipx.node, IPX_NODE_LEN);
#else
- if((err = ntohs(sk->protinfo.af_ipx.port)) == 0x453 || err == 0x452)
- {
+ err = ntohs(sk->protinfo.af_ipx.port);
+ if (err == 0x453 || err == 0x452) {
/* RIP/SAP special handling for mars_nwe */
- ipx->ipx_source.net = intrfc->if_netnum;
+ cb->ipx_source_net = intrfc->if_netnum;
memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN);
- }
- else
- {
- ipx->ipx_source.net = sk->protinfo.af_ipx.intrfc->if_netnum;
+ } else {
+ cb->ipx_source_net = sk->protinfo.af_ipx.intrfc->if_netnum;
memcpy(ipx->ipx_source.node, sk->protinfo.af_ipx.intrfc->if_node, IPX_NODE_LEN);
}
#endif /* CONFIG_IPX_INTERN */
ipx->ipx_source.sock = sk->protinfo.af_ipx.port;
- ipx->ipx_dest.net = usipx->sipx_network;
+ cb->ipx_dest_net = usipx->sipx_network;
memcpy(ipx->ipx_dest.node,usipx->sipx_node,IPX_NODE_LEN);
ipx->ipx_dest.sock = usipx->sipx_port;
err = memcpy_fromiovec(skb_put(skb,len),iov,len);
- if(err)
- {
+ if (err) {
kfree_skb(skb);
goto out;
}
/* Apply checksum. Not allowed on 802.3 links. */
- if(sk->no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
+ if (sk->no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
ipx->ipx_checksum=0xFFFF;
else
- ipx->ipx_checksum = ipx_set_checksum(ipx, len + sizeof(struct ipxhdr));
+ ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
err = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ?
rt->ir_router_node : ipx->ipx_dest.node);
@@ -1594,21 +1508,20 @@ out: ipxitf_put(intrfc);
int ipxrtr_route_skb(struct sk_buff *skb)
{
struct ipxhdr *ipx = skb->nh.ipxh;
- ipx_route *r;
+ struct ipx_cb *cb = (struct ipx_cb *) skb->cb;
+ ipx_route *r = ipxrtr_lookup(cb->ipx_dest_net);
- r = ipxrtr_lookup(ipx->ipx_dest.net);
- if(r == NULL) /* no known route */
- {
+ if (!r) { /* no known route */
kfree_skb(skb);
- return (0);
+ return 0;
}
ipxitf_hold(r->ir_intrfc);
- (void)ipxitf_send(r->ir_intrfc, skb, (r->ir_routed) ?
+ ipxitf_send(r->ir_intrfc, skb, (r->ir_routed) ?
r->ir_router_node : ipx->ipx_dest.node);
ipxitf_put(r->ir_intrfc);
- return (0);
+ return 0;
}
/*
@@ -1621,23 +1534,22 @@ static int ipxrtr_ioctl(unsigned int cmd, void *arg)
int err;
err = copy_from_user(&rt,arg,sizeof(rt));
- if(err)
- return (-EFAULT);
+ if (err)
+ return -EFAULT;
sg = (struct sockaddr_ipx *)&rt.rt_gateway;
st = (struct sockaddr_ipx *)&rt.rt_dst;
- if(!(rt.rt_flags & RTF_GATEWAY))
- return (-EINVAL); /* Direct routes are fixed */
- if(sg->sipx_family != AF_IPX)
- return (-EINVAL);
- if(st->sipx_family != AF_IPX)
- return (-EINVAL);
+ if (!(rt.rt_flags & RTF_GATEWAY))
+ return -EINVAL; /* Direct routes are fixed */
+ if (sg->sipx_family != AF_IPX)
+ return -EINVAL;
+ if (st->sipx_family != AF_IPX)
+ return -EINVAL;
- switch(cmd)
- {
+ switch (cmd) {
case SIOCDELRT:
- return (ipxrtr_delete(st->sipx_network));
+ return ipxrtr_delete(st->sipx_network);
case SIOCADDRT:
{
@@ -1645,42 +1557,40 @@ static int ipxrtr_ioctl(unsigned int cmd, void *arg)
f.ipx_network=st->sipx_network;
f.ipx_router_network=sg->sipx_network;
memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN);
- return (ipxrtr_create(&f));
+ return ipxrtr_create(&f);
}
-
- default:
- return (-EINVAL);
}
+
+ return -EINVAL;
}
static const char *ipx_frame_name(unsigned short frame)
{
- switch(ntohs(frame))
- {
+ switch (ntohs(frame)) {
case ETH_P_IPX:
- return ("EtherII");
+ return "EtherII";
case ETH_P_802_2:
- return ("802.2");
+ return "802.2";
case ETH_P_SNAP:
- return ("SNAP");
+ return "SNAP";
case ETH_P_802_3:
- return ("802.3");
+ return "802.3";
case ETH_P_TR_802_2:
- return ("802.2TR");
+ return "802.2TR";
default:
- return ("None");
+ return "None";
}
}
static const char *ipx_device_name(ipx_interface *intrfc)
{
- return (intrfc->if_internal ? "Internal" :
- (intrfc->if_dev ? intrfc->if_dev->name : "Unknown"));
+ return intrfc->if_internal ? "Internal" :
+ intrfc->if_dev ? intrfc->if_dev->name : "Unknown";
}
/* Called from proc fs */
@@ -1700,13 +1610,12 @@ static int ipx_interface_get_info(char *buffer, char **start, off_t offset,
#endif
strcat(buffer+len++, "\n");
spin_lock_bh(&ipx_interfaces_lock);
- for(i = ipx_interfaces; i != NULL; i = i->if_next)
- {
+ for (i = ipx_interfaces; i; i = i->if_next) {
len += sprintf(buffer+len, "%08lX ", (long unsigned int)ntohl(i->if_netnum));
len += sprintf(buffer+len,"%02X%02X%02X%02X%02X%02X ",
i->if_node[0], i->if_node[1], i->if_node[2],
i->if_node[3], i->if_node[4], i->if_node[5]);
- len += sprintf(buffer+len, "%-9s", (i == ipx_primary_net) ?
+ len += sprintf(buffer+len, "%-9s", i == ipx_primary_net ?
"Yes" : "No");
len += sprintf(buffer+len, "%-11s", ipx_device_name(i));
len += sprintf(buffer+len, "%-9s",
@@ -1718,12 +1627,11 @@ static int ipx_interface_get_info(char *buffer, char **start, off_t offset,
/* Are we still dumping unwanted data then discard the record */
pos = begin + len;
- if(pos < offset)
- {
+ if (pos < offset) {
len = 0; /* Keep dumping into the buffer start */
begin = pos;
}
- if(pos > offset + length) /* We have dumped enough */
+ if (pos > offset + length) /* We have dumped enough */
break;
}
spin_unlock_bh(&ipx_interfaces_lock);
@@ -1731,10 +1639,10 @@ static int ipx_interface_get_info(char *buffer, char **start, off_t offset,
/* The data in question runs from begin to begin+len */
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Remove unwanted header data from length */
- if(len > length)
+ if (len > length)
len = length; /* Remove unwanted tail data from length */
- return (len);
+ return len;
}
static int ipx_get_info(char *buffer, char **start, off_t offset, int length)
@@ -1755,12 +1663,10 @@ static int ipx_get_info(char *buffer, char **start, off_t offset, int length)
"State", "Uid");
spin_lock_bh(&ipx_interfaces_lock);
- for(i = ipx_interfaces; i != NULL; i = i->if_next)
- {
+ for (i = ipx_interfaces; i; i = i->if_next) {
ipxitf_hold(i);
spin_lock_bh(&i->if_sklist_lock);
- for(s = i->if_sklist; s != NULL; s = s->next)
- {
+ for (s = i->if_sklist; s; s = s->next) {
#ifdef CONFIG_IPX_INTERN
len += sprintf(buffer+len,
"%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
@@ -1778,10 +1684,9 @@ static int ipx_get_info(char *buffer, char **start, off_t offset, int length)
htons(s->protinfo.af_ipx.port));
#endif /* CONFIG_IPX_INTERN */
- if(s->state != TCP_ESTABLISHED)
+ if (s->state != TCP_ESTABLISHED)
len += sprintf(buffer+len, "%-28s", "Not_Connected");
- else
- {
+ else {
len += sprintf(buffer+len,
"%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
(unsigned long) htonl(s->protinfo.af_ipx.dest_addr.net),
@@ -1801,13 +1706,12 @@ static int ipx_get_info(char *buffer, char **start, off_t offset, int length)
s->state, SOCK_INODE(s->socket)->i_uid);
pos = begin + len;
- if(pos < offset)
- {
+ if (pos < offset) {
len = 0;
begin = pos;
}
- if(pos > offset + length) /* We have dumped enough */
+ if (pos > offset + length) /* We have dumped enough */
break;
}
spin_unlock_bh(&i->if_sklist_lock);
@@ -1818,10 +1722,10 @@ static int ipx_get_info(char *buffer, char **start, off_t offset, int length)
/* The data in question runs from begin to begin+len */
*start = buffer + (offset-begin);
len -= (offset - begin);
- if(len > length)
+ if (len > length)
len = length;
- return (len);
+ return len;
}
static int ipx_rt_get_info(char *buffer, char **start, off_t offset, int length)
@@ -1833,41 +1737,36 @@ static int ipx_rt_get_info(char *buffer, char **start, off_t offset, int length)
len += sprintf(buffer,"%-11s%-13s%s\n",
"Network", "Router_Net", "Router_Node");
read_lock_bh(&ipx_routes_lock);
- for(rt = ipx_routes; rt != NULL; rt = rt->ir_next)
- {
+ for (rt = ipx_routes; rt; rt = rt->ir_next) {
len += sprintf(buffer+len,"%08lX ", (long unsigned int) ntohl(rt->ir_net));
- if(rt->ir_routed)
- {
+ if (rt->ir_routed) {
len += sprintf(buffer+len,"%08lX %02X%02X%02X%02X%02X%02X\n",
(long unsigned int) ntohl(rt->ir_intrfc->if_netnum),
rt->ir_router_node[0], rt->ir_router_node[1],
rt->ir_router_node[2], rt->ir_router_node[3],
rt->ir_router_node[4], rt->ir_router_node[5]);
- }
- else
- {
+ } else {
len += sprintf(buffer+len, "%-13s%s\n",
"Directly", "Connected");
}
pos = begin + len;
- if(pos < offset)
- {
- len = 0;
+ if (pos < offset) {
+ len = 0;
begin = pos;
}
- if(pos > offset + length)
+ if (pos > offset + length)
break;
}
read_unlock_bh(&ipx_routes_lock);
*start = buffer + (offset - begin);
len -= (offset - begin);
- if(len > length)
+ if (len > length)
len = length;
- return (len);
+ return len;
}
/**************************************************************************\
@@ -1877,89 +1776,80 @@ static int ipx_rt_get_info(char *buffer, char **start, off_t offset, int length)
* *
\**************************************************************************/
-static int ipx_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
+static int ipx_setsockopt(struct socket *sock, int level, int optname,
+ char *optval, int optlen)
{
- struct sock *sk;
- int err, opt;
-
- sk = sock->sk;
+ struct sock *sk = sock->sk;
+ int opt;
- if(optlen != sizeof(int))
- return (-EINVAL);
+ if (optlen != sizeof(int))
+ return -EINVAL;
- err = get_user(opt, (unsigned int *)optval);
- if(err)
- return (err);
+ if (get_user(opt, (unsigned int *)optval))
+ return -EFAULT;
- switch(level)
- {
+ switch (level) {
case SOL_IPX:
- switch(optname)
- {
+ switch (optname) {
case IPX_TYPE:
sk->protinfo.af_ipx.type = opt;
- return (0);
+ return 0;
default:
- return (-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
break;
default:
- return (-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
}
static int ipx_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
{
- struct sock *sk;
- int val=0;
+ struct sock *sk = sock->sk;
+ int val = 0;
int len;
- sk = sock->sk;
-
- switch(level)
- {
+ switch (level) {
case SOL_IPX:
- switch(optname)
- {
+ switch (optname) {
case IPX_TYPE:
val = sk->protinfo.af_ipx.type;
break;
default:
- return (-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
break;
default:
- return (-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
- if(get_user(len, optlen))
- return (-EFAULT);
+ if (get_user(len, optlen))
+ return -EFAULT;
len = min(len, sizeof(int));
- if(put_user(len, optlen))
- return (-EFAULT);
+ if (put_user(len, optlen))
+ return -EFAULT;
- if(copy_to_user(optval, &val, len))
- return (-EFAULT);
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
- return (0);
+ return 0;
}
static int ipx_create(struct socket *sock, int protocol)
{
struct sock *sk;
- switch(sock->type)
- {
+ switch (sock->type) {
case SOCK_DGRAM:
sk = sk_alloc(PF_IPX, GFP_KERNEL, 1);
- if(sk == NULL)
- return (-ENOMEM);
+ if (!sk)
+ return -ENOMEM;
sock->ops = &ipx_dgram_ops;
break;
@@ -1968,12 +1858,12 @@ static int ipx_create(struct socket *sock, int protocol)
* From this point on SPX sockets are handled
* by af_spx.c and the methods replaced.
*/
- if(spx_family_ops)
- return (spx_family_ops->create(sock,protocol));
+ if (spx_family_ops)
+ return spx_family_ops->create(sock,protocol);
/* Fall through if SPX is not loaded */
case SOCK_STREAM: /* Allow higher levels to piggyback */
default:
- return (-ESOCKTNOSUPPORT);
+ return -ESOCKTNOSUPPORT;
}
#ifdef IPX_REFCNT_DEBUG
atomic_inc(&ipx_sock_nr);
@@ -1985,31 +1875,30 @@ static int ipx_create(struct socket *sock, int protocol)
sk->no_check = 1; /* Checksum off by default */
MOD_INC_USE_COUNT;
-
- return (0);
+ return 0;
}
static int ipx_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- if(sk == NULL)
- return (0);
+ if (!sk)
+ return 0;
- if(!sk->dead)
+ if (!sk->dead)
sk->state_change(sk);
sk->dead = 1;
sock->sk = NULL;
ipx_destroy_socket(sk);
- if(sock->type == SOCK_DGRAM)
+ if (sock->type == SOCK_DGRAM)
MOD_DEC_USE_COUNT;
- return (0);
+ return 0;
}
-/* caller must hold a referente to intrfc */
+/* caller must hold a reference to intrfc */
static unsigned short ipx_first_free_socketnum(ipx_interface *intrfc)
{
@@ -2017,91 +1906,80 @@ static unsigned short ipx_first_free_socketnum(ipx_interface *intrfc)
spin_lock_bh(&intrfc->if_sklist_lock);
- if(socketNum < IPX_MIN_EPHEMERAL_SOCKET)
+ if (socketNum < IPX_MIN_EPHEMERAL_SOCKET)
socketNum = IPX_MIN_EPHEMERAL_SOCKET;
- while(__ipxitf_find_socket(intrfc, ntohs(socketNum)) != NULL)
- {
- if(socketNum > IPX_MAX_EPHEMERAL_SOCKET)
+ while (__ipxitf_find_socket(intrfc, ntohs(socketNum)))
+ if (socketNum > IPX_MAX_EPHEMERAL_SOCKET)
socketNum = IPX_MIN_EPHEMERAL_SOCKET;
else
socketNum++;
- }
spin_unlock_bh(&intrfc->if_sklist_lock);
intrfc->if_sknum = socketNum;
- return (ntohs(socketNum));
+ return ntohs(socketNum);
}
static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sock *sk;
+ struct sock *sk = sock->sk;
ipx_interface *intrfc;
struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr;
int ret;
- sk = sock->sk;
-
- if(sk->zapped == 0)
- return (-EINVAL);
+ if (!sk->zapped)
+ return -EINVAL;
- if(addr_len != sizeof(struct sockaddr_ipx))
- return (-EINVAL);
+ if (addr_len != sizeof(struct sockaddr_ipx))
+ return -EINVAL;
intrfc = ipxitf_find_using_net(addr->sipx_network);
- if(intrfc == NULL)
- return (-EADDRNOTAVAIL);
+ if (!intrfc)
+ return -EADDRNOTAVAIL;
- if(addr->sipx_port == 0)
- {
+ if (!addr->sipx_port) {
addr->sipx_port = ipx_first_free_socketnum(intrfc);
ret = -EINVAL;
- if(addr->sipx_port == 0)
+ if (!addr->sipx_port)
goto out;
}
/* protect IPX system stuff like routing/sap */
ret = -EACCES;
- if(ntohs(addr->sipx_port) < IPX_MIN_EPHEMERAL_SOCKET && !capable(CAP_NET_ADMIN))
+ if (ntohs(addr->sipx_port) < IPX_MIN_EPHEMERAL_SOCKET &&
+ !capable(CAP_NET_ADMIN))
goto out;
sk->protinfo.af_ipx.port = addr->sipx_port;
#ifdef CONFIG_IPX_INTERN
- if(intrfc == ipx_internal_net)
- {
+ if (intrfc == ipx_internal_net) {
/* The source address is to be set explicitly if the
* socket is to be bound on the internal network. If a
* node number 0 was specified, the default is used.
*/
ret = -EINVAL;
- if(memcmp(addr->sipx_node,ipx_broadcast_node,IPX_NODE_LEN) == 0)
+ if (!memcmp(addr->sipx_node,ipx_broadcast_node,IPX_NODE_LEN))
goto out;
- if(memcmp(addr->sipx_node, ipx_this_node, IPX_NODE_LEN) == 0)
- {
+ if (!memcmp(addr->sipx_node, ipx_this_node, IPX_NODE_LEN))
memcpy(sk->protinfo.af_ipx.node, intrfc->if_node,
IPX_NODE_LEN);
- }
else
- {
- memcpy(sk->protinfo.af_ipx.node, addr->sipx_node, IPX_NODE_LEN);
- }
+ memcpy(sk->protinfo.af_ipx.node, addr->sipx_node,
+ IPX_NODE_LEN);
ret = -EADDRINUSE;
- if(ipxitf_find_internal_socket(intrfc,
- sk->protinfo.af_ipx.node,
- sk->protinfo.af_ipx.port) != NULL)
- {
+ if (ipxitf_find_internal_socket(intrfc,
+ sk->protinfo.af_ipx.node,
+ sk->protinfo.af_ipx.port)) {
SOCK_DEBUG(sk,
"IPX: bind failed because port %X in use.\n",
ntohs((int)addr->sipx_port));
goto out;
}
- }
- else
- {
+ } else {
/* Source addresses are easy. It must be our
* network:node pair for an interface routed to IPX
* with the ipx routing ioctl()
@@ -2111,8 +1989,7 @@ static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
IPX_NODE_LEN);
ret = -EADDRINUSE;
- if(ipxitf_find_socket(intrfc, addr->sipx_port) != NULL)
- {
+ if (ipxitf_find_socket(intrfc, addr->sipx_port)) {
SOCK_DEBUG(sk,
"IPX: bind failed because port %X in use.\n",
ntohs((int)addr->sipx_port));
@@ -2126,8 +2003,7 @@ static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
an interface routed to IPX with the ipx routing ioctl() */
ret = -EADDRINUSE;
- if(ipxitf_find_socket(intrfc, addr->sipx_port) != NULL)
- {
+ if (ipxitf_find_socket(intrfc, addr->sipx_port)) {
SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n",
ntohs((int)addr->sipx_port));
goto out;
@@ -2153,13 +2029,12 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
sk->state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
- if(addr_len != sizeof(*addr))
- return (-EINVAL);
+ if (addr_len != sizeof(*addr))
+ return -EINVAL;
addr = (struct sockaddr_ipx *)uaddr;
/* put the autobinding in */
- if(sk->protinfo.af_ipx.port == 0)
- {
+ if (!sk->protinfo.af_ipx.port) {
struct sockaddr_ipx uaddr;
int ret;
@@ -2167,7 +2042,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
- if(sk->protinfo.af_ipx.intrfc)
+ if (sk->protinfo.af_ipx.intrfc)
memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc->if_node,IPX_NODE_LEN);
else
return -ENETDOWN; /* Someone zonked the iface */
@@ -2175,13 +2050,15 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
ret = ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
- if(ret != 0)
- return (ret);
+ if (ret)
+ return ret;
}
- /* We can either connect to primary network or somewhere we can route to */
- if( !(addr->sipx_network == 0 && ipx_primary_net != NULL) && ipxrtr_lookup(addr->sipx_network) == NULL)
- return (-ENETUNREACH);
+ /* We can either connect to primary network or somewhere
+ * we can route to */
+ if (!(!addr->sipx_network && ipx_primary_net) &&
+ !ipxrtr_lookup(addr->sipx_network))
+ return -ENETUNREACH;
sk->protinfo.af_ipx.dest_addr.net = addr->sipx_network;
sk->protinfo.af_ipx.dest_addr.sock = addr->sipx_port;
@@ -2189,41 +2066,34 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
addr->sipx_node,IPX_NODE_LEN);
sk->protinfo.af_ipx.type = addr->sipx_type;
- if(sock->type == SOCK_DGRAM )
- {
+ if (sock->type == SOCK_DGRAM) {
sock->state = SS_CONNECTED;
sk->state = TCP_ESTABLISHED;
}
- return (0);
+ return 0;
}
static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+ int *uaddr_len, int peer)
{
ipx_address *addr;
struct sockaddr_ipx sipx;
- struct sock *sk;
-
- sk = sock->sk;
+ struct sock *sk = sock->sk;
*uaddr_len = sizeof(struct sockaddr_ipx);
- if(peer)
- {
- if(sk->state != TCP_ESTABLISHED)
- return (-ENOTCONN);
+ if (peer) {
+ if (sk->state != TCP_ESTABLISHED)
+ return -ENOTCONN;
addr = &sk->protinfo.af_ipx.dest_addr;
sipx.sipx_network = addr->net;
memcpy(sipx.sipx_node,addr->node,IPX_NODE_LEN);
sipx.sipx_port = addr->sock;
- }
- else
- {
- if(sk->protinfo.af_ipx.intrfc != NULL)
- {
+ } else {
+ if (sk->protinfo.af_ipx.intrfc) {
sipx.sipx_network=sk->protinfo.af_ipx.intrfc->if_netnum;
#ifdef CONFIG_IPX_INTERN
memcpy(sipx.sipx_node, sk->protinfo.af_ipx.node, IPX_NODE_LEN);
@@ -2231,9 +2101,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
memcpy(sipx.sipx_node, sk->protinfo.af_ipx.intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
- }
- else
- {
+ } else {
sipx.sipx_network = 0;
memset(sipx.sipx_node, '\0', IPX_NODE_LEN);
}
@@ -2242,10 +2110,10 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
}
sipx.sipx_family = AF_IPX;
- sipx.sipx_type = sk->protinfo.af_ipx.type;
+ sipx.sipx_type = sk->protinfo.af_ipx.type;
memcpy(uaddr,&sipx,sizeof(sipx));
- return (0);
+ return 0;
}
int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
@@ -2253,97 +2121,96 @@ int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
/* NULL here for pt means the packet was looped back */
ipx_interface *intrfc;
struct ipxhdr *ipx;
+ struct ipx_cb *cb;
+ u16 ipx_pktsize;
int ret;
-
- ipx = skb->nh.ipxh;
-
- /* Too small? */
- if(ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr))
- goto drop;
-
- /* Invalid header */
- if(ntohs(ipx->ipx_pktsize) > skb->len)
- goto drop;
/* Not ours */
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ goto out;
+
+ ipx = skb->nh.ipxh;
+ ipx_pktsize = ntohs(ipx->ipx_pktsize);
+
+ /* Too small or invalid header? */
+ if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+ goto drop;
- if(ipx->ipx_checksum != IPX_NO_CHECKSUM)
- {
- if(ipx_set_checksum(ipx, ntohs(ipx->ipx_pktsize)) != ipx->ipx_checksum)
- goto drop;
- }
+ if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
+ ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
+ goto drop;
+
+ cb = (struct ipx_cb *) skb->cb;
+ cb->ipx_tctrl = ipx->ipx_tctrl;
+ cb->ipx_dest_net = ipx->ipx_dest.net;
+ cb->ipx_source_net = ipx->ipx_source.net;
/* Determine what local ipx endpoint this is */
intrfc = ipxitf_find_using_phys(dev, pt->type);
- if(intrfc == NULL)
- {
- if(ipxcfg_auto_create_interfaces
- && ntohl(ipx->ipx_dest.net) != 0L)
- {
+ if (!intrfc) {
+ if (ipxcfg_auto_create_interfaces &&
+ ntohl(cb->ipx_dest_net)) {
intrfc = ipxitf_auto_create(dev, pt->type);
ipxitf_hold(intrfc);
}
- if(intrfc == NULL) /* Not one of ours */
+ if (!intrfc) /* Not one of ours */
goto drop;
}
ret = ipxitf_rcv(intrfc, skb);
ipxitf_put(intrfc);
return ret;
-drop:
- kfree_skb(skb);
- return (0);
+drop: kfree_skb(skb);
+out: return 0;
}
static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
- struct sockaddr_ipx *usipx=(struct sockaddr_ipx *)msg->msg_name;
+ struct sockaddr_ipx *usipx = (struct sockaddr_ipx *)msg->msg_name;
struct sockaddr_ipx local_sipx;
int retval;
int flags = msg->msg_flags;
/* Socket gets bound below anyway */
-/* if(sk->zapped)
- return (-EIO); */ /* Socket not bound */
- if(flags & ~MSG_DONTWAIT)
- return (-EINVAL);
-
- if(usipx)
- {
- if(sk->protinfo.af_ipx.port == 0)
- {
+/* if (sk->zapped)
+ return -EIO; */ /* Socket not bound */
+ if (flags & ~MSG_DONTWAIT)
+ return -EINVAL;
+
+ if (usipx) {
+ if (!sk->protinfo.af_ipx.port) {
struct sockaddr_ipx uaddr;
int ret;
uaddr.sipx_port = 0;
uaddr.sipx_network = 0L;
#ifdef CONFIG_IPX_INTERN
- if(sk->protinfo.af_ipx.intrfc)
- memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc
- ->if_node,IPX_NODE_LEN);
+ if (sk->protinfo.af_ipx.intrfc)
+ memcpy(uaddr.sipx_node,
+ sk->protinfo.af_ipx.intrfc->if_node,
+ IPX_NODE_LEN);
else
return -ENETDOWN; /* Someone zonked the iface */
#endif
ret = ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
- if(ret != 0)
- return (ret);
+ if (ret)
+ return ret;
}
- if(msg->msg_namelen < sizeof(*usipx))
- return (-EINVAL);
- if(usipx->sipx_family != AF_IPX)
- return (-EINVAL);
- }
- else
- {
- if(sk->state != TCP_ESTABLISHED)
- return (-ENOTCONN);
+ if (msg->msg_namelen < sizeof(*usipx))
+ return -EINVAL;
+ if (usipx->sipx_family != AF_IPX)
+ return -EINVAL;
+ } else {
+ if (sk->state != TCP_ESTABLISHED)
+ return -ENOTCONN;
usipx=&local_sipx;
usipx->sipx_family = AF_IPX;
@@ -2353,11 +2220,12 @@ static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
memcpy(usipx->sipx_node,sk->protinfo.af_ipx.dest_addr.node,IPX_NODE_LEN);
}
- retval = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len, flags&MSG_DONTWAIT);
- if(retval < 0)
- return (retval);
+ retval = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len,
+ flags & MSG_DONTWAIT);
+ if (retval < 0)
+ return retval;
- return (len);
+ return len;
}
@@ -2371,8 +2239,7 @@ static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, int size,
int copied, err;
/* put the autobinding in */
- if(sk->protinfo.af_ipx.port == 0)
- {
+ if (!sk->protinfo.af_ipx.port) {
struct sockaddr_ipx uaddr;
int ret;
@@ -2380,7 +2247,7 @@ static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, int size,
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
- if(sk->protinfo.af_ipx.intrfc)
+ if (sk->protinfo.af_ipx.intrfc)
memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc->if_node,IPX_NODE_LEN);
else
return -ENETDOWN; /* Someone zonked the iface */
@@ -2388,47 +2255,45 @@ static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, int size,
ret = ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
- if(ret != 0)
- return (ret);
+ if (ret)
+ return ret;
}
- if(sk->zapped)
- return (-ENOTCONN);
+ if (sk->zapped)
+ return -ENOTCONN;
skb = skb_recv_datagram(sk,flags&~MSG_DONTWAIT,flags&MSG_DONTWAIT,&err);
- if(!skb)
+ if (!skb)
goto out;
ipx = skb->nh.ipxh;
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
- if(copied > size)
- {
+ if (copied > size) {
copied=size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
copied);
- if(err)
+ if (err)
goto out_free;
sk->stamp = skb->stamp;
msg->msg_namelen = sizeof(*sipx);
- if(sipx)
- {
+ if (sipx) {
+ struct ipx_cb *cb = (struct ipx_cb *) skb->cb;
sipx->sipx_family = AF_IPX;
sipx->sipx_port = ipx->ipx_source.sock;
memcpy(sipx->sipx_node,ipx->ipx_source.node,IPX_NODE_LEN);
- sipx->sipx_network = ipx->ipx_source.net;
+ sipx->sipx_network = cb->ipx_source_net;
sipx->sipx_type = ipx->ipx_type;
}
err = copied;
out_free:
skb_free_datagram(sk, skb);
-out:
- return (err);
+out: return err;
}
@@ -2437,40 +2302,39 @@ static int ipx_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
long amount = 0;
struct sock *sk = sock->sk;
- switch(cmd)
- {
+ switch (cmd) {
case TIOCOUTQ:
amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
- if(amount < 0)
+ if (amount < 0)
amount = 0;
- return (put_user(amount, (int *)arg));
+ return put_user(amount, (int *)arg);
case TIOCINQ:
{
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_peek(&sk->receive_queue);
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if (skb)
amount = skb->len - sizeof(struct ipxhdr);
- return (put_user(amount, (int *)arg));
+ return put_user(amount, (int *)arg);
}
case SIOCADDRT:
case SIOCDELRT:
- if(!capable(CAP_NET_ADMIN))
- return (-EPERM);
- return (ipxrtr_ioctl(cmd,(void *)arg));
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ return ipxrtr_ioctl(cmd,(void *)arg);
case SIOCSIFADDR:
case SIOCAIPXITFCRT:
case SIOCAIPXPRISLT:
- if(!capable(CAP_NET_ADMIN))
- return (-EPERM);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
case SIOCGIFADDR:
- return (ipxitf_ioctl(cmd,(void *)arg));
+ return ipxitf_ioctl(cmd,(void *)arg);
case SIOCIPXCFGDATA:
- return (ipxcfg_get_config_data((void *)arg));
+ return ipxcfg_get_config_data((void *)arg);
case SIOCIPXNCPCONN:
{
@@ -2479,24 +2343,24 @@ static int ipx_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
* handed to us in arg.
*/
if (!capable(CAP_NET_ADMIN))
- return(-EPERM);
- return get_user(sk->protinfo.af_ipx.ipx_ncp_conn, (const unsigned short *)(arg));
+ return -EPERM;
+ return get_user(sk->protinfo.af_ipx.ipx_ncp_conn,
+ (const unsigned short *)(arg));
}
case SIOCGSTAMP:
{
int ret = -EINVAL;
- if(sk)
- {
- if(sk->stamp.tv_sec == 0)
- return (-ENOENT);
+ if (sk) {
+ if (!sk->stamp.tv_sec)
+ return -ENOENT;
ret = -EFAULT;
- if(!copy_to_user((void *)arg, &sk->stamp,
+ if (!copy_to_user((void *)arg, &sk->stamp,
sizeof(struct timeval)))
ret = 0;
}
- return (ret);
+ return ret;
}
case SIOCGIFDSTADDR:
@@ -2505,14 +2369,14 @@ static int ipx_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
- return (-EINVAL);
+ return -EINVAL;
default:
- return (dev_ioctl(cmd,(void *) arg));
+ return dev_ioctl(cmd,(void *) arg);
}
/*NOT REACHED*/
- return (0);
+ return 0;
}
/*
@@ -2521,19 +2385,19 @@ static int ipx_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
int ipx_register_spx(struct proto_ops **p, struct net_proto_family *spx)
{
- if(spx_family_ops!=NULL)
+ if (spx_family_ops)
return -EBUSY;
cli();
MOD_INC_USE_COUNT;
- *p=&ipx_dgram_ops;
- spx_family_ops=spx;
+ *p = &ipx_dgram_ops;
+ spx_family_ops = spx;
sti();
return 0;
}
int ipx_unregister_spx(void)
{
- spx_family_ops=NULL;
+ spx_family_ops = NULL;
MOD_DEC_USE_COUNT;
return 0;
}
@@ -2576,7 +2440,7 @@ static struct packet_type ipx_8023_packet_type =
__constant_htons(ETH_P_802_3),
NULL, /* All devices */
ipx_rcv,
- NULL,
+ (void *) 1, /* yap, I understand shared skbs :-) */
NULL,
};
@@ -2585,7 +2449,7 @@ static struct packet_type ipx_dix_packet_type =
__constant_htons(ETH_P_IPX),
NULL, /* All devices */
ipx_rcv,
- NULL,
+ (void *) 1, /* yap, I understand shared skbs :-) */
NULL,
};
@@ -2604,12 +2468,9 @@ extern void destroy_8023_client(struct datalink_proto *);
static unsigned char ipx_8022_type = 0xE0;
static unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
-
-
-
static int __init ipx_init(void)
{
- (void) sock_register(&ipx_family_ops);
+ sock_register(&ipx_family_ops);
pEII_datalink = make_EII_client();
dev_add_pack(&ipx_dix_packet_type);
@@ -2617,35 +2478,34 @@ static int __init ipx_init(void)
p8023_datalink = make_8023_client();
dev_add_pack(&ipx_8023_packet_type);
- if((p8022_datalink = register_8022_client(ipx_8022_type,ipx_rcv)) == NULL)
+ p8022_datalink = register_8022_client(ipx_8022_type,ipx_rcv);
+ if (!p8022_datalink)
printk(KERN_CRIT "IPX: Unable to register with 802.2\n");
- if((pSNAP_datalink = register_snap_client(ipx_snap_id,ipx_rcv)) == NULL)
+ pSNAP_datalink = register_snap_client(ipx_snap_id,ipx_rcv);
+ if (!pSNAP_datalink)
printk(KERN_CRIT "IPX: Unable to register with SNAP\n");
register_netdevice_notifier(&ipx_dev_notifier);
-
#ifdef CONFIG_PROC_FS
proc_net_create("ipx", 0, ipx_get_info);
proc_net_create("ipx_interface", 0, ipx_interface_get_info);
proc_net_create("ipx_route", 0, ipx_rt_get_info);
#endif
-
- printk(KERN_INFO "NET4: Linux IPX 0.42v4 for NET4.0\n");
+ printk(KERN_INFO "NET4: Linux IPX 0.43 for NET4.0\n");
printk(KERN_INFO "IPX Portions Copyright (c) 1995 Caldera, Inc.\n");
printk(KERN_INFO "IPX Portions Copyright (c) 2000 Conectiva, Inc.\n");
return 0;
}
+
module_init(ipx_init);
/* Higher layers need this info to prep tx pkts */
int ipx_if_offset(unsigned long ipx_net_number)
{
- ipx_route *rt = NULL;
-
- rt = ipxrtr_lookup(ipx_net_number);
+ ipx_route *rt = ipxrtr_lookup(ipx_net_number);
- return (rt ? rt->ir_intrfc->if_ipx_offset : -ENETUNREACH);
+ return rt ? rt->ir_intrfc->if_ipx_offset : -ENETUNREACH;
}
/* Export symbols for higher layers */
@@ -2683,24 +2543,22 @@ static void ipx_proto_finito(void)
unregister_netdevice_notifier(&ipx_dev_notifier);
unregister_snap_client(ipx_snap_id);
- pSNAP_datalink = NULL;
+ pSNAP_datalink = NULL;
unregister_8022_client(ipx_8022_type);
- p8022_datalink = NULL;
+ p8022_datalink = NULL;
dev_remove_pack(&ipx_8023_packet_type);
destroy_8023_client(p8023_datalink);
- p8023_datalink = NULL;
+ p8023_datalink = NULL;
dev_remove_pack(&ipx_dix_packet_type);
destroy_EII_client(pEII_datalink);
- pEII_datalink = NULL;
-
- (void) sock_unregister(ipx_family_ops.family);
+ pEII_datalink = NULL;
- return;
+ sock_unregister(ipx_family_ops.family);
}
+
module_exit(ipx_proto_finito);
#endif /* MODULE */
-
#endif /* CONFIG_IPX || CONFIG_IPX_MODULE */
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index e5504ee96..250b22ae9 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2409,6 +2409,7 @@ int __init irda_proto_init(void)
#endif
return 0;
}
+module_init(irda_proto_init);
/*
* Function irda_proto_cleanup (void)
@@ -2429,11 +2430,9 @@ void irda_proto_cleanup(void)
return;
}
-module_init(irda_proto_init);
module_exit(irda_proto_cleanup);
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("The Linux IrDA Protocol Subsystem");
MODULE_PARM(irda_debug, "1l");
#endif /* MODULE */
-
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 550519e0b..43ed8e26b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -252,7 +252,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
- } while (i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+ } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 76f2fc394..a66fd2b45 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -39,16 +39,22 @@
* x:y y>0 y+1 use entry [y]
* ... ... ...
* x:indices-1 indices use entry [indices-1]
+ * ... ... ...
+ * x:y y+1 use entry [y & (indices-1)]
+ * ... ... ...
+ * 0xffff 0x10000 use entry [indices-1]
*/
+#define NO_DEFAULT_INDEX (1 << 16)
+
struct dsmark_qdisc_data {
struct Qdisc *q;
struct tcf_proto *filter_list;
__u8 *mask; /* "owns" the array */
__u8 *value;
__u16 indices;
- __u16 default_index;
+ __u32 default_index; /* index range is 0...0xffff */
int set_tc_index;
};
@@ -217,7 +223,7 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
case TC_POLICE_UNSPEC:
/* fall through */
default:
- if (p->default_index)
+ if (p->default_index != NO_DEFAULT_INDEX)
skb->tc_index = p->default_index;
break;
};
@@ -325,14 +331,12 @@ int dsmark_init(struct Qdisc *sch,struct rtattr *opt)
if (tmp & 1)
return -EINVAL;
}
- p->default_index = 0;
+ p->default_index = NO_DEFAULT_INDEX;
if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) {
if (RTA_PAYLOAD(tb[TCA_DSMARK_DEFAULT_INDEX-1]) < sizeof(__u16))
return -EINVAL;
p->default_index =
*(__u16 *) RTA_DATA(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
- if (!p->default_index || p->default_index >= p->indices)
- return -EINVAL;
}
p->set_tc_index = !!tb[TCA_DSMARK_SET_TC_INDEX-1];
p->mask = kmalloc(p->indices*2,GFP_KERNEL);
@@ -411,9 +415,11 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
rta = (struct rtattr *) b;
RTA_PUT(skb,TCA_OPTIONS,0,NULL);
RTA_PUT(skb,TCA_DSMARK_INDICES,sizeof(__u16),&p->indices);
- if (p->default_index)
- RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16),
- &p->default_index);
+ if (p->default_index != NO_DEFAULT_INDEX) {
+ __u16 tmp = p->default_index;
+
+ RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16), &tmp);
+ }
if (p->set_tc_index)
RTA_PUT(skb, TCA_DSMARK_SET_TC_INDEX, 0, NULL);
rta->rta_len = skb->tail-b;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 4983e898e..3a48c3ebf 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -110,12 +110,9 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
unsigned long qave=0;
int i=0;
- if (!t->initd) {
- DPRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
- if (q->backlog <= q->limit) {
- __skb_queue_tail(&sch->q, skb);
- return NET_XMIT_DROP; /* @@@@ */
- }
+ if (!t->initd && skb_queue_len(&sch->q) <= sch->dev->tx_queue_len) {
+ D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
+ goto do_enqueue;
}
@@ -179,11 +176,12 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
q->qcount = -1;
enqueue:
if (q->backlog <= q->limit) {
+ q->backlog += skb->len;
+do_enqueue:
__skb_queue_tail(&sch->q, skb);
sch->stats.backlog += skb->len;
sch->stats.bytes += skb->len;
sch->stats.packets++;
- q->backlog += skb->len;
return 0;
} else {
q->pdrop++;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index a295ecaf7..a6af993c8 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -267,22 +267,26 @@ rpcauth_holdcred(struct rpc_task *task)
dprintk("RPC: %4d holding %s cred %p\n",
task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred);
if (task->tk_msg.rpc_cred) {
+ spin_lock(&rpc_credcache_lock);
task->tk_msg.rpc_cred->cr_count++;
task->tk_msg.rpc_cred->cr_expire = jiffies + task->tk_auth->au_expire;
+ spin_unlock(&rpc_credcache_lock);
}
}
void
rpcauth_releasecred(struct rpc_auth *auth, struct rpc_cred *cred)
{
+ spin_lock(&rpc_credcache_lock);
if (cred != NULL && cred->cr_count > 0) {
- cred->cr_count--;
- if (cred->cr_flags & RPCAUTH_CRED_DEAD) {
+ if (!--cred->cr_count && (cred->cr_flags & RPCAUTH_CRED_DEAD)) {
+ spin_unlock(&rpc_credcache_lock);
rpcauth_remove_credcache(auth, cred);
- if (!cred->cr_count)
- rpcauth_crdestroy(auth, cred);
+ rpcauth_crdestroy(auth, cred);
+ return;
}
}
+ spin_unlock(&rpc_credcache_lock);
}
void
@@ -335,13 +339,19 @@ rpcauth_invalcred(struct rpc_task *task)
{
dprintk("RPC: %4d invalidating %s cred %p\n",
task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred);
+ spin_lock(&rpc_credcache_lock);
if (task->tk_msg.rpc_cred)
task->tk_msg.rpc_cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+ spin_unlock(&rpc_credcache_lock);
}
int
rpcauth_uptodatecred(struct rpc_task *task)
{
- return !(task->tk_msg.rpc_cred) ||
+ int retval;
+ spin_lock(&rpc_credcache_lock);
+ retval = !(task->tk_msg.rpc_cred) ||
(task->tk_msg.rpc_cred->cr_flags & RPCAUTH_CRED_UPTODATE);
+ spin_unlock(&rpc_credcache_lock);
+ return retval;
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index e53759e3e..1c5fd3740 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -36,6 +36,7 @@ EXPORT_SYMBOL(rpciod_down);
EXPORT_SYMBOL(rpciod_up);
EXPORT_SYMBOL(rpc_new_task);
EXPORT_SYMBOL(rpc_wake_up_status);
+EXPORT_SYMBOL(rpc_release_task);
/* RPC client functions */
EXPORT_SYMBOL(rpc_create_client);
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index bda8d54d8..61f948390 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -26,10 +26,6 @@ extern ctl_table ipx_table[];
extern ctl_table core_table[];
-#ifdef CONFIG_UNIX
-extern ctl_table unix_table[];
-#endif
-
#ifdef CONFIG_NET
extern ctl_table ether_table[], e802_table[];
#endif
@@ -48,9 +44,6 @@ extern ctl_table econet_table[];
ctl_table net_table[] = {
{NET_CORE, "core", NULL, 0, 0555, core_table},
-#ifdef CONFIG_UNIX
- {NET_UNIX, "unix", NULL, 0, 0555, unix_table},
-#endif
#ifdef CONFIG_NET
{NET_802, "802", NULL, 0, 0555, e802_table},
{NET_ETHER, "ethernet", NULL, 0, 0555, ether_table},
diff --git a/net/unix/Makefile b/net/unix/Makefile
index 88a262671..9a840afde 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -16,5 +16,3 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_unix.o
include $(TOPDIR)/Rules.make
-tar:
- tar -cvf /dev/f1 .
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e48b8549a..121e4d9de 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1,5 +1,5 @@
/*
- * NET3: Implementation of BSD Unix domain sockets.
+ * NET4: Implementation of BSD Unix domain sockets.
*
* Authors: Alan Cox, <alan.cox@linux.org>
*
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.108 2000/11/10 04:02:04 davem Exp $
+ * Version: $Id: af_unix.c,v 1.109 2001/01/06 00:42:23 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
@@ -124,13 +124,12 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
#define UNIX_ABSTRACT(sk) ((sk)->protinfo.af_unix.addr->hash!=UNIX_HASH_SIZE)
/*
- SMP locking strategy.
- * hash table is protceted with rwlock unix_table_lock
- * each socket state is protected by separate rwlock.
-
+ * SMP locking strategy:
+ * hash table is protected with rwlock unix_table_lock
+ * each socket state is protected by separate rwlock.
*/
-extern __inline__ unsigned unix_hash_fold(unsigned hash)
+static inline unsigned unix_hash_fold(unsigned hash)
{
hash ^= hash>>16;
hash ^= hash>>8;
@@ -139,17 +138,17 @@ extern __inline__ unsigned unix_hash_fold(unsigned hash)
#define unix_peer(sk) ((sk)->pair)
-extern __inline__ int unix_our_peer(unix_socket *sk, unix_socket *osk)
+static inline int unix_our_peer(unix_socket *sk, unix_socket *osk)
{
return unix_peer(osk) == sk;
}
-extern __inline__ int unix_may_send(unix_socket *sk, unix_socket *osk)
+static inline int unix_may_send(unix_socket *sk, unix_socket *osk)
{
return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
}
-static __inline__ unix_socket * unix_peer_get(unix_socket *s)
+static inline unix_socket * unix_peer_get(unix_socket *s)
{
unix_socket *peer;
@@ -161,7 +160,7 @@ static __inline__ unix_socket * unix_peer_get(unix_socket *s)
return peer;
}
-extern __inline__ void unix_release_addr(struct unix_address *addr)
+extern inline void unix_release_addr(struct unix_address *addr)
{
if (atomic_dec_and_test(&addr->refcnt))
kfree(addr);
@@ -231,14 +230,14 @@ static void __unix_insert_socket(unix_socket **list, unix_socket *sk)
sock_hold(sk);
}
-static __inline__ void unix_remove_socket(unix_socket *sk)
+static inline void unix_remove_socket(unix_socket *sk)
{
write_lock(&unix_table_lock);
__unix_remove_socket(sk);
write_unlock(&unix_table_lock);
}
-static __inline__ void unix_insert_socket(unix_socket **list, unix_socket *sk)
+static inline void unix_insert_socket(unix_socket **list, unix_socket *sk)
{
write_lock(&unix_table_lock);
__unix_insert_socket(list, sk);
@@ -258,7 +257,7 @@ static unix_socket *__unix_find_socket_byname(struct sockaddr_un *sunname,
return NULL;
}
-static __inline__ unix_socket *
+static inline unix_socket *
unix_find_socket_byname(struct sockaddr_un *sunname,
int len, int type, unsigned hash)
{
@@ -291,7 +290,7 @@ static unix_socket *unix_find_socket_byinode(struct inode *i)
return s;
}
-static __inline__ int unix_writable(struct sock *sk)
+static inline int unix_writable(struct sock *sk)
{
return ((atomic_read(&sk->wmem_alloc)<<2) <= sk->sndbuf);
}
@@ -1823,7 +1822,7 @@ struct proto_ops unix_stream_ops = {
struct proto_ops unix_dgram_ops = {
family: PF_UNIX,
-
+
release: unix_release,
bind: unix_bind,
connect: unix_dgram_connect,
@@ -1842,20 +1841,25 @@ struct proto_ops unix_dgram_ops = {
};
struct net_proto_family unix_family_ops = {
- PF_UNIX,
- unix_create
+ family: PF_UNIX,
+ create: unix_create
};
#ifdef CONFIG_SYSCTL
extern void unix_sysctl_register(void);
extern void unix_sysctl_unregister(void);
+#else
+static inline unix_sysctl_register() {};
+static inline unix_sysctl_unregister() {};
#endif
+static const char banner[] __initdata = KERN_INFO "NET4: Unix domain sockets 1.0/SMP for Linux NET4.0.\n";
+
static int __init af_unix_init(void)
{
struct sk_buff *dummy_skb;
-
- printk(KERN_INFO "NET4: Unix domain sockets 1.0/SMP for Linux NET4.0.\n");
+
+ printk(banner);
if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb))
{
printk(KERN_CRIT "unix_proto_init: panic\n");
@@ -1865,23 +1869,15 @@ static int __init af_unix_init(void)
#ifdef CONFIG_PROC_FS
create_proc_read_entry("net/unix", 0, 0, unix_read_proc, NULL);
#endif
-
-#ifdef CONFIG_SYSCTL
unix_sysctl_register();
-#endif
-
return 0;
}
static void __exit af_unix_exit(void)
{
sock_unregister(PF_UNIX);
-#ifdef CONFIG_SYSCTL
unix_sysctl_unregister();
-#endif
-#ifdef CONFIG_PROC_FS
remove_proc_entry("net/unix", 0);
-#endif
}
module_init(af_unix_init);
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 885119da3..eceea5c95 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -1,10 +1,8 @@
/*
- * NET3: Sysctl interface to net af_unix subsystem.
+ * NET4: Sysctl interface to net af_unix subsystem.
*
* Authors: Mike Shaver.
*
- * Added /proc/sys/net/unix directory entry (empty =) ).
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -13,9 +11,6 @@
#include <linux/mm.h>
#include <linux/sysctl.h>
-#include <linux/config.h>
-
-#ifdef CONFIG_SYSCTL
extern int sysctl_unix_max_dgram_qlen;
@@ -26,20 +21,18 @@ ctl_table unix_table[] = {
{0}
};
-static struct ctl_table_header * unix_sysctl_header;
-static struct ctl_table unix_root_table[];
-static struct ctl_table unix_net_table[];
-
-ctl_table unix_root_table[] = {
- {CTL_NET, "net", NULL, 0, 0555, unix_net_table},
+static ctl_table unix_net_table[] = {
+ {NET_UNIX, "unix", NULL, 0, 0555, unix_table},
{0}
};
-ctl_table unix_net_table[] = {
- {NET_UNIX, "unix", NULL, 0, 0555, unix_table},
+static ctl_table unix_root_table[] = {
+ {CTL_NET, "net", NULL, 0, 0555, unix_net_table},
{0}
};
+static struct ctl_table_header * unix_sysctl_header;
+
void unix_sysctl_register(void)
{
unix_sysctl_header = register_sysctl_table(unix_root_table, 0);
@@ -50,4 +43,3 @@ void unix_sysctl_unregister(void)
unregister_sysctl_table(unix_sysctl_header);
}
-#endif /* CONFIG_SYSCTL */
diff --git a/net/x25/Makefile b/net/x25/Makefile
index 0890eff84..d0082933f 100644
--- a/net/x25/Makefile
+++ b/net/x25/Makefile
@@ -17,5 +17,3 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_x25.o
include $(TOPDIR)/Rules.make
-tar:
- tar -cvf /dev/f1 .
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index c8f8c3e97..a938f3bc1 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1084,6 +1084,9 @@ static int x25_recvmsg(struct socket *sock, struct msghdr *msg, int size, int fl
msg->msg_namelen = sizeof(struct sockaddr_x25);
skb_free_datagram(sk, skb);
+ lock_sock(sk);
+ x25_check_rbuf(sk);
+ release_sock(sk);
return copied;
}
@@ -1258,8 +1261,8 @@ static int x25_get_info(char *buffer, char **start, off_t offset, int length)
}
struct net_proto_family x25_family_ops = {
- AF_X25,
- x25_create
+ family: AF_X25,
+ create: x25_create,
};
static struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
@@ -1286,18 +1289,13 @@ static struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
SOCKOPS_WRAP(x25_proto, AF_X25);
-static struct packet_type x25_packet_type =
-{
- 0, /* MUTTER ntohs(ETH_P_X25),*/
- 0, /* copy */
- x25_lapb_receive_frame,
- NULL,
- NULL,
+static struct packet_type x25_packet_type = {
+ type: __constant_htons(ETH_P_X25),
+ func: x25_lapb_receive_frame,
};
struct notifier_block x25_dev_notifier = {
- x25_device_event,
- 0
+ notifier_call: x25_device_event,
};
void x25_kill_by_neigh(struct x25_neigh *neigh)
@@ -1317,7 +1315,6 @@ static int __init x25_init(void)
#endif /* MODULE */
sock_register(&x25_family_ops);
- x25_packet_type.type = htons(ETH_P_X25);
dev_add_pack(&x25_packet_type);
register_netdevice_notifier(&x25_dev_notifier);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index fbc781dce..1abb28358 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -18,7 +18,6 @@
*/
#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -249,8 +248,3 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *neigh)
dev_queue_xmit(skb);
}
-
-#endif
-
-
-
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index ad41d1cde..adf757e56 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -18,8 +18,6 @@
* negotiation.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -232,7 +230,3 @@ void x25_limit_facilities(struct x25_facilities *facilities,
}
}
}
-
-#endif
-
-
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index bcb5f1cf4..73a567e89 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -22,8 +22,6 @@
* i-frames.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -369,5 +367,3 @@ int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-
-#endif
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index d6b878371..841499325 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -20,8 +20,6 @@
* 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -42,9 +40,10 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <net/x25.h>
-static struct x25_neigh *x25_neigh_list = NULL;
+static struct x25_neigh *x25_neigh_list /* = NULL initially */;
static void x25_t20timer_expiry(unsigned long);
@@ -422,12 +421,11 @@ int x25_subscr_ioctl(unsigned int cmd, void *arg)
return 0;
}
-#ifdef MODULE
/*
* Release all memory associated with X.25 neighbour structures.
*/
-void x25_link_free(void)
+void __exit x25_link_free(void)
{
struct x25_neigh *neigh, *x25_neigh = x25_neigh_list;
@@ -438,7 +436,3 @@ void x25_link_free(void)
x25_remove_neigh(neigh);
}
}
-
-#endif
-
-#endif
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index b3ce30477..2521078a6 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -21,8 +21,6 @@
* needed cleaned seq-number fields.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -229,5 +227,3 @@ void x25_enquiry_response(struct sock *sk)
x25_stop_timer(sk);
}
-
-#endif
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 4cb51300b..a4651699e 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -17,7 +17,6 @@
*/
#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -42,9 +41,10 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
+#include <linux/init.h>
#include <net/x25.h>
-static struct x25_route *x25_route_list = NULL;
+static struct x25_route *x25_route_list /* = NULL initially */;
/*
* Add a new route.
@@ -255,12 +255,10 @@ int x25_routes_get_info(char *buffer, char **start, off_t offset, int length)
return len;
}
-#ifdef MODULE
-
/*
* Release all memory associated with X.25 routing structures.
*/
-void x25_route_free(void)
+void __exit x25_route_free(void)
{
struct x25_route *route, *x25_route = x25_route_list;
@@ -271,7 +269,3 @@ void x25_route_free(void)
x25_remove_route(route);
}
}
-
-#endif
-
-#endif
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 25a700af9..635c872e5 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -19,8 +19,6 @@
* negotiation.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -351,4 +349,18 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, unsigned c
sk->dead = 1;
}
-#endif
+/*
+ * Clear an own-rx-busy condition and tell the peer about this, provided
+ * that there is a significant amount of free receive buffer space available.
+ */
+void x25_check_rbuf(struct sock *sk)
+{
+ if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ (sk->protinfo.x25->condition & X25_COND_OWN_RX_BUSY)) {
+ sk->protinfo.x25->condition &= ~X25_COND_OWN_RX_BUSY;
+ sk->protinfo.x25->condition &= ~X25_COND_ACK_PENDING;
+ sk->protinfo.x25->vl = sk->protinfo.x25->vr;
+ x25_write_internal(sk, X25_RR);
+ x25_stop_timer(sk);
+ }
+}
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 20672419e..cf9b42606 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -18,8 +18,6 @@
* Centralised disconnection processing.
*/
-#include <linux/config.h>
-#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -121,6 +119,11 @@ static void x25_heartbeat_expiry(unsigned long param)
{
struct sock *sk = (struct sock *)param;
+ bh_lock_sock(sk);
+ if (sk->lock.users) { /* can currently only occur in state 3 */
+ goto restart_heartbeat;
+ }
+
switch (sk->protinfo.x25->state) {
case X25_STATE_0:
@@ -128,7 +131,7 @@ static void x25_heartbeat_expiry(unsigned long param)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sk->destroy || (sk->state == TCP_LISTEN && sk->dead)) {
x25_destroy_socket(sk);
- return;
+ goto unlock;
}
break;
@@ -136,29 +139,21 @@ static void x25_heartbeat_expiry(unsigned long param)
/*
* Check for the state of the receive buffer.
*/
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
- (sk->protinfo.x25->condition & X25_COND_OWN_RX_BUSY)) {
- sk->protinfo.x25->condition &= ~X25_COND_OWN_RX_BUSY;
- sk->protinfo.x25->condition &= ~X25_COND_ACK_PENDING;
- sk->protinfo.x25->vl = sk->protinfo.x25->vr;
- x25_write_internal(sk, X25_RR);
- x25_stop_timer(sk);
- break;
- }
+ x25_check_rbuf(sk);
break;
}
-
+ restart_heartbeat:
x25_start_heartbeat(sk);
+ unlock:
+ bh_unlock_sock(sk);
}
/*
* Timer has expired, it may have been T2, T21, T22, or T23. We can tell
* by the state machine state.
*/
-static void x25_timer_expiry(unsigned long param)
+static inline void x25_do_timer_expiry(struct sock * sk)
{
- struct sock *sk = (struct sock *)param;
-
switch (sk->protinfo.x25->state) {
case X25_STATE_3: /* T2 */
@@ -181,4 +176,17 @@ static void x25_timer_expiry(unsigned long param)
}
}
-#endif
+static void x25_timer_expiry(unsigned long param)
+{
+ struct sock *sk = (struct sock *)param;
+
+ bh_lock_sock(sk);
+ if (sk->lock.users) { /* can currently only occur in state 3 */
+ if (sk->protinfo.x25->state == X25_STATE_3) {
+ x25_start_t2timer(sk);
+ }
+ } else {
+ x25_do_timer_expiry(sk);
+ }
+ bh_unlock_sock(sk);
+}
diff --git a/scripts/checkconfig.pl b/scripts/checkconfig.pl
index fc352e436..8868471fd 100644
--- a/scripts/checkconfig.pl
+++ b/scripts/checkconfig.pl
@@ -14,6 +14,7 @@ foreach $file (@ARGV)
# Initialize variables.
my $fInComment = 0;
+ my $fInString = 0;
my $fUseConfig = 0;
my $iLinuxConfig = 0;
my %configList = ();
@@ -24,6 +25,10 @@ foreach $file (@ARGV)
$fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next);
m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1)));
+ # Strip strings.
+ $fInString && (s+^.*?"+ +o ? ($fInString = 0) : next);
+ m+"+o && (s+".*?"+ +go, (s+".*$+ +o && ($fInString = 1)));
+
# Pick up definitions.
if ( m/^\s*#/o )
{