summaryrefslogtreecommitdiffstats
path: root/fs/hfs
diff options
context:
space:
mode:
Diffstat (limited to 'fs/hfs')
-rw-r--r--fs/hfs/.cvsignore2
-rw-r--r--fs/hfs/ChangeLog2330
-rw-r--r--fs/hfs/FAQ.txt342
-rw-r--r--fs/hfs/HFS.txt1042
-rw-r--r--fs/hfs/INSTALL.txt126
-rw-r--r--fs/hfs/Makefile18
-rw-r--r--fs/hfs/TODO54
-rw-r--r--fs/hfs/balloc.c437
-rw-r--r--fs/hfs/bdelete.c483
-rw-r--r--fs/hfs/bfind.c322
-rw-r--r--fs/hfs/bins_del.c231
-rw-r--r--fs/hfs/binsert.c541
-rw-r--r--fs/hfs/bitmap.c412
-rw-r--r--fs/hfs/bitops.c124
-rw-r--r--fs/hfs/bnode.c540
-rw-r--r--fs/hfs/brec.c239
-rw-r--r--fs/hfs/btree.c316
-rw-r--r--fs/hfs/catalog.c1674
-rw-r--r--fs/hfs/dir.c400
-rw-r--r--fs/hfs/dir_cap.c402
-rw-r--r--fs/hfs/dir_dbl.c464
-rw-r--r--fs/hfs/dir_nat.c487
-rw-r--r--fs/hfs/extent.c808
-rw-r--r--fs/hfs/file.c531
-rw-r--r--fs/hfs/file_cap.c297
-rw-r--r--fs/hfs/file_hdr.c940
-rw-r--r--fs/hfs/hfs.h532
-rw-r--r--fs/hfs/hfs_btree.h268
-rw-r--r--fs/hfs/inode.c427
-rw-r--r--fs/hfs/mdb.c298
-rw-r--r--fs/hfs/part_tbl.c244
-rw-r--r--fs/hfs/string.c152
-rw-r--r--fs/hfs/super.c527
-rw-r--r--fs/hfs/sysdep.c103
-rw-r--r--fs/hfs/trans.c556
-rw-r--r--fs/hfs/version.c10
36 files changed, 16679 insertions, 0 deletions
diff --git a/fs/hfs/.cvsignore b/fs/hfs/.cvsignore
new file mode 100644
index 000000000..857dd22e9
--- /dev/null
+++ b/fs/hfs/.cvsignore
@@ -0,0 +1,2 @@
+.depend
+.*.flags
diff --git a/fs/hfs/ChangeLog b/fs/hfs/ChangeLog
new file mode 100644
index 000000000..aa465a0a6
--- /dev/null
+++ b/fs/hfs/ChangeLog
@@ -0,0 +1,2330 @@
+Wed Jan 7 19:33:33 1998 a sun <asun@zoology.washington.edu>
+
+ * inode.c
+ don't hfs_cat_put in hfs_iget. that's a bad idea and results
+ in screwed up entry counts.
+
+ * catalog.c
+ modified hfs_cat_put to undirty deleted entries without trying to
+ write them out.
+
+Tue Jan 6 14:38:24 1998 a sun <asun@zoology.washington.edu>
+
+ * version.c
+ changed it to 0.95+asun2
+
+ * sysdep.c
+ altered catalog entry pruning to make sure that an iput
+ gets done. for some reason, shrink_dcache_parent wasn't
+ doing it.
+
+ * catalog.c
+ added a global dirty list to check for pruning.
+
+Tue Jan 6 12:29:52 1998 a sun <asun@zoology.washington.edu>
+
+ * catalog.c
+ re-wrote it to be similar to 2.1.x inode.c. this should
+ at least make catalog.c SMP safe.
+
+ * hfs.h, linux/hfs_fs.h
+ moved dentry operations into hfs.h. these probably should
+ be moved somewhere else.
+
+ * super.c, dir_cap.c, dir_nat.c, dir_dbl.c, sysdep.c
+ added dentry ops to hash everything to lowercase.
+
+Sun Dec 28 22:48:53 1997 a sun <asun@zoology.washington.edu>
+
+ * sysdep.c, catalog.c, hfs.h
+ as a temporary workaround until catalog.c gets re-written,
+ i flush the dcache if we need more entries.
+
+Fri Dec 19 15:11:21 1997 a sun <asun@zoology.washington.edu>
+
+ * dir_dbl.c
+ statically allocate tmp_name instead of doing it dynamically.
+
+ NOTE: well, those pesky hfs_cat_put messages still aren't gone. in
+ addition, catalog.c needs to be modified to free up some entries
+ when the cache gets filled up.
+
+Sun Dec 14 11:51:11 1997 a sun <asun@zoology.washington.edu>
+
+ * linux/hfs_fs.h
+	moved the dentry stuff to within the #ifdef __KERNEL__
+ part of hfs_fs.h and cleaned up a little.
+
+Sun Dec 14 11:24:54 1997 a sun <asun@zoology.washington.edu>
+
+ * dir.c
+ changed hfs_rename to delete all old dentries. hfs_cat_put
+ messages on umount should be a thing of the past now.
+
+Sun Dec 14 01:12:58 1997 a sun <asun@zoology.washington.edu>
+
+ * dir.c
+ changed mark_inodes_deleted to dget/d_delete/dput the dentry
+ instead of just dropping it. the bytes available should now
+	be updated properly upon deletion.
+
+Wed Dec 10 00:01:25 1997 a sun <asun@zoology.washington.edu>
+
+ * dir.c
+ changed mark_inodes_deleted to drop the dentry instead of
+ just deleting it.
+
+ TODO: bytes available aren't being properly updated when a
+ resource fork gets deleted.
+
+Mon Dec 8 23:22:40 1997 a sun <asun@zoology.washington.edu>
+
+ * dir_cap.c, dir_nat.c, dir_dbl.c, dir.c
+ * hfs.h, linux/hfs_sysdep.h, linux/hfs_fs_i.h
+ Added code to drop ({dbl,cap,nat}_drop_dentry) invalid
+ dentries when creating or moving a file.
+
+ * inode.c
+ Added code to delete cached dentries when a file gets deleted.
+
+ * current yuckiness: there's an extra hfs_cat_put somewhere. it's
+ harmless but bothersome.
+
+Thu Dec 4 00:14:03 1997 a sun <asun@zoology.washington.edu>
+
+ * dir.c, dir_cap.c, dir_nat.c, file.c, file_hdr.c, inode.c,
+ * linux/{hfs_sysdep.h, hfs_fs.h}, version.c:
+ Completed first code dentrification sweep. It mounts! It copies!
+ It dcaches!
+
+Mon Apr 28 06:58:44 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, INSTALL.sgml, HFS.sgml:
+ Bump version to 0.95 (Woohoo! We're beta!)
+
+ * linux/hfs_fs.h:
+ Modify HFS_SB() and HFS_I() when compiled into the kernel.
+
+ * FAQ.sgml:
+ Add a new question (and its answer):
+ Why does my Macintosh show generic application and document icons?
+
+ * HFS.sgml:
+ Add some URLs and remove the (now empty) FAQ section.
+
+Sun Apr 27 22:17:01 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * HFS.sgml:
+ Don't call the version 1 headers "slightly modified".
+
+ * file_hdr.c, dir_nat.c:
+ Comment some AFPD compatibility stuff.
+
+ * FAQ.sgml:
+ Update for version 0.95.
+
+ * BUG_INFO:
+	Remove the BUG_INFO script since we no longer mention it.
+
+ * README.sgml, INSTALL.sgml, HFS.sgml, Makefile:
+ Split README.sgml into HFS.sgml and INSTALL.sgml.
+ Stop including the document sources in snapshots.
+
+ * file_hdr.c:
+ Fix hdr_truncate() not to truncate the data fork.
+
+Wed Apr 16 23:56:25 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * FAQ.sgml:
+ Bump version to 0.8.4 and add two answers:
+ How to fsck an HFS filesystem.
+ How to generate linux/version.h.
+
+ * version.c, README.sgml:
+ Bump version to 0.8.4.
+
+ * README.sgml, FAQ.sgml, Makefile:
+ Separate the FAQ from the README.
+
+ * linux/hfs_fs.h:
+ Add (struct hfs_fork) to the forward declarations.
+
+Thu Apr 10 05:47:16 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * linux/hfs_sysdep.h:
+ Work around the non-const declaration of test_bit()'s second argument.
+
+ * Makefile:
+ Use .config from the kernel source to check for MODVERSIONS.
+
+Wed Apr 9 07:57:17 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bnode.c:
+ Check the record table in each bnode as we read it from disk.
+
+ * super.c, mdb.c, hfs.h:
+ Deal with the ATTRIB_CLEAN bit of the MDB properly (in mdb.c).
+
+ * super.c, hfs.h, mdb.c:
+ Search for the alt-MDB rather than using the device size to find it.
+
+Wed Apr 9 03:39:05 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version to 0.8.3.
+
+Mon Apr 7 20:09:56 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * part_tbl.c:
+ Fix to allow bootable CDROMs (which have blocksize != 512) to mount.
+
+ * super.c:
+ Check that blk_size[MAJOR(dev)] is non-NULL before dereferencing.
+
+Sat Apr 5 10:44:42 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_btree.h, binsert.c, brec.c, bfind.c, bins_del.c, bdelete.c:
+ Make btree operations less likely to do
+ nasty things if the tree is corrupted.
+
+ * part_tbl.c, README.sgml:
+ Count partitions from 0 rather than from 1.
+
+Wed Apr 2 23:26:51 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bdelete.c:
+ Don't bother checking for oversized keys in hfs_bdelete().
+
+ * bdelete.c, bfind.c, binsert.c:
+ Verify key lengths against the maximum given for the tree.
+
+ * Makefile:
+ Check that /usr/include/linux/modversions.h exists before including it.
+ This allows compilation without CONFIG_MODVERSIONS enabled.
+
+Sat Mar 29 13:17:53 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * linux/hfs_fs.h, super.c, file_hdr.c, hfs.h, extent.c, file_cap.c,
+ dir_dbl.c, dir_nat.c, dir.c, dir_cap.c, binsert.c, catalog.c,
+ bfind.c:
+ Make (struct hfs_bkey) and (struct hfs_brec) more "abstract".
+
+ * binsert.c:
+ Remove redundant test in hfs_binsert().
+
+Sat Mar 29 05:24:23 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Fix formatting problems in README.sgml and bump version to 0.8.2.
+
+ * extent.c:
+ Fix bug that caused serious headaches with fragmented files.
+
+Fri Mar 28 00:23:18 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version to 0.8.1.
+
+ * btree.c, balloc.c:
+ Commit map nodes to buffers when new map nodes are added.
+
+Thu Mar 27 22:41:07 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * Makefile:
+ Include linux/modversions.h from the gcc command line.
+
+ * mdb.c:
+ Was updating modified date twice in hfs_mdb_commit().
+
+ * linux/hfs_sysdep.h, linux/hfs_fs.h, linux/hfs_fs_i.h,
+ linux/hfs_fs_sb.h, sysdep.c, trans.c, super.c, hfs_sysdep.h, inode.c,
+ hfs_fs_i.h, hfs_fs_sb.h, hfs_fs.h, hfs.h, file_cap.c, file_hdr.c,
+ file.c, dir_nat.c, dir_cap.c, dir_dbl.c, Makefile, dir.c:
+ Rearrange headers in preparation for inclusion in the kernel.
+
+ * hfs_fs_sb.h, hfs_fs.h:
+ Add forward declarations so other code can include these headers.
+
+ * hfs_sysdep.h:
+ Include __constant_hton[ls]() for little-endian machines.
+
+ * hfs_fs.h, hfs_sysdep.h, hfs.h:
+ Move typedefs of hfs_{byte,word,lword}_t from hfs.h to hfs_sysdep.h.
+ Include hfs_sysdep.h from hfs_fs.h.
+
+ * trans.c, super.c, part_tbl.c, string.c, inode.c, mdb.c, hfs_fs_sb.h,
+ hfs_sysdep.h, hfs_fs.h, hfs.h, hfs_btree.h, file_cap.c, file_hdr.c,
+ file.c, dir_nat.c, extent.c, dir_dbl.c, dir.c, dir_cap.c, catalog.c,
+ btree.c, bnode.c, brec.c, bitmap.c, bitops.c, bins_del.c, binsert.c,
+ bdelete.c, bfind.c, balloc.c:
+ Big type system changes in preparation for kernel inclusion:
+ '[US](8|16|32)' -> 'hfs_[us](8|16|32)' (avoids name space pollution)
+ 'hfs_name_t' -> 'struct hfs_name' (allows forward declaration)
+
+ * super.c, hfs_fs.h:
+ Add init_hfs_fs() to super.c for non-module compilation.
+
+Wed Mar 26 07:53:59 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version to 0.8.
+
+ * README.sgml:
+ Special compilation note for DEC Alpha.
+
+ * README.sgml:
+ Note status on non-Intel processors.
+
+ * hfs_fs.h:
+ Use long's for read() and write() on the Alpha.
+
+ * README.sgml:
+ Document the afpd mount option.
+
+ * inode.c:
+ Make files always writable for owner in afpd mode.
+
+Tue Mar 25 23:21:39 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * part_tbl.c:
+ Clean up the error checking code a bit.
+
+Sat Mar 22 19:43:40 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * part_tbl.c:
+ Fixed uninitialized variable in old-style partition code.
+
+ * bins_del.c, bdelete.c:
+ Fix extraneous "bad argument to shift_{left,right}" messages.
+
+ * bitops.c:
+ Note that these routines are now tested on Intel, PPC and Alpha.
+
+ * Makefile:
+	Add -fno-builtin to the CFLAGS.
+
+Fri Feb 14 10:50:14 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_sysdep.h:
+ Don't include <asm/*.h> until after <linux/types.h>.
+
+ * catalog.c:
+ Use volume create date in hashfn() rather than casting pointer to int.
+
+ * hfs.h, mdb.c:
+	Maintain volume create, modify and backup dates in struct hfs_mdb.
+
+ * hfs_fs.h:
+ Include the header for put_user BEFORE using it!
+
+ * string.c, hfs.h:
+ Make hfs_strhash() return an unsigned int.
+
+ * trans.c, version.c, super.c, mdb.c, part_tbl.c, string.c, inode.c,
+ hfs_sysdep.h, hfs_fs.h, hfs_fs_sb.h, hfs_btree.h, hfs.h, file_cap.c,
+ file_hdr.c, extent.c, dir_dbl.c, dir_nat.c, dir_cap.c, dir.c,
+ catalog.c, btree.c, bnode.c, brec.c, bitmap.c, binsert.c,
+ bins_del.c, bdelete.c, balloc.c, README.sgml, Makefile:
+ Updated copyright notices.
+
+ * trans.c, part_tbl.c, string.c, super.c, inode.c, mdb.c, hfs_fs.h,
+ hfs_fs_sb.h, hfs_sysdep.h, hfs_btree.h, hfs.h, file_cap.c,
+ file_hdr.c, dir_nat.c, extent.c, dir_cap.c, dir_dbl.c, catalog.c,
+ dir.c, brec.c, btree.c, bitmap.c, bnode.c, bdelete.c, bins_del.c,
+ binsert.c, Makefile, TODO, balloc.c:
+ First shot at portability to the DEC Alpha and non-gcc compilers.
+	This involved a significant overhaul of the type system.
+
+Tue Feb 4 04:26:54 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version to "pre-0.8-4".
+
+ * dir_nat.c:
+ Allow creat() in Netatalk .AppleDouble directories.
+
+ * dir_dbl.c:
+ Make local functions static.
+
+ * dir_dbl.c:
+ Removed unnecessary 'extern' qualifiers from forward declarations.
+
+ * file_hdr.c, TODO:
+ Fixed the 30-year time warp with afpd.
+
+ * TODO, trans.c:
+ Don't mangle the name .AppleDesktop under fork=netatalk.
+
+Mon Feb 3 23:18:45 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode.c:
+ Make header files always writable when the afpd mount option is given.
+ Otherwise it is impossible to unlock a locked file.
+
+ * TODO, inode.c:
+ Let afpd think chmod() always succeeds, so "New Folder" works right.
+
+ * super.c:
+ The 'afpd' mount option now makes 'fork=n,names=n' the default.
+
+ * TODO:
+ List the current known afpd-compatibility problems as bugs.
+
+ * file_hdr.c:
+ Make certain date changes through header files get written to disk.
+
+Sat Feb 1 02:24:12 1997 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * mdb.c:
+ Work around for Linux rounding device sizes to 1k increments.
+
+ * README.sgml:
+ Fixed a typo: "the a".
+
+Sat Dec 28 20:41:01 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * TODO:
+ Add ioctl() interface as a "missing feature."
+
+ * dir_nat.c:
+ Finish implementing the afpd-compatibility
+ mode using the new 'afpd' mount option.
+
+ * hfs_fs_sb.h, super.c:
+ Add new 'afpd' mount option.
+
+ * file_cap.c:
+ Spelling fix.
+
+Wed Dec 11 23:16:08 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * TODO, README.sgml:
+ Optimistically document the hybrid CD problem as fixed.
+
+ * part_tbl.c:
+ Fix the partition code so at least some of the hybrid
+ CDROMs that were previously rejected are now accepted.
+
+ * hfs.h:
+ Make fs_start a 32-bit integer rather than 16-bits.
+ The 16-bit value would overflow if a partition started
+ beyond the 32M mark (e.g. the Executor 2 Beta 1 CDROM).
+
+ * extent.c:
+ Fixed a typo in an error message.
+
+Tue Dec 10 14:43:46 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir_nat.c:
+ Merge in the (still dormant) afpd-compatibility changes.
+
+ * inode.c:
+ Make the .AppleDouble directory writable (again).
+
+ * version.c, README.sgml:
+ Bump version up to "pre-0.8-3".
+
+ * hfs_fs.h, file_cap.c, file_hdr.c:
+ Move AFP constants to hfs_fs.h and prefix them with "HFS_".
+
+ * dir_nat.c, inode.c:
+ Back-out changes that allowed writing to the .AppleDouble directory.
+
+ * Makefile:
+ Update rules for linuxdoc-sgml v1.5.
+
+ * extent.c:
+ Fixed serious bug in decode_extent() with handling of empty extents.
+
+ * file.c:
+ Rewrote hfs_getblk().
+ It will no longer hang if hfs_extent_map() is buggy.
+ Also halves the worst-case number of calls to hfs_extent_map().
+
+ * extent.c:
+ Fixed serious bug in decode_extent() with handling of empty extents.
+
+ * hfs_fs.h:
+ Small change so the PPC (and maybe other architectures?)
+ pick up the prototypes for the user-space access functions.
+
+ * super.c, file_cap.c, file_hdr.c, hfs_fs.h, file.c:
+ Updated for new user-space memory interface.
+
+Sun Dec 8 11:49:36 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir_nat.c:
+ Add special code for unlink(), and rename() in the .AppleDouble
+ directory and rmdir() of the .AppleDouble directory.
+
+ * inode.c:
+ Make the .AppleDouble directory writable.
+
+ * file_hdr.c:
+ Use AFP flags in version 1 headers (for Netatalk compatibility).
+
+ * trans.c:
+ Fixed bug with long names causing kernel Oops.
+
+Mon Oct 7 06:05:01 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, file_cap.c, file_hdr.c, hfs.h, extent.c, file.c, dir.c:
+ Fix types for various read/write/truncate computations.
+ Also allows compilation with 2.1.x kernels.
+
+Thu Sep 19 10:28:43 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml, version.c:
+ Bump version up to "pre-0.8-2".
+
+ * TODO:
+ Reformat the To Do list introducing prioritized categories.
+
+ * file_hdr.c, file.c:
+ Move comments about mmap() for headers from file.c to file_hdr.c.
+ Also revise the reasoning for not yet having it implemented.
+
+ * dir_nat.c, dir_cap.c, dir_dbl.c:
+ Remove 'hfs_' prefix from names of some purely local functions.
+
+ * dir_dbl.c, TODO:
+	Under AppleDouble make create(), mkdir(), mknod(), unlink()
+	and rename() check against header files when arguments start with '%'.
+
+ * super.c, hfs_fs_sb.h, hfs_fs.h, dir_dbl.c, dir_nat.c, dir_cap.c,
+ dir.c, README.sgml:
+ Fix problem that prevented creating %RootInfo or .rootinfo in all
+ directories in addition to preventing deletion from the root directory.
+
+ * TODO:
+ Remove writable header files from the To Do list.
+
+ * README.sgml:
+ Add extensive discussion of writing to HFS filesystems and
+ the format of the special files.
+
+ * file_hdr.c:
+ Generate the 'homefs' field for version 1 header files.
+
+Wed Sep 18 23:07:45 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, file_cap.c:
+ Comment the definition of (struct hfs_cap_info).
+
+ * version.c, README.sgml:
+ Bump version up to "pre-0.8-1" and update the "How can I write?" FAQ.
+
+ * file_hdr.c:
+ Implement hdr_write() and hdr_truncate()!!
+
+ * hfs_fs_i.h, inode.c:
+ Make hdr_layout per-inode (not per-file) so hdr_truncate() will work.
+
+ * file.c, hfs.h, catalog.c, extent.c, balloc.c:
+ hfs_extent_adj() now uses fork->lsize to determine the target file size.
+
+Sun Sep 15 07:55:24 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml, trans.c:
+ Prevent creation of files & directories with '\0' or ':' in their names.
+
+ * string.c, hfs_fs.h, hfs.h, dir_dbl.c, dir_nat.c, dir_cap.c:
+ With case=lower could have run off end of string.
+
+Tue Sep 10 12:05:47 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode.c:
+ Small clean up of HFS_FIL_LOCK handling.
+
+ * inode.c:
+ Fix notify_change() not to accidentally make metadata executable.
+
+ * hfs_fs.h:
+ AppleSingle files should have HFS_ITYPE_NORM.
+
+ * inode.c:
+ Return to old behavior where MdDat = i_mtime.
+
+ * dir_dbl.c:
+ Fix serious bug in hfs_dbl_readdir() that would lock-up access to a
+ directory if one tried to write to a directory they had previously read.
+
+ * file.c:
+ Fix hfs_do_write() to adjust the fork's 'lsize' if it changed.
+
+ * inode.c, file_cap.c:
+ Allow truncate() to be called even on metadata.
+ Any size changes will last only until the next iput() of the inode.
+ Truncating a header file doesn't yet truncate the resource fork.
+
+ * inode.c:
+ Allow chmod() on a directory if it doesn't actually change i_mode.
+
+ * hfs_fs.h, trans.c, super.c:
+ Rename hfs_cap2mac() to hfs_colon2mac().
+ Rename hfs_apl2mac() to hfs_prcnt2mac().
+
+ * file_hdr.c:
+ Move header construction out of hdr_read() to create hdr_build_meta().
+
+ * hfs.h:
+ Add byte-order independent conversions: U32->U16, U32->U8 and U16->U8.
+
+ * file.c, file_cap.c, hfs_fs.h:
+ Rename fix_perms() to hfs_file_fix_mode() and
+	move it from file_cap.c to file.c.
+
+ * README.sgml, super.c:
+ Make the default for the names mount option vary with the fork option.
+
+ * file_cap.c:
+ The umask was applied incorrectly in fix_perms().
+
+Mon Sep 9 13:11:28 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml:
+ Note that it compiles on m68k machines, but needs more testing.
+
+ * hfs_sysdep.h, Makefile:
+ Changes to compile unmodified on m68k (and possibly other machines).
+
+ * dir_cap.c:
+ hfs_cap_readdir() was mistakenly producing .rootinfo entries for
+ the .finderinfo and .resource subdirectories of the root directory.
+
+ * inode.c:
+ A directory's i_size was too small by 1 under CAP, so hfs_cap_readdir()
+ would omit the last directory entry. i_nlink was also too large by 1.
+
+Sun Sep 8 12:56:06 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * file_hdr.c:
+ Rewrite hdr_read() to be more efficient and to deal correctly with
+ descriptors having lengths that differ from the actual size of the data.
+
+ * file_cap.c:
+ Add write support for CAP finderinfo files!!
+
+ * super.c, inode.c, hfs_fs.h, hfs_fs_i.h, hfs_fs_sb.h, file_dbl.c,
+ file_nat.c, file_hdr.c, file.c, file_cap.c, Makefile, dir.c:
+ Generate metadata (header files and CAP finderinfo files) on-the-fly.
+ The files file_{dbl,nat}.c are merged into file_hdr.c as a result.
+
+Sat Sep 7 08:09:24 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml:
+ Fix silly grammatical error.
+
+Fri Sep 6 09:17:12 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs_sb.h, super.c:
+ No need to cast type of s_reserved.
+
+ * file_dbl.c, file_nat.c, dir_dbl.c, dir_nat.c, file_cap.c, dir_cap.c:
+ Add the missing NULL readpage and writepage entries to the inode_ops.
+
+ * file_dbl.c, file_nat.c, file.c, file_cap.c:
+ Cleanup error checking for read() and write().
+
+Thu Sep 5 05:29:53 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version up to "0.7.2".
+ User-visible changes from 0.7.0:
+ + Corrected CAP finderinfo file format.
+ + Support for more features of CAP finderinfo files.
+ + No longer requires gcc 2.7.0 or newer.
+ + Now implements mknod() system call.
+
+ * hfs_fs.h, dir_nat.c, file_cap.c, file_nat.c, README.sgml, dir_cap.c:
+ Include the CAP and Netatalk copyright notices.
+
+ * hfs_fs.h, file_cap.c:
+ Repair and improve CAP support.
+
+ * catalog.c:
+ Oops! The BkDat for new files and directories was in 1972 when
+ it should have been in 1904 (not that it matters that much).
+
+ * inode.c:
+ The HFS MdDat should be the larger of the i_mtime and i_ctime.
+
+ * README.sgml:
+ Change 'm_time' to 'i_mtime'.
+
+Wed Sep 4 13:27:35 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version up to "0.7.1".
+ User-visible changes from 0.7.0:
+ + Minor bug in CAP finderinfo file format fixed.
+ + No longer requires gcc 2.7.0 or newer.
+ + Now implements mknod() system call.
+
+ * README.sgml:
+ Removed note about needing gcc 2.7.0 or newer.
+
+ * file.c:
+ Optimize hfs_do_read() based on the fact that HFS has no holes in files.
+ Minor code formatting changes.
+
+ * hfs.h, hfs_sysdep.h, mdb.c, extent.c, file.c, btree.c, catalog.c,
+ balloc.c, bnode.c:
+ Reorganize memory management routines.
+ hfs_malloc() and hfs_free() are the main routines.
+ The macros FREE() and MALLOC() are gone.
+ HFS_NEW() and HFS_DELETE() are new 'shorthand' macros.
+
+ * btree.c:
+ Fix broken debugging code.
+
+ * super.c, hfs.h, mdb.c, part_tbl.c, Makefile:
+ Separate partition table handling into its own file.
+
+ * dir.c:
+ Spelling fixes.
+
+ * sysdep.c:
+ Oops! Error check got sense reversed while editing.
+
+ * mdb.c, sysdep.c, hfs.h, hfs_btree.h, hfs_sysdep.h, btree.c, extent.c,
+ bfind.c, bnode.c, balloc.c:
+ Make hfs_buffer a pointer to a buffer_head, rather than a buffer_head.
+
+ * hfs_fs.h, dir_cap.c, dir_dbl.c, dir_nat.c, dir.c:
+ Add a mknod() entry to the inode_operations for normal directories.
+ All it is good for is letting root create regular files.
+
+ * file_dbl.c, file_nat.c, file.c, file_cap.c, dir_cap.c, dir_dbl.c,
+ dir_nat.c:
+ Add the missing NULL entries to the end of the file_operations.
+
+ * super.c, hfs_btree.h, hfs_fs.h, mdb.c, extent.c, hfs.h, catalog.c:
+ Make the remainder of the (untested) changes
+ to allow compilation with gcc 2.6.3.
+
+ * hfs_fs.h:
+ Fix hfs_fs.h to work with gcc 2.6.3.
+
+ * hfs_fs.h:
+ (struct hfs_cap_info) should never have been 'packed'.
+
+ * BUG_INFO:
+ Use -V for getting version of module utilities.
+
+ * super.c, sysdep.c, trans.c, hfs_fs_sb.h, inode.c, hfs_fs.h,
+ hfs_fs_i.h, file_cap.c, file_dbl.c, file_nat.c, dir_dbl.c,
+ dir_nat.c, file.c, dir.c, dir_cap.c:
+ Fix up hfs_fs{,_i,_sb}.h in preparation for inclusion in kernel.
+
+Tue Sep 3 23:58:03 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs.h:
+ Change eventual destination to linux/fs/hfs rather than include/linux.
+
+ * super.c, inode.c, mdb.c, hfs_btree.h, hfs_fs.h, hfs_sysdep.h,
+ file_dbl.c, file_nat.c, hfs.h, dir_nat.c, extent.c, dir_dbl.c,
+ catalog.c, dir_cap.c, brec.c, btree.c, binsert.c, bnode.c, bdelete.c,
+ bfind.c, bins_del.c, balloc.c:
+ Replace all the swap{16,32}() stuff w/ ntohl() and friends.
+
+Fri Aug 30 09:51:23 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Rewrite installation instructions and bump version up to "0.7.0".
+
+ * Makefile:
+ Remove the INCDIR variable; we now rely on the
+ user to have the correct links in /usr/include.
+
+Mon Aug 26 12:25:41 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Reformat the documentation and bump version up to "pre-0.7-9".
+ Hopefully this will become version 0.7 in a few days.
+
+Thu Aug 22 08:00:44 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml, version.c:
+ Bump version up to "pre-0.7-8".
+
+ * file_nat.c, file_dbl.c:
+ AppleDouble headers had resource fork size in wrong byte order.
+
+Wed Aug 21 05:22:28 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version up to "pre-0.7-7".
+
+ * bnode.c:
+ Fixed a long-standing bug in hfs_bnode_lock().
+ This bug occasionally caused lock-up under heavy load.
+
+Tue Aug 20 09:15:10 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * README.sgml, version.c:
+ Bump version up to "pre-0.7-6".
+
+ * catalog.c:
+ Fix a deadlock problem in catalog readers/writers locking.
+
+ * bins_del.c:
+ hfs_bnode_update_key() was still corrupting the header node sometimes.
+
+ * catalog.c, dir.c:
+ Fix problem with extending the catalog B-tree hanging hfs_cat_commit().
+ Fix a race that could delete a non-empty directory.
+
+Sun Aug 18 23:16:43 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version to "pre-0.7-5" for test release.
+
+ * dir_cap.c, README.sgml:
+ Change ".:rootinfo:" to ".rootinfo".
+
+ * hfs_fs.h, dir_cap.c, dir_dbl.c, dir_nat.c:
+ Mangle the names as first step in hfs_{cap,dbl,nat}_lookup().
+ Use the new hfs_streq() to catch mixed case matches to the special
+ files and directories in hfs_{cap,dbl,nat}_lookup().
+ Store reserved names only once.
+
+ * dir.c, hfs.h, string.c:
+ Implement hfs_streq() which tests for string equality more
+ rapidly than hfs_strcmp() by checking for equal length first,
+ and use it when checking for reserved names.
+
+ * inode.c, TODO, dir_cap.c, dir_dbl.c, README.sgml:
+ Provide the metadata for the root directory for the CAP and AppleDouble
+ schemes in the files ".:rootinfo:" and "%RootInfo", respectively.
+
+ * TODO, super.c:
+ Add (untested) support for the old Mac Plus style of partition map.
+
+ * bdelete.c, TODO:
+	Note the possibility of bdelete() hanging on a corrupted B-tree.
+
+ * TODO:
+ Add items corresponding to some of the 'XXX' comments in the sources.
+
+ * dir_dbl.c, dir_cap.c:
+ Update comments, removing ref. to a comment that once existed in inode.c
+
+ * catalog.c:
+ Remove some redundant locking and error checks
+ that had been previously marked as questionable.
+
+Sat Aug 17 08:06:56 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * binsert.c, bfind.c, bins_del.c, balloc.c, bdelete.c:
+ Edited some comments for correctness.
+
+ * README.sgml, version.c:
+ Bump version up to "pre-0.7-4" in preparation for snapshot release.
+
+ * Makefile:
+ Have 'make dep' delete the *.o and *.s files.
+
+ * catalog.c, hfs.h, TODO, bfind.c:
+ Move looping from hfs_cat_next() into hfs_bsucc(),
+ where it can be done an entire node at a time.
+
+Fri Aug 16 05:02:59 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * TODO:
+ Add AppleShare support to the list of goals.
+
+ * trans.c, super.c, hfs_fs.h, README.sgml:
+ Add a "names=netatalk" mount option, since
+ Netatalk quotes initial periods and CAP doesn't.
+
+ * Makefile:
+ Oops! Had removed the 'include .depend' from Makefile.
+
+ * inode.c, hfs_fs.h, file_nat.c, file_dbl.c, file.c, dir_nat.c,
+ dir_dbl.c, dir_cap.c, dir.c, README.sgml:
+ Update for 2.0.1 and newer kernels.
+
+ * Makefile:
+ Get rid of ifeq stuff and use a .tmpdepend file to make sure
+ a failed 'make depend' doesn't allow a 'make hfs.o'.
+
+Wed Aug 14 01:03:01 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump version up to "pre-0.7-3" in preparation for snapshot release.
+
+ * btree.c, extent.c, bnode.c:
+ Fix up some debugging code.
+
+Tue Aug 13 12:42:12 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * version.c, README.sgml:
+ Bump revision to "pre-0.7-2".
+
+ * super.c, sysdep.c, mdb.c, file_nat.c, inode.c, file_cap.c,
+ file_dbl.c, file.c, extent.c, dir.c, catalog.c, btree.c, bnode.c,
+ balloc.c:
+ Added the remaining missing function comments.
+
+ * Makefile, README.sgml:
+ Simplify the default make rule to build the dependency file AND hfs.o.
+ Change the installation instructions to reflect the change.
+
+ * hfs.h:
+ Added missing structure comments.
+
+ * bdelete.c:
+ Merge bdelete_brec() back into hfs_bdelete().
+ Add missing function comments.
+
+
+ * extent.c:
+ Insignificant code change removing an unneeded indirection.
+
+ * btree.c, hfs_btree.h, balloc.c, bnode.c:
+ Add a 'sys_mdb' field to (struct hfs_btree).
+
+ * extent.c, hfs_sysdep.h, sysdep.c, bnode.c, balloc.c, bfind.c,
+ Makefile:
+ Move hfs_buffer_read() from hfs_sysdep.h to sysdep.c so it can use
+ the symbol HFS_SECTOR_SIZE rather than the manifest constant 512.
+ Have hfs_buffer_read() print an error message,
+ and remove redundant errors from the callers.
+
+ * hfs_sysdep.h, mdb.c, super.c, file.c, hfs.h, hfs_btree.h, catalog.c,
+ extent.c, btree.c, balloc.c, bfind.c, bnode.c:
+ Get rid of the type hfs_device and the fields of that type,
+ using the type hfs_sysmdb and the 'sys_mdb' field in its place.
+
+ * Makefile:
+ Fix definition of HDRS variable.
+
+ * README.sgml, version.c:
+ Bump version up to "pre-0.7-1".
+
+ * Makefile:
+ Separate sources and headers into three groups:
+ B-tree code, HFS code and Linux code.
+
+ * bitmap.c, bitops.c, hfs.h, hfs_sysdep.h, balloc.c:
+ Implemented portable set of bit operations in hfs_sysdep.h
+
+ * mdb.c, hfs_sysdep.h, hfs_btree.h, extent.c, btree.c, bitmap.c,
+ bnode.c, balloc.c:
+ Implement a portable set of buffer operations in hfs_sysdep.h
+
+ * TODO:
+ Remove note about separating header files into two parts.
+
+ * catalog.c:
+ Remove call to hfs_mdb_dirty(), since the hfs_brec_relse() does it.
+
+ * hfs.h, extent.c, file.c:
+ Move hfs_getblk() from extent.c to file.c, since that is now the
+ only file that actually uses it.
+
+ * balloc.c:
+ Replace use of hfs_getblk() in balloc.c with a local function
+ (get_new_node()) that doesn't retry, since B-trees can't shrink.
+
+ * hfs.h, hfs_btree.h, hfs_sysdep.h, mdb.c, extent.c:
+ Make hfs_buffer a typedef.
+
+ * inode.c, hfs.h, hfs_sysdep.h, dir.c:
+ Change hfs_sysentry to a typedef.
+ Rename 'sysentry' field of (struct hfs_cat_entry) to 'sys_entry'.
+
+ * super.c, mdb.c, catalog.c:
+ Rename hfs_cat_sync() to hfs_cat_commit() and call it
+ from hfs_mdb_commit() rather than from hfs_write_super().
+
+ * catalog.c, file.c:
+ Minimize the calls to hfs_mdb_dirty(). Now called when:
+ 1) A buffer holding a volume bitmap block is dirtied.
+ 2) A dirty B-tree node is written back to the buffers.
+ 3) A dirty catalog entry is written back to the buffers.
+
+ * hfs_sysdep.h, hfs.h:
+ Make hfs_sysmdb a typedef.
+
+Sun Aug 11 08:46:10 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_sysdep.h, extent.c, hfs.h:
+ Replace hfs_mdb_{lock,unlock} with more portable
+ scheme using a wait queue in the MDB.
+
+ * hfs.h, hfs_btree.h, hfs_sysdep.h, bnode.c, catalog.c, binsert.c:
+ Make hfs_wait_queue a typedef'd pointer to a (struct wait_queue).
+ Rename hfs_wait_on() to hfs_sleep_on().
+
+ * catalog.c, hfs_sysdep.h, super.c, bfind.c, bnode.c, balloc.c:
+ Implemented hfs_dev_name() in hfs_sysdep.h
+ as a portable call to produce a device name.
+
+ * super.c, hfs.h, mdb.c:
+ Rename hfs_mdb_read() to hfs_mdb_get(), and don't take a
+ 'sys_mdb' argument. That's the caller's responsibility.
+
+ * sysdep.c, Makefile:
+ Remove the pointless file sysdep.c
+
+ * README.sgml:
+ Clean up the "System Requirements" section.
+
+Sat Aug 10 22:41:24 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * sysdep.h, sysdep.c, super.c, hfs_sysdep.h, mdb.c, string.c,
+ hfs_fs.h, hfs_fs_i.h, hfs_fs_sb.h, hfs_btree_private.h, hfs_btree.h,
+ file_cap.c, file_dbl.c, file_nat.c, hfs.h, file.c, dir_nat.c,
+ extent.c, dir.c, dir_cap.c, dir_dbl.c, catalog.c, bnode.c, brec.c,
+ btree.c, binsert.c, bitmap.c, bitops.c, bfind.c, bins_del.c,
+ Makefile, balloc.c, bdelete.c:
+ Includes the hfs.h that was missing from the previous check in.
+ MAJOR include-file cleanup:
+ hfs_btree.h merged into hfs.h
+ hfs_btree_private.h renamed hfs_btree.h
+ sysdep.h renamed hfs_sysdep.h
+ Fixed some minor portability problems revealed by the header split.
+
+ * README.sgml:
+ Add instructions for dealing with a missing linux/version.h
+
+ * hfs_fs.h, mdb.c, string.c, catalog.c, extent.c, btree.c, bitmap.c,
+ bitops.c, bnode.c, brec.c, bins_del.c, binsert.c, bdelete.c, bfind.c,
+ balloc.c:
+ Major split of hfs_fs.h into Linux-specific
+ part (hfs_fs.h) and HFS-specific part (hfs.h).
+
+ * file.c, extent.c:
+ Move hfs_getblk() from file.c to extent.c
+
+ * sysdep.h, super.c, mdb.c, hfs_fs_sb.h, hfs_fs.h, file.c, extent.c,
+ catalog.c, bnode.c, bitmap.c:
+ Make the field 's_mdb' in (struct hfs_sb_info) a pointer to
+ the MDB, rather than the actual MDB. This allowed the definition
+ of (struct hfs_mdb) to be moved from hfs_fs_sb.h to hfs_fs.h.
+
+ * ccache.c, hfs_fs.h, Makefile, catalog.c:
+ Merged ccache.c and catalog.c into the latter.
+ Moved definition of (struct hfs_cat_rec) into catalog.c
+
+ * extent.c:
+ Oops! Last set of changes didn't compile but they're OK now.
+
+ * hfs_btree.h, hfs_fs.h, mdb.c, ccache.c, extent.c, btree.c:
+ Move the definition of (struct hfs_raw_extent) inside
+ extent.c and treat it as simple array of U16's elsewhere.
+
+ * hfs_fs.h, dir_dbl.c, dir_nat.c, ccache.c, catalog.c, dir_cap.c:
+ Make hfs_cat_next() return the CNID and cdrType of the entry.
+ Now catalog.c and ccache.c are the only files which
+ depend on the structure of a catalog record on disk.
+
+ * dir.c, hfs_fs.h, catalog.c:
+ Replace hfs_cat_new_{file,dir}() with hfs_cat_{create,mkdir}()
+ which are wrappers for what used to be hfs_cat_create().
+
+ * hfs_fs.h, mdb.c, super.c, Makefile:
+ Split super.c into super.c (Linux stuff) and mdb.c (MDB stuff).
+
+ * super.c, hfs_fs_sb.h:
+ Add the MDB field 'drAtrb' to (struct hfs_mdb) as the field 'attrib'.
+
+ * hfs_fs_sb.h, super.c:
+ Split hfs_read_super() into hfs_read_super() and hfs_mdb_read().
+
+ * super.c, hfs_fs_sb.h:
+ Remove the unneeded 'hs' field from (struct hfs_mdb).
+
+ * TODO:
+ Remove item about hfs_notify_change() needing to update metadata.
+
+ * inode.c, hfs_fs.h, hfs_fs_sb.h, file_cap.c, file_dbl.c, file_nat.c,
+ file.c, dir.c:
+ Add a flags argument to hfs_{cap,dbl,nat}_buildmeta() so that
+ it only builds the parts that are currently out-of-date.
+ Call hfs_{cap,dbl,nat}_buildmeta() through hfs_update_meta()
+ in hfs_notify_change() and hfs_rename() to update the metadata.
+
+ * dir.c:
+ Make test for normal dir in update_dirs_{plus,minus}() more explicit.
+
+ * inode.c, file_cap.c, file_dbl.c, file_nat.c, dir_dbl.c, dir_nat.c,
+ file.c, README.sgml, dir_cap.c:
+ Resolve the "meta-data" vs. "metadata" rivalry in favor of the latter.
+
+ * btree.c:
+ Simplify some debugging code.
+
+ * hfs_btree_private.h, bnode.c, btree.c, balloc.c:
+ Put the in-core copy of the header node IN the
+ B-tree structure rather than just a pointer to it.
+
+ * hfs_btree_private.h, btree.c, bnode.c:
+ Have hfs_btree_commit() call hfs_bnode_commit()
+ to commit the header and root nodes.
+
+ * hfs_fs.h, super.c, hfs_btree_private.h, btree.c, hfs_btree.h,
+ balloc.c:
+ Change hfs_commit_mdb() to hfs_mdb_commit().
+ Make hfs_mdb_commit() call hfs_btree_commit().
+ Move code to update B-tree size and extent
+ from hfs_btree_extend() to hfs_btree_commit().
+ Make hfs_btree_extend() call hfs_mdb_commit().
+
+ * super.c:
+ Change hfs_commit_super() to hfs_commit_mdb().
+
+ * btree.c, bnode.c, bfind.c:
+ Fixed up broken debugging code and error messages.
+
+ * super.c, hfs_btree_private.h, btree.c, hfs_btree.h, bdelete.c,
+ binsert.c, balloc.c:
+ Now use write-back caching of B-tree header fields.
+
+ * hfs_fs.h:
+ Get rid of the add{16,32}() inlines as they are no longer used.
+
+ * hfs_btree_private.h, binsert.c, btree.c, bdelete.c, bfind.c, balloc.c:
+ All the needed fields of the B-tree header are
+ now cached for reading, but not yet writing.
+
+ * TODO:
+ Remove "Implement write count" from TODO list.
+
+ * file.c, super.c, bnode.c:
+ Implement write count.
+
+ * catalog.c:
+ Fix directory entry counting in hfs_cat_move().
+
+ * balloc.c:
+ Simplify hfs_btree_extend(), since the allocation
+ request will get rounded up to the clumpsize.
+
+ * extent.c:
+ Honor clumpsize when allocating blocks to files.
+
+ * file_cap.c, file_dbl.c, file_nat.c, super.c, dir.c, file.c,
+ ccache.c, catalog.c, balloc.c:
+ Mark 44 functions in need of commenting.
+
+ * hfs_fs_sb.h, super.c, extent.c, hfs_fs.h, ccache.c, btree.c, balloc.c:
+ Record clumpsize in allocation blocks rather than 512-byte blocks.
+
+ * sysdep.h, super.c, TODO, balloc.c, hfs_fs_sb.h:
+ Now updates the backup MDB when a B-tree grows.
+
+ * extent.c:
+ hfs_extent_free() had test against NULL backward.
+ The result is that access to a file with extents in the extents
+ B-tree would result in an infinite loop in hfs_cat_put().
+
+ * hfs_fs_sb.h, super.c, hfs_fs.h:
+ Reorganize partition map code to get size of partition
+ in preparation for dealing with the alternate MDB.
+
+Fri Aug 9 03:25:13 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * Makefile:
+ Add make rules for README.{ps,info}
+
+ * README, README.sgml, DOC, FAQ, Makefile, .cvsignore,
+ Merge CHANGES into ChangeLog.
+ Merge DOC, FAQ and README into README.sgml.
+ Add make rules for building README.{txt,dvi}
+
+ * BUG_INFO, Makefile:
+ Added a BUG_INFO script which attempts to collect some useful
+ information which I'd like to see in every bug report I receive.
+
+ * Makefile, version.c:
+ Added version.c which contains a version string.
+
+Thu Aug 8 21:48:24 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * trans.c:
+ Fix Latin-1 -> Macintosh filename mapping to change colons to pipes.
+
+ * trans.c:
+ Fixed Mac->Latin-1 translation to behave as documented for the
+ extended 8-bit characters without corresponding Latin-1 characters.
+
+ * inode.c, super.c, file.c, hfs_fs_i.h, hfs_fs_sb.h, DOC:
+ Added a conv={binary,text,auto} mount option similar to that of the
+ msdos, hpfs and iso9660 filesystems, but applying only to data forks.
+ As compared to those filesystems, HFS has the advantage that only a
+ single CR need be converted to a NL, rather than a CR/NL sequence, so
+ it is quite safe to seek in the file.
+ Additionally the 'Type' field is a far more reliable indicator of text
+ files than a file extension.
+
+ * super.c:
+ Simplified parsing of mount options.
+
+ * super.c:
+ Oops! The part=<n> mount option was being parsed in octal!
+
+ * TODO:
+ Remove "case=lower" from the list of goals.
+
+ * super.c, hfs_fs.h, hfs_fs_sb.h, string.c, dir_dbl.c, dir_nat.c,
+ dir_cap.c, DOC:
+ Resurrect the case={asis,lower} mount option.
+
+ * dir.c:
+ Simpler test for "normal" directory in update_dirs_{plus,minus}().
+
+ * hfs_fs_sb.h, super.c, dir.c, hfs_fs.h, catalog.c, DOC:
+ Add mount options to specify what Type and Creator will be used for
+ new files and change the default from NULLs to "????".
+
+Wed Aug 7 11:32:22 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * catalog.c:
+ In hfs_cat_next() use entry->cnid rather than the key of the initial
+ brec to identify the end of the directory.
+
+ * README:
+ Update for pre-0.7 version.
+
+ * hfs_fs.h:
+ Create versioned module if CONFIG_MODVERSIONS is set in linux/config.h
+
+ * TODO:
+ Note need for special steps for unaligned accesses on some machines.
+
+ * FAQ:
+ Added Q0: What is HFS?
+ Added Q7: Does hfs_fs work w/ 400k and 800k diskettes?
+ Brought Q6 (about writability) up to date.
+ Made a few other answers more verbose.
+
+Tue Aug 6 00:58:46 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * Makefile:
+ Changed 'snapshot' rule to include cvs tag command.
+
+ * hfs_fs.h, dir_cap.c, dir_dbl.c, dir_nat.c, catalog.c, ccache.c:
+ Implemented readers half of dir locking scheme so readdir() should
+ produce consistent results and count_dir_entries() is not race prone.
+
+ * catalog.c:
+ hfs_cat_move() was calling hfs_cat_decache() after changing
+ the key rather than before, corrupting the hash lists.
+
+Mon Aug 5 14:03:46 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, catalog.c:
+ Implemented the writers half of a locking scheme for directories.
+
+ * inode.c:
+ Fixed a serious bug in hfs_notify_change() that would allow a chmod()
+ on directory meta-data and would cause the directory inode (if it was
+ in memory at the time) to change into a file inode.
+
+ * inode.c:
+ Fixed a problem with write permissions on directory meta-data.
+
+ * dir_dbl.c, dir_nat.c, dir_cap.c:
+ hfs_{cap,dbl,nat}_readdir() now return the correct value in the 'd_ino'
+ field of the dirent for all cases, something I think has always been
+ done incorrectly until now.
+
+ * dir_nat.c, inode.c, dir_cap.c:
+ In hfs_{cap,nat}_lookup() take advantage of the
+ 'file_type' field of (struct hfs_inode_info).
+
+ * TODO:
+ Removed two accomplished goals (rename() and improved readdir()).
+
+ * inode.c, dir_dbl.c, dir_nat.c, hfs_fs_i.h, dir.c, dir_cap.c:
+ Rewrite hfs_{cap,dbl,nat}_readdir() to take advantage of hfs_cat_next().
+ They now use a uniform 'i_size' for all inodes for a given directory.
+ This simplifies update_dirs_{plus,minus}() and eliminates the need for
+ the 'file_size' and 'dir_link' fields of (struct hfs_inode_info).
+ For the CAP and Netatalk schemes the meta-data directories are now the
+ last entries rather than coming just after '.' and '..'. This is in
+ preparation for the day when we can write to the files in those
+ directories, and ensures that when using 'tar' to copy HFS filesystems
+ the file or directory will be created before the meta-data is written.
+ Otherwise we could be stuck writing meta-data and not knowing if it is
+ for a file or a directory!
+
+ * ccache.c:
+ Updated count_dir_entries() for new hfs_cat_next().
+
+ * hfs_fs.h, catalog.c:
+ hfs_cat{nth,next}() no longer take a 'types' argument,
+ so they now return all entries.
+ hfs_cat_next() now uses the ParID of the key to detect
+ the end of the directory.
+ hfs_cat_nth() now accepts n=0 as a valid input, requesting the thread.
+
+ * trans.c, string.c, super.c, dir_nat.c, hfs_fs.h, dir.c, dir_cap.c,
+ dir_dbl.c, catalog.c:
+ Rename (struct hfs_cname) to the more appropriate (struct hfs_pstr).
+
+ * hfs_fs.h, hfs_btree.h:
+ Move some constants from hfs_fs.h to hfs_btree.h
+
+ * bdelete.c, hfs_btree.h:
+ Remove hfs_bdelete_brec() from public B-tree interface.
+
+ * hfs_btree_private.h, hfs_fs.h, btree.c, hfs_btree.h, bnode.c, brec.c,
+ bfind.c, bins_del.c, binsert.c, balloc.c, bdelete.c, Makefile:
+ Split B-tree stuff into public and private parts:
+ brec.c split into bfind.c and brec.c
+ hfs_btree.h split into hfs_btree.h and hfs_btree_private.h
+
+ * inode.c:
+ The tests and sets of the HFS_FIL_LOCK bit where all reversed!
+
+ * hfs_fs.h, ccache.c:
+ Redo some ccache stuff, removing the 'error' field from
+ (struct hfs_cat_entry) and ensuring that hfs_cat_put()
+ will not sleep on an uninitialized entry.
+
+Sun Aug 4 23:43:28 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * sysdep.h:
+ Change swap{16,32}() back to macros since hton[ls]() are functions.
+
+ * hfs_fs.h, ccache.c:
+ Use only lowest order byte of parent CNID in hashing a catalog key.
+
+ * bdelete.c:
+ The "improved" bdelete() was TOO paranoid looking for missing parents.
+
+ * ccache.c:
+ Get rid of pointless swap16const(0).
+
+ * hfs_fs.h, inode.c, extent.c, ccache.c, dir_cap.c, dir_nat.c,
+ binsert.c, catalog.c:
+ Store cnid and associated constants in big-endian byte order.
+ This reduces the number of byte-order swaps required.
+
+ * sysdep.h:
+ Make swap32() and swap16() inline functions.
+
+ * dir_nat.c, dir_cap.c, dir_dbl.c:
+ Added hfs_rename() to the inode_operations for normal directories.
+
+ * dir.c, hfs_fs.h:
+ Added hfs_rename() and cleaned up hfs_{create,mkdir,unlink,rmdir}().
+
+ * catalog.c:
+ Added the missing check for moving a directory into itself.
+
+ * catalog.c, ccache.c, hfs_fs.h:
+ Implement a nearly ideal hfs_cat_move().
+ It still needs to prevent moving a directory into itself.
+ The functions hfs_cat_{create,delete,move}() still need work with
+ respect to their atomicity (especially vs. readdir).
+
+ * bdelete.c:
+ Fixed a serious bug in hfs_bdelete_brec() that would yield a corrupted
+ b-tree when the first record in a bnode was deleted.
+ Made bdelete() more aggressive when checking for missing parents.
+
+Sat Aug 3 06:11:50 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * btree.c, super.c:
+ Fixed a problem that caused a kernel oops when no HFS filesystem
+ is found.
+
+Wed Jul 24 13:06:12 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * catalog.c:
+ Remove race in hfs_cat_create() that could overflow directory valence.
+
+ * catalog.c:
+ Fix hfs_cat_create() so the parent directory doesn't get deleted
+ out from under it. Otherwise we could have created files and
+ directories in deleted directories.
+
+ * hfs_fs.h, dir_cap.c, dir_dbl.c, dir_nat.c, catalog.c, ccache.c:
+ Redo hfs_cat_{next,nth}() in terms of which entry types to
+ allow, rather than which to skip.
+
+ * catalog.c:
+ The function hfs_cat_create() would fail to hfs_cat_put(entry) if
+ the 'record' argument was invalid or if the 'result' argument was NULL.
+
+ * dir.c:
+ The functions hfs_{create,mkdir,unlink,rmdir} all failed to
+ call iput() when their arguments conflicted with a reserved name.
+
+ * catalog.c, hfs_fs_sb.h:
+ Start over on rename(). Still unfinished.
+ Fix silly bug in hfs_cat_create() that made it always fail.
+
+ * ccache.c:
+ Fix byte-order bug in write_entry().
+
+Tue Jul 23 12:12:58 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir_dbl.c, dir_nat.c, hfs_fs.h, dir.c, dir_cap.c:
+ Remove the macros KEY() and PARENT() since the key is now easy
+ to access through the catalog entry.
+ Replace the macros NAME{IN,OUT}() with inline functions
+ hfs_name{in,out}() to gain type checking of arguments.
+
+ * catalog.c:
+ Remove the macro TYPE().
+
+ * inode.c, file_dbl.c, file_nat.c, file.c, file_cap.c:
+ Remove the #define's of the unused macro KEY().
+
+ * hfs_fs.h, dir_cap.c, dir_dbl.c, dir_nat.c, catalog.c, dir.c:
+ Replace hfs_lookup_parent() in dir.c with hfs_cat_parent() in catalog.c.
+ This new function performs locking to protect against rename() changing
+ the parent during I/O.
+ It is also intended for use with files as well as directories.
+ Change hfs_{cap,dbl,nat}_lookup() to use the new function.
+
+ * dir.c, hfs_fs.h, catalog.c:
+ Remerge hfs_cat_{create,mkdir}() into hfs_cat_create() and resurrect
+ hfs_cat_new_{file,dir}().
+ Fix hfs_cat_{create,delete} to use the improved catalog cache for
+ locking in place of directory-level create/delete locks.
+ Fix hfs_{create,mkdir}() to use the new hfs_cat_create().
+
+ * hfs_fs.h, ccache.c:
+ Rewrite parts to remove need for specialized create/delete locking.
+ Use new case-independent hash function.
+ Fix bug in hfs_cat_get() that would read an entry w/o locking it.
+ Call hfs_relinquish() before retrying a deleted entry in hfs_cat_get.
+ If there is a read error, then don't retry in hfs_cat_get().
+ Remove unused 'version' field from (struct hfs_cat_entry).
+
+ * sysdep.h:
+ Add hfs_relinquish(), a system-independent alias for schedule().
+
+ * hfs_fs.h, string.c:
+ Add hfs_strhash(), a simplistic case-independent hash function.
+
+ * hfs_fs.h, inode.c:
+ Make hfs_iget() an inline function.
+
+ * TODO:
+ Add a few goals and removed those that have been achieved.
+
+ * Makefile:
+ Add ccache.c to list of source files.
+ Add rule for *.s files and include them in the 'clean' rule.
+
+Wed Jul 17 17:22:45 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * sysdep.h, trans.c, string.c, super.c, hfs_fs_i.h, hfs_fs_sb.h,
+ inode.c, hfs_btree.h, hfs_fs.h, file_dbl.c, file_nat.c, extent.c,
+ file.c, file_cap.c, dir_dbl.c, dir_nat.c, ccache.c, dir.c,
+ dir_cap.c, btree.c, catalog.c, bnode.c, brec.c, balloc.c:
+ Total rewrite of the inode-handling stuff to be centered around
+ a catalog entry cache (ccache.c). This results not only in a far
+ more sensible way of doing things, but also removed many race
+ conditions. (The source and object code both got smaller too!)
+ Many small "undocumented features" were also fixed.
+ Replace HFS_CNAME with (struct hfs_cname).
+ rename() has been temporarily abandoned.
+
+Thu Jul 11 01:14:38 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir.c:
+ As written hfs_lookup_parent() had two overlapping read requests
+ in the catalog tree. This could have led to deadlock.
+
+Wed Jul 10 09:27:00 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * catalog.c, hfs_fs.h, bdelete.c:
+ More work on getting rename() fleshed out. Still not done.
+ Before I can finish it looks like I'll need to build a
+ mechanism for exclusive access to the catalog tree. There
+ just doesn't seem to be any other way to get proper POSIX
+ semantics without a bunch of race conditions elsewhere.
+
+ * hfs_fs.h, inode.c, dir_cap.c, dir_dbl.c, dir_nat.c, catalog.c:
+ More work on the still incomplete rename() code.
+ Merge hfs_cat_add_{dir,file}() into hfs_cat_create().
+ Add file-thread support to hfs_cat_{create,delete,rename}.
+
+Tue Jul 9 09:43:15 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode.c, dir_dbl.c, dir_nat.c, extent.c, dir_cap.c:
+ The indirect (struct hfs_file) was causing blocks not to be freed
+ when files were deleted, and an omission in hfs_put_inode() was
+ preventing the inode from getting freed. Both are now fixed.
+
+ * hfs_fs.h, dir_dbl.c, dir_nat.c, hfs_btree.h, catalog.c, dir_cap.c,
+ bdelete.c:
+ Made unlink() and rmdir() more race resistant and did some more
+ work on the still incomplete code for rename().
+
+ * btree.c, bnode.c:
+ There was a serious race condition in the bnode cache, so
+ hfs_bnode_find() is now modeled after Linus's inode cache.
+
+Mon Jul 8 10:33:38 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs_i.h, inode.c, file_cap.c, file_dbl.c, file_nat.c, dir_dbl.c,
+ dir_nat.c, file.c, dir.c, dir_cap.c:
+ More changes to layout of (struct hfs_inode_info).
+
+ * super.c, inode_cap.c, inode_dbl.c, inode_nat.c, inode.c, hfs_fs_i.h,
+ hfs_fs_sb.h, file_nat.c, hfs_fs.h, file.c, file_cap.c, file_dbl.c,
+ Makefile, catalog.c:
+ Implemented new layout for (struct hfs_inode_info) resulting in the
+ elimination of lots of duplicated code for hfs_*_write_inode(),
+ hfs_*_put_inode() and *_open() functions.
+ Merged inode_*.c files back into inode.c.
+ Not fully tested.
+
+ * TODO:
+ Add a few more of my goals to the list.
+
+ * README:
+ Documentation updates.
+
+ * inode_nat.c, inode_cap.c, inode_dbl.c, inode.c, hfs_fs.h, hfs_fs_i.h,
+ file.c, file_cap.c, file_dbl.c, file_nat.c, catalog.c:
+ (struct hfs_file) and metadata are read when file is opened or
+ truncated and are released by iput().
+
+Sun Jul 7 23:55:43 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode_nat.c, inode_cap.c, inode_dbl.c, inode.c, dir_nat.c, hfs_fs.h,
+ hfs_fs_i.h, dir_cap.c, dir_dbl.c, catalog.c, dir.c:
+ (struct hfs_dir) is now inside (struct hfs_inode_info) once again.
+
+ * inode_nat.c, super.c, inode_cap.c, inode_dbl.c, inode.c, file_nat.c,
+ hfs_btree.h, hfs_fs.h, extent.c, file_cap.c, file_dbl.c, dir_nat.c,
+ dir_cap.c, dir_dbl.c, btree.c, catalog.c, dir.c, bpath.c, brec.c,
+ bins_del.c, binsert.c, bnode.c, bfind.c, balloc.c, bdelete.c,
+ Makefile:
+ Remerged (struct hfs_bpath) and (struct hfs_brec), merging the
+ files bfind.c and bpath.c as a resurrected brec.c.
+
+Sat Jul 6 21:47:05 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode_cap.c, inode_dbl.c, inode_nat.c, inode.c, hfs_fs.h, hfs_fs_i.h,
+ file_cap.c, file_dbl.c, file_nat.c, hfs_btree.h, dir_nat.c, extent.c,
+ dir.c, dir_cap.c, dir_dbl.c, btree.c, catalog.c, bfind.c, bpath.c,
+ binsert.c, bdelete.c:
+ Renamed (struct hfs_brec_key) to (struct hfs_bkey).
+
+Tue May 28 07:53:24 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode_cap.c, catalog.c:
+ Spelling fixes.
+
+ * inode_nat.c, super.c, inode_cap.c, inode_dbl.c, inode.c, hfs_fs.h,
+ hfs_fs_i.h, hfs_fs_sb.h, file.c, file_dbl.c, file_nat.c, dir_dbl.c,
+ dir_nat.c, extent.c, dir.c, dir_cap.c, catalog.c:
+ Structures got too big, so I had to add a layer of indirection
+ to (struct hfs_inode_info).
+ This means we must clear_inode() in inode_put().
+
+Mon May 27 01:32:42 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * catalog.c, file_cap.c:
+ Some sizeof() stuff now uses variable not type.
+
+ * hfs_fs.h:
+ Make HFS_I() and HFS_SB() inline to gain type checking.
+
+Sun May 26 13:34:17 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir_nat.c:
+ Oops. Had left some debugging printk()s in place.
+
+ * file_dbl.c, file_nat.c, file_cap.c:
+ Cleaned up variable names for consistency.
+
+ * hfs_fs_sb.h:
+ Add a couple 'const's to function typedefs.
+
+ * hfs_fs.h:
+ Add and update function prototypes.
+ Cleaned up type names.
+ Fix debugging malloc code.
+ Add hfs_iget_by_name() as an inline function.
+
+ * sysdep.h:
+ Remove extra semicolon from macro definitions.
+
+ * super.c:
+ Use new hfs_iget_by_name() to get root inode.
+
+ * extent.c:
+ Cleaned up some variable naming for consistency.
+
+ * catalog.c:
+ Added (untested) code for hfs_cat_move_file().
+
+ * catalog.c:
+ Fix one missed call to hfs_cat_build_key().
+ Make hfs_cat_add_{file,dir}() take a cat_entry as an argument.
+ Add hfs_cat_new_{file,dir}() to generate new cat_entry's.
+
+ * dir_dbl.c, dir_nat.c, dir.c, dir_cap.c:
+ Cleaned up type and variable names.
+ Updated calls to hfs_cat_build_key() and NAMEOUT()
+ Use new hfs_iget_by_*() calls.
+
+ * inode_cap.c, inode_dbl.c, inode_nat.c:
+ Cleaned up type and variable names.
+
+ * inode.c:
+ Update calls to hfs_cat_build_key().
+ Cleaned up type and variable names.
+ Implemented a hierarchy of hfs_iget_by*() calls.
+
+ * catalog.c:
+ Change hfs_cat_build_key() to take a HFS_CNAME as input.
+
+ * btree.c:
+ Initialize lsize and psize fields of file.
+
+ * trans.c:
+ Now passes type HFS_CNAME and has name/len in "normal" order.
+
+Tue May 21 07:02:34 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bnode.c:
+ Attempt to read invalid bnode would have led to an infinite loop under
+ certain circumstances. One way to cause this was with an invalid
+ partition table which points beyond the end of the device.
+
+Sat May 11 12:38:42 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * sysdep.h, sysdep.c, inode_dbl.c, inode_nat.c, super.c, inode_cap.c,
+ inode.c, hfs_fs.h, hfs_fs_i.h, hfs_fs_sb.h, file_dbl.c, file_nat.c,
+ hfs_btree.h, extent.c, file.c, file_cap.c, dir_nat.c, dir.c,
+ dir_cap.c, dir_dbl.c, btree.c, catalog.c, bitmap.c, bitops.c,
+ bnode.c, bfind.c, bins_del.c, binsert.c, balloc.c, bdelete.c:
+ Another big wave of portability-oriented changes.
+
+Tue May 7 11:28:35 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * super.c, sysdep.c, sysdep.h, inode_cap.c, inode_dbl.c, inode_nat.c,
+ hfs_fs_i.h, inode.c, file_nat.c, hfs_btree.h, hfs_fs.h, file.c,
+ file_cap.c, file_dbl.c, dir_nat.c, extent.c, dir_cap.c, dir_dbl.c,
+ btree.c, catalog.c, dir.c, bnode.c, bpath.c, binsert.c, bitmap.c,
+ bitops.c, bdelete.c, bfind.c, bins_del.c, Makefile, balloc.c:
+ Start a big move to abstract all the Linux-specific stuff
+ out of the lower levels. Created sysdep.[ch] to hold it.
+
+ * FAQ, TODO:
+ Bring some documentation up-to-date.
+
+Fri May 3 20:15:29 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * super.c, inode_dbl.c, inode_nat.c, inode.c, inode_cap.c, extent.c,
+ hfs_fs.h, hfs_fs_i.h, dir_dbl.c, dir_nat.c, catalog.c, dir.c,
+ dir_cap.c, bpath.c, btree.c, binsert.c, bnode.c:
+ "FID reform": 'fid' became 'cnid' (Catalog Node ID), and is now
+ a field in (struct hfs_file). The new name is more consistent
+ with Apple's documentation. The presence of 'cnid' in (struct
+ hfs_file) help move more of the code toward OS-independence.
+
+ * inode_nat.c, super.c, trans.c, inode.c, inode_cap.c, inode_dbl.c,
+ hfs_fs.h, file_cap.c, file_dbl.c, file_nat.c, dir_nat.c, extent.c,
+ file.c, dir.c, dir_cap.c, dir_dbl.c, btree.c, catalog.c, bnode.c,
+ bpath.c, bins_del.c, binsert.c, bitmap.c, bitops.c, bdelete.c,
+ bfind.c, balloc.c:
+ A lot of changes in what headers are included and in what order.
+
+Sat Apr 27 12:28:54 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * FAQ:
+ Updated for current writability status.
+
+ * .cvsignore:
+ Added ChangeLog.
+
+ * file_dbl.c, file_nat.c, file_cap.c, file.c, dir_dbl.c, dir_nat.c,
+ dir_cap.c:
+ Added the default fsync() to all file_operations structures.
+
+ * dir_nat.c, hfs_fs.h, dir.c, dir_cap.c, dir_dbl.c:
+ Add rmdir() for normal directories.
+
+ * binsert.c:
+ I had messed up insertion so that it would sometimes fail to
+ split the root, but it's OK now.
+
+ * dir.c:
+ hfs_do_unlink() decremented directory counts rather than file counts.
+
+Wed Apr 24 13:20:08 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, bnode.c, hfs_btree.h:
+ Fixed a couple more type size assumptions.
+
+ * hfs_fs.h, balloc.c, bitmap.c, bitops.c:
+ "Portable" bitmap handling was wrong for just about everything but
+ the i386 and the "inverse big-endian" bit ordering that I thought
+ the m68k port was using. It seems the m68k port is now using standard
+ big-endian bit-numbering conventions.
+ This code is now correct for the standard big- and little-endian bit
+ orderings. (which should cover all Linux systems?)
+ Also no longer assumes sizeof(long) == 4, though that might still be
+ a problem in other parts of the code.
+
+Tue Apr 23 19:19:27 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * FAQ:
+ Bring uptodate for this snapshot.
+
+ * Makefile:
+ Add FAQ to $(MISC)
+
+ * README, TODO:
+ Documentation updates.
+
+ * bdelete.c:
+ Spelling fixes.
+
+ * dir_cap.c:
+ In unlink() don't force metadata into memory if not present.
+
+ * bdelete.c:
+ Some function comments and some clean up.
+
+ * bins_del.c:
+ Added missing function comment for hfs_bnode_update_key().
+
+ * binsert.c, bitmap.c:
+ Spelling and grammar corrections to comments.
+
+ * hfs_btree.h, hfs_fs.h, bins_del.c, binsert.c, Makefile, bdelete.c:
+ Clean up of hfs_bdelete(), splitting bins_del.c into three files:
+ bins_del.c, binsert.c and bdelete.c
+
+ * bpath.c, bins_del.c:
+ hfs_bdelete() is now working "correctly", but needs some cleaning up.
+
+Mon Apr 22 05:35:41 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, bpath.c, hfs_btree.h, bins_del.c, bnode.c, balloc.c,
+ bfind.c:
+ Rewrite bnode handling, heading toward a more write-behind approach.
+ Have done away with HFS_LOCK_BLIND.
+
+ * inode_dbl.c, inode_nat.c, extent.c, hfs_fs_i.h, inode_cap.c:
+ Was trying to truncate resource fork of directories!
+
+Sun Apr 21 08:15:43 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * balloc.c:
+ Updated to use truncate() to grow full trees.
+
+ * extent.c, hfs_fs.h, file.c, inode.c:
+ Added truncate() for normal files.
+
+ * bins_del.c:
+ hfs_bdelete() fixes for handling removal of root.
+
+ * inode_cap.c, inode_dbl.c, inode_nat.c:
+ Release storage for deleted files in hfs_*_put_inode().
+
+ * bitmap.c:
+ Make len=0 valid for hfs_{set,clear}_vbm_bits().
+
+ * super.c, inode.c, hfs_fs_i.h, hfs_fs_sb.h, btree.c, balloc.c:
+ Changed from clumpsize to clumpblks.
+
+ * inode_nat.c, hfs_fs.h, inode_cap.c, inode_dbl.c, btree.c, extent.c,
+ balloc.c:
+ Some extent-related changes in preparation for truncate() support.
+
+Sat Apr 20 10:59:13 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * inode_nat.c, hfs_fs_i.h, inode.c, inode_cap.c, inode_dbl.c,
+ dir_nat.c, hfs_fs.h, dir.c, dir_cap.c, dir_dbl.c:
+ Removed dir.valence from hfs inode.
+ Added unlink(), but still need truncate() and some more support
+ in hfs_*_put_inode() to free the disk space used by deleted files.
+
+ * bnode.c:
+ Check for NULL bnode in hfs_bnode_relse().
+
+ * bins_del.c:
+ Fixed a byte-order problem in bdelete_nonempty().
+
+ * hfs_fs.h, bnode.c, bpath.c, hfs_btree.h, balloc.c, bins_del.c:
+ First attempt at hfs_bdelete().
+
+ * dir.c:
+ The Finder would display strange things if it couldn't set frView.
+ Therefore initialize frView field for new directories.
+
+ * file_cap.c, file_dbl.c, file_nat.c, hfs_fs.h:
+ Define User/Finder info fields of catalog entry in more detail.
+
+ * hfs_fs.h:
+ HFS_BFIND_DELETE should require exact match.
+
+ * dir.c:
+ Set "record in use" bit of filFlags for new files.
+
+ * inode.c:
+ Was doing the wrong thing with i_ctime.
+
+ * dir_nat.c, dir_cap.c, dir_dbl.c:
+ Added some missing updates to the inode in hfs_*_{create,mkdir}().
+
+Sun Apr 14 00:10:52 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, file_dbl.c, file_nat.c, file.c:
+ Work around the ever-changing type of f_reada.
+
+Sat Apr 13 00:43:41 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bpath.c, bfind.c:
+ Spelling corrections in comments.
+
+ * bins_del.c:
+ ifdef out shift_left() until it is actually used.
+
+ * hfs_btree.h, hfs_fs.h, bins_del.c, bpath.c, bfind.c:
+ Cleaned up code related to 'flags' argument to hfs_bpath_find().
+
+Fri Apr 12 23:30:01 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bpath.c:
+ Updated comments.
+ Rewrote hfs_bpath_init() and hfs_bpath_next().
+
+ * hfs_btree.h:
+ Updated prototype for hfs_bpath_init().
+
+ * bins_del.c:
+ Updated call to hfs_bpath_init().
+
+ * inode.c, inode_cap.c, inode_dbl.c, inode_nat.c, extent.c, file_cap.c,
+ file_dbl.c, file_nat.c, dir_cap.c, dir_dbl.c, dir_nat.c, catalog.c,
+ dir.c:
+ Renamed hfs_brec_relse() to hfs_brelse().
+
+ * hfs_fs.h, hfs_btree.h:
+ Updated prototypes to reflect new names in bpath.c
+
+ * bins_del.c:
+ Updated calls to functions in bpath.c
+ Updated comments.
+
+ * Makefile:
+ Renamed brec.c to bpath.c
+
+ * bfind.c:
+ Updated calls to functions in bpath.c
+ Added hfs_brelse() which was previously hfs_brec_relse() in brec.c
+
+ * bpath.c:
+ brec.c renamed to bpath.c
+ Functions renamed to reflect their current actions.
+ Comments are still out of date.
+ hfs_brec_relse() renamed to hfs_brelse() and moved to bfind.c
+
+ * brec.c:
+ brec.c renamed to bpath.c
+
+Wed Apr 10 07:20:28 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs.h, extent.c, hfs_btree.h, brec.c, dir.c, bfind.c,
+ bins_del.c:
+ Backed-out changes to hfs_binsert() that added the ability to
+ return the new record, since it will probably not ever be needed.
+
+ * extent.c:
+ Since 1.3.45 truncate() has locked the file, so there is no need
+ for all the things I've been doing to hfs_file_extend() & new_extent().
+ Those two functions have been cleaned up a bit (similar to older forms).
+
+ * extent.c:
+ hfs_file_extend() now more "robust", but new_extent() is still
+ not fully "concurrency safe."
+
+Tue Apr 9 09:01:18 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bins_del.c:
+ Made split() inline.
+
+ * inode.c, dir_nat.c, hfs_fs.h, dir_cap.c:
+ Added hfs_itry() to get in-core inodes.
+
+ * inode_dbl.c, inode_nat.c, hfs_fs.h, inode.c, inode_cap.c, file_dbl.c,
+ file_nat.c, hfs_btree.h, extent.c, file_cap.c, dir_cap.c, dir_dbl.c,
+ dir_nat.c, brec.c, catalog.c, dir.c, bins_del.c, bnode.c,
+ bfind.c:
+ Rewrite of all the (struct hfs_brec) stuff.
+
+Mon Apr 8 21:50:01 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * btree.c, extent.c, bnode.c:
+ Fixed format strings in a few debugging printk()'s.
+
+ * brec.c, hfs_fs.h:
+ Removed hfs_brec_relse_one().
+
+ * hfs_fs.h, bnode.c, brec.c, hfs_btree.h, bfind.c, bins_del.c, balloc.c:
+ (struct hfs_bnode_ref)s are now returned by value rather than reference
+ and they are in (struct hfs_brec) rather than pointed to. Cuts down on
+ a lot of kmalloc() and kfree() traffic.
+
+ * hfs_fs.h, dir.c, extent.c, bins_del.c:
+ Modified hfs_binsert() to be able to return the new record.
+
+ * bins_del.c, hfs_btree.h:
+ Added shift_left(), still untested.
+
+ * bins_del.c:
+ new_root() was missing its comment.
+
+ * super.c, trans.c, hfs_fs_i.h, inode.c, inode_dbl.c, inode_nat.c,
+ file_nat.c, hfs_btree.h, hfs_fs.h, file.c, file_dbl.c, dir_dbl.c,
+ dir_nat.c, extent.c, dir.c, dir_cap.c, bitops.c, bnode.c, brec.c,
+ bfind.c, bins_del.c, bitmap.c, balloc.c:
+ Fixed lines over 80 characters and tabified files.
+
+ * bins_del.c:
+ Fixed line(s) over 80 columns.
+
+ * trans.c, inode_nat.c, string.c, super.c, inode.c, inode_cap.c,
+ inode_dbl.c, hfs_fs_i.h, hfs_fs_sb.h, hfs_btree.h, hfs_fs.h, file.c,
+ file_cap.c, file_dbl.c, file_nat.c, dir_dbl.c, extent.c, btree.c,
+ dir_cap.c, bitops.c, bnode.c, brec.c, bfind.c, bins_del.c, bitmap.c,
+ DOC, README, TODO, balloc.c, CHANGES:
+ About 150 spelling corrections.
+
+Sun Apr 7 23:14:28 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * dir_cap.c, dir_dbl.c, dir_nat.c, dir.c:
+ Cleaned-up check for special names in mkdir().
+
+ * extent.c:
+ More verbose error message.
+
+ * inode_dbl.c, inode_nat.c, hfs_fs_i.h, inode.c, inode_cap.c, dir.c,
+ hfs_fs.h:
+ Limit directories to 32767 entries, since Mac uses 16-bit integer.
+
+Fri Apr 5 07:27:57 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * FAQ:
+ Initial version.
+
+ * dir_dbl.c, dir_nat.c, bins_del.c, dir.c, dir_cap.c:
+ Added missing function comments.
+
+Wed Apr 3 06:38:36 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * brec.c:
+ Cleaned-up code for brec->flags.
+
+ * extent.c:
+ Added function comments.
+
+ * bins_del.c:
+ Added function comments.
+ hfs_binsert() was incrementing record count even on failure.
+
+Mon Apr 1 08:35:51 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * extent.c:
+ Rewrote find_ext() and new_extent() for new hfs_btree_extend().
+ Moved hfs_btree_extend() to balloc.c
+ Fixed potential kernel OOPS in new_extent().
+
+ * brec.c:
+ Fixed potential kernel OOPS in hfs_brec_get_root().
+ Removed hfs_brec_find_first().
+ Fixed return value of hfs_brec_find().
+
+ * bins_del.c:
+ Updated call to hfs_btree_extend().
+
+ * balloc.c:
+	Merged hfs_bnode_add() and hfs_btree_extend() into the latter.
+ Commented init_mapnode().
+
+ * bfind.c:
+ Removed hfs_bfind_first().
+
+ * hfs_fs.h, hfs_btree.h:
+ Updated prototypes.
+
+Sat Mar 30 22:56:47 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * CHANGES, README, TODO:
+ Updated documentation in preparation for 0.6 release.
+
+ * inode.c, hfs_fs.h:
+ Got rid of HFS_FAKE_EXEC in favor of noexec mount option.
+
+ * inode.c, super.c, DOC, hfs_fs_sb.h:
+ Added "quiet" mount option, like the fat filesystem.
+
+ * inode.c, dir_cap.c, dir_nat.c:
+ Pseudo-directories are read-only (at least for now).
+
+ * hfs_fs.h, dir_dbl.c, dir_nat.c, dir.c, dir_cap.c:
+ mkdir() updated to check against reserved names, but the
+ AppleDouble scheme still has problems with names starting with '%'.
+
+ * dir_dbl.c, dir_nat.c, hfs_fs.h, dir.c, dir_cap.c:
+ Added mkdir(). (It only took 2 tries to get it right!!)
+ Only works in "normal" directories and doesn't yet stop
+ one from creating dirs with the reserved names.
+
+ * brec.c, extent.c, bins_del.c:
+ Now have a way to get an EEXIST back from hfs_binsert().
+
+ * btree.c, inode.c, hfs_fs_i.h, file.c, bfind.c, bnode.c, balloc.c:
+ Added 'dev' field to struct hfs_file.
+
+ * hfs_fs_i.h, inode.c, btree.c, extent.c, file.c, bnode.c, brec.c,
+ balloc.c:
+ Removed duplicated fields from struct hfs_file since
+ even B*-trees now have that information in the inode.
+
+ * extent.c:
+ zero_blocks() neglected allocation block size in computing start.
+
+Fri Mar 29 16:04:37 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * super.c:
+ hfs_statfs(): f_files and f_ffree fields are now -1, which is
+ documented as the value for "undefined" fields in struct statfs.
+
+ * trans.c, inode_nat.c, string.c, super.c, inode_dbl.c, inode_cap.c,
+ inode.c, file_nat.c, file_dbl.c, file_cap.c, file.c, dir_dbl.c,
+ extent.c, dir_cap.c, catalog.c, btree.c, brec.c, bnode.c, bitops.c,
+ bitmap.c, bins_del.c, balloc.c:
+ Stylistic editing: {} for all 'for', 'while' and 'if' blocks.
+ I hope I didn't screw-up anything.
+
+ * hfs_fs.h, dir.c, dir_cap.c, dir_dbl.c, dir_nat.c:
+ Added creation of normal files to all three fork schemes!
+ Strange things may happen when trying to create "non-normal" files.
+
+ * brec.c:
+ Cleaned up some debugging code.
+
+ * hfs_fs_i.h:
+	File and directory counts could have overflowed a 16-bit integer.
+
+ * hfs_btree.h:
+ Added HFS_BREC_RIGHT to help fix insertion problem.
+
+ * extent.c:
+ Various fixes to hfs_{file,btree}_extend().
+
+ * catalog.c:
+ Made hfs_build_cat_key() more "correct".
+
+ * btree.c:
+ Added and fixed debugging code.
+
+ * brec.c:
+ Fixed overflow detection.
+ Added some debugging code.
+
+ * bnode.c:
+ Dirtied some buffers in places that might have been missed.
+ Fixed some debugging code that had broken.
+
+ * bitops.c:
+ hfs_count_free_bits() was running off end of bitmap.
+
+ * bins_del.c:
+ Fixed various bugs, mostly related to variable-length keys.
+
+ * balloc.c:
+ Had forgotten to set a bit in new mapnodes.
+ Node counts were overflowing 16-bit integers.
+
+ * bitmap.c:
+ Oops! clear/set did opposite operation on full words.
+
+Wed Mar 27 10:59:07 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * hfs_fs_i.h:
+ Updated struct hfs_extent for concurrent access.
+ Also caused a slight modification to struct hfs_file.
+
+ * hfs_fs.h, hfs_btree.h:
+ Added/updated prototypes.
+
+ * balloc.c:
+ hfs_bnode_alloc() finished but still untested.
+
+ * bins_del.c:
+ Fixed up deadlock avoidance in hfs_binsert() again.
+ Perhaps I even got it right this time.
+
+ * extent.c:
+ hfs_file_extend() now safe under concurrent operations?
+
+ * file.c:
+ hfs_getblk() now safe under concurrent operations?
+
+Tue Mar 26 23:26:35 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * btree.c:
+ Added call to hfs_extent_trim() to fix memory leak.
+
+ * extent.c:
+ Oops, had left a "#define static" in from debugging.
+
+ * bins_del.c:
+ hfs_binsert() rewritten to avoid deadlock when extending
+ the extents B*-tree.
+
+ * btree.c:
+ Moved hfs_btree_extend() to extent.c
+
+ * inode_nat.c, inode_cap.c, inode_dbl.c:
+ hfs_*_put_inode() rewritten to call hfs_extent_trim().
+
+ * extent.c:
+ Big rewrite for new struct hfs_extent:
+ Now keep linked list of extents.
+ Cache is now a pointer to a list element.
+ Now have 'end' field to aid decode_extent().
+ New functions:
+ hfs_extent_trim(): frees linked list.
+ hfs_btree_extend(): for extending B*-trees.
+ Improved debugging output.
+
+ * balloc.c:
+ Added hfs_bnode_add() (incomplete and uncommented).
+
+ * btree.c:
+ Moved some work from hfs_btree_extend() to hfs_bnode_add().
+
+ * bfind.c:
+ Added hfs_bfind_first() as wrapper for hfs_brec_find_first().
+
+ * brec.c:
+ Added hfs_brec_find_first() to search first leaf node.
+
+ * bins_del.c:
+ Added error returns to hfs_binsert() and binsert().
+
+ * bins_del.c:
+ Check to see that we really need ancestors before starting.
+ Check that hfs_btree_alloc() gave us enough nodes.
+ binsert() uses info precomputed by hfs_binsert().
+
+Mon Mar 25 11:33:53 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * bnode.c:
+ Collected together the error returns in hfs_bnode_lock().
+
+ * Makefile:
+ Added ChangeLog to $(MISC).
+
+Wed Mar 20 19:41:45 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * super.c, hfs_fs.h, file.c, dir_dbl.c, dir_nat.c, dir.c, dir_cap.c:
+ Removed support for kernels older than about 1.3.70
+ Most of that support had been broken recently anyway.
+
+ * super.c:
+ Fixed so DEBUG_MEM works w/o DEBUG_ALL.
+ Updated call to hfs_btree_init().
+
+ * hfs_fs.h:
+ Updated/added prototypes.
+
+ * hfs_btree.h:
+ HFS_BFIND_CHAIN removed.
+ struct hfs_brec gets new 'flags' field with bits:
+ HFS_BREC_{FIRST,OVERFLOW,UNDERFLOW,UNINITIALIZED}
+ Removed bitmap size constants.
+ Changes to struct hfs_btree:
+ 'file' and 'cache' now structs rather than pointers.
+ Added 'reserved' field (used during insertion).
+ Added pointers to size and extent in MDB.
+
+ * file.c:
+ Made hfs_getblk() public.
+ Removed (fil->inode == NULL) special cases.
+
+ * extent.c:
+ {find,update}_ext() are no longer inline.
+ new_extent() fails when called for the extents tree;
+		previously it would hang when calling hfs_binsert().
+ extend_file():
+ renamed to hfs_file_extend() and made public.
+ fixed to work for B*-trees.
+ zeros-out blocks as they are allocated.
+ fixed bugs for (allocation block) != (physical block).
+
+ * btree.c:
+ hfs_btree_{init,free}() modified for changes to struct:
+ 'file' and 'cache' moved back into structure
+ file.inode initialized to reduce special cases
+ hfs_btree_init() gets pointer to size in MDB instead of size.
+ Added hfs_btree_extend() (incomplete and uncommented).
+
+ * bnode.c:
+ hfs_bnode_{alloc,free}() moved to separate file.
+ Removed 'const' from some function arguments
+ due to change in struct hfs_btree.
+ hfs_bnode_lock(): added WRITE/RESRV->READ transition.
+
+ * brec.c:
+ hfs_brec_get_{root,child}() now take a 'keep_mask' argument
+ indicating when to keep ancestor nodes, and store
+ information about why ancestors were kept.
+ HFS_BFIND_CHAIN eliminated in favor of HFS_BFIND_{INSERT,DELETE}
+ which are now implemented using 'keep_mask'.
+ Added hfs_brec_relse_one() that doesn't release ancestors.
+
+ * bins_del.c:
+ Lots of rewrites to cleanup insertion.
+ Now tries to extend tree before insertion starts.
+ binsert() iterative rather than recursive.
+ No point in keeping track as it is still not "stable".
+
+ * balloc.c:
+ New file: started with hfs_bnode_{free,alloc}()
+ Added hfs_bnode_init() to initialize a newly allocated bnode.
+ hfs_bnode_free():
+ Renamed hfs_bnode_bitop().
+ Can set or clear a specified bit.
+ Gets bitmap sizes from nodes directly.
+ hfs_bnode_alloc():
+ Returns actual node, calling hfs_bnode_init().
+ Gets bitmap sizes from nodes directly.
+
+ * bfind.c:
+ Removed obsolete comment from hfs_bsucc()
+ Removed 'const' from tree arg of hfs_bfind()
+ due to changes in struct hfs_btree.
+
+ * Makefile:
+ Added new file: balloc.c
+
+Sat Mar 9 22:03:53 1996 Paul H. Hargrove <hargrove@sccm.stanford.edu>
+
+ * Start of detailed CVS logging.
+
+Mar 09, 1996: snapshot-09Mar96 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ NOT AN OFFICIAL RELEASE
+ Fixed up debugging code that was broken by split of btree.c
+ Added debugging kmalloc/kfree
+ Fixed memory leak in hfs_bnode_relse()
+
+Mar 08, 1996: snapshot-08Mar96 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ NOT AN OFFICIAL RELEASE
+ now reset blocksize on device when done.
+ hfs_binsert done (except for the full tree case).
+ btree.c split up into manageable pieces (need to sort out hfs_btree.h)
+
+Feb 26, 1996: snapshot-26Feb96 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ NOT AN OFFICIAL RELEASE
+ Some writability.
+ Bug with multiple opens of meta data fixed.
+ Netatalk support no longer considered experimental.
+
+Virtually everything has changed, so I've lost track here.
+
+Nov 16, 1995: snapshot-16Nov95 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ NOT AN OFFICIAL RELEASE
+ Still more comments.
+ btree.c back to 80 columns. will do same to other files soon.
+ Starting with btree.c have begun to put file contents into some
+ sort of standard order.
+ Moved metadata reading to VFS open() routine and now free it in
+ the VFS release() routine. Much cleaner than the old way.
+ Unified hfs_iget by shifting scheme-dependent code into a function
+ pointer in the superblock. This could/should be shifted to
+ a VFS read_inode() routine if that can be done cleanly.
+ Probably lots of other changes; I've lost track.
+
+Nov 05, 1995: version 0.5.3 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ NOT AN OFFICIAL RELEASE
+ 1.2.x compatibility removed
+ Added lots of comments to btree.c and cleanup some code. The result
+ is that the source file doubled in size while the object
+ file dropped in size by 20%.
+ Added some comments to super.c and dir.c as well.
+ Cleaned up some stuff in dir.c adding some additional error checking
+ and moving closer to using a unified hfs_iget by migrating
+ common code into lookup_parent().
+ Changed btree.c to use a separate bnode cache per filesystem.
+ Renamed a bunch of the bnode functions in btree.c
+
+Jun 29, 1995: version 0.5.2 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ BUG FIX and 1.3.x-compatibility release.
+ Will compile under 1.2.x or 1.3.x by changing one line in Makefile.
+ Started adding magic numbers to structures for "safety".
+ Don't strip internal symbols when linking or loading, as this made
+ good bug reports rather difficult.
+ Fixed a bug that could cause the fs to lock-up after trying to open
+ a non-existent file.
+ Fixed a bug that allowed files to appear truncated, when in fact it
+ is still not possible to truncate a file.
+ Added more/better comments to header files.
+ Deal with volume and b-tree bitmaps in preparation for writing.
+ Fixed readdir() to deal properly with the case where the directory
+ changes while writing to user-space. (which can't yet
+ actually happen, until directories are writable).
+
+Jun 23, 1995: version 0.5.1 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ BUG FIX RELEASE
+ Removed two debugging messages that didn't belong.
+ Fixed a typo that prevented modified inodes from being written to disk.
+ Added a missing line which prevented rmmod'ing sometimes.
+ Added a missing line which caused errors when modifying .finderinfo or
+ .resource under the CAP system.
+ Added a notify_change() to keep mode bits sensible, and to cause
+ changes to an inode to affect the data fork and resource fork
+ of a file together.
+
+Jun 22, 1995: version 0.5 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ Fixed a bug that was giving wrong values for i_blocks
+ Partly writable (can only 'touch' existing files, so far)
+ Removed case= mount option. It will be back eventually.
+ Can now deal with CDROMs (and hard disks?), many thanks to
+ Holger Schemel for this work.
+ Latin-1 filename conversion also due to Holger Schemel.
+ Rewritten btree operations.
+
+Feb 28, 1995: version 0.4 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ Requires Linux >= 1.1.94: depends on changes made to asm/byteorder.h
+ Now using string comparison code donated by ARDI (see string.c)
+ Code reorganized to use data structures more like ARDI's.
+ More code reorganization to abstract the btree operations.
+ Added the fork= mount option.
+ Added AppleDouble support. Executor, from ARDI, can now run programs
+ from HFS filesystems mounted w/ the HFS module.
+
+Jan 28, 1995: version 0.3 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ Major code reorganization.
+ Known for certain to work ONLY on floppies.
+ Started caching extents, so got faster on long file reads.
+ Now compiles separate from kernel tree.
+ Supports 5 filename conversion methods.
+ Supports forks, using the method from CAP.
+ All external symbols now start with HFS_ or hfs_
+
+Jan 12, 1995: version 0.2 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ Should now work on all HFS volumes, but still only tested on floppies.
+ Got smaller and faster with some code reorganization.
+ Since Linus moved htons() and friends to an asm file, should now be
+ truly endian-independent, but still only tested on Intel machines.
+ Requires Linux >= 1.1.77, since Linus moved htons().
+
+Jan 05, 1995: version 0.1 hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ First release.
+ 1.44Mb floppies only
+ no resource forks
+ trivial name mangling only
+ read only
+ for Linux >= 1.1.75
diff --git a/fs/hfs/FAQ.txt b/fs/hfs/FAQ.txt
new file mode 100644
index 000000000..1d2a7caaf
--- /dev/null
+++ b/fs/hfs/FAQ.txt
@@ -0,0 +1,342 @@
+ Frequently Asked Questions about the HFS filesystem for
+ Linux
+ Paul H. Hargrove, hargrove@sccm.Stanford.EDU
+ version 1.0.3, 27 Apr 1997
+
+ This document provides answers to some of the most frequently asked
+ questions about the HFS filesystem for Linux. It is currently pretty
+ rough and totally unorganized. Corrections, additions and clarifica-
+ tions are appreciated. The most current version of this document is
+ kept on The HFS for Linux Page <http://www-sccm.Stanford.EDU/~har-
+ grove/HFS/>.
+ ______________________________________________________________________
+
+ Table of Contents:
+
+ 1. What is this FAQ about?
+
+ 2. What is HFS?
+
+  3. How do I mount AppleShare volumes?
+
+  4. What is the current version of the HFS filesystem?
+
+ 5. How stable is the current version?
+
+ 6. Is there a mailing list for discussion of the HFS filesystem?
+
+ 7. What version of Linux do I need to be running?
+
+ 8. Will it run on my (your processor type here)?
+
+ 9. Will it run under (your non-Linux operating system here)?
+
+ 10. Why can I mount some HFS CDROMs but not others?
+
+ 11. What does ``only 1024-char blocks implemented (512)'' mean?
+
+ 12. Why do I get a message about a bad or unknown partition table?
+
+ 13. Can I mount multiple HFS partitions from the same Macintosh
+ disk?
+
+ 14. In what ways can I write to HFS filesystems?
+
+ 15. Does the HFS filesystem work with 400k or 800k Macintosh
+ diskettes?
+
+ 16. How can I format an HFS filesystem?
+
+ 17. How can I fsck an HFS filesystem?
+
+ 18. Why do I get ``error -50'' messages from my Mac when using
+ netatalk?
+
+ 19. Why does my Macintosh show generic application and document
+ icons?
+
+  20. Who owns all the copyrights and trademarks? ;-)
+
+ 20.1. This Document
+
+ 20.2. The Software
+
+ 20.3. Trademarks
+ ______________________________________________________________________
+
+ 11.. WWhhaatt iiss tthhiiss FFAAQQ aabboouutt??
+
+ This FAQ is about the HFS filesystem for Linux, which is available in
+ two forms. The stand-alone version (called hfs_fs) is a Linux kernel
+ loadable module implementing the Macintosh HFS filesystem. The HFS
+ filesystem is also included in some distributions of the Linux kernel
+ source (in the directory linux/fs/hfs). This version can be compiled
+ as a loadable module or compiled into the kernel.
+
+ Either version allows a machine running Linux to read and write disks
+ from a Macintosh (almost) as though they were native Linux disks.
+
+ 22.. WWhhaatt iiss HHFFSS??
+
+ HFS stands for ``Hierarchical File System'' and is the filesystem used
+ by the Mac Plus and all later Macintosh models. Earlier Macintosh
+ models used MFS (``Macintosh File System''), which is not supported.
+
+  33.. HHooww ddoo II mmoouunntt AApppplleeSShhaarree vvoolluummeess??
+
+ The HFS filesystem is for mounting local filesystems only. There is
+ an experimental afpfs by Ben Hekster heksterb@acm.org available from
+ http://www.odyssey.co.il/~heksterb/Software/afpfs/.
+
+  44.. WWhhaatt iiss tthhee ccuurrrreenntt vveerrssiioonn ooff tthhee HHFFSS ffiilleessyysstteemm??
+
+ As of version 1.0.3 of this FAQ, version 0.95 is the most recent. You
+ can always find the most recent version on The HFS for Linux Page
+ <http://www-sccm.Stanford.EDU/~hargrove/HFS/>. Announcements of new
+ versions are made to the comp.os.linux.announce newsgroup.
+
+ 55.. HHooww ssttaabbllee iiss tthhee ccuurrrreenntt vveerrssiioonn??
+
+ Version 0.95 is considered to be ``beta'' software, so I recommend
+ making backups of anything important before you start playing. It is
+ relatively free of bugs due to lots of testing of the previous
+ releases.
+
+  After a suitable period without new bugs I will consider the
+ software to be ``stable'' and the version number will jump to 1.0.
+
+ 66.. IIss tthheerree aa mmaaiilliinngg lliisstt ffoorr ddiissccuussssiioonn ooff tthhee HHFFSS ffiilleessyysstteemm??
+
+ There is no mailing list devoted exclusively to the HFS filesystem.
+ However, announcements of new versions are posted to the ``linux-
+ atalk'' and ``hfs-interest'' lists. I will see bug reports sent to
+ those lists but e-mail is more reliable (hargrove@sccm.Stanford.EDU).
+
+ To subscribe to hfs-interest send e-mail with a body of ``subscribe
+ hfs-interest (your e-mail address)'' to majordomo@ccs.neu.edu.
+
+ To subscribe to linux-atalk send e-mail with a body of ``SUBSCRIBE
+ LINUX-ATALK (Your full name)'' to listserv@netspace.org.
+
+ 77.. WWhhaatt vveerrssiioonn ooff LLiinnuuxx ddoo II nneeeedd ttoo bbee rruunnnniinngg??
+
+ To compile and use the stand-alone distribution of the HFS filesystem
+ you will need Linux kernel version 2.0.1 or newer compiled with
+ modules enabled (CONFIG_MODULES). To compile you will need the kernel
+ headers which match the kernel you are running. This is covered in
+ more detail in the installation instructions in INSTALL.txt.
+
+ If your kernel came with HFS in the kernel source tree then HFS should
+ work with your Linux version. There may be small problems with a few
+ of the development kernel releases. For these releases check the HFS
+ for Linux Page <http://www-sccm.Stanford.EDU/~hargrove/HFS/> for
+ patches.
+
+ 88.. WWiillll iitt rruunn oonn mmyy ((yyoouurr pprroocceessssoorr ttyyppee hheerree))??
+
+ The code is carefully written to be independent of your processor's
+ word size and byte-order, so if your machine runs Linux it can run the
+ HFS filesystem. However some younger ports don't yet have support for
+ loadable modules.
+
+ Note that HFS is tested most extensively on Intel platforms. So there
+ could be subtle compilation problems on other platforms. If you
+ encounter any that are not addressed by the documentation then please
+ let me know.
+
+ 99.. WWiillll iitt rruunn uunnddeerr ((yyoouurr nnoonn--LLiinnuuxx ooppeerraattiinngg ssyysstteemm hheerree))??
+
+ No. There is a port in progress to NetBSD. I know of no other active
+ porting attempts. If you are interested in porting the HFS filesystem
+ to another Unix-like operating system, I am interested in providing
+ what guidance I can.
+
+ 1100.. WWhhyy ccaann II mmoouunntt ssoommee HHFFSS CCDDRROOMMss bbuutt nnoott ootthheerrss??
+
+ In the past there was a known incompatibility with some ``hybrid''
+ CDROMs that appear as HFS disks on Macs and as ISO9660 disks on other
+ systems. I think I have fixed the problem. So, if you encounter this
+ particular problem or have problems with specific non-hybrid CDROMs
+ please e-mail me with the title and manufacturer of the CD.
+
+ 1111.. WWhhaatt ddooeess ````oonnllyy 11002244--cchhaarr bblloocckkss iimmpplleemmeenntteedd ((551122))'''' mmeeaann??
+
+ This message comes from the kernel and indicates that an attempt was
+ made to read a 512-byte block from a device that doesn't support
+ 512-byte blocks. The HFS filesystem only works with 512-byte blocks,
+ and therefore doesn't function with these devices. Eventually it may
+ be able to use 1024-byte (or even 2048-byte) blocks when necessary.
+ Ideally the device driver should be enhanced to support 512-byte
+ blocks so that the various filesystems which need 512-byte blocks
+ don't each need to work around it.
+
+ 1122.. WWhhyy ddoo II ggeett aa mmeessssaaggee aabboouutt aa bbaadd oorr uunnkknnoowwnn ppaarrttiittiioonn ttaabbllee??
+
+ If your Linux kernel doesn't understand Macintosh partition tables it
+ gives this warning when it can't find a partition table it recognizes.
+ To support partitioned media with such kernels, decoding of Mac
+ partition tables is done by the HFS filesystem so you should still be
+ able to mount the disk. However, to do so you will need to mount the
+ raw device (such as /dev/sdb instead of /dev/sdb4) and use the part
+ mount option to indicate which partition you want.
+
+ 1133.. CCaann II mmoouunntt mmuullttiippllee HHFFSS ppaarrttiittiioonnss ffrroomm tthhee ssaammee MMaacciinnttoosshh ddiisskk??
+
+  Only if your kernel understands Macintosh partition tables.  If the
+ kernel doesn't understand the Macintosh partition table, the HFS
+ filesystem must access the raw device. Therefore, the kernel thinks
+ the entire drive is in use and prevents additional mounts on it.
+
+ 1144.. IInn wwhhaatt wwaayyss ccaann II wwrriittee ttoo HHFFSS ffiilleessyysstteemmss??
+
+ The HFS filesystem is as capable as the MS-DOS or VFAT filesystems,
+ except that certain things can only be done with a file's data fork.
+
+ You ccaann:
+
+ +o Create, delete and rename directories and data forks of files with
+ the caveat that names are case insensitive (so foo and Foo are the
+ same file or directory).
+
+ +o Run Linux executables or shared libraries on an HFS disk if they
+ are stored in the data fork of a file.
+
+ +o Read, write and truncate both forks of files and the Finder's
+ metadata of files and directories.
+
+ +o Mmap data forks of files (and the resource fork if the filesystem
+ is mounted with the fork=cap option).
+
+ +o Toggle the 'w' permission bits (as a group) of data forks.
+
+ +o Change the i_mtime of files and directories.
+
+ You ccaannnnoott:
+
+ +o Create, delete or rename resource forks of files or the Finder's
+  metadata.  Note, however, that they are created (with default
+ values), deleted and renamed along with the corresponding data fork
+ or directory.
+
+ +o Run Linux executables or shared libraries on an HFS disk if they
+ are stored in the resource fork of a file.
+
+ +o Mmap the Finder's metadata (when fork=cap) or AppleDouble header
+ files (when fork=double or fork=netatalk).
+
+ +o Change permissions on directories.
+
+ +o Change the uid or gid of files or directories.
+
+ +o Set the set-uid, set-gid or sticky permission bits.
+
+ +o Create multiple links to files.
+
+ +o Create symlinks, device files, sockets or FIFOs.
+
+ 1155.. DDooeess tthhee HHFFSS ffiilleessyysstteemm wwoorrkk wwiitthh 440000kk oorr 880000kk MMaacciinnttoosshh
+ ddiisskkeetttteess??
+
+ Yes and no. The software is fully capable of dealing with HFS disks
+ of any size. However, the 400k and 800k diskettes are written in a
+ physical format that is incompatible with most non-Macintosh floppy
+ drives. Note also that almost all 400k Macintosh diskettes are MFS,
+ not HFS.
+
+ 1166.. HHooww ccaann II ffoorrmmaatt aann HHFFSS ffiilleessyysstteemm??
+
+ Robert Leslie (rob@mars.org) has written a package for working with
+ HFS filesystems (like mtools plus a graphical interface). One program
+ in the package is hformat which can format HFS filesystems. The
+ latest version can be found on the HFS Utilities home page
+ <http://www.mars.org/home/rob/proj/hfs/>.
+
+ 1177.. HHooww ccaann II ffsscckk aann HHFFSS ffiilleessyysstteemm??
+
+ Right now you'll have to use a Macintosh to do this. However, Rob
+ Leslie is working on an fsck for HFS filesystems.
+
+ 1188.. WWhhyy ddoo II ggeett ````eerrrroorr --5500'''' mmeessssaaggeess ffrroomm mmyy MMaacc wwhheenn uussiinngg
+ nneettaattaallkk??
+
+ To be compatible with netatalk's afpd you will need to use netatalk
+ version 1.4b1 or newer and mount the HFS filesystem with the ``afpd''
+ mount option. More information is provided in the ``afpd'' subsection
+ of the ``Mount Options'' section of the HFS documentation (HFS.txt if
+ you have the stand-alone HFS distribution or
+ linux/Documentation/filesystems/hfs.txt if HFS is in your kernel
+ source tree.)
+
+ 1199.. WWhhyy ddooeess mmyy MMaacciinnttoosshh sshhooww ggeenneerriicc aapppplliiccaattiioonn aanndd ddooccuummeenntt
+ iiccoonnss??
+
+ When using the ``afpd'' mount option the Desktop database on the disk
+ is not made available to Netatalk's afpd. Because of this mounting an
+ HFS filesystem across the network to a Macintosh may result in the
+ Finder showing generic application and document icons. Additionally
+ double clicking on a document will fail to start the correct
+ application.
+
+ If the disk is writable you can make Netatalk build a new Desktop
+ database in its own format by holding down the Option key while
+ selecting the volume in the Chooser. If the disk is not writable then
+ these problems can be worked around by copying the application to a
+ local disk on the Macintosh.
+
+  2200.. WWhhoo oowwnnss aallll tthhee ccooppyyrriigghhttss aanndd ttrraaddeemmaarrkkss?? ;;--))
+
+ 2200..11.. TThhiiss DDooccuummeenntt
+
+ This document is Copyright (c) 1996, 1997 by Paul H. Hargrove.
+
+ Permission is granted to make and distribute verbatim copies of this
+ document provided the copyright notice and this permission notice are
+ preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of this
+ document under the conditions for verbatim copies above, provided a
+ notice clearly stating that the document is a modified version is also
+ included in the modified document.
+
+ Permission is granted to copy and distribute translations of this
+ document into another language, under the conditions specified above
+ for modified versions.
+
+ Permission is granted to convert this document into another media
+ under the conditions specified above for modified versions provided
+ the requirement to acknowledge the source document is fulfilled by
+ inclusion of an obvious reference to the source document in the new
+ media. Where there is any doubt as to what defines ``obvious'' the
+ copyright owner reserves the right to decide.
+
+ 2200..22.. TThhee SSooffttwwaarree
+
+ The HFS filesystem software is Copyright (c) 1994-1997 by Paul H.
+ Hargrove.
+
+ The software is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ The software is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the software in the file ``COPYING''; if not, write to the
+ Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+ USA.
+
+ 2200..33.. TTrraaddeemmaarrkkss
+
+ +o ``Finder'' is a trademark of Apple Computer, Inc.
+
+ +o ``Apple'', ``AppleShare'', and ``Macintosh'' are registered
+ trademarks of Apple Computer, Inc.
+
+  +o ``MS-DOS'' is a registered trademark of Microsoft Corporation.
+
+ +o All other trademarks are the property of their respective owners.
+
diff --git a/fs/hfs/HFS.txt b/fs/hfs/HFS.txt
new file mode 100644
index 000000000..407d04174
--- /dev/null
+++ b/fs/hfs/HFS.txt
@@ -0,0 +1,1042 @@
+ Macintosh HFS Filesystem for Linux
+ Paul H. Hargrove, hargrove@sccm.Stanford.EDU
+ version 0.95, 28 Apr 1997
+
+ This document describes version 0.95 of the Macintosh HFS filesystem
+ for Linux. The most current versions of this document and the
+ software are kept at The HFS for Linux Page
+ <http://www-sccm.Stanford.EDU/~hargrove/HFS/>.
+ ______________________________________________________________________
+
+ Table of Contents:
+
+ 1. Introduction
+
+ 2. Mounting HFS Filesystems
+
+ 2.1. afpd
+
+ 2.2. case={asis, lower}
+
+ 2.3. conv={auto, binary, text}
+
+ 2.4. creator=cccc
+
+ 2.5. fork={cap, double, netatalk}
+
+ 2.6. gid=n
+
+ 2.7. names={7bit, 8bit, alpha, cap, latin, netatalk, trivial}
+
+ 2.8. part=n
+
+ 2.9. quiet
+
+ 2.10. type=cccc
+
+ 2.11. uid=n
+
+ 2.12. umask=n
+
+ 3. Writing to HFS Filesystems
+
+ 3.1. Writing with fork=cap
+
+ 3.2. Writing with fork=double
+
+ 3.3. Writing with fork=netatalk
+
+ 4. A Guide to Special File Formats
+
+ 4.1. CAP .finderinfo Files
+
+ 4.2. AppleDouble Header Files
+
+ 5. Reporting Bugs
+
+ 5.1. What Goes in a Bug Report
+
+ 5.2. How to Report a Kernel Oops or GPF
+
+ 6. Legal Notices
+
+ 6.1. This Document
+
+ 6.2. The Software
+
+ 6.2.1. The Columbia AppleTalk Package for UNIX
+
+ 6.2.2. Netatalk
+
+ 6.3. Trademarks
+ ______________________________________________________________________
+
+ 11.. IInnttrroodduuccttiioonn
+
+ This software implements the Macintosh HFS filesystem under Linux. It
+ allows you to read and write HFS filesystems on floppy disks, CDROMs,
+ hard drives, ZIP drives, etc. It is _n_o_t an AppleShare client.
+
+ If you use this software, please send me a note telling of your
+ success or failure with it. Your feedback lets me know that this
+ project is not a waste of my time.
+
+ This code is still experimental, so backup anything important before
+ you start playing. I'd like you to know that I've never lost any
+ files while using this software, or I would not release it. However,
+ a ``better safe than sorry'' attitude is probably best.
+
+ If, for instance, the buffer cache were to become corrupted you could
+ start losing things on other disks. Because of this, if you get a
+ General Protection Fault, or a kernel Oops, I _s_t_r_o_n_g_l_y recommend that
+ you reboot before writing any files.
+
+ 22.. MMoouunnttiinngg HHFFSS FFiilleessyysstteemmss
+
+ Once you have the HFS filesystem compiled into the kernel or installed
+ as a loadable module, you will be able to use hfs as a filesystem type
+ option to mount. For instance, to mount a Macintosh floppy disk on
+ the directory /mnt using the default mount options you would execute
+ ``mount -t hfs /dev/fd0 /mnt''.
+
+ The remainder of this section describes the several mount options
+ available to control how the HFS filesystem is mapped onto a Linux
+ filesystem structure. The values for the multiple-choice options
+ (case, conv, fork and names) can be abbreviated by their first
+ character.
+
+ 22..11.. aaffppdd
+
+ If included in the options, then the behavior of the filesystem is
+ changed to make it fully read-write compatible with Netatalk's afpd.
+ In this mode you should not use normal user-level tools to modify the
+ filesystem, though reading from it is acceptable. This is because the
+ return codes from some system calls are changed to fool afpd. These
+ changes will confuse many user-level tools. In particular ``rm -r''
+ will loop forever.
+
+ This option implies fork=netatalk, which in turn implies
+ names=netatalk. If either of these options are explicitly set to
+ something else they will take precedence and will confuse afpd. The
+ quiet option has no effect. The case= option functions normally, but
+ afpd usually does the same thing for you. The conv= and part= options
+ also function normally.
+
+ You will probably want to use the uid=, gid= and umask= mount options.
+ Note that because all the files on an HFS filesystem belong to a
+ single user and group and have a single umask, the full AppleShare
+ permission scheme will not work through Netatalk.
+
+ One additional limitation is that the Desktop database on the disk is
+ stored in afpd's format and is separate from any existing database
+ maintained by the Finder when the volume is used on a Macintosh.
+ Because of this mounting an HFS CDROM across the network to a
+ Macintosh may result in applications and documents showing up with
+ default application and document icons. Additionally double clicking
+ on a document will fail to start the correct application. Both of
+ these problems can be worked around by copying the application to a
+ local disk on the Macintosh.
+
+ This mode is known to be compatible with afpd from Netatalk versions
+ 1.4b1 and 1.4b2, and known to be incompatible with the afpd from
+ version 1.3.3. As of this writing Netatalk version 1.4 has not yet
+ been released. However, it is expected that this mode will be
+ compatible with afpd from Netatalk version 1.4 when it is released.
+
+ 22..22.. ccaassee=={{aassiiss,, lloowweerr}}
+
+ default value: asis
+
+ This option determines if Macintosh filenames are presented in their
+ original case or in all lowercase. Filename lookup is always case
+ insensitive, so either way foo and Foo refer to the same file but ls
+ will list Foo with case=asis, and foo with case=lower. (Same as for
+ the HPFS filesystem.)
+
+ aassiiss
+ Filenames are reported in the case they were created with.
+
+ lloowweerr
+ Filenames are reported in lowercase.
+
+ 22..33.. ccoonnvv=={{aauuttoo,, bbiinnaarryy,, tteexxtt}}
+
+ default value: binary
+
+ This option controls CR<->NL conversion of Macintosh _d_a_t_a _f_o_r_k_s. Any
+ translation takes place only for files accessed with the read() and
+ write() system calls (either directly or through the stdio functions).
+ Access through mmap() is unaffected. (Similar to the conv= option for
+ the MS-DOS filesystem.)
+
+ aauuttoo
+ If the Finder's type for a file is TEXT or ttro, then CR
+ characters are converted to NL characters when read, and NL
+ characters are converted to CR characters when written.
+
+ Be warned that some Macintosh applications create files with
+ type TEXT even though the contents are clearly binary.
+
+ bbiinnaarryy
+ No CR<->NL conversion is done.
+
+ tteexxtt
+ In all data forks, regardless of the Finder's type for the file,
+ CR characters are converted to NL characters when read, and NL
+ characters are converted to CR characters when written.
+
+ 22..44.. ccrreeaattoorr==cccccccc
+
+ default value: ``????''
+
+ Specifies the 4-character string specifying the Finder's Creator for
+ new files.
+
+ 22..55.. ffoorrkk=={{ccaapp,, ddoouubbllee,, nneettaattaallkk}}
+
+ default value: cap
+
+ This option determines how resource forks and the Finder's metadata
+ are represented within the structure of the Linux filesystem.
+
+ ccaapp
+ The scheme used by the Columbia AppleTalk Package's AUFS.
+
+ Associated with each directory are two special directories and a
+ metadata file. The directory ./bar is represented by:
+
+ ..//bbaarr
+ The directory itself, containing subdirectories, the data
+ forks of files, and the following two special directories.
+
+ ..//bbaarr//..rreessoouurrccee
+ A special directory holding resource forks of the files in
+ ./bar.
+
+ ..//bbaarr//..ffiinnddeerriinnffoo
+ A special directory holding metadata files for the files and
+ subdirectories in ./bar.
+
+ ..//..ffiinnddeerriinnffoo//bbaarr
+ The metadata file for the directory ./bar.
+
+ The files in a directory are represented as three files:
+
+ ..//ffoooo
+ The data fork of the file ./foo.
+
+ ..//..rreessoouurrccee//ffoooo
+ The resource fork of the file ./foo.
+
+ ..//..ffiinnddeerriinnffoo//ffoooo
+ The metadata file for the file ./foo.
+
+ Additionally, the file .rootinfo in the root directory of the
+ HFS filesystem is a metadata file for the root directory.
+
+ Brief documentation on the format of file containing the
+ Finder's metadata is included in the section ``A Guide to
+ Special File Formats'' in this document. More detailed
+ information is available in the Columbia AppleTalk Package.
+
+ ddoouubbllee
+ The ``AppleDouble'' format recommended by Apple. (Apple's other
+ recommended format, ``AppleSingle'', is not yet implemented.)
+
+ Associated with each directory is an AppleDouble ``header
+ file''. The directory ./bar is represented by:
+
+ ..//bbaarr
+ The directory itself, containing subdirectories, the data
+ forks for files, and the header files for files and
+ subdirectories.
+
+ ..//%%bbaarr
+ The header file for the directory ./bar, containing the
+ Finder's metadata for the directory.
+
+ The files in a directory are represented as two files:
+
+ ..//ffoooo
+ The data fork of the file ./foo.
+
+ ..//%%ffoooo
+ The header file for the file ./foo, containing the resource
+ fork and the Finder's metadata for the file.
+
+ Additionally, the file %RootInfo in the root directory of the
+ HFS filesystem is a header file for the root directory. This is
+ not quite the %RootInfo file referred to in the AppleDouble
+ specification.
+
+ The header files used in this scheme are version 2 AppleDouble
+ header files. Their format is described briefly in the section
+ ``A Guide to Special File Formats'' in this document. They are
+ documented in detail in ``AppleSingle/AppleDouble Formats:
+ Developer's Note (9/94)'', available from Apple's Developer
+ Services Page <http://devworld.apple.com>.
+
+ Note that the naming convention for the header file can cause
+ name conflicts. For instance, using Apple's 7-bit ASCII name
+ conversion (see the names mount option) the name %Desktop could
+ be interpreted either as the header file for the file Desktop or
+ as the file with 0xDE as the hexadecimal representation of its
+ first character, and "sktop" as the remaining 5 characters. The
+ problem arises when both files exist, since only one will be
+ accessible. The behavior of the HFS filesystem in the case of
+ such a conflict is undefined, and may change in future releases.
+ (If this causes problems for you, please don't report it as a
+ bug; I didn't design this ``standard'', Apple did.)
+
+ nneettaattaallkk
+ The scheme used by the Netatalk afpd.
+
+ Associated with each directory is a special directory and a
+ metadata file. The directory ./bar is represented by:
+
+ ..//bbaarr
+ The directory itself, containing subdirectories, the data
+ forks of files, and the following special directory.
+
+ ..//bbaarr//..AApppplleeDDoouubbllee
+ A special directory holding AppleDouble header files for
+ ./bar and the files it contains, but not for the
+ subdirectories it contains.
+
+ ..//bbaarr//..AApppplleeDDoouubbllee//..PPaarreenntt
+ The header file for the directory ./bar, containing the
+ Finder's metadata for the directory.
+
+ The files in a directory are represented as two files:
+
+ ..//ffoooo
+ The data fork of the file ./foo.
+
+ ..//..AApppplleeDDoouubbllee//ffoooo
+ The header file for file ./foo, containing the resource fork
+ and the Finder's metadata.
+
+ The header files used in this scheme are version 1 AppleDouble
+ header files. They are described briefly in the section ``A
+ Guide to Special File Formats'' in this document. The format is
+ documented in detail in the ``Apple II File Type Notes'' under
+ the type ``$E0.0002/$E0.0003-AppleDouble'', and in Appendix B of
+ the ``A/UX Toolbox: Macintosh ROM Interface'' manual.
+
+ 22..66.. ggiidd==nn
+
+ default value: gid of the mounting process
+
+ Specifies the group that owns all files and directories on the
+ filesystem. (Same as for the MS-DOS and HPFS filesystems.)
+
+ 22..77.. nnaammeess=={{77bbiitt,, 88bbiitt,, aallpphhaa,, ccaapp,, llaattiinn,, nneettaattaallkk,, ttrriivviiaall}}
+
+ default value: varies as follows
+
+ +o If the fork option is set to double, then names defaults to alpha.
+
+ +o If the fork option is set to netatalk, then names defaults to
+ netatalk.
+
+ +o If the fork option is set to cap (or has taken that value by
+ default), then names defaults to cap.
+
+ This option determines how to convert between valid Macintosh
+ filenames and valid Linux filenames. The 7bit, 8bit and alpha options
+ correspond to Apple's recommended conventions named ``7-bit ASCII'',
+ ``8-bit'' and ``7-bit alphanumeric''.
+
+ 77bbiitt
+ When converting from Macintosh filenames to Linux filenames the
+ NULL (0x00), slash (/) and percent (%) characters and the
+ extended 8-bit characters (hexadecimal codes 0x80-0xff) are
+ replaced by a percent character (%) followed by the two-digit
+ hexadecimal code for the character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string "%YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the string "%YZ" is unchanged. A
+ colon (:) is replaced by a pipe character (|).
+
+ 88bbiitt
+ When converting from Macintosh filenames to Linux filenames the
+ NULL (0x00), slash (/) and percent (%) characters are replaced
+ by a percent character (%) followed by the two-digit hexadecimal
+ code for the character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string "%YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the string "%YZ" is unchanged. A
+ colon (:) is replaced by a pipe character (|).
+
+ aallpphhaa
+ When converting from Macintosh filenames to Linux filenames only
+ the alphanumeric characters (a-z, A-Z and 0-9), the underscore
+ (_) and the last period (.) in the filename are unchanged. The
+ remaining characters are replaced by a percent character (%)
+ followed by the two-digit hexadecimal code for the character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string "%YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the string "%YZ" is unchanged. A
+ colon (:) is replaced by a pipe character (|).
+
+ ccaapp
+ The convention used by the Columbia AppleTalk Package's AUFS.
+
+ When converting from Macintosh filenames to Linux filenames the
+ characters from space ( ) through tilde (~) (ASCII 32-126) are
+ unchanged, with the exception of slash (/). The slash (/) and
+ all characters outside the range 32-126 are replaced by a colon
+ (:) followed by the two-digit hexadecimal code for the
+ character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string ":YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the colon is replaced by a pipe
+ character (|).
+
+ llaattiinn
+ When converting from Macintosh filenames to Linux filenames the
+ characters from space ( ) through tilde (~) (ASCII 32-126) are
+ unchanged, with the exception of slash (/) and percent (%). The
+ extended 8-bit Macintosh characters with equivalents in the
+ Latin-1 character set are replaced by those equivalents. The
+ remaining characters are replaced by a percent character (%)
+ followed by the two-digit hexadecimal code for the character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string "%YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the string "%YZ" is unchanged. The
+ Latin-1 characters with equivalents in the extended 8-bit
+ Macintosh character set are replaced by those equivalents. A
+ colon (:) is replaced by a pipe character (|).
+
+ Thanks to Holger Schemel (aeglos@valinor.owl.de) for
+ contributing this conversion mode.
+
+ nneettaattaallkk
+ The convention used by the Netatalk afpd.
+
+ When converting from Macintosh filenames to Linux filenames the
+ characters from space ( ) through tilde (~) (ASCII 32-126) are
+ unchanged, with the exception of slash (/) and any initial
+ period (.). The slash (/) and any initial period (.) and all
+ characters outside the range 32-126 are replaced by a colon (:)
+ followed by the two-digit hexadecimal code for the character.
+
+ When converting from Linux filenames to Macintosh filenames the
+ string ":YZ" is replaced by the character with hexadecimal code
+ 0xYZ. If 0xYZ is not a valid hexadecimal number or is the code
+ for NULL or colon (:) then the colon is replaced by a pipe
+ character (|).
+
+ ttrriivviiaall
+ When converting from Macintosh filenames to Linux filenames a
+ slash character (/) is replaced by a colon (:).
+
+ When converting from Linux filenames to Macintosh filenames a
+ colon (:) is replaced by a slash character (/).
+
+ 22..88.. ppaarrtt==nn
+
+ default value: 0
+
+ Specifies which HFS partition to mount from a Macintosh CDROM or hard
+ drive. Partitions are numbered from 0 and count only those identified
+ in the partition table as containing HFS filesystems. This option is
+ only useful when the Linux platform doesn't fully support Macintosh
+ partition tables. In particular on MkLinux and Linux-Pmac this option
+ is useless.
+
+ Note that in versions before 0.8.3 partitions were numbered from 1.
+
+ 22..99.. qquuiieett
+
+ If included in the options, then chown and chmod operations will not
+ return errors, but will instead fail silently. (Same as for the MS-
+ DOS and HPFS filesystems.)
+
+ 22..1100.. ttyyppee==cccccccc
+
+ default value: ``????''
+
+ Specifies the 4-character string specifying the Finder's Type for new
+ files.
+
+ 22..1111.. uuiidd==nn
+
+ default value: uid of the mounting process
+
+ Specifies the user that owns all files and directories on the
+ filesystem. (Same as for the MS-DOS and HPFS filesystems.)
+
+ 22..1122.. uummaasskk==nn
+
+ default value: umask of the mounting process
+
+ Specifies (in octal) the umask used for all files and directories.
+ (Same as for the MS-DOS and HPFS filesystems.)
+
+ 33.. WWrriittiinngg ttoo HHFFSS FFiilleessyysstteemmss
+
+ Each of the values of the fork mount option yields a different
+ representation of the Macintosh-specific parts of a file within the
+ structure of the Linux filesystem. There are, therefore, slightly
+ different steps involved in copying files if you want to preserve the
+ resource forks and the Finder's metadata.
+
+ It is important to remember not to use normal user-level tools to
+ modify a filesystem mounted with the afpd mount option.
+
+ Regardless of the value of the fork mount option you can do virtually
+ everything to the data fork of a file that you can to a file on any
+ other filesystem. The limitations are essentially the same as those
+ imposed by the MS-DOS filesystem:
+
+ +o You can't change the uid or gid of files.
+
+ +o You can't set the set-uid, set-gid or sticky permission bits.
+
+ +o You can't clear the execute permission bits.
+
+ Likewise you can do virtually everything to a directory that you can
+ to a directory on another file system with the following exceptions:
+
+ +o You can't create, delete or rename resource forks of files or the
+ Finder's metadata. Note, however, that they are created (with
+ defaults values), deleted and renamed along with the corresponding
+ data fork or directory.
+
+ +o You can't change permissions on directories.
+
+ +o You can't change the uid or gid of directories.
+
+ +o You can't create multiple links to files.
+
+ +o You can't create symlinks, device files, sockets or FIFOs.
+
+ 33..11.. WWrriittiinngg wwiitthh ffoorrkk==ccaapp
+
+ Unlike the other schemes for representing forked files, the CAP scheme
+ presents the resource fork as an independent file; the resource fork
+ of ./foo is ./.resource/foo. Therefore, you can treat it as a normal
+ file. You can do anything to a resource fork that you can do to a
+ data fork, except that you cannot enable execute permissions on a
+ resource fork. Therefore, resource forks are not suitable for holding
+ Linux executables or shared libraries.
+
+ If you plan to use the resource fork on a Macintosh then you must obey
+ the format of a valid resource fork. This format is documented in
+ Chapter 1 of Apple's _I_n_s_i_d_e _M_a_c_i_n_t_o_s_h_: _M_o_r_e _M_a_c_i_n_t_o_s_h _T_o_o_l_b_o_x. The
+ filesystem knows nothing about this format and so does nothing to
+ enforce it.
+
+ The current support for reading and writing is sufficient to allow
+ copying of entire directories with tar, as long as both the source and
+ destination are mounted with fork=cap. tar may complain about being
+ unable to change the uid, gid or mode of files. This is normal and is
+ an unavoidable side effect of having a single uid, gid and umask
+ for the entire filesystem.
+
+ It is impossible to create a resource fork or a Finder metadata file.
+ However, they are created automatically when the data fork is created.
+ Therefore, if you wish to copy a single file including both forks and
+ the Finder's metadata then you must create the data fork first. Then
+ you can copy the resource fork and the Finder's metadata. For
+ instance to copy the file foo to dir/bar you should do the following:
+
+ 1. cp foo dir/bar
+
+ 2. cp .resource/foo dir/.resource/bar
+
+ 3. cp .finderinfo/foo dir/.finderinfo/bar
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ If you wish to move foo to dir/bar and foo and dir are on the same
+ filesystem then you only need to execute ``mv foo dir/bar'' and the
+ resource fork and the Finder's metadata will move too. However, if
+ foo and dir are on different filesystems then this will lose the
+ resource fork and metadata. Therefore, it is safest to always move
+ files as follows:
+
+ 1. cp foo dir/bar
+
+ 2. cp .resource/foo dir/.resource/bar
+
+ 3. cp .finderinfo/foo dir/.finderinfo/bar
+
+ 4. rm foo
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ Directories have no resource fork but you may wish to create a
+ directory which has the same location and view on the Finder's screen
+ as an existing one. This can be done by copying the Finder metadata
+ file. To give the directory bar the same location, layout, creation
+ date and modify date as foo you simply execute ``cp .finderinfo/foo
+ .finderinfo/bar''.
+
+ When copying an entire directory with ``cp -R'' you may also wish to
+ copy the metadata for the directory:
+
+ 1. cp -R foo bar
+
+ 2. cp .finderinfo/foo .finderinfo/bar
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored.
+
+ 33..22.. WWrriittiinngg wwiitthh ffoorrkk==ddoouubbllee
+
+ The current support for reading and writing header files is sufficient
+ to allow copying of entire directories with tar, as long as both the
+ source and destination are mounted with fork=double. tar may complain
+ about being unable to change the uid, gid or mode of files. This is
+ normal and is an unavoidable side effect of having a single uid,
+ gid and umask for the entire filesystem.
+
+ It is impossible to create a header file. However, they are created
+ automatically when the data fork is created. Therefore, if you wish
+ to copy a single file including both forks and the Finder's metadata
+ then you must create the data fork first. Then you can copy the
+ header file. For instance, to copy the file foo to dir/bar you should do
+ the following:
+
+ 1. cp foo dir/bar
+
+ 2. cp %foo dir/%bar
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ If you wish to move foo to dir/bar and foo and dir are on the same
+ filesystem then you only need to execute ``mv foo dir/bar'' and the
+ header file will move too. However, if foo and dir are on different
+ filesystems then this will lose the header file. Therefore, it is
+ safest to always move files as follows:
+
+ 1. cp foo dir/bar
+
+ 2. cp %foo dir/%bar
+
+ 3. rm foo
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ Directories have no resource fork but you may wish to create a
+ directory which has the same location and view on the Finder's screen
+ as an existing one. This can be done by copying the corresponding
+ header file. To give the directory bar the same location, layout,
+ creation date and modify date as foo simply execute ``cp %foo %bar''.
+
+ When copying an entire directory with ``cp -R'' you may also wish to
+ copy the header file for the directory as well:
+
+ 1. cp -R foo bar
+
+ 2. cp %foo %bar
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored.
+
+ 33..33.. WWrriittiinngg wwiitthh ffoorrkk==nneettaattaallkk
+
+ The current support for reading and writing header files is sufficient
+ to allow copying of entire directories with tar, as long as both the
+ source and destination are mounted fork=netatalk. tar may complain
+ about being unable to change the uid, gid or mode of files. This is
+ normal and is an unavoidable side effect of having a single uid,
+ gid and umask for the entire filesystem.
+
+ It is impossible to create a header file. However, they are created
+ automatically when the data fork is created. Therefore, if you wish
+ to copy a single file including both forks and the Finder's metadata
+ then you must create the data fork first. Then you can copy the
+ header file. For instance, to copy the file foo to dir/bar you should do
+ the following:
+
+ 1. cp foo dir/bar
+
+ 2. cp .AppleDouble/foo dir/.AppleDouble/bar
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ If you wish to move foo to dir/bar and foo and dir are on the same
+ filesystem then you only need to execute ``mv foo dir/bar'' and the
+ header file will move too. However, if foo and dir are on different
+ filesystems then this will lose the header file. Therefore, it is
+ safest to always move files as follows:
+
+ 1. cp foo dir/bar
+
+ 2. cp .AppleDouble/foo dir/.AppleDouble/bar
+
+ 3. rm foo
+
+ You may get ``Operation not permitted'' errors from cp when it tries
+ to change the permissions on files. These errors can safely be
+ ignored. This method will work even if the file dir/bar exists.
+
+ Directories have no resource fork but you may wish to create a
+ directory which has the same location and view on the Finder's screen
+ as an existing one. This can be done by copying the corresponding
+ header file. To give the directory bar the same location, layout,
+ creation date and modify date as foo you simply execute ``cp
+ foo/.AppleDouble/.Parent bar/.AppleDouble/.Parent''.
+
+ Because the fork=netatalk scheme holds the header file for a directory
+ within that directory, directories can safely be copied with ``cp -R
+ foo bar'' with no loss of information. However, you may get
+ ``Operation not permitted'' errors from cp when it tries to change the
+ permissions on files. These errors can safely be ignored.
+
+ 44.. AA GGuuiiddee ttoo SSppeecciiaall FFiillee FFoorrmmaattss
+
+ Each of the values of the fork mount option yields different special
+ files to represent the Macintosh-specific parts of a file within the
+ structure of the Linux filesystem. You can write to these special
+ files to change things such as the Creator and Type of a file.
+ However, to do so safely you must follow certain rules to avoid
+ corrupting the data. Additionally, there are certain fields in the
+ special files that you can't change (writes to them will fail
+ silently).
+
+ 44..11.. CCAAPP ..ffiinnddeerriinnffoo FFiilleess
+
+ The Finder's metadata for the file ./foo is held in the file
+ ./.finderinfo/foo. The file has a fixed format defined in hfs_fs.h as
+ follows:
+
+ ______________________________________________________________________
+ struct hfs_cap_info {
+ __u8 fi_fndr[32]; /* Finder's info */
+ __u16 fi_attr; /* AFP attributes */
+ __u8 fi_magic1; /* Magic number: */
+ #define HFS_CAP_MAGIC1 0xFF
+ __u8 fi_version; /* Version of this structure: */
+ #define HFS_CAP_VERSION 0x10
+ __u8 fi_magic; /* Another magic number: */
+ #define HFS_CAP_MAGIC 0xDA
+ __u8 fi_bitmap; /* Bitmap of which names are valid: */
+ #define HFS_CAP_SHORTNAME 0x01
+ #define HFS_CAP_LONGNAME 0x02
+ __u8 fi_shortfilename[12+1]; /* "short name" (unused) */
+ __u8 fi_macfilename[32+1]; /* Original (Macintosh) name */
+ __u8 fi_comln; /* Length of comment (always 0) */
+ __u8 fi_comnt[200]; /* Finder comment (unused) */
+ /* optional: used by aufs only if compiled with USE_MAC_DATES */
+ __u8 fi_datemagic; /* Magic number for dates extension: */
+ #define HFS_CAP_DMAGIC 0xDA
+ __u8 fi_datevalid; /* Bitmap of which dates are valid: */
+ #define HFS_CAP_MDATE 0x01
+ #define HFS_CAP_CDATE 0x02
+ __u8 fi_ctime[4]; /* Creation date (in AFP format) */
+ __u8 fi_mtime[4]; /* Modify date (in AFP format) */
+ __u8 fi_utime[4]; /* Un*x time of last mtime change */
+ };
+ ______________________________________________________________________
+
+ The type __u8 is an unsigned character, and __u16 is an unsigned
+ 16-bit integer.
+
+ Currently only the fields fi_fndr, fi_attr, fi_ctime and fi_mtime can
+ be changed. Writes to the other fields are silently ignored.
+ However, you shouldn't write random bytes to the other fields, since
+ they may be writable in the future.
+
+ The fi_fndr field is the ``Finder info'' and ``Extended Finder info''
+ for a file or directory. These structures are described in various
+ books on Macintosh programming. The portion of the most interest is
+ probably the first 8 bytes which, for a file, give the 4-byte Type
+ followed by the 4-byte Creator.
+
+ The fi_attr field is the AFP attributes of the file or directory.
+ While you can write any value to this field, only the ``write-
+ inhibit'' bit is significant. Setting or clearing this bit will clear
+ or set the write bits in the file's permissions. When you read from
+ this field anything you may have written is lost. If the file has
+ write permissions enabled then you will read zero from this field.
+ With write permission disabled you will read back 0x01 0xA0, which
+ corresponds to setting the ``write-inhibit'', ``rename-inhibit'' and
+ ``delete-inhibit'' bits.
+
+ The fi_ctime and fi_mtime are the Macintosh created and modified time
+ for the file or directory, and are 32-bit signed integers in network
+ byteorder giving seconds from 00:00 GMT Jan. 1, 2000.
+
+ 44..22.. AApppplleeDDoouubbllee HHeeaaddeerr FFiilleess
+
+ Both the fork=double and fork=netatalk schemes for representing forked
+ files use AppleDouble header files to contain the resource fork and
+ the Finder's metadata together in a single file.
+
+ The AppleDouble format specifies a fixed-format header which describes
+ which fields are contained in the remainder of the file, where they
+ are located in the file and how long they are. A full description of
+ the version 1 format used when fork=netatalk is available from ??????.
+ The version 2 format used when fork=double is documented in ??????.
+ The discussion that follows assumes you have read and understood these
+ documents, which may be difficult until I've replaced the ``??????''s
+ above with something more informative :-).
+
+ Due to the variable structure of an AppleDouble header file you must
+ not use buffered I/O when reading or writing them; you should only use
+ the read() and write() system calls. It is also important that you
+ make some effort to coordinate processes that are reading and writing
+ the same header file, since a reader will receive the wrong data if
+ the location of a given entry has changed since it read the descriptor
+ for the entry. If a process tries to read the descriptor table while
+ it is changing then it is possible to read totally meaningless data.
+
+ When a header file is opened it is initially presented with a default
+ header layout. You may write to the header to change the layout, but
+ when all file descriptors for the file or directory have been closed
+ the change in format is lost and subsequent opens will yield the
+ default layout. Changes to supported entries are made directly to the
+ filesystem and are thus preserved when the file is closed and
+ reopened.
+
+ The HFS filesystem currently uses a fixed-size table to hold the
+ descriptors. Therefore you are limited to HFS_HDR_MAX (currently 10)
+ descriptors. In the unlikely event that you try to write a header
+ with more descriptors, a warning will be issued by the kernel, and
+ extra descriptors will be ignored. This should be considered a bug
+ and will hopefully change sooner rather than later.
+
+ The results of specifying overlapping entries is undefined and should
+ not be relied upon to remain unchanged from one version of the HFS
+ filesystem to the next. There is no valid reason to define
+ overlapping entries, so just don't do it!
+
+ Changes to the magic number and version fields are preserved until all
+ file descriptors are closed, however the only significance given to
+ them internally is that the 16 bytes following the version changes
+ meaning according to the version. For version 1 header files these 16
+ bytes contain the string ``Macintosh'' followed by 7 spaces. For any
+ other value of the version field these 16 bytes are all zeros. In
+ either case writes to these 16 bytes are silently ignored.
+
+ Since the magic number and version are given no other significance
+ internally, you are free to do many things that violate the official
+ formats. For instance you can create an entry for the data fork in a
+ header file with an AppleDouble magic number or create ``File Info''
+ (id=7) entries in version 2 header files and ``File Dates Info''
+ (id=8) entries in version 1 header files. However, future versions of
+ the filesystem may enforce the format more strictly.
+
+ Entry id 1 (``Data Fork'') is read-only. You should use the data file
+ to modify the data fork. The data fork is, of course, not supported
+ for directories.
+
+ Entry ids 2, 7, 8, 9 and 10 (``Resource Fork'', ``File Info'', ``File
+ Dates Info'', ``Finder Info'' and ``Macintosh File Info'') are fully
+ supported, meaning that their contents may be read and written and
+ that data written is preserved when the file is closed and reopened.
+ The resource fork is, of course, not supported for directories.
+
+ Entry id 7 specifies some of the same data given by ids 8 and 10. If
+ you create a header file with an entry for id 7 and for ids 8 or 10,
+ then the behavior with respect to their interaction is undefined. A
+ header that contains an entry for id 7 and for ids 8 or 10 is not
+ valid as either a version 1 or a version 2 header file, so there is no
+ reason to do this and future versions may prevent it.
+
+ Entry id 3 (``Real Name'') is read-only, since it will change
+ automatically when a file is renamed. Writes to the corresponding
+ entry are silently ignored.
+
+ All other entry ids are ignored. You may create descriptors for them;
+ in fact the default header layout when fork=netatalk includes a
+ descriptor for id 4 (``Comment''). However writes to the entries
+ corresponding to the ignored ids fail silently and reads from the
+ entries always return zeros. However, you shouldn't write random
+ bytes to unsupported entries, since they may be supported in the
+ future.
+
+ All of the supported entry types except the data and resource forks
+ have a fixed length. If you give them a smaller length in the
+ descriptor then you are unable to access part of the corresponding
+ entry. If you give them a larger length in the descriptor, then the
+ corresponding entry is padded with zeros and writes to the extra space
+ are silently ignored.
+
+ Writes to the length field of descriptors for the data and resource
+ forks will cause the corresponding fork to grow (with zero padding) or
+ shrink to the indicated length.
+
+ If you have an entry for the data fork then the descriptor's length
+ field does not change automatically to reflect any modification of the
+ data fork directly (the data does change however). If the data fork
+ is longer than the descriptor indicates, then a portion of it is
+ inaccessible. If the data fork is shorter than the descriptor
+ indicates then reads will be padded with zeros.
+
+ Writes beyond the end of the resource fork that extend into empty
+ space between entries or beyond the end of the file will extend the
+ fork, automatically changing the length field of the corresponding
+ descriptor. Writes to any other space between entries are silently
+ ignored and reads of such spaces always return zeros.
+
+ Calling truncate() on a header file can change the length of the
+ resource fork and such a change will automatically be reflected in the
+ length field of the corresponding descriptor. If truncate() shortens
+ the file so that the entry for the resource fork would extend beyond
+ the new end of the file then the fork is shortened to fit in the space
+ that remains, or to zero bytes if the entry is now entirely beyond the
+ end of the file. If the last entry in a header file is the resource
+ fork then a call to truncate() that extends the header file will
+ extend the fork with zeros. Note that this happens even if there was
+ previously space between the end of the fork and the end of the file.
+
+ 55.. RReeppoorrttiinngg BBuuggss
+
+ If you'd like any problems you encounter fixed, you'll need to provide
+ a detailed bug report. However, you should check the FAQ (available
+ from the HFS for Linux Page <http://www-sccm.Stanford.EDU/~hargrove/HFS/>)
+ first to be certain that your problem is not a known limitation of the
+ filesystem. If your bug doesn't appear in the FAQ then you should e-mail
+ me at hargrove@sccm.Stanford.EDU.
+
+ 55..11.. WWhhaatt GGooeess iinn aa BBuugg RReeppoorrtt
+
+ When writing your bug report, include any facts you think might be
+ relevant; I'd much rather have a bunch of extra facts than need to
+ e-mail you to get the information. At a minimum the following
+ information should be included:
+
+ +o The version of the HFS filesystem you are using (see
+ linux/fs/hfs/version.h).
+
+ +o The kernel version you are using.
+
+ +o Any unofficial kernel patches or loadable modules you are using.
+
+ +o If you are loading the HFS filesystem as a module, then version of
+ the module utilities used to load hfs.o.
+
+ +o The type of media you are working with (floppy, CDROM, ZIP Drive,
+ etc.).
+
+ +o The steps required to reproduce the bug, including mount options
+ used. (If you can't reproduce the bug tell me everything you did
+ the one time it did occur, but be warned that non-reproducible bugs
+ can only rarely be fixed.)
+
+ 55..22.. HHooww ttoo RReeppoorrtt aa KKeerrnneell OOooppss oorr GGPPFF
+
+ If you encounter a bug that causes a kernel Oops or a General
+ Protection Fault then you'll need to collect some additional
+ information for the bug report. If you are loading the HFS filesystem
+ as a module, then it is important that you do this before rebooting,
+ since the module is unlikely to be loaded at the same address after
+ the reboot.
+
+ You should include all the information that the kernel prints to the
+ console or to the system logs. However, the EIP and Stack Trace are
+ addresses in _y_o_u_r kernel and mean nothing to me without more
+ information. Using your System.map file (or either ksymoops or klogd)
+ determine which functions the EIP and Stack Trace are in. If you do
+ this by hand using your System.map file then the correct symbol is the
+ one of type t or T with the largest address less than or equal to the
+ one you are resolving.
+
+ If you are loading the HFS filesystem as a module and the Oops or GPF
+ was in the HFS code then the EIP and the top levels of the Stack Trace
+ will be in a loadable module, rather than in the kernel proper. So,
+ their symbols will not be in the file System.map. Therefore, you will
+ need to use /proc/ksyms, or a loadmap produced by passing the -m
+ option to insmod, to locate those symbols.
+
+ 66.. LLeeggaall NNoottiicceess
+
+ 66..11.. TThhiiss DDooccuummeenntt
+
+ This document is Copyright (c) 1996, 1997 by Paul H. Hargrove.
+
+ Permission is granted to make and distribute verbatim copies of this
+ document provided the copyright notice and this permission notice are
+ preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of this
+ document under the conditions for verbatim copies above, provided a
+ notice clearly stating that the document is a modified version is also
+ included in the modified document.
+
+ Permission is granted to copy and distribute translations of this
+ document into another language, under the conditions specified above
+ for modified versions.
+
+ Permission is granted to convert this document into another media
+ under the conditions specified above for modified versions provided
+ the requirement to acknowledge the source document is fulfilled by
+ inclusion of an obvious reference to the source document in the new
+ media. Where there is any doubt as to what defines ``obvious'' the
+ copyright owner reserves the right to decide.
+
+ 66..22.. TThhee SSooffttwwaarree
+
+ The HFS filesystem for Linux is Copyright (c) 1994-1997 by Paul H.
+ Hargrove.
+
+ This software is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This software is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this software in the file ``COPYING''; if not, write to the
+ Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+ USA.
+
+ 66..22..11.. TThhee CCoolluummbbiiaa AApppplleeTTaallkk PPaacckkaaggee ffoorr UUNNIIXX
+
+ The source code distribution of the Columbia AppleTalk Package for
+ UNIX, version 6.0, (CAP) was used as a _s_p_e_c_i_f_i_c_a_t_i_o_n of the location
+ and format of files used by CAP's Aufs. No code from CAP appears in
+ the HFS filesystem. The HFS filesystem is not a work ``derived'' from
+ CAP in the sense of intellectual property law.
+
+ 66..22..22.. NNeettaattaallkk
+
+ The source code distributions of Netatalk, versions 1.3.3b2 and 1.4b2,
+ were used as a _s_p_e_c_i_f_i_c_a_t_i_o_n of the location and format of files used
+ by Netatalk's afpd. No code from Netatalk appears in the HFS
+ filesystem. The HFS filesystem is not a work ``derived'' from
+ Netatalk in the sense of intellectual property law.
+
+ 66..33.. TTrraaddeemmaarrkkss
+
+ +o ``Finder'' is a trademark of Apple Computer, Inc.
+
+ +o ``Apple'', ``AppleShare'', ``AppleTalk'' and ``Macintosh'' are
+ registered trademarks of Apple Computer, Inc.
+
+ +o ``Microsoft'' and ``MS-DOS'' are registered trademarks of Microsoft
+ Corporation.
+
+ +o All other trademarks are the property of their respective owners.
+
diff --git a/fs/hfs/INSTALL.txt b/fs/hfs/INSTALL.txt
new file mode 100644
index 000000000..8ea44c0ec
--- /dev/null
+++ b/fs/hfs/INSTALL.txt
@@ -0,0 +1,126 @@
+ Installation instructions for the HFS Filesystem for Linux
+ Paul H. Hargrove, hargrove@sccm.Stanford.EDU
+ version 0.95 28 Apr 1997
+
+ This document explains how to compile and install version 0.95 of
+ hfs_fs, the HFS filesystem for Linux.
+
+ 11.. SSyysstteemm RReeqquuiirreemmeennttss
+
+ You will need the following to compile and use this release of hfs_fs:
+
+ +o Kernel version 2.0.1 or newer compiled with modules enabled
+ (CONFIG_MODULES).
+
+ +o The kernel sources (or at least the header files) available online.
+
+ +o The module utilities package current for your kernel version and an
+ understanding of how to use it. (The file
+ Documentation/modules.txt in the kernel source directory provides a
+ brief introduction.)
+
+ 22.. IInnssttaallllaattiioonn
+
+ This release of the HFS filesystem is not part of the official kernel
+ distribution. Therefore, it is compiled as a module and then loaded
+ into the kernel using the module utilities. Therefore, your kernel
+ must be compiled with CONFIG_MODULES enabled.
+
+ 22..11.. CCoommppiilliinngg tthhee llooaaddaabbllee mmoodduullee
+
+ To compile hfs.o you should only need to execute ``make'' in the
+ hfs_fs source directory.
+
+ If gcc complains about not finding a large number of header files with
+ names beginning with ``linux/'' then you probably don't have the
+ kernel header files installed correctly. Either /usr/include/linux,
+ /usr/include/asm and /usr/include/scsi should be symbolic links to
+ include/linux, include/asm and include/scsi in the kernel source tree
+ for the kernel you wish to use hfs_fs with, or else they should be
+ directories containing the header files for the kernel you wish to use
+ hfs_fs with.
+
+ If gcc complains about not finding linux/version.h, then you will need
+ to run ``make dep'' in the kernel source directory to build it. Under
+ MkLinux, run ``make include/linux/version.h'' instead.
+
+ If gcc complains about not finding the files linux/config.h or
+ linux/autoconf.h, then you will need to run ``make config'' and ``make
+ dep'' in the kernel source directory to build these two files.
+
+ If you are compiling on a DEC Alpha and receive messages saying
+ assignment from incompatible pointer type when compiling files dir_*.c
+ and file_*.c, then you need to change a single line in the file
+ linux/hfs_fs.h. Remove the text ``&& !defined(__alpha__)'' from the
+ end of line 217.
+
+ 22..22.. IInnssttaalllliinngg tthhee mmoodduullee iinn tthhee mmoodduulleess ddiirreeccttoorryy ((ooppttiioonnaall))
+
+ If you plan to use kerneld to automatically load the module or if you
+ wish to use modprobe or insmod without supplying a complete path to
+ hfs.o, then you will need to copy hfs.o into a directory where the
+ module utilities expect to find it.
+
+ The proper directory may depend slightly on your configuration.
+ However, /lib/modules/default/fs/ is a common one for filesystem
+ modules. Once hfs.o is in the proper directory you should run depmod
+ -a to update the dependency list used by kerneld and modprobe.
+
+ 22..33.. LLooaaddiinngg tthhee mmoodduullee iinnttoo tthhee rruunnnniinngg kkeerrnneell
+
+ There are three ways to accomplish this:
+
+ 1. If you are running kerneld and have installed hfs.o in the modules
+ directory then you don't need to issue any commands; the module
+ will be loaded when you attempt to mount an HFS filesystem.
+
+ 2. If you are _n_o_t running kerneld then you can load hfs.o manually by
+ running modprobe hfs.o. If you have not installed hfs.o in one of
+ the standard module directories, then you will need to provide a full
+ path to the file hfs.o.
+
+ 3. If you have been experiencing kernel crashes with hfs_fs, then you
+ should file a bug report including the names of the functions which
+ the EIP and Stack Trace point into. To help with this you can ask
+ for a relocation map for the module when you load it. To do this
+ load the module with ``insmod -m hfs.o >loadmap''. Again, you may
+ need a full path to the file hfs.o if you have not placed it in one
+ of the standard module directories.
+
+ 22..44.. UUssiinngg tthhee mmoodduullee wwiitthh vveerrssiioonneedd ssyymmbboollss
+
+ All interaction between the module and the kernel takes place through
+ very stable (since the mid-1.3.x kernels) parts of the kernel. If you
+ enabled versioned symbols (CONFIG_MODVERSIONS) when you compiled your
+ kernel you should often be able to compile this module once and then
+ use it with many kernels newer than the one you compiled it for.
+
+ In any case, it is unlikely that this module will need changes with
+ each new kernel patch; simple recompilation should usually suffice.
+
+ 33.. LLeeggaall NNoottiicceess
+
+ 33..11.. TThhiiss DDooccuummeenntt
+
+ This document is Copyright (c) 1996, 1997 by Paul H. Hargrove.
+
+ Permission is granted to make and distribute verbatim copies of this
+ document provided the copyright notice and this permission notice are
+ preserved on all copies.
+
+ Permission is granted to copy and distribute modified versions of this
+ document under the conditions for verbatim copies above, provided a
+ notice clearly stating that the document is a modified version is also
+ included in the modified document.
+
+ Permission is granted to copy and distribute translations of this
+ document into another language, under the conditions specified above
+ for modified versions.
+
+ Permission is granted to convert this document into another media
+ under the conditions specified above for modified versions provided
+ the requirement to acknowledge the source document is fulfilled by
+ inclusion of an obvious reference to the source document in the new
+ media. Where there is any doubt as to what defines ``obvious'' the
+ copyright owner reserves the right to decide.
+
diff --git a/fs/hfs/Makefile b/fs/hfs/Makefile
new file mode 100644
index 000000000..bab9a6e4b
--- /dev/null
+++ b/fs/hfs/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux hfs-filesystem routines.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+O_TARGET := hfs.o
+O_OBJS := balloc.o bdelete.o bfind.o bins_del.o binsert.o bitmap.o bitops.o \
+ bnode.o brec.o btree.o catalog.o dir.o dir_cap.o dir_dbl.o \
+ dir_nat.o extent.o file.o file_cap.o file_hdr.o inode.o mdb.o \
+ part_tbl.o string.o super.o sysdep.o trans.o version.o
+
+M_OBJS := $(O_TARGET)
+
+include $(TOPDIR)/Rules.make
diff --git a/fs/hfs/TODO b/fs/hfs/TODO
new file mode 100644
index 000000000..f989c3a91
--- /dev/null
+++ b/fs/hfs/TODO
@@ -0,0 +1,54 @@
+The hfs_fs "to do" list.
+------------------------
+Items are broken down into groups and the groups are listed in order
+from most important to least important. The items within each group
+are not placed in any particular order. The order in which items are
+listed probably doesn't correlate well with the order they will be
+addressed.
+
+Genuine bugs:
+1. Header files have compiled-in limit (currently 10) on descriptors.
+
+Missing features:
+1. The partition code should be migrated into the kernel to allow
+ simultaneous access to multiple partitions on a single disk.
+2. 1k block support is needed for some devices.
+3. An ioctl()-based interface is needed to provide a consistent way
+ to do things under all of the representations of forked files.
+
+Possible additional "fork" mount options:
+1. AppleSingle.
+2. The scheme MacOS uses on FAT disks (PC Exchange).
+3. "Flat" (no resource forks or metadata).
+
+Performance issues:
+1. Use drAllocPtr to speed block allocations.
+2. Keep a real cache of bnodes, rather than just a hash table of
+ the ones that are currently in use.
+3. Keep a real cache of extent records, rather than just a linked
+ list of the ones that are currently in use and the one most
+ recently used. This is particularly needed to get acceptable
+ performance with multiple readers on a file. Perhaps simply
+ keep them in memory once they've been read until the file is
+ closed.
+
+Implementation details:
+1. Allocation scheme could/should be closer to that used by Apple.
+2. B*-tree insertion could/should be closer to that used by Apple.
+3. Magic-number checks on data structures are rarely done.
+4. Error recovery is needed for failed binsert(), bdelete() and rename().
+5. Deadlock detection is needed to make insert_empty_bnode() and
+ bdelete() less likely to hang on a corrupted B-tree.
+6. Metadata for covered directories shouldn't appear in the filesystem.
+ Under CAP and AppleDouble it currently does. However, the obvious
+ solution is a real performance killer and is not worth implementing.
+
+Fantasy features:
+1. Access Desktop file/database for comment and icon.
+2. Implement mmap() for AppleDouble header files and CAP info files.
+3. Implement AppleShare client support.
+
+Suggestions/comments/questions are welcome.
+Code addressing any of the issues listed above is especially welcome.
+Paul H. Hargrove
+hargrove@sccm.Stanford.EDU
diff --git a/fs/hfs/balloc.c b/fs/hfs/balloc.c
new file mode 100644
index 000000000..d7e17e72f
--- /dev/null
+++ b/fs/hfs/balloc.c
@@ -0,0 +1,437 @@
+/*
+ * linux/fs/hfs/balloc.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * hfs_bnode_alloc() and hfs_bnode_bitop() are based on GPLed code
+ * Copyright (C) 1995 Michael Dreher
+ *
+ * This file contains the code to create and destroy nodes
+ * in the B-tree structure.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * get_new_node()
+ *
+ * Get a buffer for a new node without reading it from disk.
+ *
+ * Maps 'node' through the B-tree file's data fork to a physical
+ * block number, then grabs a buffer for that block.  The final 0
+ * argument to hfs_buffer_get() presumably suppresses the read from
+ * disk (consistent with the summary above) -- TODO confirm against
+ * the hfs_buffer_get() definition.  Returns HFS_BAD_BUFFER if the
+ * node cannot be mapped (extent lookup returned 0).
+ */
+static hfs_buffer get_new_node(struct hfs_btree *tree, hfs_u32 node)
+{
+ int tmp;
+ hfs_buffer retval = HFS_BAD_BUFFER;
+
+ /* translate the node number to a disk block via the extents */
+ tmp = hfs_extent_map(&tree->entry.u.file.data_fork, node, 0);
+ if (tmp) {
+ retval = hfs_buffer_get(tree->sys_mdb, tmp, 0);
+ }
+ return retval;
+}
+
+/*
+ * hfs_bnode_init()
+ *
+ * Description:
+ * Initialize a newly allocated bnode.
+ * Input Variable(s):
+ * struct hfs_btree *tree: Pointer to a B-tree
+ * hfs_u32 node: the node number to allocate
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_bnode_ref for the new node
+ * Preconditions:
+ * 'tree' points to a "valid" (struct hfs_btree)
+ * 'node' exists and has been allocated in the bitmap of bnodes.
+ * Postconditions:
+ * On success:
+ * The node is not read from disk, nor added to the bnode cache.
+ * The 'sticky' and locking-related fields are all zero/NULL.
+ * The bnode's nd{[FB]Link, Type, NHeight} fields are uninitialized.
+ * The bnode's ndNRecs field and offsets table indicate an empty bnode.
+ * On failure:
+ * The node is deallocated.
+ */
+static struct hfs_bnode_ref hfs_bnode_init(struct hfs_btree * tree,
+ hfs_u32 node)
+{
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ extern int bnode_count;
+#endif
+ struct hfs_bnode_ref retval;
+
+ retval.lock_type = HFS_LOCK_NONE;
+ if (!HFS_NEW(retval.bn)) {
+ hfs_warn("hfs_bnode_init: out of memory.\n");
+ goto bail2;
+ }
+
+ /* Partially initialize the in-core structure */
+ memset(retval.bn, 0, sizeof(*retval.bn));
+ retval.bn->magic = HFS_BNODE_MAGIC;
+ retval.bn->tree = tree;
+ retval.bn->node = node;
+ /* return the new node write-locked to the caller */
+ hfs_bnode_lock(&retval, HFS_LOCK_WRITE);
+
+ retval.bn->buf = get_new_node(tree, node);
+ if (!hfs_buffer_ok(retval.bn->buf)) {
+ goto bail1;
+ }
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ ++bnode_count;
+#endif
+
+ /* Partially initialize the on-disk structure.
+ Zero the whole node, then write the offset of the first
+ record into slot 1 of the offsets table so that ndNRecs
+ (zeroed above) and the table describe an empty bnode. */
+ memset(hfs_buffer_data(retval.bn->buf), 0, HFS_SECTOR_SIZE);
+ hfs_put_hs(sizeof(struct NodeDescriptor), RECTBL(retval.bn, 1));
+
+ return retval;
+
+bail1:
+ HFS_DELETE(retval.bn);
+bail2:
+ /* clear the bit in the bitmap (deallocates the node on failure,
+ per the postconditions documented above) */
+ hfs_bnode_bitop(tree, node, 0);
+ return retval;
+}
+
+/*
+ * init_mapnode()
+ *
+ * Description:
+ * Initializes a given node as a mapnode in the given tree.
+ * Input Variable(s):
+ * struct hfs_bnode *bn: the node to add the mapnode after.
+ * hfs_u32: the node to use as a mapnode.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_bnode *: the new mapnode or NULL
+ * Preconditions:
+ * 'tree' is a valid (struct hfs_btree).
+ * 'node' is the number of the first node in 'tree' that is not
+ * represented by a bit in the existing mapnodes.
+ * Postconditions:
+ * On failure 'tree' is unchanged and NULL is returned.
+ * On success the node given by 'node' has been added to the linked
+ * list of mapnodes attached to 'tree', and has been initialized as
+ * a valid mapnode with its first bit set to indicate itself as
+ * allocated.
+ */
+static struct hfs_bnode *init_mapnode(struct hfs_bnode *bn, hfs_u32 node)
+{
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ extern int bnode_count;
+#endif
+ struct hfs_bnode *retval;
+
+ if (!HFS_NEW(retval)) {
+ /* NOTE(review): message names "hfs_bnode_add", not
+ init_mapnode -- looks like a stale function name */
+ hfs_warn("hfs_bnode_add: out of memory.\n");
+ return NULL;
+ }
+
+ memset(retval, 0, sizeof(*retval));
+ retval->magic = HFS_BNODE_MAGIC;
+ retval->tree = bn->tree;
+ retval->node = node;
+ /* mapnodes stay resident, unlike ordinary cached bnodes */
+ retval->sticky = HFS_STICKY;
+ retval->buf = get_new_node(bn->tree, node);
+ if (!hfs_buffer_ok(retval->buf)) {
+ HFS_DELETE(retval);
+ return NULL;
+ }
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ ++bnode_count;
+#endif
+
+ /* Initialize the bnode data structure: a map node holds a
+ single record (the bitmap), running from the end of the
+ node descriptor to offset 0x1fa, per the two offsets
+ written into the record table below. */
+ memset(hfs_buffer_data(retval->buf), 0, HFS_SECTOR_SIZE);
+ retval->ndFLink = 0;
+ retval->ndBLink = bn->node;
+ retval->ndType = ndMapNode;
+ retval->ndNHeight = 0;
+ retval->ndNRecs = 1;
+ hfs_put_hs(sizeof(struct NodeDescriptor), RECTBL(retval, 1));
+ hfs_put_hs(0x1fa, RECTBL(retval, 2));
+ *((hfs_u8 *)bnode_key(retval, 1)) = 0x80; /* set first bit of bitmap */
+ retval->prev = bn;
+ hfs_bnode_commit(retval);
+
+ /* splice the new mapnode onto the tail of the map-node chain,
+ both on disk (ndFLink) and in core (next) */
+ bn->ndFLink = node;
+ bn->next = retval;
+ hfs_bnode_commit(bn);
+
+ return retval;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_bnode_bitop()
+ *
+ * Description:
+ * Allocate/free the requested node of a B-tree of the hfs filesystem
+ * by setting/clearing the corresponding bit in the B-tree bitmap.
+ * The size of the B-tree will not be changed.
+ * Input Variable(s):
+ * struct hfs_btree *tree: Pointer to a B-tree
+ * hfs_u32 bitnr: The node number to free
+ * int set: 0 to clear the bit, non-zero to set it.
+ * Output Variable(s):
+ * None
+ * Returns:
+ * 0: no error
+ * -1: The node was already allocated/free, nothing has been done.
+ * -2: The node is out of range of the B-tree.
+ * -4: not enough map nodes to hold all the bits
+ * Preconditions:
+ * 'tree' points to a "valid" (struct hfs_btree)
+ * 'bitnr' is a node number within the range of the btree, which is
+ * currently free/allocated.
+ * Postconditions:
+ * The bit number 'bitnr' of the node bitmap is set/cleared and the
+ * number of free nodes in the btree is decremented/incremented by one.
+ */
+int hfs_bnode_bitop(struct hfs_btree *tree, hfs_u32 bitnr, int set)
+{
+ struct hfs_bnode *bn; /* the current bnode */
+ hfs_u16 start; /* the start (in bits) of the bitmap in node */
+ hfs_u16 len; /* the len (in bits) of the bitmap in node */
+ hfs_u32 *u32; /* address of the u32 containing the bit */
+
+ if (bitnr >= tree->bthNNodes) {
+ hfs_warn("hfs_bnode_bitop: node number out of range.\n");
+ return -2;
+ }
+
+ /* Walk the chain of map nodes (starting with the header node)
+ until the one whose slice of the bitmap contains 'bitnr'.
+ The map is the last record in each node; the shifts by 3
+ convert the record-table byte offsets into bit counts. */
+ bn = &tree->head;
+ for (;;) {
+ start = bnode_offset(bn, bn->ndNRecs) << 3;
+ len = (bnode_offset(bn, bn->ndNRecs + 1) << 3) - start;
+
+ if (bitnr < len) {
+ break;
+ }
+
+ /* continue on to next map node if available */
+ if (!(bn = bn->next)) {
+ hfs_warn("hfs_bnode_bitop: too few map nodes.\n");
+ return -4;
+ }
+ bitnr -= len;
+ }
+
+ /* Change the correct bit */
+ bitnr += start;
+ u32 = (hfs_u32 *)hfs_buffer_data(bn->buf) + (bitnr >> 5);
+ bitnr %= 32;
+ /* hfs_set_bit/hfs_clear_bit evidently return the bit's previous
+ value; finding it already in the requested state means the
+ bitmap disagrees with the caller's bookkeeping. */
+ if ((set && hfs_set_bit(bitnr, u32)) ||
+ (!set && !hfs_clear_bit(bitnr, u32))) {
+ hfs_warn("hfs_bnode_bitop: bitmap corruption.\n");
+ return -1;
+ }
+ hfs_buffer_dirty(bn->buf);
+
+ /* adjust the free count */
+ tree->bthFree += (set ? -1 : 1);
+ tree->dirt = 1;
+
+ return 0;
+}
+
+/*
+ * hfs_bnode_alloc()
+ *
+ * Description:
+ * Find a cleared bit in the B-tree node bitmap of the hfs filesystem,
+ * set it and return the corresponding bnode, with its contents zeroed.
+ * When there is no free bnode in the tree, an error is returned, no
+ * new nodes will be added by this function!
+ * Input Variable(s):
+ * struct hfs_btree *tree: Pointer to a B-tree
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_bnode_ref for the new bnode
+ * Preconditions:
+ * 'tree' points to a "valid" (struct hfs_btree)
+ * There is at least one free bnode.
+ * Postconditions:
+ * On success:
+ * The corresponding bit in the btree bitmap is set.
+ * The number of free nodes in the btree is decremented by one.
+ * The node is not read from disk, nor added to the bnode cache.
+ * The 'sticky' field is uninitialized.
+ */
+struct hfs_bnode_ref hfs_bnode_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *bn; /* the current bnode */
+ hfs_u32 bitnr = 0; /* which bit are we examining */
+ hfs_u16 first; /* the first clear bit in this bnode */
+ hfs_u16 start; /* the start (in bits) of the bitmap in node */
+ hfs_u16 end; /* the end (in bits) of the bitmap in node */
+ hfs_u32 *data; /* address of the data in this bnode */
+
+ /* Walk the map-node chain, accumulating in 'bitnr' the number
+ of bitmap bits covered by the nodes already passed, so that
+ on exit 'bitnr' is the tree-wide node number of the free bit. */
+ bn = &tree->head;
+ for (;;) {
+ /* map record bounds, converted from bytes to bits */
+ start = bnode_offset(bn, bn->ndNRecs) << 3;
+ end = bnode_offset(bn, bn->ndNRecs + 1) << 3;
+ data = (hfs_u32 *)hfs_buffer_data(bn->buf);
+
+ /* search the current node */
+ first = hfs_find_zero_bit(data, end, start);
+ if (first < end) {
+ break;
+ }
+
+ /* continue search in next map node */
+ bn = bn->next;
+
+ if (!bn) {
+ hfs_warn("hfs_bnode_alloc: too few map nodes.\n");
+ goto bail;
+ }
+ bitnr += (end - start);
+ }
+
+ /* A clear bit past bthNNodes is only map padding, not a real
+ free node; treat it as an inconsistent free count. */
+ if ((bitnr += (first - start)) >= tree->bthNNodes) {
+ hfs_warn("hfs_bnode_alloc: no free nodes found, "
+ "count wrong?\n");
+ goto bail;
+ }
+
+ /* claim the bit; a nonzero return means it was already set */
+ if (hfs_set_bit(first % 32, data + (first>>5))) {
+ hfs_warn("hfs_bnode_alloc: bitmap corruption.\n");
+ goto bail;
+ }
+ hfs_buffer_dirty(bn->buf);
+
+ /* decrement the free count */
+ --tree->bthFree;
+ tree->dirt = 1;
+
+ return hfs_bnode_init(tree, bitnr);
+
+bail:
+ return (struct hfs_bnode_ref){NULL, HFS_LOCK_NONE};
+}
+
+/*
+ * hfs_btree_extend()
+ *
+ * Description:
+ * Adds nodes to a B*-tree if possible.
+ * Input Variable(s):
+ * struct hfs_btree *tree: the btree to add nodes to.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'tree' is a valid (struct hfs_btree *).
+ * Postconditions:
+ * If possible the number of nodes indicated by the tree's clumpsize
+ * have been added to the tree, updating all in-core and on-disk
+ * allocation information.
+ * If insufficient disk-space was available then fewer nodes may have
+ * been added than would be expected based on the clumpsize.
+ * In the case of the extents B*-tree this function will add fewer
+ * nodes than expected if adding more would result in an extent
+ * record for the extents tree being added to the extents tree.
+ * The situation could be dealt with, but doing so confuses Macs.
+ */
+void hfs_btree_extend(struct hfs_btree *tree)
+{
+ struct hfs_bnode_ref head;
+ struct hfs_bnode *bn, *tmp;
+ struct hfs_cat_entry *entry = &tree->entry;
+ struct hfs_mdb *mdb = entry->mdb;
+ hfs_u32 old_nodes, new_nodes, total_nodes, new_mapnodes, seen;
+
+ old_nodes = entry->u.file.data_fork.psize;
+
+ /* Grow the tree file by the minimum logical amount; the extent
+ code rounds the physical size up to the clumpsize. */
+ entry->u.file.data_fork.lsize += 1; /* rounded up to clumpsize */
+ hfs_extent_adj(&entry->u.file.data_fork);
+
+ /* resync the logical size with the new physical size */
+ total_nodes = entry->u.file.data_fork.psize;
+ entry->u.file.data_fork.lsize = total_nodes << HFS_SECTOR_SIZE_BITS;
+ new_nodes = total_nodes - old_nodes;
+ if (!new_nodes) {
+ return; /* no space available; tree unchanged */
+ }
+
+ /* node 0 is the header node, which anchors the map-node chain */
+ head = hfs_bnode_find(tree, 0, HFS_LOCK_WRITE);
+ if (!(bn = head.bn)) {
+ hfs_warn("hfs_btree_extend: header node not found.\n");
+ return;
+ }
+
+ /* Walk the map-node chain, counting the nodes each map record
+ can describe ('seen', in bits), appending new mapnodes until
+ the map covers all 'total_nodes' nodes. */
+ seen = 0;
+ new_mapnodes = 0;
+ for (;;) {
+ seen += bnode_rsize(bn, bn->ndNRecs) << 3;
+
+ if (seen >= total_nodes) {
+ break;
+ }
+
+ if (!bn->next) {
+ /* next uncovered node becomes a mapnode itself */
+ tmp = init_mapnode(bn, seen);
+ if (!tmp) {
+ hfs_warn("hfs_btree_extend: "
+ "can't build mapnode.\n");
+ hfs_bnode_relse(&head);
+ return;
+ }
+ ++new_mapnodes;
+ }
+ bn = bn->next;
+ }
+ hfs_bnode_relse(&head);
+
+ /* mapnodes consume some of the new nodes, so they don't count
+ as free */
+ tree->bthNNodes = total_nodes;
+ tree->bthFree += (new_nodes - new_mapnodes);
+ tree->dirt = 1;
+
+ /* write the backup MDB, not returning until it is written */
+ hfs_mdb_commit(mdb, 1);
+
+ return;
+}
+
+/*
+ * hfs_bnode_free()
+ *
+ * Remove a node from the cache and mark it free in the bitmap.
+ *
+ * The caller must hold the only reference to the node (count == 1),
+ * so that releasing 'bnr' drops the node entirely.  Returns 0 on
+ * success or -EIO if other references still exist.
+ */
+int hfs_bnode_free(struct hfs_bnode_ref *bnr)
+{
+ /* capture these before hfs_bnode_relse() releases the
+ reference, after which bnr->bn must not be touched */
+ hfs_u32 node = bnr->bn->node;
+ struct hfs_btree *tree = bnr->bn->tree;
+
+ if (bnr->bn->count != 1) {
+ hfs_warn("hfs_bnode_free: count != 1.\n");
+ return -EIO;
+ }
+
+ hfs_bnode_relse(bnr);
+ /* NOTE(review): hfs_bnode_bitop() can fail (-1/-2/-4) but the
+ result is ignored here; it does warn on its own, though */
+ hfs_bnode_bitop(tree, node, 0);
+ return 0;
+}
diff --git a/fs/hfs/bdelete.c b/fs/hfs/bdelete.c
new file mode 100644
index 000000000..0e47c2737
--- /dev/null
+++ b/fs/hfs/bdelete.c
@@ -0,0 +1,483 @@
+/*
+ * linux/fs/hfs/bdelete.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to delete records in a B-tree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs_btree.h"
+
+/*================ Variable-like macros ================*/
+
+#define FULL (HFS_SECTOR_SIZE - sizeof(struct NodeDescriptor))
+#define NO_SPACE (HFS_SECTOR_SIZE+1)
+
+/*================ File-local functions ================*/
+
+/*
+ * bdelete_nonempty()
+ *
+ * Description:
+ * Deletes a record from a given bnode without regard to it becoming empty.
+ * Input Variable(s):
+ * struct hfs_brec* brec: pointer to the brec for the deletion
+ * struct hfs_belem* belem: which node in 'brec' to delete from
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'brec' points to a valid (struct hfs_brec).
+ * 'belem' points to a valid (struct hfs_belem) in 'brec'.
+ * Postconditions:
+ * The record has been deleted from the position indicated by 'brec'.
+ */
+static void bdelete_nonempty(struct hfs_brec *brec, struct hfs_belem *belem)
+{
+ int i, rec, nrecs, tomove;
+ hfs_u16 size;
+ hfs_u8 *start;
+ struct hfs_bnode *bnode = belem->bnr.bn;
+
+ /* locate the doomed record and the data that follows it */
+ rec = belem->record;
+ nrecs = bnode->ndNRecs;
+ size = bnode_rsize(bnode, rec);
+ /* bytes of record data after the doomed record */
+ tomove = bnode_offset(bnode, nrecs+1) - bnode_offset(bnode, rec+1);
+
+ /* adjust the record table */
+ /* each following record's offset shrinks by 'size' and its
+ table entry slides down one slot */
+ for (i = rec+1; i <= nrecs; ++i) {
+ hfs_put_hs(bnode_offset(bnode,i+1) - size, RECTBL(bnode,i));
+ }
+
+ /* move it down */
+ /* close the gap over the deleted record's bytes */
+ start = bnode_key(bnode, rec);
+ memmove(start, start + size, tomove);
+
+ /* update record count */
+ --bnode->ndNRecs;
+}
+
+/*
+ * del_root()
+ *
+ * Description:
+ * Delete the current root bnode.
+ * Input Variable(s):
+ * struct hfs_bnode_ref *root: reference to the root bnode
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * int: 0 on success, error code on failure
+ * Preconditions:
+ * 'root' refers to the root bnode with HFS_LOCK_WRITE access.
+ * None of 'root's children are held with HFS_LOCK_WRITE access.
+ * Postconditions:
+ * The current 'root' node is removed from the tree and the depth
+ * of the tree is reduced by one.
+ * If 'root' is an index node with exactly one child, then that
+ * child becomes the new root of the tree.
+ * If 'root' is an empty leaf node the tree becomes empty.
+ * Upon return access to 'root' is relinquished.
+ */
+static int del_root(struct hfs_bnode_ref *root)
+{
+	struct hfs_btree *tree = root->bn->tree;
+	struct hfs_bnode_ref child;
+	hfs_u32 node;
+
+	if (root->bn->ndNRecs > 1) {
+		/* root still holds several records: nothing to shrink */
+		return 0;
+	} else if (root->bn->ndNRecs == 0) {
+		/* tree is empty */
+		/* (the duplicate 'tree->bthRoot = 0;' of the original
+		   has been dropped; it was assigned twice) */
+		tree->bthRoot = 0;
+		tree->root = NULL;
+		tree->bthFNode = 0;
+		tree->bthLNode = 0;
+		--tree->bthDepth;
+		tree->dirt = 1;
+		if (tree->bthDepth) {
+			hfs_warn("hfs_bdelete: empty tree with bthDepth=%d\n",
+				 tree->bthDepth);
+			goto bail;
+		}
+		return hfs_bnode_free(root);
+	} else if (root->bn->ndType == ndIndxNode) {
+		/* tree is non-empty: promote the root's only child */
+		node = hfs_get_hl(bkey_record(bnode_datastart(root->bn)));
+
+		child = hfs_bnode_find(tree, node, HFS_LOCK_READ);
+		if (!child.bn) {
+			hfs_warn("hfs_bdelete: can't read child node.\n");
+			goto bail;
+		}
+
+		/* unhash the child and pin it in core as the new root */
+		child.bn->sticky = HFS_STICKY;
+		if (child.bn->next) {
+			child.bn->next->prev = child.bn->prev;
+		}
+		if (child.bn->prev) {
+			child.bn->prev->next = child.bn->next;
+		}
+		if (bhash(tree, child.bn->node) == child.bn) {
+			bhash(tree, child.bn->node) = child.bn->next;
+		}
+		child.bn->next = NULL;
+		child.bn->prev = NULL;
+
+		tree->root = child.bn;
+		hfs_bnode_relse(&child);
+
+		/* single assignment of the new root number (the original
+		   also assigned 'tree->bthRoot = child.bn->node;' first,
+		   which is the same value, 'node') */
+		tree->bthRoot = node;
+		tree->bthFNode = node;
+		tree->bthLNode = node;
+		--tree->bthDepth;
+		tree->dirt = 1;
+		if (!tree->bthDepth) {
+			hfs_warn("hfs_bdelete: non-empty tree with "
+				 "bthDepth == 0\n");
+			goto bail;
+		}
+		return hfs_bnode_free(root); /* marks tree dirty */
+	}
+	/* a leaf root with exactly one record stays as it is */
+	hfs_bnode_relse(root);
+	return 0;
+
+bail:
+	hfs_bnode_relse(root);
+	return -EIO;
+}
+
+
+/*
+ * delete_empty_bnode()
+ *
+ * Description:
+ * Removes an empty non-root bnode from between 'left' and 'right'
+ * Input Variable(s):
+ * hfs_u32 left_node: node number of 'left' or zero if 'left' is invalid
+ * struct hfs_bnode_ref *left: reference to the left neighbor of the
+ * bnode to remove, or invalid if no such neighbor exists.
+ * struct hfs_bnode_ref *center: reference to the bnode to remove
+ * hfs_u32 right_node: node number of 'right' or zero if 'right' is invalid
+ * struct hfs_bnode_ref *right: reference to the right neighbor of the
+ * bnode to remove, or invalid if no such neighbor exists.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'left_node' is as described above.
+ * 'left' points to a valid (struct hfs_bnode_ref) having HFS_LOCK_WRITE
+ * access and referring to the left neighbor of 'center' if such a
+ * neighbor exists, or invalid if no such neighbor exists.
+ * 'center' points to a valid (struct hfs_bnode_ref) having HFS_LOCK_WRITE
+ * access and referring to the bnode to delete.
+ * 'right_node' is as described above.
+ * 'right' points to a valid (struct hfs_bnode_ref) having HFS_LOCK_WRITE
+ * access and referring to the right neighbor of 'center' if such a
+ * neighbor exists, or invalid if no such neighbor exists.
+ * Postconditions:
+ * If 'left' is valid its 'ndFLink' field becomes 'right_node'.
+ * If 'right' is valid its 'ndBLink' field becomes 'left_node'.
+ * If 'center' was the first leaf node then the tree's 'bthFNode'
+ * field becomes 'right_node'
+ * If 'center' was the last leaf node then the tree's 'bthLNode'
+ * field becomes 'left_node'
+ * 'center' is NOT freed and access to the nodes is NOT relinquished.
+ */
+static void delete_empty_bnode(hfs_u32 left_node, struct hfs_bnode_ref *left,
+			       struct hfs_bnode_ref *center,
+			       hfs_u32 right_node, struct hfs_bnode_ref *right)
+{
+	struct hfs_bnode *victim = center->bn;
+	int is_leaf = (victim->ndType == ndLeafNode);
+
+	/* splice the forward link around the victim */
+	if (left_node) {
+		left->bn->ndFLink = right_node;
+	} else if (is_leaf) {
+		/* victim was the first leaf: update the tree header */
+		victim->tree->bthFNode = right_node;
+		victim->tree->dirt = 1;
+	}
+
+	/* splice the backward link around the victim */
+	if (right_node) {
+		right->bn->ndBLink = left_node;
+	} else if (is_leaf) {
+		/* victim was the last leaf: update the tree header */
+		victim->tree->bthLNode = left_node;
+		victim->tree->dirt = 1;
+	}
+}
+
+/*
+ * balance()
+ *
+ * Description:
+ * Attempt to equalize space usage in neighboring bnodes.
+ * Input Variable(s):
+ * struct hfs_bnode *left: the left bnode.
+ * struct hfs_bnode *right: the right bnode.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'left' and 'right' point to valid (struct hfs_bnode)s obtained
+ * with HFS_LOCK_WRITE access, and are neighbors.
+ * Postconditions:
+ * Records are shifted either left or right to make the space usage
+ * nearly equal. When exact equality is not possible the break
+ * point is chosen to reduce data movement.
+ * The key corresponding to 'right' in its parent is NOT updated.
+ */
+static void balance(struct hfs_bnode *left, struct hfs_bnode *right)
+{
+ int index, left_free, right_free, half;
+
+ left_free = bnode_freespace(left);
+ right_free = bnode_freespace(right);
+ half = (left_free + right_free)/2;
+
+ if (left_free < right_free) {
+ /* shift right to balance */
+ /* walk back from left's last record, simulating moves
+ into 'right' until its free space drops below 'half';
+ each moved record costs its size plus a table entry */
+ index = left->ndNRecs + 1;
+ while (right_free >= half) {
+ --index;
+ right_free -= bnode_rsize(left,index)+sizeof(hfs_u16);
+ }
+ if (index < left->ndNRecs) {
+#if defined(DEBUG_ALL) || defined(DEBUG_BALANCE)
+ hfs_warn("shifting %d of %d recs right to balance: ",
+ left->ndNRecs - index, left->ndNRecs);
+#endif
+ hfs_bnode_shift_right(left, right, index+1);
+#if defined(DEBUG_ALL) || defined(DEBUG_BALANCE)
+ hfs_warn("%d,%d\n", left->ndNRecs, right->ndNRecs);
+#endif
+ }
+ } else {
+ /* shift left to balance */
+ /* walk forward through right's records, simulating moves
+ into 'left' until its free space drops below 'half' */
+ index = 0;
+ while (left_free >= half) {
+ ++index;
+ left_free -= bnode_rsize(right,index)+sizeof(hfs_u16);
+ }
+ if (index > 1) {
+#if defined(DEBUG_ALL) || defined(DEBUG_BALANCE)
+ hfs_warn("shifting %d of %d recs left to balance: ",
+ index-1, right->ndNRecs);
+#endif
+ hfs_bnode_shift_left(left, right, index-1);
+#if defined(DEBUG_ALL) || defined(DEBUG_BALANCE)
+ hfs_warn("%d,%d\n", left->ndNRecs, right->ndNRecs);
+#endif
+ }
+ }
+}
+
+/*
+ * bdelete()
+ *
+ * Delete the given record from a B-tree.
+ */
+static int bdelete(struct hfs_brec *brec)
+{
+ struct hfs_btree *tree = brec->tree;
+ struct hfs_belem *belem = brec->bottom;
+ struct hfs_belem *parent = (belem-1);
+ struct hfs_bnode *bnode;
+ hfs_u32 left_node, right_node;
+ struct hfs_bnode_ref left, right;
+ int left_space, right_space, min_space;
+ int fix_right_key;
+ int fix_key;
+
+ /* ascend the path for as long as the flagged node underflowed
+ (needs merging/balancing) or lost its first record (needs its
+ key fixed in the parent) */
+ while ((belem > brec->top) &&
+ (belem->flags & (HFS_BPATH_UNDERFLOW | HFS_BPATH_FIRST))) {
+ bnode = belem->bnr.bn;
+ fix_key = belem->flags & HFS_BPATH_FIRST;
+ fix_right_key = 0;
+
+ /* remove the doomed record from this node */
+ bdelete_nonempty(brec, belem);
+
+ if (bnode->node == tree->root->node) {
+ /* NOTE(review): del_root()'s error return is
+ ignored here */
+ del_root(&belem->bnr);
+ --brec->bottom;
+ goto done;
+ }
+
+ /* check for btree corruption which could lead to deadlock */
+ left_node = bnode->ndBLink;
+ right_node = bnode->ndFLink;
+ if ((left_node && hfs_bnode_in_brec(left_node, brec)) ||
+ (right_node && hfs_bnode_in_brec(right_node, brec)) ||
+ (left_node == right_node)) {
+ hfs_warn("hfs_bdelete: corrupt btree\n");
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+
+ /* grab the left neighbor if it exists */
+ if (left_node) {
+ /* drop to a reservation while fetching the left
+ sibling, then retake write access */
+ hfs_bnode_lock(&belem->bnr, HFS_LOCK_RESRV);
+ left = hfs_bnode_find(tree,left_node,HFS_LOCK_WRITE);
+ if (!left.bn) {
+ hfs_warn("hfs_bdelete: unable to read left "
+ "neighbor.\n");
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+ hfs_bnode_lock(&belem->bnr, HFS_LOCK_WRITE);
+ /* only a sibling under the same parent is usable */
+ if (parent->record != 1) {
+ left_space = bnode_freespace(left.bn);
+ } else {
+ left_space = NO_SPACE;
+ }
+ } else {
+ left.bn = NULL;
+ left_space = NO_SPACE;
+ }
+
+ /* grab the right neighbor if it exists */
+ if (right_node) {
+ right = hfs_bnode_find(tree,right_node,HFS_LOCK_WRITE);
+ if (!right.bn) {
+ hfs_warn("hfs_bdelete: unable to read right "
+ "neighbor.\n");
+ hfs_bnode_relse(&left);
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+ /* only a sibling under the same parent is usable */
+ if (parent->record < parent->bnr.bn->ndNRecs) {
+ right_space = bnode_freespace(right.bn);
+ } else {
+ right_space = NO_SPACE;
+ }
+ } else {
+ right.bn = NULL;
+ right_space = NO_SPACE;
+ }
+
+ if (left_space < right_space) {
+ min_space = left_space;
+ } else {
+ min_space = right_space;
+ }
+
+ if (min_space == NO_SPACE) {
+ hfs_warn("hfs_bdelete: no siblings?\n");
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+
+ if (bnode->ndNRecs == 0) {
+ /* node emptied outright: just unlink it */
+ delete_empty_bnode(left_node, &left, &belem->bnr,
+ right_node, &right);
+ } else if (min_space + bnode_freespace(bnode) >= FULL) {
+ /* this node's records fit entirely in a sibling:
+ merge into one of them and unlink this node */
+ if ((right_space == NO_SPACE) ||
+ ((right_space == min_space) &&
+ (left_space != NO_SPACE))) {
+ hfs_bnode_shift_left(left.bn, bnode,
+ bnode->ndNRecs);
+ } else {
+ hfs_bnode_shift_right(bnode, right.bn, 1);
+ fix_right_key = 1;
+ }
+ delete_empty_bnode(left_node, &left, &belem->bnr,
+ right_node, &right);
+ } else if (min_space == right_space) {
+ /* can't merge: even out space with a sibling */
+ balance(bnode, right.bn);
+ fix_right_key = 1;
+ } else {
+ balance(left.bn, bnode);
+ fix_key = 1;
+ }
+
+ /* the right sibling's first key changed: fix the parent */
+ if (fix_right_key) {
+ hfs_bnode_update_key(brec, belem, right.bn, 1);
+ }
+
+ hfs_bnode_relse(&left);
+ hfs_bnode_relse(&right);
+
+ if (bnode->ndNRecs) {
+ /* node survived; propagate its (possibly new)
+ first key and stop restructuring */
+ if (fix_key) {
+ hfs_bnode_update_key(brec, belem, bnode, 0);
+ }
+ goto done;
+ }
+
+ /* node is now empty: free it and continue with its parent */
+ hfs_bnode_free(&belem->bnr);
+ --brec->bottom;
+ belem = parent;
+ --parent;
+ }
+
+ if (belem < brec->top) {
+ hfs_warn("hfs_bdelete: Missing parent.\n");
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+
+ /* topmost affected node needs only a plain record deletion */
+ bdelete_nonempty(brec, belem);
+
+done:
+ hfs_brec_relse(brec, NULL);
+ return 0;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_bdelete()
+ *
+ * Delete the requested record from a B-tree.
+ */
+int hfs_bdelete(struct hfs_btree *tree, const struct hfs_bkey *key)
+{
+ struct hfs_belem *belem;
+ struct hfs_bnode *bnode;
+ struct hfs_brec brec;
+ int retval;
+
+ if (!tree || (tree->magic != HFS_BTREE_MAGIC) || !key) {
+ hfs_warn("hfs_bdelete: invalid arguments.\n");
+ return -EINVAL;
+ }
+
+ retval = hfs_bfind(&brec, tree, key, HFS_BFIND_DELETE);
+ if (!retval) {
+ belem = brec.bottom;
+ bnode = belem->bnr.bn;
+
+ /* flag the leaf if deleting would leave it less than
+ half full (space used after deletion < FULL/2) ... */
+ belem->flags = 0;
+ if ((bnode->ndNRecs * sizeof(hfs_u16) + bnode_end(bnode) -
+ bnode_rsize(bnode, belem->record)) < FULL/2) {
+ belem->flags |= HFS_BPATH_UNDERFLOW;
+ }
+ /* ... or if we are removing its first record, since the
+ parent's key for this node must then change */
+ if (belem->record == 1) {
+ belem->flags |= HFS_BPATH_FIRST;
+ }
+
+ /* simple case: lock only the leaf; otherwise lock the
+ whole retained path for restructuring */
+ if (!belem->flags) {
+ hfs_brec_lock(&brec, brec.bottom);
+ } else {
+ hfs_brec_lock(&brec, NULL);
+ }
+
+ retval = bdelete(&brec);
+ if (!retval) {
+ --brec.tree->bthNRecs;
+ brec.tree->dirt = 1;
+ }
+ /* NOTE(review): bdelete() already releases the whole path
+ on every return, so this second release appears to be a
+ harmless no-op (hfs_brec_relse's loop finds top past
+ bottom) — kept as belt-and-braces; verify before removing */
+ hfs_brec_relse(&brec, NULL);
+ }
+ return retval;
+}
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
new file mode 100644
index 000000000..d8d7e933d
--- /dev/null
+++ b/fs/hfs/bfind.c
@@ -0,0 +1,322 @@
+/*
+ * linux/fs/hfs/bfind.c
+ *
+ * Copyright (C) 1995, 1996 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to access records in a btree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs_btree.h"
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_brec_relse()
+ *
+ * Description:
+ * This function releases some of the nodes associated with a brec.
+ * Input Variable(s):
+ * struct hfs_brec *brec: pointer to the brec to release some nodes from.
+ * struct hfs_belem *elem: the last node to release or NULL for all
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'brec' points to a "valid" (struct hfs_brec)
+ * Postconditions:
+ * All nodes between the indicated node and the beginning of the path
+ * are released.
+ */
+void hfs_brec_relse(struct hfs_brec *brec, struct hfs_belem *elem)
+{
+	/* a NULL 'elem' means "release the whole path" */
+	struct hfs_belem *stop = elem ? elem : brec->bottom;
+
+	/* release each held node from the top of the path down to
+	   (and including) 'stop', advancing the top as we go */
+	for (; brec->top <= stop; ++brec->top) {
+		hfs_bnode_relse(&brec->top->bnr);
+	}
+}
+
+/*
+ * hfs_bfind()
+ *
+ * Description:
+ * This function has sole responsibility for locating existing
+ * records in a B-tree. Given a B-tree and a key it locates the
+ * "greatest" record "less than or equal to" the given key. The
+ * exact behavior is determined by the bits of the flags variable as
+ * follows:
+ * ('flags' & HFS_LOCK_MASK):
+ * The lock_type argument to be used when calling hfs_bnode_find().
+ * HFS_BFIND_EXACT: only accept an exact match, otherwise take the
+ * "largest" record less than 'target' as a "match"
+ * HFS_BFIND_LOCK: request HFS_LOCK_WRITE access to the node containing
+ * the "matching" record when it is located
+ * HFS_BPATH_FIRST: keep access to internal nodes when accessing their
+ * first child.
+ * HFS_BPATH_OVERFLOW: keep access to internal nodes when the accessed
+ * child is too full to insert another pointer record.
+ * HFS_BPATH_UNDERFLOW: keep access to internal nodes when the accessed
+ * child would be less than half full upon removing a pointer record.
+ * Input Variable(s):
+ * struct hfs_brec *brec: pointer to the (struct hfs_brec) to hold
+ * the search results.
+ * struct hfs_bkey *target: pointer to the (struct hfs_bkey)
+ * to search for
+ * int flags: bitwise OR of flags which determine the function's behavior
+ * Output Variable(s):
+ * 'brec' contains the results of the search on success or is invalid
+ * on failure.
+ * Returns:
+ * int: 0 or 1 on success or an error code on failure:
+ * -EINVAL: one of the input variables was NULL.
+ * -ENOENT: tree is valid but empty or no "matching" record was located.
+ * If the HFS_BFIND_EXACT bit of 'flags' is not set then the case of no
+ * matching record will give a 'brec' with a 'record' field of zero
+ * rather than returning this error.
+ * -EIO: an I/O operation or an assertion about the structure of a
+ * valid B-tree failed indicating corruption of either the B-tree
+ * structure on the disk or one of the in-core structures representing
+ * the B-tree.
+ * (This could also be returned if a kmalloc() call failed in a
+ * subordinate routine that is intended to get the data from the
+ * disk or the buffer cache.)
+ * Preconditions:
+ * 'brec' is NULL or points to a (struct hfs_brec) with a 'tree' field
+ * which points to a valid (struct hfs_btree).
+ * 'target' is NULL or points to a "valid" (struct hfs_bkey)
+ * Postconditions:
+ * If 'brec', 'brec->tree' or 'target' is NULL then -EINVAL is returned.
+ * If 'brec', 'brec->tree' and 'target' are non-NULL but the tree
+ * is empty then -ENOENT is returned.
+ * If 'brec', 'brec->tree' and 'target' are non-NULL but the call to
+ * hfs_brec_init() fails then '*brec' is NULL and -EIO is returned.
+ * If 'brec', 'brec->tree' and 'target' are non-NULL and the tree is
+ * non-empty then the tree is searched as follows:
+ * If any call to hfs_brec_next() fails or returns a node that is
+ * neither an index node nor a leaf node then -EIO is returned to
+ * indicate that the B-tree or buffer-cache are corrupted.
+ * If every record in the tree is "greater than" the given key
+ * and the HFS_BFIND_EXACT bit of 'flags' is set then -ENOENT is returned.
+ * If every record in the tree is "greater than" the given key
+ * and the HFS_BFIND_EXACT bit of 'flags' is clear then 'brec' refers
+ * to the first leaf node in the tree and has a 'record' field of
+ * zero, and 1 is returned.
+ * If a "matching" record is located with key "equal to" 'target'
+ * then the return value is 0 and 'brec' indicates the record.
+ * If a "matching" record is located with key "greater than" 'target'
+ * then the behavior is determined as follows:
+ * If the HFS_BFIND_EXACT bit of 'flags' is not set then 1 is returned
+ * and 'brec' refers to the "matching" record.
+ * If the HFS_BFIND_EXACT bit of 'flags' is set then -ENOENT is returned.
+ * If the return value is non-negative and the HFS_BFIND_LOCK bit of
+ * 'flags' is set then hfs_brec_lock() is called on the bottom element
+ * of 'brec' before returning.
+ */
+int hfs_bfind(struct hfs_brec *brec, struct hfs_btree *tree,
+	      const struct hfs_bkey *target, int flags)
+{
+	struct hfs_belem *curr;
+	struct hfs_bkey *key;
+	struct hfs_bnode *bn;
+	int result, ntype;
+
+	/* check for invalid arguments.  Test 'tree' itself before
+	   dereferencing it for the magic-number check — the documented
+	   contract is -EINVAL for a NULL tree, and hfs_bdelete() makes
+	   the same three-way check. */
+	if (!brec || !tree || (tree->magic != HFS_BTREE_MAGIC) || !target) {
+		return -EINVAL;
+	}
+
+	/* check for empty tree */
+	if (!tree->root || !tree->bthNRecs) {
+		return -ENOENT;
+	}
+
+	/* start search at root of tree */
+	if (!(curr = hfs_brec_init(brec, tree, flags))) {
+		return -EIO;
+	}
+
+	/* traverse the tree */
+	do {
+		bn = curr->bnr.bn;
+
+		if (!curr->record) {
+			hfs_warn("hfs_bfind: empty bnode\n");
+			hfs_brec_relse(brec, NULL);
+			return -EIO;
+		}
+
+		/* reverse linear search yielding largest key "less
+		   than or equal to" 'target'.
+		   It is questionable whether a binary search would be
+		   significantly faster */
+		do {
+			key = belem_key(curr);
+			if (!key->KeyLen) {
+				hfs_warn("hfs_bfind: empty key\n");
+				hfs_brec_relse(brec, NULL);
+				return -EIO;
+			}
+			result = (tree->compare)(target, key);
+		} while ((result<0) && (--curr->record));
+
+		ntype = bn->ndType;
+
+		/* see if all keys > target */
+		if (!curr->record) {
+			if (bn->ndBLink) {
+				/* at a node other than the left-most at a
+				   given level it means the parent had an
+				   incorrect key for this child */
+				hfs_brec_relse(brec, NULL);
+				hfs_warn("hfs_bfind: corrupted b-tree %d.\n",
+					 (int)ntohl(tree->entry.cnid));
+				return -EIO;
+			}
+			if (flags & HFS_BFIND_EXACT) {
+				/* we're not going to find it */
+				hfs_brec_relse(brec, NULL);
+				return -ENOENT;
+			}
+			if (ntype == ndIndxNode) {
+				/* since we are at the left-most node at
+				   the current level and looking for the
+				   predecessor of 'target' keep going down */
+				curr->record = 1;
+			} else {
+				/* we're at first leaf so fall through */
+			}
+		}
+
+		/* get next node if necessary */
+		if ((ntype == ndIndxNode) && !(curr = hfs_brec_next(brec))) {
+			return -EIO;
+		}
+	} while (ntype == ndIndxNode);
+
+	/* sanity checks on the record we landed on */
+	if (key->KeyLen > tree->bthKeyLen) {
+		hfs_warn("hfs_bfind: oversized key\n");
+		hfs_brec_relse(brec, NULL);
+		return -EIO;
+	}
+
+	if (ntype != ndLeafNode) {
+		hfs_warn("hfs_bfind: invalid node type %02x in node %d of "
+			 "btree %d\n", bn->ndType, bn->node,
+			 (int)ntohl(tree->entry.cnid));
+		hfs_brec_relse(brec, NULL);
+		return -EIO;
+	}
+
+	if ((flags & HFS_BFIND_EXACT) && result) {
+		hfs_brec_relse(brec, NULL);
+		return -ENOENT;
+	}
+
+	/* unless a path was requested, keep only the leaf */
+	if (!(flags & HFS_BPATH_MASK)) {
+		hfs_brec_relse(brec, brec->bottom-1);
+	}
+
+	if (flags & HFS_BFIND_LOCK) {
+		hfs_brec_lock(brec, brec->bottom);
+	}
+
+	/* publish the found key and its record data */
+	brec->key = brec_key(brec);
+	brec->data = bkey_record(brec->key);
+
+	/* 0 == exact match, 1 == nearest predecessor */
+	return result ? 1 : 0;
+}
+
+/*
+ * hfs_bsucc()
+ *
+ * Description:
+ * This function overwrites '*brec' with its successor in the B-tree,
+ * obtaining the same type of access.
+ * Input Variable(s):
+ * struct hfs_brec *brec: address of the (struct hfs_brec) to overwrite
+ * with its successor
+ * Output Variable(s):
+ * struct hfs_brec *brec: address of the successor of the original
+ * '*brec' or to invalid data
+ * Returns:
+ * int: 0 on success, or one of -EINVAL, -EIO, or -ENOENT on failure
+ * Preconditions:
+ * 'brec' points to a "valid" (struct hfs_brec)
+ * Postconditions:
+ * If the given '*brec' is not "valid" -EINVAL is returned and
+ * '*brec' is unchanged.
+ * If the given 'brec' is "valid" but has no successor then -ENOENT
+ * is returned and '*brec' is invalid.
+ * If a call to hfs_bnode_find() is necessary to find the successor,
+ * but fails then -EIO is returned and '*brec' is invalid.
+ * If none of the three previous conditions prevents finding the
+ * successor of '*brec', then 0 is returned, and '*brec' is overwritten
+ * with the (struct hfs_brec) for its successor.
+ * In the cases when '*brec' is invalid, the old records are freed.
+ */
+int hfs_bsucc(struct hfs_brec *brec, int count)
+{
+ struct hfs_belem *belem;
+ struct hfs_bnode *bn;
+
+ /* only a single-element path (bottom == top) can be advanced */
+ if (!brec || !(belem = brec->bottom) || (belem != brec->top) ||
+ !(bn = belem->bnr.bn) || (bn->magic != HFS_BNODE_MAGIC) ||
+ !bn->tree || (bn->tree->magic != HFS_BTREE_MAGIC) ||
+ !hfs_buffer_ok(bn->buf)) {
+ hfs_warn("hfs_bsucc: invalid/corrupt arguments.\n");
+ return -EINVAL;
+ }
+
+ while (count) {
+ /* records remaining after the current one in this node */
+ int left = bn->ndNRecs - belem->record;
+
+ if (left < count) {
+ struct hfs_bnode_ref old;
+ hfs_u32 node;
+
+ /* Advance to next node */
+ if (!(node = bn->ndFLink)) {
+ /* no successor: end of the leaf chain */
+ hfs_brec_relse(brec, belem);
+ return -ENOENT;
+ }
+ if (node == bn->node) {
+ /* a node linked to itself */
+ hfs_warn("hfs_bsucc: corrupt btree\n");
+ hfs_brec_relse(brec, belem);
+ return -EIO;
+ }
+ /* swap in the next node with the same lock type,
+ then release the old one */
+ old = belem->bnr;
+ belem->bnr = hfs_bnode_find(brec->tree, node,
+ belem->bnr.lock_type);
+ hfs_bnode_relse(&old);
+ if (!(bn = belem->bnr.bn)) {
+ return -EIO;
+ }
+ /* stepping to record 1 of the next node consumes
+ the 'left' remaining records plus one */
+ belem->record = 1;
+ count -= (left + 1);
+ } else {
+ /* successor lies within this node */
+ belem->record += count;
+ break;
+ }
+ }
+ /* publish the new key and record data */
+ brec->key = belem_key(belem);
+ brec->data = bkey_record(brec->key);
+
+ if (brec->key->KeyLen > brec->tree->bthKeyLen) {
+ hfs_warn("hfs_bsucc: oversized key\n");
+ hfs_brec_relse(brec, NULL);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/fs/hfs/bins_del.c b/fs/hfs/bins_del.c
new file mode 100644
index 000000000..4a08a39d1
--- /dev/null
+++ b/fs/hfs/bins_del.c
@@ -0,0 +1,231 @@
+/*
+ * linux/fs/hfs/bins_del.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code common to inserting and deleting records
+ * in a B-tree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * hfs_bnode_update_key()
+ *
+ * Description:
+ * Updates the key for a bnode in its parent.
+ * The key change is propagated up the tree as necessary.
+ * Input Variable(s):
+ * struct hfs_brec *brec: the search path to update keys in
+ * struct hfs_belem *belem: the search path element with the changed key
+ * struct hfs_bnode *bnode: the bnode with the changed key
+ * int offset: the "distance" from 'belem->bn' to 'bnode':
+ * 0 if the change is in 'belem->bn',
+ * 1 if the change is in its right sibling, etc.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'brec' points to a valid (struct hfs_brec)
+ * 'belem' points to a valid (struct hfs_belem) in 'brec'.
+ * 'bnode' points to a valid (struct hfs_bnode) which is non-empty
+ * and is 'belem->bn' or one of its siblings.
+ * 'offset' is as described above.
+ * Postconditions:
+ * The key change is propagated up the tree as necessary.
+ */
+void hfs_bnode_update_key(struct hfs_brec *brec, struct hfs_belem *belem,
+ struct hfs_bnode *bnode, int offset)
+{
+ int record = (--belem)->record + offset;
+ void *key = bnode_datastart(bnode) + 1;
+ int keysize = brec->tree->bthKeyLen;
+ struct hfs_belem *limit;
+
+ /* copy bnode's new first key into its slot in the parent;
+ the +1/1+ skip the leading key-length byte of each key */
+ memcpy(1+bnode_key(belem->bnr.bn, record), key, keysize);
+
+ /* don't trash the header */
+ if (brec->top > &brec->elem[1]) {
+ limit = brec->top;
+ } else {
+ limit = &brec->elem[1];
+ }
+
+ /* while the updated slot was the node's first record, the
+ change is visible in the grandparent too: keep propagating
+ up the path (never into elem[0]) */
+ while ((belem > limit) && (record == 1)) {
+ record = (--belem)->record;
+ memcpy(1+belem_key(belem), key, keysize);
+ }
+}
+
+/*
+ * hfs_bnode_shift_right()
+ *
+ * Description:
+ * Shifts some records from a node to its right neighbor.
+ * Input Variable(s):
+ * struct hfs_bnode* left: the node to shift records from
+ * struct hfs_bnode* right: the node to shift records to
+ * hfs_u16 first: the number of the first record in 'left' to move to 'right'
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'left' and 'right' point to valid (struct hfs_bnode)s.
+ * 'left' contains at least 'first' records.
+ * 'right' has enough free space to hold the records to be moved from 'left'
+ * Postconditions:
+ * The record numbered 'first' and all records after it in 'left' are
+ * placed at the beginning of 'right'.
+ * The key corresponding to 'right' in its parent is NOT updated.
+ */
+void hfs_bnode_shift_right(struct hfs_bnode *left, struct hfs_bnode *right,
+ int first)
+{
+ int i, adjust, nrecs;
+ unsigned size;
+ hfs_u16 *to, *from;
+
+ if ((first <= 0) || (first > left->ndNRecs)) {
+ hfs_warn("bad argument to shift_right: first=%d, nrecs=%d\n",
+ first, left->ndNRecs);
+ return;
+ }
+
+ /* initialize variables */
+ /* nrecs records, 'size' bytes, move from 'left' to 'right' */
+ nrecs = left->ndNRecs + 1 - first;
+ size = bnode_end(left) - bnode_offset(left, first);
+
+ /* move (possibly empty) contents of right node forward */
+ memmove(bnode_datastart(right) + size,
+ bnode_datastart(right),
+ bnode_end(right) - sizeof(struct NodeDescriptor));
+
+ /* copy in new records */
+ memcpy(bnode_datastart(right), bnode_key(left,first), size);
+
+ /* fix up offsets in right node */
+ /* existing entries slide down 'nrecs' table slots and grow
+ by 'size' bytes of newly prepended data */
+ i = right->ndNRecs + 1;
+ from = RECTBL(right, i);
+ to = from - nrecs;
+ while (i--) {
+ hfs_put_hs(hfs_get_hs(from++) + size, to++);
+ }
+ /* then rebase the moved records' offsets from their old
+ position in 'left' to the start of 'right' */
+ adjust = sizeof(struct NodeDescriptor) - bnode_offset(left, first);
+ i = nrecs-1;
+ from = RECTBL(left, first+i);
+ while (i--) {
+ hfs_put_hs(hfs_get_hs(from++) + adjust, to++);
+ }
+
+ /* fix record counts */
+ left->ndNRecs -= nrecs;
+ right->ndNRecs += nrecs;
+}
+
+/*
+ * hfs_bnode_shift_left()
+ *
+ * Description:
+ * Shifts some records from a node to its left neighbor.
+ * Input Variable(s):
+ * struct hfs_bnode* left: the node to shift records to
+ * struct hfs_bnode* right: the node to shift records from
+ * hfs_u16 last: the number of the last record in 'right' to move to 'left'
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'left' and 'right' point to valid (struct hfs_bnode)s.
+ * 'right' contains at least 'last' records.
+ * 'left' has enough free space to hold the records to be moved from 'right'
+ * Postconditions:
+ * The record numbered 'last' and all records before it in 'right' are
+ * placed at the end of 'left'.
+ * The key corresponding to 'right' in its parent is NOT updated.
+ */
+void hfs_bnode_shift_left(struct hfs_bnode *left, struct hfs_bnode *right,
+ int last)
+{
+ int i, adjust, nrecs;
+ unsigned size;
+ hfs_u16 *to, *from;
+
+ if ((last <= 0) || (last > right->ndNRecs)) {
+ hfs_warn("bad argument to shift_left: last=%d, nrecs=%d\n",
+ last, right->ndNRecs);
+ return;
+ }
+
+ /* initialize variables */
+ /* 'size' bytes (records 1..last of 'right') move to 'left' */
+ size = bnode_offset(right, last + 1) - sizeof(struct NodeDescriptor);
+
+ /* copy records to left node */
+ memcpy(bnode_dataend(left), bnode_datastart(right), size);
+
+ /* move (possibly empty) remainder of right node backward */
+ memmove(bnode_datastart(right), bnode_datastart(right) + size,
+ bnode_end(right) - bnode_offset(right, last + 1));
+
+ /* fix up offsets */
+ /* append the moved records' table entries to 'left', rebasing
+ their offsets onto the end of left's existing data */
+ nrecs = left->ndNRecs;
+ i = last;
+ from = RECTBL(right, 2);
+ to = RECTBL(left, nrecs + 2);
+ adjust = bnode_offset(left, nrecs + 1) - sizeof(struct NodeDescriptor);
+ while (i--) {
+ hfs_put_hs(hfs_get_hs(from--) + adjust, to--);
+ }
+ /* slide right's surviving entries up the table and shrink
+ their offsets by the departed 'size' bytes */
+ i = right->ndNRecs + 1 - last;
+ ++from;
+ to = RECTBL(right, 1);
+ while (i--) {
+ hfs_put_hs(hfs_get_hs(from--) - size, to--);
+ }
+
+ /* fix record counts */
+ left->ndNRecs += last;
+ right->ndNRecs -= last;
+}
+
+/*
+ * hfs_bnode_in_brec()
+ *
+ * Description:
+ * Determines whether a given bnode is part of a given brec.
+ * This is used to avoid deadlock in the case of a corrupted b-tree.
+ * Input Variable(s):
+ * hfs_u32 node: the number of the node to check for.
+ * struct hfs_brec* brec: the brec to check in.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * int: 1 it found, 0 if not
+ * Preconditions:
+ * 'brec' points to a valid struct hfs_brec.
+ * Postconditions:
+ * 'brec' is unchanged.
+ */
+int hfs_bnode_in_brec(hfs_u32 node, const struct hfs_brec *brec)
+{
+	const struct hfs_belem *elem;
+
+	/* scan the path from the bottom up, looking for 'node' */
+	for (elem = brec->bottom; elem && elem >= brec->top; --elem) {
+		if (elem->bnr.bn && elem->bnr.bn->node == node) {
+			return 1;
+		}
+	}
+	return 0;
+}
diff --git a/fs/hfs/binsert.c b/fs/hfs/binsert.c
new file mode 100644
index 000000000..daab8c22d
--- /dev/null
+++ b/fs/hfs/binsert.c
@@ -0,0 +1,541 @@
+/*
+ * linux/fs/hfs/binsert.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to insert records in a B-tree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * binsert_nonfull()
+ *
+ * Description:
+ * Inserts a record in a given bnode known to have sufficient space.
+ * Input Variable(s):
+ * struct hfs_brec* brec: pointer to the brec for the insertion
+ * struct hfs_belem* belem: the element in the search path to insert in
+ * struct hfs_bkey* key: pointer to the key for the record to insert
+ * void* data: pointer to the record to insert
+ * hfs_u8 keysize: size of the key to insert
+ * hfs_u16 datasize: size of the record to insert
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * NONE
+ * Preconditions:
+ * 'brec' points to a valid (struct hfs_brec).
+ * 'belem' points to a valid (struct hfs_belem) in 'brec', the node
+ * of which has enough free space to insert 'key' and 'data'.
+ * 'key' is a pointer to a valid (struct hfs_bkey) of length 'keysize'
+ * which, in sorted order, belongs at the location indicated by 'brec'.
+ * 'data' is non-NULL and points to appropriate data of length 'datasize'
+ * Postconditions:
+ * The record has been inserted in the position indicated by 'brec'.
+ */
+static void binsert_nonfull(struct hfs_brec *brec, struct hfs_belem *belem,
+			    const struct hfs_bkey *key, const void *data,
+			    hfs_u8 keysize, hfs_u16 datasize)
+{
+	int i, rec, nrecs, size, tomove;
+	hfs_u8 *start;
+	struct hfs_bnode *bnode = belem->bnr.bn;
+
+	/* the new record goes immediately after the one 'belem'
+	   currently names */
+	rec = ++(belem->record);
+	/* on-disk record size: length byte + key (rounded) + data */
+	size = ROUND(keysize+1) + datasize;
+	nrecs = bnode->ndNRecs + 1;
+	/* bytes of existing records that must slide to make room */
+	tomove = bnode_offset(bnode, nrecs) - bnode_offset(bnode, rec);
+
+	/* adjust the record table */
+	for (i = nrecs; i >= rec; --i) {
+		hfs_put_hs(bnode_offset(bnode,i) + size, RECTBL(bnode,i+1));
+	}
+
+	/* make room */
+	start = bnode_key(bnode, rec);
+	memmove(start + size, start, tomove);
+
+	/* copy in the key and the data*/
+	/* first byte of the record holds the key length */
+	*start = keysize;
+	keysize = ROUND(keysize+1);
+	memcpy(start + 1, (hfs_u8 *)key + 1, keysize-1);
+	memcpy(start + keysize, data, datasize);
+
+	/* update record count */
+	++bnode->ndNRecs;
+}
+
+/*
+ * add_root()
+ *
+ * Description:
+ * Adds a new root to a B*-tree, increasing its height.
+ * Input Variable(s):
+ * struct hfs_btree *tree: the tree to add a new root to
+ * struct hfs_bnode *left: the new root's first child or NULL
+ * struct hfs_bnode *right: the new root's second child or NULL
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'tree' points to a valid (struct hfs_btree).
+ * 'left' and 'right' point to valid (struct hfs_bnode)s, which
+ * resulted from splitting the old root node, or are both NULL
+ * if there was no root node before.
+ * Postconditions:
+ * Upon success a new root node is added to 'tree' with either
+ * two children ('left' and 'right') or none.
+ */
+static void add_root(struct hfs_btree *tree,
+		     struct hfs_bnode *left,
+		     struct hfs_bnode *right)
+{
+	struct hfs_bnode_ref bnr;
+	struct hfs_bnode *root;
+	struct hfs_bkey *key;
+	int keylen = tree->bthKeyLen;
+
+	/* either both children or neither must be supplied */
+	if (left && !right) {
+		hfs_warn("add_root: LEFT but no RIGHT\n");
+		return;
+	}
+
+	bnr = hfs_bnode_alloc(tree);
+	if (!(root = bnr.bn)) {
+		return;
+	}
+
+	/* the root node is marked "sticky" (kept out of the regular
+	   cache hash) and the tree grows one level taller */
+	root->sticky = HFS_STICKY;
+	tree->root = root;
+	tree->bthRoot = root->node;
+	++tree->bthDepth;
+
+	root->ndNHeight = tree->bthDepth;
+	root->ndFLink = 0;
+	root->ndBLink = 0;
+
+	if (!left) {
+		/* tree was empty */
+		root->ndType = ndLeafNode;
+		root->ndNRecs = 0;
+
+		/* the sole node is also the first and last leaf */
+		tree->bthFNode = root->node;
+		tree->bthLNode = root->node;
+	} else {
+		/* new index node with one pointer record per child */
+		root->ndType = ndIndxNode;
+		root->ndNRecs = 2;
+
+		/* record 1: key copied from 'left's first record,
+		   data = left's node number */
+		hfs_put_hs(sizeof(struct NodeDescriptor) + ROUND(1+keylen) +
+			   sizeof(hfs_u32), RECTBL(root, 2));
+		key = bnode_key(root, 1);
+		key->KeyLen = keylen;
+		memcpy(key->value,
+		       ((struct hfs_bkey *)bnode_key(left, 1))->value, keylen);
+		hfs_put_hl(left->node, bkey_record(key));
+
+		/* record 2: likewise for 'right' */
+		hfs_put_hs(sizeof(struct NodeDescriptor) + 2*ROUND(1+keylen) +
+			   2*sizeof(hfs_u32), RECTBL(root, 3));
+		key = bnode_key(root, 2);
+		key->KeyLen = keylen;
+		memcpy(key->value,
+		       ((struct hfs_bkey *)bnode_key(right, 1))->value, keylen);
+		hfs_put_hl(right->node, bkey_record(key));
+
+		/* the former root (left) is now just a normal node */
+		left->sticky = HFS_NOT_STICKY;
+		if ((left->next = bhash(tree, left->node))) {
+			left->next->prev = left;
+		}
+		bhash(tree, left->node) = left;
+	}
+	hfs_bnode_relse(&bnr);
+	tree->dirt = 1;
+}
+
+/*
+ * insert_empty_bnode()
+ *
+ * Description:
+ * Adds an empty node to the right of 'left'.
+ * Input Variable(s):
+ * struct hfs_btree *tree: the tree to add a node to
+ * struct hfs_bnode *left: the node to add a node after
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_bnode_ref *: reference to the new bnode.
+ * Preconditions:
+ * 'tree' points to a valid (struct hfs_btree) with at least 1 free node.
+ * 'left' points to a valid (struct hfs_bnode) belonging to 'tree'.
+ * Postconditions:
+ * If NULL is returned then 'tree' and 'left' are unchanged.
+ * Otherwise a node with 0 records is inserted in the tree to the right
+ * of the node 'left'. The 'ndFLink' of 'left' and the 'ndBLink' of
+ * the former right-neighbor of 'left' (if one existed) point to the
+ * new node. If 'left' had no right neighbor and is a leaf node the
+ * the 'bthLNode' of 'tree' points to the new node. The free-count and
+ * bitmap for 'tree' are kept current by hfs_bnode_alloc() which supplies
+ * the required node.
+ */
+static struct hfs_bnode_ref insert_empty_bnode(struct hfs_btree *tree,
+					       struct hfs_bnode *left)
+{
+	struct hfs_bnode_ref retval;
+	struct hfs_bnode_ref right;
+
+	retval = hfs_bnode_alloc(tree);
+	if (!retval.bn) {
+		hfs_warn("hfs_binsert: out of bnodes?.\n");
+		goto done;
+	}
+	/* enter the new node into the cache hash chain */
+	retval.bn->sticky = HFS_NOT_STICKY;
+	if ((retval.bn->next = bhash(tree, retval.bn->node))) {
+		retval.bn->next->prev = retval.bn;
+	}
+	bhash(tree, retval.bn->node) = retval.bn;
+
+	if (left->ndFLink) {
+		/* 'left' has a right neighbor: point its back-link
+		   at the new node */
+		right = hfs_bnode_find(tree, left->ndFLink, HFS_LOCK_WRITE);
+		if (!right.bn) {
+			/* undo the allocation (clear its bitmap bit)
+			   and give up */
+			hfs_warn("hfs_binsert: corrupt btree.\n");
+			hfs_bnode_bitop(tree, retval.bn->node, 0);
+			hfs_bnode_relse(&retval);
+			goto done;
+		}
+		right.bn->ndBLink = retval.bn->node;
+		hfs_bnode_relse(&right);
+	} else if (left->ndType == ndLeafNode) {
+		/* 'left' was the last leaf; the new node becomes the
+		   tree's last leaf */
+		tree->bthLNode = retval.bn->node;
+		tree->dirt = 1;
+	}
+
+	/* splice the new (empty) node in after 'left', inheriting its
+	   type and height */
+	retval.bn->ndFLink = left->ndFLink;
+	retval.bn->ndBLink = left->node;
+	retval.bn->ndType = left->ndType;
+	retval.bn->ndNHeight = left->ndNHeight;
+	retval.bn->ndNRecs = 0;
+
+	left->ndFLink = retval.bn->node;
+
+ done:
+	return retval;
+}
+
+/*
+ * split()
+ *
+ * Description:
+ * Splits an over full node during insertion.
+ * Picks the split point that results in the most-nearly equal
+ * space usage in the new and old nodes.
+ * Input Variable(s):
+ * struct hfs_belem *elem: the over full node.
+ * int size: the number of bytes to be used by the new record and its key.
+ * Output Variable(s):
+ * struct hfs_belem *elem: changed to indicate where the new record
+ * should be inserted.
+ * Returns:
+ * struct hfs_bnode_ref: reference to the new bnode.
+ * Preconditions:
+ * 'elem' points to a valid path element corresponding to the over full node.
+ * 'size' is positive.
+ * Postconditions:
+ * The records in the node corresponding to 'elem' are redistributed across
+ * the old and new nodes so that after inserting the new record, the space
+ * usage in these two nodes is as equal as possible.
+ * 'elem' is updated so that a call to binsert_nonfull() will insert the
+ * new record in the correct location.
+ */
+static inline struct hfs_bnode_ref split(struct hfs_belem *elem, int size)
+{
+	struct hfs_bnode *bnode = elem->bnr.bn;
+	int nrecs, cutoff, index, tmp, used, in_right;
+	struct hfs_bnode_ref right;
+
+	right = insert_empty_bnode(bnode->tree, bnode);
+	if (right.bn) {
+		nrecs = bnode->ndNRecs;
+		/* half the space (data + offset-table entries) that
+		   will be in use after the insertion */
+		cutoff = (size + bnode_end(bnode) -
+			  sizeof(struct NodeDescriptor) +
+			  (nrecs+1)*sizeof(hfs_u16))/2;
+		used = 0;
+		in_right = 1;
+		/* note that this only works because records sizes are even */
+		/* each record is added in two halves so that 'used'
+		   crosses 'cutoff' mid-record; the record straddling
+		   the midpoint becomes the split point */
+		for (index=1; index <= elem->record; ++index) {
+			tmp = (sizeof(hfs_u16) + bnode_rsize(bnode, index))/2;
+			used += tmp;
+			if (used > cutoff) {
+				goto found;
+			}
+			used += tmp;
+		}
+		/* account for the record about to be inserted; if the
+		   midpoint falls here or earlier the insertion point
+		   ends up in the new (right) node */
+		tmp = (size + sizeof(hfs_u16))/2;
+		used += tmp;
+		if (used > cutoff) {
+			goto found;
+		}
+		in_right = 0;
+		used += tmp;
+		for (; index <= nrecs; ++index) {
+			tmp = (sizeof(hfs_u16) + bnode_rsize(bnode, index))/2;
+			used += tmp;
+			if (used > cutoff) {
+				goto found;
+			}
+			used += tmp;
+		}
+		/* couldn't find the split point! */
+		hfs_bnode_relse(&right);
+	}
+	return right;
+
+found:
+	if (in_right) {
+		/* retarget the insertion point into the new node */
+		elem->bnr = right;
+		elem->record -= index-1;
+	}
+	hfs_bnode_shift_right(bnode, right.bn, index);
+
+	return right;
+}
+
+/*
+ * binsert()
+ *
+ * Description:
+ * Inserts a record in a tree known to have enough room, even if the
+ * insertion requires the splitting of nodes.
+ * Input Variable(s):
+ * struct hfs_brec *brec: partial path to the node to insert in
+ * const struct hfs_bkey *key: key for the new record
+ * const void *data: data for the new record
+ * hfs_u8 keysize: size of the key
+ * hfs_u16 datasize: size of the data
+ * int reserve: number of nodes reserved in case of splits
+ * Output Variable(s):
+ * *brec = NULL
+ * Returns:
+ * int: 0 on success, error code on failure
+ * Preconditions:
+ * 'brec' points to a valid (struct hfs_brec) corresponding to a
+ * record in a leaf node, after which a record is to be inserted,
+ * or to "record 0" of the leaf node if the record is to be inserted
+ * before all existing records in the node. The (struct hfs_brec)
+ * includes all ancestors of the leaf node that are needed to
+ * complete the insertion including the parents of any nodes that
+ * will be split.
+ * 'key' points to a valid (struct hfs_bkey) which is appropriate
+ * to this tree, and which belongs at the insertion point.
+ * 'data' points to data appropriate for the indicated node.
+ * 'keysize' gives the size in bytes of the key.
+ * 'datasize' gives the size in bytes of the data.
+ * 'reserve' gives the number of nodes that have been reserved in the
+ * tree to allow for splitting of nodes.
+ * Postconditions:
+ * All 'reserve'd nodes have been either used or released.
+ * *brec = NULL
+ * On success the key and data have been inserted at the indicated
+ * location in the tree, all appropriate fields of the in-core data
+ * structures have been changed and updated versions of the on-disk
+ * data structures have been scheduled for write-back to disk.
+ * On failure the B*-tree is probably invalid both on disk and in-core.
+ *
+ * XXX: Some attempt at repair might be made in the event of failure,
+ * or the fs should be remounted read-only so things don't get worse.
+ */
+static int binsert(struct hfs_brec *brec, const struct hfs_bkey *key,
+		   const void *data, hfs_u8 keysize, hfs_u16 datasize,
+		   int reserve)
+{
+	struct hfs_bnode_ref left, right, other;
+	struct hfs_btree *tree = brec->tree;
+	struct hfs_belem *belem = brec->bottom;
+	int tmpsize = 1 + tree->bthKeyLen;
+	struct hfs_bkey *tmpkey = hfs_malloc(tmpsize);
+	hfs_u32 node;
+
+	/* bail out before touching the tree if the scratch key could
+	   not be allocated; otherwise the memcpy() into 'tmpkey' after
+	   a split would dereference NULL.  Release the node
+	   reservation as the other error paths do. */
+	if (!tmpkey) {
+		hfs_warn("hfs_binsert: out of memory\n");
+		tree->reserved -= reserve;
+		return -ENOMEM;
+	}
+
+	/* walk up from the leaf, splitting each node flagged as over
+	   full and inserting a pointer record in its parent */
+	while ((belem >= brec->top) && (belem->flags & HFS_BPATH_OVERFLOW)) {
+		left = belem->bnr;
+		/* guard against sibling-link cycles in a corrupted
+		   tree, which would deadlock the node locking */
+		if (left.bn->ndFLink &&
+		    hfs_bnode_in_brec(left.bn->ndFLink, brec)) {
+			hfs_warn("hfs_binsert: corrupt btree\n");
+			tree->reserved -= reserve;
+			hfs_free(tmpkey, tmpsize);
+			return -EIO;
+		}
+
+		right = split(belem, ROUND(keysize+1) + ROUND(datasize));
+		--reserve;
+		--tree->reserved;
+		if (!right.bn) {
+			hfs_warn("hfs_binsert: unable to split node!\n");
+			tree->reserved -= reserve;
+			hfs_free(tmpkey, tmpsize);
+			return -ENOSPC;
+		}
+		binsert_nonfull(brec, belem, key, data, keysize, datasize);
+
+		/* 'other' is whichever of the two nodes we are done
+		   with at this level; release it below */
+		if (belem->bnr.bn == left.bn) {
+			other = right;
+			if (belem->record == 1) {
+				hfs_bnode_update_key(brec, belem, left.bn, 0);
+			}
+		} else {
+			other = left;
+		}
+
+		if (left.bn->node == tree->root->node) {
+			/* we split the root: grow the tree a level */
+			add_root(tree, left.bn, right.bn);
+			hfs_bnode_relse(&other);
+			goto done;
+		}
+
+		/* next iteration inserts a pointer record in the
+		   parent: key = first key of 'right' (via 'tmpkey'),
+		   data = right's node number (big-endian) */
+		data = &node;
+		datasize = sizeof(node);
+		node = htonl(right.bn->node);
+		key = tmpkey;
+		keysize = tree->bthKeyLen;
+		memcpy(tmpkey, bnode_key(right.bn, 1), keysize+1);
+		hfs_bnode_relse(&other);
+
+		--belem;
+	}
+
+	if (belem < brec->top) {
+		hfs_warn("hfs_binsert: Missing parent.\n");
+		tree->reserved -= reserve;
+		hfs_free(tmpkey, tmpsize);
+		return -EIO;
+	}
+
+	/* final insertion into a node with enough free space */
+	binsert_nonfull(brec, belem, key, data, keysize, datasize);
+
+done:
+	tree->reserved -= reserve;
+	hfs_free(tmpkey, tmpsize);
+	return 0;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_binsert()
+ *
+ * Description:
+ * This function inserts a new record into a b-tree.
+ * Input Variable(s):
+ * struct hfs_btree *tree: pointer to the (struct hfs_btree) to insert in
+ * struct hfs_bkey *key: pointer to the (struct hfs_bkey) to insert
+ * void *data: pointer to the data to associate with 'key' in the b-tree
+ * unsigned int datasize: the size of the data
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * int: 0 on success, error code on failure
+ * Preconditions:
+ * 'tree' points to a valid (struct hfs_btree)
+ * 'key' points to a valid (struct hfs_bkey)
+ * 'data' points to valid memory of length 'datasize'
+ * Postconditions:
+ * If zero is returned then the record has been inserted in the
+ * indicated location updating all in-core data structures and
+ * scheduling all on-disk data structures for write-back.
+ */
+int hfs_binsert(struct hfs_btree *tree, const struct hfs_bkey *key,
+		const void *data, hfs_u16 datasize)
+{
+	struct hfs_brec brec;
+	struct hfs_belem *belem;
+	int err, reserve, retval;
+	hfs_u8 keysize;
+
+	if (!tree || (tree->magic != HFS_BTREE_MAGIC) || !key || !data) {
+		hfs_warn("hfs_binsert: invalid arguments.\n");
+		return -EINVAL;
+	}
+
+	if (key->KeyLen > tree->bthKeyLen) {
+		hfs_warn("hfs_binsert: oversized key\n");
+		return -EINVAL;
+	}
+
+restart:
+	if (!tree->bthNRecs) {
+		/* create the root bnode */
+		add_root(tree, NULL, NULL);
+		if (!hfs_brec_init(&brec, tree, HFS_BFIND_INSERT)) {
+			hfs_warn("hfs_binsert: failed to create root.\n");
+			return -ENOSPC;
+		}
+	} else {
+		/* locate the record to insert after; err == 0 means
+		   an exact key match, i.e. a duplicate */
+		err = hfs_bfind(&brec, tree, key, HFS_BFIND_INSERT);
+		if (err < 0) {
+			hfs_warn("hfs_binsert: hfs_brec_find failed.\n");
+			return err;
+		} else if (err == 0) {
+			hfs_brec_relse(&brec, NULL);
+			return -EEXIST;
+		}
+	}
+
+	keysize = key->KeyLen;
+	datasize = ROUND(datasize);
+	belem = brec.bottom;
+	belem->flags = 0;
+	/* will the new record (offset-table entry + length byte + key
+	   + data) fit in the leaf without splitting? */
+	if (bnode_freespace(belem->bnr.bn) <
+	    (sizeof(hfs_u16) + ROUND(keysize+1) + datasize)) {
+		belem->flags |= HFS_BPATH_OVERFLOW;
+	}
+	if (belem->record == 0) {
+		/* inserting before all existing records in the leaf */
+		belem->flags |= HFS_BPATH_FIRST;
+	}
+
+	if (!belem->flags) {
+		/* easy case: only the leaf itself need be locked */
+		hfs_brec_lock(&brec, brec.bottom);
+		reserve = 0;
+	} else {
+		/* splits may propagate up the recorded path: reserve
+		   one node per level, plus one for a possible new root
+		   (brec.top == 0 appears to mean the path reaches the
+		   root -- confirm against hfs_btree.h) */
+		reserve = brec.bottom - brec.top;
+		if (brec.top == 0) {
+			++reserve;
+		}
+		/* make certain we have enough nodes to proceed */
+		if ((tree->bthFree - tree->reserved) < reserve) {
+			/* drop the path, take the tree lock, try to
+			   grow the tree, then start over from scratch */
+			hfs_brec_relse(&brec, NULL);
+			while (tree->lock) {
+				hfs_sleep_on(&tree->wait);
+			}
+			tree->lock = 1;
+			/* recheck under the lock: someone else may
+			   have extended the tree while we slept */
+			if ((tree->bthFree - tree->reserved) < reserve) {
+				hfs_btree_extend(tree);
+			}
+			tree->lock = 0;
+			hfs_wake_up(&tree->wait);
+			if ((tree->bthFree - tree->reserved) < reserve) {
+				return -ENOSPC;
+			} else {
+				goto restart;
+			}
+		}
+		tree->reserved += reserve;
+		hfs_brec_lock(&brec, NULL);
+	}
+
+	retval = binsert(&brec, key, data, keysize, datasize, reserve);
+	hfs_brec_relse(&brec, NULL);
+	if (!retval) {
+		++tree->bthNRecs;
+		tree->dirt = 1;
+	}
+	return retval;
+}
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
new file mode 100644
index 000000000..4fcec675f
--- /dev/null
+++ b/fs/hfs/bitmap.c
@@ -0,0 +1,412 @@
+/*
+ * linux/fs/hfs/bitmap.c
+ *
+ * Copyright (C) 1996-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * Based on GPLed code Copyright (C) 1995 Michael Dreher
+ *
+ * This file contains the code to modify the volume bitmap:
+ * search/set/clear bits.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_vbm_count_free()
+ *
+ * Description:
+ * Count the number of consecutive cleared bits in the bitmap blocks of
+ * the hfs MDB starting at bit number 'start'. 'mdb' had better
+ * be locked or the indicated number of blocks may be no longer free,
+ * when this functions returns!
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: Pointer to the hfs MDB
+ * hfs_u16 start: bit number to start at
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * The number of consecutive cleared bits starting at bit 'start'
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb).
+ * Postconditions:
+ * NONE
+ */
+hfs_u16 hfs_vbm_count_free(const struct hfs_mdb *mdb, hfs_u16 start)
+{
+	hfs_u16 block_nr; /* index of the current bitmap block */
+	hfs_u16 bit_nr; /* index of the current bit in block */
+	hfs_u16 count; /* number of bits found so far */
+	hfs_u16 len; /* number of bits found in this block */
+	hfs_u16 max_block; /* index of last bitmap block */
+	hfs_u16 max_bits; /* number of valid bits in current block */
+
+	/* is this a valid HFS MDB? */
+	if (!mdb) {
+		return 0;
+	}
+
+	block_nr = start / HFS_BM_BPB;
+	bit_nr = start % HFS_BM_BPB;
+	max_block = (mdb->fs_ablocks + HFS_BM_BPB - 1) / HFS_BM_BPB - 1;
+
+	count = 0;
+	while (block_nr <= max_block) {
+		if (block_nr != max_block) {
+			max_bits = HFS_BM_BPB;
+		} else {
+			/* number of valid bits in the final block; a
+			   zero remainder means the block is completely
+			   used, not empty (fs_ablocks is an exact
+			   multiple of HFS_BM_BPB) */
+			max_bits = mdb->fs_ablocks % HFS_BM_BPB;
+			if (max_bits == 0) {
+				max_bits = HFS_BM_BPB;
+			}
+		}
+
+		len=hfs_count_zero_bits(hfs_buffer_data(mdb->bitmap[block_nr]),
+					max_bits, bit_nr);
+		count += len;
+
+		/* see if we fell short of the end of this block */
+		if ((len + bit_nr) < max_bits) {
+			break;
+		}
+
+		++block_nr;
+		bit_nr = 0;
+	}
+	return count;
+}
+
+/*
+ * hfs_vbm_search_free()
+ *
+ * Description:
+ * Search for 'num_bits' consecutive cleared bits in the bitmap blocks of
+ * the hfs MDB. 'mdb' had better be locked or the returned range
+ * may be no longer free, when this functions returns!
+ * XXX Currently the search starts from bit 0, but it should start with
+ * the bit number stored in 's_alloc_ptr' of the MDB.
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: Pointer to the hfs MDB
+ * hfs_u16 *num_bits: Pointer to the number of cleared bits
+ * to search for
+ * Output Variable(s):
+ * hfs_u16 *num_bits: The number of consecutive clear bits of the
+ * returned range. If the bitmap is fragmented, this will be less than
+ * requested and it will be zero, when the disk is full.
+ * Returns:
+ * The number of the first bit of the range of cleared bits which has been
+ * found. When 'num_bits' is zero, this is invalid!
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb).
+ * 'num_bits' points to a variable of type (hfs_u16), which contains
+ * the number of cleared bits to find.
+ * Postconditions:
+ * 'num_bits' is set to the length of the found sequence.
+ */
+hfs_u16 hfs_vbm_search_free(const struct hfs_mdb *mdb, hfs_u16 *num_bits)
+{
+	hfs_u16 block_nr; /* index of the current bitmap block */
+
+	/* position and length of current portion of a run */
+	hfs_u16 cur_pos, cur_len;
+
+	/* position and length of current complete run */
+	hfs_u16 pos=0, len=0;
+
+	/* position and length of longest complete run */
+	hfs_u16 longest_pos=0, longest_len=0;
+
+	void *bitmap; /* contents of the current bitmap block */
+	hfs_u16 max_block; /* upper limit of outer loop */
+	hfs_u16 max_bits; /* upper limit of inner loop */
+
+	/* is this a valid HFS MDB? */
+	if (!mdb) {
+		*num_bits = 0;
+		hfs_warn("hfs_vbm_search_free: not a valid MDB\n");
+		return 0;
+	}
+
+	/* make sure we have actual work to perform */
+	if (!(*num_bits)) {
+		return 0;
+	}
+
+	max_block = (mdb->fs_ablocks+HFS_BM_BPB-1) / HFS_BM_BPB - 1;
+
+	/* search all bitmap blocks */
+	for (block_nr = 0; block_nr <= max_block; block_nr++) {
+		bitmap = hfs_buffer_data(mdb->bitmap[block_nr]);
+
+		if (block_nr != max_block) {
+			max_bits = HFS_BM_BPB;
+		} else {
+			/* number of valid bits in the final block; a
+			   zero remainder means the block is completely
+			   used, not empty (fs_ablocks is an exact
+			   multiple of HFS_BM_BPB) */
+			max_bits = mdb->fs_ablocks % HFS_BM_BPB;
+			if (max_bits == 0) {
+				max_bits = HFS_BM_BPB;
+			}
+		}
+
+		cur_pos = 0;
+		do {
+			cur_len = hfs_count_zero_bits(bitmap, max_bits,
+						      cur_pos);
+			len += cur_len;
+			if (len > longest_len) {
+				longest_pos = pos;
+				longest_len = len;
+				if (len >= *num_bits) {
+					goto search_end;
+				}
+			}
+			if ((cur_pos + cur_len) == max_bits) {
+				break; /* zeros may continue into next block */
+			}
+
+			/* find start of next run of zeros */
+			cur_pos = hfs_find_zero_bit(bitmap, max_bits,
+						    cur_pos + cur_len);
+			pos = cur_pos + HFS_BM_BPB*block_nr;
+			len = 0;
+		} while (cur_pos < max_bits);
+	}
+
+search_end:
+	*num_bits = longest_len;
+	return longest_pos;
+}
+
+
+/*
+ * hfs_set_vbm_bits()
+ *
+ * Description:
+ * Set the requested bits in the volume bitmap of the hfs filesystem
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: Pointer to the hfs MDB
+ * hfs_u16 start: The offset of the first bit
+ * hfs_u16 count: The number of bits
+ * Output Variable(s):
+ * None
+ * Returns:
+ * 0: no error
+ * -1: One of the bits was already set. This is a strange
+ * error and when it happens, the filesystem must be repaired!
+ * -2: One or more of the bits are out of range of the bitmap.
+ * -3: The 's_magic' field of the MDB does not match
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb).
+ * Postconditions:
+ * Starting with bit number 'start', 'count' bits in the volume bitmap
+ * are set. The affected bitmap blocks are marked "dirty", the free
+ * block count of the MDB is updated and the MDB is marked dirty.
+ */
+int hfs_set_vbm_bits(struct hfs_mdb *mdb, hfs_u16 start, hfs_u16 count)
+{
+	hfs_u16 block_nr; /* index of the current bitmap block */
+	hfs_u16 u32_nr; /* index of the current hfs_u32 in block */
+	hfs_u16 bit_nr; /* index of the current bit in hfs_u32 */
+	hfs_u16 left = count; /* number of bits left to be set */
+	hfs_u32 *bitmap; /* the current bitmap block's contents */
+
+	/* is this a valid HFS MDB? */
+	if (!mdb) {
+		return -3;
+	}
+
+	/* is there any actual work to be done? */
+	if (!count) {
+		return 0;
+	}
+
+	/* are all of the bits in range? */
+	if ((start + count) > mdb->fs_ablocks) {
+		return -2;
+	}
+
+	block_nr = start / HFS_BM_BPB;
+	u32_nr = (start % HFS_BM_BPB) / 32;
+	bit_nr = start % 32;
+
+	/* bitmap is always on a 32-bit boundary */
+	bitmap = (hfs_u32 *)hfs_buffer_data(mdb->bitmap[block_nr]);
+
+	/* do any partial hfs_u32 at the start */
+	if (bit_nr != 0) {
+		while ((bit_nr < 32) && left) {
+			/* a non-zero return means the bit was already
+			   set: the block was not free */
+			if (hfs_set_bit(bit_nr, bitmap + u32_nr)) {
+				hfs_buffer_dirty(mdb->bitmap[block_nr]);
+				return -1;
+			}
+			++bit_nr;
+			--left;
+		}
+		bit_nr=0;
+
+		/* advance u32_nr and check for end of this block */
+		/* each bitmap block holds 128 hfs_u32s */
+		if (++u32_nr > 127) {
+			u32_nr = 0;
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			++block_nr;
+			/* bitmap is always on a 32-bit boundary */
+			bitmap = (hfs_u32 *)
+				hfs_buffer_data(mdb->bitmap[block_nr]);
+		}
+	}
+
+	/* do full hfs_u32s */
+	while (left > 31) {
+		/* any one bit in the word means a block in this range
+		   was already allocated */
+		if (bitmap[u32_nr] != ((hfs_u32)0)) {
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			return -1;
+		}
+		bitmap[u32_nr] = ~((hfs_u32)0);
+		left -= 32;
+
+		/* advance u32_nr and check for end of this block */
+		if (++u32_nr > 127) {
+			u32_nr = 0;
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			++block_nr;
+			/* bitmap is always on a 32-bit boundary */
+			bitmap = (hfs_u32 *)
+				hfs_buffer_data(mdb->bitmap[block_nr]);
+		}
+	}
+
+
+	/* do any partial hfs_u32 at end */
+	while (left) {
+		if (hfs_set_bit(bit_nr, bitmap + u32_nr)) {
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			return -1;
+		}
+		++bit_nr;
+		--left;
+	}
+
+	hfs_buffer_dirty(mdb->bitmap[block_nr]);
+	mdb->free_ablocks -= count;
+
+	/* successful completion */
+	hfs_mdb_dirty(mdb->sys_mdb);
+	return 0;
+}
+
+/*
+ * hfs_clear_vbm_bits()
+ *
+ * Description:
+ * Clear the requested bits in the volume bitmap of the hfs filesystem
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: Pointer to the hfs MDB
+ * hfs_u16 start: The offset of the first bit
+ * hfs_u16 count: The number of bits
+ * Output Variable(s):
+ * None
+ * Returns:
+ * 0: no error
+ * -1: One of the bits was already clear. This is a strange
+ * error and when it happens, the filesystem must be repaired!
+ * -2: One or more of the bits are out of range of the bitmap.
+ * -3: The 's_magic' field of the MDB does not match
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb).
+ * Postconditions:
+ * Starting with bit number 'start', 'count' bits in the volume bitmap
+ * are cleared. The affected bitmap blocks are marked "dirty", the free
+ * block count of the MDB is updated and the MDB is marked dirty.
+ */
+int hfs_clear_vbm_bits(struct hfs_mdb *mdb, hfs_u16 start, hfs_u16 count)
+{
+	hfs_u16 block_nr; /* index of the current bitmap block */
+	hfs_u16 u32_nr; /* index of the current hfs_u32 in block */
+	hfs_u16 bit_nr; /* index of the current bit in hfs_u32 */
+	hfs_u16 left = count; /* number of bits left to be cleared */
+	hfs_u32 *bitmap; /* the current bitmap block's contents */
+
+	/* is this a valid HFS MDB? */
+	if (!mdb) {
+		return -3;
+	}
+
+	/* is there any actual work to be done? */
+	if (!count) {
+		return 0;
+	}
+
+	/* are all of the bits in range? */
+	if ((start + count) > mdb->fs_ablocks) {
+		return -2;
+	}
+
+	block_nr = start / HFS_BM_BPB;
+	u32_nr = (start % HFS_BM_BPB) / 32;
+	bit_nr = start % 32;
+
+	/* bitmap is always on a 32-bit boundary */
+	bitmap = (hfs_u32 *)hfs_buffer_data(mdb->bitmap[block_nr]);
+
+	/* do any partial hfs_u32 at the start */
+	if (bit_nr != 0) {
+		while ((bit_nr < 32) && left) {
+			/* a zero return means the bit was already
+			   clear: the block was not allocated */
+			if (!hfs_clear_bit(bit_nr, bitmap + u32_nr)) {
+				hfs_buffer_dirty(mdb->bitmap[block_nr]);
+				return -1;
+			}
+			++bit_nr;
+			--left;
+		}
+		bit_nr=0;
+
+		/* advance u32_nr and check for end of this block */
+		/* each bitmap block holds 128 hfs_u32s */
+		if (++u32_nr > 127) {
+			u32_nr = 0;
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			++block_nr;
+			/* bitmap is always on a 32-bit boundary */
+			bitmap = (hfs_u32 *)
+				hfs_buffer_data(mdb->bitmap[block_nr]);
+		}
+	}
+
+	/* do full hfs_u32s */
+	while (left > 31) {
+		/* any zero bit in the word means a block in this
+		   range was already free */
+		if (bitmap[u32_nr] != ~((hfs_u32)0)) {
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			return -1;
+		}
+		bitmap[u32_nr] = ((hfs_u32)0);
+		left -= 32;
+
+		/* advance u32_nr and check for end of this block */
+		if (++u32_nr > 127) {
+			u32_nr = 0;
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			++block_nr;
+			/* bitmap is always on a 32-bit boundary */
+			bitmap = (hfs_u32 *)
+				hfs_buffer_data(mdb->bitmap[block_nr]);
+		}
+	}
+
+
+	/* do any partial hfs_u32 at end */
+	while (left) {
+		if (!hfs_clear_bit(bit_nr, bitmap + u32_nr)) {
+			hfs_buffer_dirty(mdb->bitmap[block_nr]);
+			return -1;
+		}
+		++bit_nr;
+		--left;
+	}
+
+	hfs_buffer_dirty(mdb->bitmap[block_nr]);
+	mdb->free_ablocks += count;
+
+	/* successful completion */
+	hfs_mdb_dirty(mdb->sys_mdb);
+	return 0;
+}
diff --git a/fs/hfs/bitops.c b/fs/hfs/bitops.c
new file mode 100644
index 000000000..1d3a113bb
--- /dev/null
+++ b/fs/hfs/bitops.c
@@ -0,0 +1,124 @@
+/*
+ * linux/fs/hfs/bitops.c
+ *
+ * Copyright (C) 1996 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains functions to handle bitmaps in "left-to-right"
+ * bit-order such that the MSB of a 32-bit big-endian word is bit 0.
+ * (This corresponds to bit 7 of a 32-bit little-endian word.)
+ *
+ * I have tested and confirmed that the results are identical on the
+ * Intel x86, PowerPC and DEC Alpha processors.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ */
+
+#include "hfs.h"
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_find_zero_bit()
+ *
+ * Description:
+ * Given a block of memory, its length in bits, and a starting bit number,
+ * determine the number of the first zero bits (in left-to-right ordering)
+ * in that range.
+ *
+ * Returns >= 'size' if no zero bits are found in the range.
+ *
+ * Accesses memory in 32-bit aligned chunks of 32-bits and thus
+ * may read beyond the 'size'th bit.
+ */
+hfs_u32 hfs_find_zero_bit(const hfs_u32 *start, hfs_u32 size, hfs_u32 offset)
+{
+	const hfs_u32 *limit = start + ((size + 31) >> 5);
+	const hfs_u32 *word = start + (offset >> 5);
+	int bit = offset % 32;
+
+	/* nothing to scan: report "not found" as >= size */
+	if (offset >= size) {
+		return size;
+	}
+
+	/* finish scanning the partially-consumed first word, if any */
+	if (bit != 0) {
+		while (bit < 32) {
+			if (!hfs_test_bit(bit, word)) {
+				goto found;
+			}
+			++bit;
+		}
+		bit = 0;
+		++word;
+	}
+
+	/* skip whole words that are entirely ones */
+	while (word < limit) {
+		if (*word != ~((hfs_u32)0)) {
+			/* this word contains a zero bit; locate it */
+			while (hfs_test_bit(bit, word)) {
+				++bit;
+			}
+			break;
+		}
+		++word;
+	}
+
+found:
+	/* convert (word, bit) back to an absolute bit number */
+	return bit | ((word - start) << 5);
+}
+
+/*
+ * hfs_count_zero_bits()
+ *
+ * Description:
+ * Given a block of memory, its length in bits, and a starting bit number,
+ * determine the number of consecutive zero bits (in left-to-right ordering)
+ * in that range.
+ *
+ * Accesses memory in 32-bit aligned chunks of 32-bits and thus
+ * may read beyond the 'size'th bit.
+ */
+hfs_u32 hfs_count_zero_bits(const hfs_u32 *start, hfs_u32 size, hfs_u32 offset)
+{
+	const hfs_u32 *limit = start + ((size + 31) >> 5);
+	const hfs_u32 *word = start + (offset >> 5);
+	int bit = offset % 32;
+
+	/* nothing to scan */
+	if (offset >= size) {
+		return 0;
+	}
+
+	/* finish scanning the partially-consumed first word, if any */
+	if (bit != 0) {
+		while (bit < 32) {
+			if (hfs_test_bit(bit, word)) {
+				goto found;
+			}
+			++bit;
+		}
+		bit = 0;
+		++word;
+	}
+
+	/* skip whole words that are entirely zero */
+	while (word < limit) {
+		if (*word != ((hfs_u32)0)) {
+			/* this word holds the terminating one bit */
+			while (!hfs_test_bit(bit, word)) {
+				++bit;
+			}
+			break;
+		}
+		++word;
+	}
+
+found:
+	/* convert (word, bit) back to an absolute bit number, clamp to
+	   'size' (the last word may extend past it), and return the
+	   run length */
+	bit |= (word - start) << 5;
+	if (bit > size) {
+		bit = size;
+	}
+	return bit - offset;
+}
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
new file mode 100644
index 000000000..762278667
--- /dev/null
+++ b/fs/hfs/bnode.c
@@ -0,0 +1,540 @@
+/*
+ * linux/fs/hfs/bnode.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to access nodes in the B-tree structure.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local variables ================*/
+
+/* debugging statistics */
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+int bnode_count = 0;
+#endif
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_bnode_delete()
+ *
+ * Description:
+ * This function is called to remove a bnode from the cache and
+ * release its resources.
+ * Input Variable(s):
+ * struct hfs_bnode *bn: Pointer to the (struct hfs_bnode) to be
+ * removed from the cache.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'bn' points to a "valid" (struct hfs_bnode).
+ * Postconditions:
+ * The node 'bn' is removed from the cache, its memory freed and its
+ * buffer (if any) released.
+ */
+void hfs_bnode_delete(struct hfs_bnode *bn)
+{
+ struct hfs_bnode *next = bn->next;
+ struct hfs_bnode *prev = bn->prev;
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ --bnode_count;
+#endif
+ /* unlink from the doubly-linked cache chain */
+ if (next) {
+ next->prev = prev;
+ }
+ if (prev) {
+ prev->next = next;
+ }
+ /* if this node headed its hash bucket, advance the bucket */
+ if (bhash(bn->tree, bn->node) == bn) {
+ bhash(bn->tree, bn->node) = next;
+ }
+ /* drop the buffer (NULL-safe) and free the node itself */
+ hfs_buffer_put(bn->buf);
+ HFS_DELETE(bn);
+}
+
+
+/*
+ * hfs_bnode_read()
+ *
+ * Description:
+ * This function creates a (struct hfs_bnode) and, if appropriate,
+ * inserts it in the cache.
+ * Input Variable(s):
+ * struct hfs_bnode *bnode: pointer to the new bnode.
+ * struct hfs_btree *tree: pointer to the (struct hfs_btree)
+ * containing the desired node
+ * hfs_u32 node: the number of the desired node.
+ * int sticky: the value to assign to the 'sticky' field.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * (struct hfs_bnode *) pointing to the newly created bnode or NULL.
+ * Preconditions:
+ * 'bnode' points to a "valid" (struct hfs_bnode).
+ * 'tree' points to a "valid" (struct hfs_btree).
+ * 'node' is an existing node number in the B-tree.
+ * Postconditions:
+ * The following are true of 'bnode' upon return:
+ * The 'magic' field is set to indicate a valid (struct hfs_bnode).
+ * The 'sticky', 'tree' and 'node' fields are initialized to the
+ * values of the of the corresponding arguments.
+ * If the 'sticky' argument is zero then the fields 'prev' and
+ * 'next' are initialized by inserting the (struct hfs_bnode) in the
+ * linked list of the appropriate cache slot; otherwise they are
+ * initialized to NULL.
+ * The data is read from disk (or buffer cache) and the 'buf' field
+ * points to the buffer for that data.
+ * If no other processes tried to access this node while this
+ * process was waiting on disk I/O (if necessary) then the
+ * remaining fields are zero ('count', 'resrv', 'lock') or NULL
+ * ('wqueue', 'rqueue') corresponding to no accesses.
+ * If there were access attempts during I/O then they were blocked
+ * until the I/O was complete, and the fields 'count', 'resrv',
+ * 'lock', 'wqueue' and 'rqueue' reflect the results of unblocking
+ * those processes when the I/O was completed.
+ */
+void hfs_bnode_read(struct hfs_bnode *bnode, struct hfs_btree *tree,
+      hfs_u32 node, int sticky)
+{
+ struct NodeDescriptor *nd;
+ int block, lcv;
+ hfs_u16 curr, prev, limit;
+
+ /* Initialize the structure */
+ memset(bnode, 0, sizeof(*bnode));
+ bnode->magic = HFS_BNODE_MAGIC;
+ bnode->tree = tree;
+ bnode->node = node;
+ bnode->sticky = sticky;
+
+ if (sticky == HFS_NOT_STICKY) {
+ /* Insert it in the cache if appropriate */
+ if ((bnode->next = bhash(tree, node))) {
+ bnode->next->prev = bnode;
+ }
+ bhash(tree, node) = bnode;
+ }
+
+ /* Make the bnode look like it is being
+    modified so other processes will wait for
+    the I/O to complete */
+ bnode->count = bnode->resrv = bnode->lock = 1;
+
+ /* Read in the node, possibly causing a schedule()
+    call.  If the I/O fails then emit a warning.  Each
+    process that was waiting on the bnode (including
+    the current one) will notice the failure and
+    hfs_bnode_relse() the node.  The last hfs_bnode_relse()
+    will call hfs_bnode_delete() and discard the bnode.  */
+
+ /* map the node number to a physical block in the B-tree file */
+ block = hfs_extent_map(&tree->entry.u.file.data_fork, node, 0);
+ if (!block) {
+ hfs_warn("hfs_bnode_read: bad node number 0x%08x\n", node);
+ } else if (hfs_buffer_ok(bnode->buf =
+    hfs_buffer_get(tree->sys_mdb, block, 1))) {
+ /* read in the NodeDescriptor */
+ nd = (struct NodeDescriptor *)hfs_buffer_data(bnode->buf);
+ bnode->ndFLink = hfs_get_hl(nd->ndFLink);
+ bnode->ndBLink = hfs_get_hl(nd->ndBLink);
+ bnode->ndType = nd->ndType;
+ bnode->ndNHeight = nd->ndNHeight;
+ bnode->ndNRecs = hfs_get_hs(nd->ndNRecs);
+
+ /* verify the integrity of the node: every record offset,
+    including the free-space offset (entry ndNRecs+1), must
+    lie between the NodeDescriptor and the offset table and
+    must be non-decreasing */
+ prev = sizeof(struct NodeDescriptor);
+ limit = HFS_SECTOR_SIZE - sizeof(hfs_u16)*(bnode->ndNRecs + 1);
+ for (lcv=1; lcv <= (bnode->ndNRecs + 1); ++lcv) {
+ curr = hfs_get_hs(RECTBL(bnode, lcv));
+ if ((curr < prev) || (curr > limit)) {
+ hfs_warn("hfs_bnode_read: corrupt node "
+ "number 0x%08x\n", node);
+ /* a NULL buf marks the node as failed */
+ hfs_buffer_put(bnode->buf);
+ bnode->buf = NULL;
+ break;
+ }
+ prev = curr;
+ }
+ }
+
+ /* Undo our fakery with the lock state and
+    hfs_wake_up() anyone who we managed to trick */
+ --bnode->count;
+ bnode->resrv = bnode->lock = 0;
+ hfs_wake_up(&bnode->rqueue);
+}
+
+/*
+ * hfs_bnode_lock()
+ *
+ * Description:
+ * This function does the locking of a bnode.
+ * Input Variable(s):
+ * struct hfs_bnode *bn: pointer to the (struct hfs_bnode) to lock
+ * int lock_type: the type of lock desired
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'bn' points to a "valid" (struct hfs_bnode).
+ * 'lock_type' is a valid hfs_lock_t
+ * Postconditions:
+ * The 'count' field of 'bn' is incremented by one. If 'lock_type'
+ * is HFS_LOCK_RESRV the 'resrv' field is also incremented.
+ */
+void hfs_bnode_lock(struct hfs_bnode_ref *bnr, int lock_type)
+{
+ struct hfs_bnode *bn = bnr->bn;
+
+ /* no-op if there is no node or no change of lock state */
+ if ((lock_type == bnr->lock_type) || !bn) {
+ return;
+ }
+
+ /* leaving a write lock: flush our modifications first */
+ if (bnr->lock_type == HFS_LOCK_WRITE) {
+ hfs_bnode_commit(bnr->bn);
+ }
+
+ /* outer switch: the lock state we are entering;
+    inner switches: the lock state we are leaving */
+ switch (lock_type) {
+ default:
+ goto bail;
+ break;
+
+ case HFS_LOCK_READ:
+ /* We may not obtain read access if any process is
+    currently modifying or waiting to modify this node.
+    If we can't obtain access we wait on the rqueue
+    wait queue to be woken up by the modifying process
+    when it relinquishes its lock. */
+ switch (bnr->lock_type) {
+ default:
+ goto bail;
+ break;
+
+ case HFS_LOCK_NONE:
+ while (bn->lock || bn->wqueue) {
+ hfs_sleep_on(&bn->rqueue);
+ }
+ ++bn->count;
+ break;
+ }
+ break;
+
+ case HFS_LOCK_RESRV:
+ /* We may not obtain a reservation (read access with
+    an option to write later), if any process currently
+    holds a reservation on this node.  That includes
+    any process which is currently modifying this node.
+    If we can't obtain access, then we wait on the
+    rqueue wait queue to be woken up by the
+    reservation-holder when it calls hfs_bnode_relse. */
+ switch (bnr->lock_type) {
+ default:
+ goto bail;
+ break;
+
+ case HFS_LOCK_NONE:
+ while (bn->resrv) {
+ hfs_sleep_on(&bn->rqueue);
+ }
+ bn->resrv = 1;
+ ++bn->count;
+ break;
+
+ case HFS_LOCK_WRITE:
+ /* downgrade: drop the exclusive lock but keep
+    the reservation we already hold */
+ bn->lock = 0;
+ hfs_wake_up(&bn->rqueue);
+ break;
+ }
+ break;
+
+ case HFS_LOCK_WRITE:
+ switch (bnr->lock_type) {
+ default:
+ goto bail;
+ break;
+
+ case HFS_LOCK_NONE:
+ /* first acquire a reservation ... */
+ while (bn->resrv) {
+ hfs_sleep_on(&bn->rqueue);
+ }
+ bn->resrv = 1;
+ ++bn->count;
+ /* fall through: ... then upgrade it to a write lock */
+ case HFS_LOCK_RESRV:
+ /* wait until we are the only user, then lock */
+ while (bn->count > 1) {
+ hfs_sleep_on(&bn->wqueue);
+ }
+ bn->lock = 1;
+ break;
+ }
+ break;
+
+ case HFS_LOCK_NONE:
+ switch (bnr->lock_type) {
+ default:
+ goto bail;
+ break;
+
+ case HFS_LOCK_READ:
+ /* This process was reading this node.  If
+    there is now exactly one other process using
+    the node then hfs_wake_up() a (potentially
+    nonexistent) waiting process.  Note that I
+    refer to "a" process since the reservation
+    system ensures that only one process can
+    get itself on the wait queue.  */
+ if (bn->count == 2) {
+ hfs_wake_up(&bn->wqueue);
+ }
+ break;
+
+ case HFS_LOCK_WRITE:
+ /* This process was modifying this node.
+    Unlock the node and fall-through to the
+    HFS_LOCK_RESRV case, since a 'reservation'
+    is a prerequisite for HFS_LOCK_WRITE.  */
+ bn->lock = 0;
+ /* fall through */
+ case HFS_LOCK_RESRV:
+ /* This process had placed a 'reservation' on
+    this node, indicating an intention to
+    possibly modify the node.  We can get to
+    this spot directly (if the 'reservation'
+    not converted to a HFS_LOCK_WRITE), or by
+    falling through from the above case if the
+    reservation was converted.
+    Since HFS_LOCK_RESRV and HFS_LOCK_WRITE
+    both block processes that want access
+    (HFS_LOCK_RESRV blocks other processes that
+    want reservations but allow HFS_LOCK_READ
+    accesses, while HFS_LOCK_WRITE must have
+    exclusive access and thus blocks both
+    types) we hfs_wake_up() any processes that
+    might be waiting for access.  If multiple
+    processes are waiting for a reservation
+    then the magic of process scheduling will
+    settle the dispute. */
+ bn->resrv = 0;
+ hfs_wake_up(&bn->rqueue);
+ break;
+ }
+ /* one fewer user in every release path */
+ --bn->count;
+ break;
+ }
+ bnr->lock_type = lock_type;
+ return;
+
+bail:
+ hfs_warn("hfs_bnode_lock: invalid lock change: %d->%d.\n",
+ bnr->lock_type, lock_type);
+ return;
+}
+
+/*
+ * hfs_bnode_relse()
+ *
+ * Description:
+ * This function is called when a process is done using a bnode. If
+ * the proper conditions are met then we call hfs_bnode_delete() to remove
+ * it from the cache. If it is not deleted then we update its state
+ * to reflect one less process using it.
+ * Input Variable(s):
+ * struct hfs_bnode *bn: pointer to the (struct hfs_bnode) to release.
+ * int lock_type: The type of lock held by the process releasing this node.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'bn' is NULL or points to a "valid" (struct hfs_bnode).
+ * Postconditions:
+ * If 'bn' meets the appropriate conditions (see below) then it is
+ * kept in the cache and all fields are set to consistent values
+ * which reflect one less process using the node than upon entry.
+ * If 'bn' does not meet the conditions then it is deleted (see
+ * hfs_bnode_delete() for postconditions).
+ * In either case, if 'lock_type' is HFS_LOCK_WRITE
+ * then the corresponding buffer is dirtied.
+ */
+void hfs_bnode_relse(struct hfs_bnode_ref *bnr)
+{
+ struct hfs_bnode *bn;
+
+ /* tolerate NULL references and already-released references */
+ if (!bnr || !(bn = bnr->bn)) {
+ return;
+ }
+
+ /* We update the lock state of the node if it is still in use
+    or if it is "sticky" (such as the B-tree head and root).
+    Otherwise we just delete it.  */
+ if ((bn->count > 1) || (bn->rqueue) || (bn->sticky != HFS_NOT_STICKY)) {
+ hfs_bnode_lock(bnr, HFS_LOCK_NONE);
+ } else {
+ /* dirty buffer if we (might) have modified it */
+ if (bnr->lock_type == HFS_LOCK_WRITE) {
+ hfs_bnode_commit(bn);
+ }
+ hfs_bnode_delete(bn);
+ bnr->lock_type = HFS_LOCK_NONE;
+ }
+ /* the reference no longer points at a node */
+ bnr->bn = NULL;
+}
+
+/*
+ * hfs_bnode_find()
+ *
+ * Description:
+ * This function is called to obtain a bnode. The cache is
+ * searched for the node. If it not found there it is added to
+ * the cache by hfs_bnode_read(). There are two special cases node=0
+ * (the header node) and node='tree'->bthRoot (the root node), in
+ * which the nodes are obtained from fields of 'tree' without
+ * consulting or modifying the cache.
+ * Input Variable(s):
+ * struct hfs_tree *tree: pointer to the (struct hfs_btree) from
+ * which to get a node.
+ * int node: the node number to get from 'tree'.
+ * int lock_type: The kind of access (HFS_LOCK_READ, or
+ * HFS_LOCK_RESRV) to obtain to the node
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * (struct hfs_bnode_ref) Reference to the requested node.
+ * Preconditions:
+ * 'tree' points to a "valid" (struct hfs_btree).
+ * Postconditions:
+ * If 'node' refers to a valid node in 'tree' and 'lock_type' has
+ * one of the values listed above and no I/O errors occur then the
+ * value returned refers to a valid (struct hfs_bnode) corresponding
+ * to the requested node with the requested access type. The node
+ * is also added to the cache if not previously present and not the
+ * root or header.
+ * If the conditions given above are not met, the bnode in the
+ * returned reference is NULL.
+ */
+struct hfs_bnode_ref hfs_bnode_find(struct hfs_btree *tree,
+        hfs_u32 node, int lock_type)
+{
+ struct hfs_bnode *bn;
+ struct hfs_bnode *empty = NULL;
+ struct hfs_bnode_ref bnr;
+
+ /* a NULL bn with lock_type NONE is the failure return value */
+ bnr.lock_type = HFS_LOCK_NONE;
+ bnr.bn = NULL;
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ hfs_warn("hfs_bnode_find: %c %d:%d\n",
+ lock_type==HFS_LOCK_READ?'R':
+ (lock_type==HFS_LOCK_RESRV?'V':'W'),
+ (int)ntohl(tree->entry.cnid), node);
+#endif
+
+ /* check special cases: the header (node 0) and the root live
+    in 'tree' itself and bypass the cache entirely */
+ if (!node) {
+ bn = &tree->head;
+ goto return_it;
+ } else if (node == tree->bthRoot) {
+ bn = tree->root;
+ goto return_it;
+ }
+
+restart:
+ /* look for the node in the cache. */
+ bn = bhash(tree, node);
+ while (bn && (bn->magic == HFS_BNODE_MAGIC)) {
+ if (bn->node == node) {
+ goto found_it;
+ }
+ bn = bn->next;
+ }
+
+ if (!empty) {
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ ++bnode_count;
+#endif
+ /* HFS_NEW may sleep, so another process could have read the
+    node in the meantime; rescan the cache before using it */
+ if (HFS_NEW(empty)) {
+ goto restart;
+ }
+ return bnr;
+ }
+ bn = empty;
+ hfs_bnode_read(bn, tree, node, HFS_NOT_STICKY);
+ goto return_it;
+
+found_it:
+ /* check validity */
+ if (bn->magic != HFS_BNODE_MAGIC) {
+ /* If we find a corrupt bnode then we return
+    NULL.  However, we don't try to remove it
+    from the cache or release its resources
+    since we have no idea what kind of trouble
+    we could get into that way.  */
+ hfs_warn("hfs_bnode_find: bnode cache is corrupt.\n");
+ return bnr;
+ }
+ if (empty) {
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ --bnode_count;
+#endif
+ /* the cache won the race; discard our spare node */
+ HFS_DELETE(empty);
+ }
+
+return_it:
+ /* Wait our turn */
+ bnr.bn = bn;
+ hfs_bnode_lock(&bnr, lock_type);
+
+ /* Check for failure to read the node from disk;
+    relse clears bnr.bn so the caller sees a failed reference */
+ if (!hfs_buffer_ok(bn->buf)) {
+ hfs_bnode_relse(&bnr);
+ }
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ if (!bnr.bn) {
+ hfs_warn("hfs_bnode_find: failed\n");
+ } else {
+ hfs_warn("hfs_bnode_find: use %d(%d) lvl %d [%d]\n", bn->count,
+ bn->buf->b_count, bn->ndNHeight, bnode_count);
+ }
+#endif
+
+ return bnr;
+}
+
+/*
+ * hfs_bnode_commit()
+ *
+ * Called to write a possibly dirty bnode back to disk.
+ */
+void hfs_bnode_commit(struct hfs_bnode *bn)
+{
+ struct NodeDescriptor *nd;
+
+ /* nothing to write back if the buffer never became valid */
+ if (!hfs_buffer_ok(bn->buf)) {
+ return;
+ }
+
+ nd = (struct NodeDescriptor *)hfs_buffer_data(bn->buf);
+
+ /* copy the in-core node header into the on-disk descriptor */
+ hfs_put_hl(bn->ndFLink, nd->ndFLink);
+ hfs_put_hl(bn->ndBLink, nd->ndBLink);
+ nd->ndType = bn->ndType;
+ nd->ndNHeight = bn->ndNHeight;
+ hfs_put_hs(bn->ndNRecs, nd->ndNRecs);
+ hfs_buffer_dirty(bn->buf);
+
+ /* increment write count */
+ hfs_mdb_dirty(bn->tree->sys_mdb);
+}
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
new file mode 100644
index 000000000..8f6d70c07
--- /dev/null
+++ b/fs/hfs/brec.c
@@ -0,0 +1,239 @@
+/*
+ * linux/fs/hfs/brec.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to access records in a btree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * first()
+ *
+ * returns HFS_BPATH_FIRST if elem->record == 1, 0 otherwise
+ */
+static inline int first(const struct hfs_belem *elem)
+{
+ /* flag path elements that refer to the first record of a node */
+ if (elem->record == 1) {
+ return HFS_BPATH_FIRST;
+ }
+ return 0;
+}
+
+/*
+ * overflow()
+ *
+ * return HFS_BPATH_OVERFLOW if the node has no room for an
+ * additional pointer record, 0 otherwise.
+ */
+static inline int overflow(const struct hfs_btree *tree,
+      const struct hfs_bnode *bnode)
+{
+ /* space needed for the current contents, the offset-table
+    entries, and one additional (rounded) key; there is some
+    algebra involved in getting this form */
+ int needed = bnode_end(bnode) +
+   (2 + bnode->ndNRecs) * sizeof(hfs_u16) +
+   ROUND(tree->bthKeyLen + 1);
+
+ if ((HFS_SECTOR_SIZE - sizeof(hfs_u32)) < needed) {
+ return HFS_BPATH_OVERFLOW;
+ }
+ return 0;
+}
+
+/*
+ * underflow()
+ *
+ * return HFS_BPATH_UNDERFLOW if the node will be less that 1/2 full
+ * upon removal of a pointer record, 0 otherwise.
+ */
+static inline int underflow(const struct hfs_btree *tree,
+       const struct hfs_bnode *bnode)
+{
+ /* bytes currently in use: record data plus offset-table entries */
+ int used = bnode->ndNRecs * sizeof(hfs_u16) +
+   bnode_offset(bnode, bnode->ndNRecs);
+
+ /* underflow if removal would leave the node less than half full */
+ if (used < (HFS_SECTOR_SIZE - sizeof(struct NodeDescriptor))/2) {
+ return HFS_BPATH_UNDERFLOW;
+ }
+ return 0;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_brec_next()
+ *
+ * Description:
+ * Obtain access to a child of an internal node in a B-tree.
+ * Input Variable(s):
+ * struct hfs_brec *brec: pointer to the (struct hfs_brec) to
+ * add an element to.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_belem *: pointer to the new path element or NULL
+ * Preconditions:
+ * 'brec' points to a "valid" (struct hfs_brec), the last element of
+ * which corresponds to a record in a bnode of type ndIndxNode and the
+ * 'record' field indicates the index record for the desired child.
+ * Postconditions:
+ * If the call to hfs_bnode_find() fails then 'brec' is released
+ * and a NULL is returned.
+ * Otherwise:
+ * Any ancestors in 'brec' that are not needed (as determined by the
+ * 'keep_flags' field of 'brec) are released from 'brec'.
+ * A new element is added to 'brec' corresponding to the desired
+ * child.
+ * The child is obtained with the same 'lock_type' field as its
+ * parent.
+ * The 'record' field is initialized to the last record.
+ * A pointer to the new path element is returned.
+ */
+struct hfs_belem *hfs_brec_next(struct hfs_brec *brec)
+{
+ struct hfs_belem *elem = brec->bottom;
+ hfs_u32 node;
+ int lock_type;
+
+ /* Classify the current bottom element, then release any
+    ancestors that 'keep_flags' says are no longer needed. */
+ elem->flags = first(elem) |
+ overflow(brec->tree, elem->bnr.bn) |
+ underflow(brec->tree, elem->bnr.bn);
+ if (!(brec->keep_flags & elem->flags)) {
+ hfs_brec_relse(brec, brec->bottom-1);
+ } else if ((brec->bottom-2 >= brec->top) &&
+    !(elem->flags & (elem-1)->flags)) {
+ hfs_brec_relse(brec, brec->bottom-2);
+ }
+
+ /* the child node number is stored in the current index record */
+ node = hfs_get_hl(belem_record(elem));
+ lock_type = elem->bnr.lock_type;
+
+ /* a zero child pointer or a cycle in the path means the tree
+    structure is corrupt.  (Fixed: the warning used to name
+    "hfs_bfind" instead of this function.) */
+ if (!node || hfs_bnode_in_brec(node, brec)) {
+ hfs_warn("hfs_brec_next: corrupt btree\n");
+ hfs_brec_relse(brec, NULL);
+ return NULL;
+ }
+
+ ++elem;
+ ++brec->bottom;
+
+ /* descend, taking the child with the same lock type as its parent */
+ elem->bnr = hfs_bnode_find(brec->tree, node, lock_type);
+ if (!elem->bnr.bn) {
+ hfs_brec_relse(brec, NULL);
+ return NULL;
+ }
+ /* start from the last record of the child */
+ elem->record = elem->bnr.bn->ndNRecs;
+
+ return elem;
+}
+
+/*
+ * hfs_brec_lock()
+ *
+ * Description:
+ * This function obtains HFS_LOCK_WRITE access to the bnode
+ * containing this hfs_brec. All descendents in the path from this
+ * record to the leaf are given HFS_LOCK_WRITE access and all
+ * ancestors in the path from the root to here are released.
+ * Input Variable(s):
+ * struct hfs_brec *brec: pointer to the brec to obtain
+ * HFS_LOCK_WRITE access to some of the nodes of.
+ * struct hfs_belem *elem: the first node to lock or NULL for all
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'brec' points to a "valid" (struct hfs_brec)
+ * Postconditions:
+ * All nodes between the indicated node and the beginning of the path
+ * are released. hfs_bnode_lock() is called in turn on each node
+ * from the indicated node to the leaf node of the path, with a
+ * lock_type argument of HFS_LOCK_WRITE. If one of those calls
+ * results in deadlock, then this function will never return.
+ */
+void hfs_brec_lock(struct hfs_brec *brec, struct hfs_belem *elem)
+{
+ struct hfs_belem *e;
+
+ /* default to the whole path; otherwise release the ancestors
+    above the requested element */
+ if (!elem) {
+ elem = brec->top;
+ } else if (elem > brec->top) {
+ hfs_brec_relse(brec, elem - 1);
+ }
+
+ /* write-lock everything from 'elem' down to the leaf */
+ for (e = elem; e <= brec->bottom; ++e) {
+ hfs_bnode_lock(&e->bnr, HFS_LOCK_WRITE);
+ }
+}
+
+/*
+ * hfs_brec_init()
+ *
+ * Description:
+ * Obtain access to the root node of a B-tree.
+ * Note that this first must obtain access to the header node.
+ * Input Variable(s):
+ * struct hfs_brec *brec: pointer to the (struct hfs_brec) to
+ * initialize
+ * struct hfs_btree *btree: pointer to the (struct hfs_btree)
+ * int lock_type: the type of access to get to the nodes.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * struct hfs_belem *: pointer to the root path element or NULL
+ * Preconditions:
+ * 'brec' points to a (struct hfs_brec).
+ * 'tree' points to a valid (struct hfs_btree).
+ * Postconditions:
+ * If the two calls to brec_bnode_find() succeed then the return value
+ * points to a (struct hfs_belem) which corresponds to the root node
+ * of 'brec->tree'.
+ * Both the root and header nodes are obtained with the type of lock
+ * given by (flags & HFS_LOCK_MASK).
+ * The fields 'record' field of the root is set to its last record.
+ * If the header node is not needed to complete the appropriate
+ * operation (as determined by the 'keep_flags' field of 'brec') then
+ * it is released before this function returns.
+ * If either call to brec_bnode_find() fails, NULL is returned and the
+ * (struct hfs_brec) pointed to by 'brec' is invalid.
+ */
+struct hfs_belem *hfs_brec_init(struct hfs_brec *brec, struct hfs_btree *tree,
+    int flags)
+{
+ /* path slot 0 is the header node, slot 1 the root */
+ struct hfs_belem *head = &brec->elem[0];
+ struct hfs_belem *root = &brec->elem[1];
+ int lock_type = flags & HFS_LOCK_MASK;
+
+ brec->tree = tree;
+
+ head->bnr = hfs_bnode_find(tree, 0, lock_type);
+ if (!head->bnr.bn) {
+ return NULL;
+ }
+
+ root->bnr = hfs_bnode_find(tree, tree->bthRoot, lock_type);
+ if (!root->bnr.bn) {
+ /* undo the successful header lookup before failing */
+ hfs_bnode_relse(&head->bnr);
+ return NULL;
+ }
+
+ /* begin searching from the root's last record */
+ root->record = root->bnr.bn->ndNRecs;
+
+ brec->top = head;
+ brec->bottom = root;
+
+ brec->keep_flags = flags & HFS_BPATH_MASK;
+
+ /* HFS_BPATH_FIRST not applicable for root */
+ /* and HFS_BPATH_UNDERFLOW is different */
+ root->flags = overflow(tree, root->bnr.bn);
+ if (root->record < 3) {
+ root->flags |= HFS_BPATH_UNDERFLOW;
+ }
+
+ /* drop the header node early if this operation won't need it */
+ if (!(root->flags & brec->keep_flags)) {
+ hfs_brec_relse(brec, head);
+ }
+
+ return root;
+}
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
new file mode 100644
index 000000000..5aa735388
--- /dev/null
+++ b/fs/hfs/btree.c
@@ -0,0 +1,316 @@
+/*
+ * linux/fs/hfs/btree.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to manipulate the B-tree structure.
+ * The catalog and extents files are both B-trees.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs_btree.h"
+
+/*================ File-local functions ================*/
+
+/*
+ * hfs_bnode_ditch()
+ *
+ * Description:
+ * This function deletes an entire linked list of bnodes, so it
+ * does not need to keep the linked list consistent as
+ * hfs_bnode_delete() does.
+ * Called by hfs_btree_init() for error cleanup and by hfs_btree_free().
+ * Input Variable(s):
+ * struct hfs_bnode *bn: pointer to the first (struct hfs_bnode) in
+ * the linked list to be deleted.
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'bn' is NULL or points to a "valid" (struct hfs_bnode) with a 'prev'
+ * field of NULL.
+ * Postconditions:
+ * 'bn' and all (struct hfs_bnode)s in the chain of 'next' pointers
+ * are deleted, freeing the associated memory and hfs_buffer_put()ing
+ * the associated buffer.
+ */
+static void hfs_bnode_ditch(struct hfs_bnode *bn)
+{
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ extern int bnode_count;
+#endif
+
+ /* walk the 'next' chain, releasing each node's buffer and
+    freeing every node except the embedded header (node 0) */
+ while (bn) {
+ struct hfs_bnode *next = bn->next;
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ hfs_warn("deleting node %d from tree %d with count %d\n",
+ bn->node, (int)ntohl(bn->tree->entry.cnid), bn->count);
+ --bnode_count;
+#endif
+ hfs_buffer_put(bn->buf); /* safe: checks for NULL argument */
+
+ /* the header node lives inside the btree struct; don't free it */
+ if (bn->node) {
+ HFS_DELETE(bn);
+ }
+ bn = next;
+ }
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_btree_free()
+ *
+ * Description:
+ * This function frees a (struct hfs_btree) obtained from hfs_btree_init().
+ * Called by hfs_put_super().
+ * Input Variable(s):
+ * struct hfs_btree *bt: pointer to the (struct hfs_btree) to free
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'bt' is NULL or points to a "valid" (struct hfs_btree)
+ * Postconditions:
+ * If 'bt' points to a "valid" (struct hfs_btree) then all (struct
+ * hfs_bnode)s associated with 'bt' are freed by calling
+ * hfs_bnode_ditch() and the memory associated with the (struct
+ * hfs_btree) is freed.
+ * If 'bt' is NULL or not "valid" an error is printed and nothing
+ * is changed.
+ */
+void hfs_btree_free(struct hfs_btree *bt)
+{
+ int i;
+
+ /* NULL is tolerated silently; a bad magic only earns a warning */
+ if (!bt) {
+ return;
+ }
+ if (bt->magic != HFS_BTREE_MAGIC) {
+ hfs_warn("hfs_btree_free: corrupted hfs_btree.\n");
+ return;
+ }
+
+ hfs_extent_free(&bt->entry.u.file.data_fork);
+
+ /* throw away every cached bnode, bucket by bucket */
+ for (i = 0; i < HFS_CACHELEN; ++i) {
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ hfs_warn("deleting nodes from bucket %d:\n", i);
+#endif
+ hfs_bnode_ditch(bt->cache[i]);
+ }
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ hfs_warn("deleting header and bitmap nodes\n");
+#endif
+ hfs_bnode_ditch(&bt->head);
+
+#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
+ hfs_warn("deleting root node\n");
+#endif
+ hfs_bnode_ditch(bt->root);
+
+ HFS_DELETE(bt);
+}
+
+/*
+ * hfs_btree_init()
+ *
+ * Description:
+ * Given some vital information from the MDB (HFS superblock),
+ * initializes the fields of a (struct hfs_btree).
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: pointer to the MDB
+ * ino_t cnid: the CNID (HFS_CAT_CNID or HFS_EXT_CNID) of the B-tree
+ * hfs_u32 tsize: the size, in bytes, of the B-tree
+ * hfs_u32 csize: the size, in bytes, of the clump size for the B-tree
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * (struct hfs_btree *): pointer to the initialized hfs_btree on success,
+ * or NULL on failure
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb)
+ * Postconditions:
+ * Assuming the inputs are what they claim to be, no errors occur
+ * reading from disk, and no inconsistencies are noticed in the data
+ * read from disk, the return value is a pointer to a "valid"
+ * (struct hfs_btree). If there are errors reading from disk or
+ * inconsistencies are noticed in the data read from disk, then and
+ * all resources that were allocated are released and NULL is
+ * returned. If the inputs are not what they claim to be or if they
+ * are unnoticed inconsistencies in the data read from disk then the
+ * returned hfs_btree is probably going to lead to errors when it is
+ * used in a non-trivial way.
+ */
+struct hfs_btree * hfs_btree_init(struct hfs_mdb *mdb, ino_t cnid,
+      hfs_byte_t ext[12],
+      hfs_u32 tsize, hfs_u32 csize)
+{
+ struct hfs_btree * bt;
+ struct BTHdrRec * th;
+ struct hfs_bnode * tmp;
+ unsigned int next;
+#if defined(DEBUG_HEADER) || defined(DEBUG_ALL)
+ unsigned char *p, *q;
+#endif
+
+ if (!mdb || !ext || !HFS_NEW(bt)) {
+ goto bail3;
+ }
+
+ /* set up the pseudo catalog entry describing the B-tree file */
+ bt->magic = HFS_BTREE_MAGIC;
+ bt->sys_mdb = mdb->sys_mdb;
+ bt->reserved = 0;
+ bt->lock = 0;
+ bt->wait = NULL;
+ bt->dirt = 0;
+ memset(bt->cache, 0, sizeof(bt->cache));
+ bt->entry.mdb = mdb;
+ bt->entry.cnid = cnid;
+ bt->entry.type = HFS_CDR_FIL;
+ bt->entry.u.file.magic = HFS_FILE_MAGIC;
+ bt->entry.u.file.clumpablks = (csize / mdb->alloc_blksz)
+      >> HFS_SECTOR_SIZE_BITS;
+ bt->entry.u.file.data_fork.entry = &bt->entry;
+ bt->entry.u.file.data_fork.lsize = tsize;
+ bt->entry.u.file.data_fork.psize = tsize >> HFS_SECTOR_SIZE_BITS;
+ bt->entry.u.file.data_fork.fork = HFS_FK_DATA;
+ hfs_extent_in(&bt->entry.u.file.data_fork, ext);
+
+ /* read the header node; the BTHdrRec follows its NodeDescriptor */
+ hfs_bnode_read(&bt->head, bt, 0, HFS_STICKY);
+ if (!hfs_buffer_ok(bt->head.buf)) {
+ goto bail2;
+ }
+ th = (struct BTHdrRec *)((char *)hfs_buffer_data(bt->head.buf) +
+     sizeof(struct NodeDescriptor));
+
+ /* read in the bitmap nodes (if any) */
+ tmp = &bt->head;
+ while ((next = tmp->ndFLink)) {
+ if (!HFS_NEW(tmp->next)) {
+ goto bail2;
+ }
+ hfs_bnode_read(tmp->next, bt, next, HFS_STICKY);
+ if (!hfs_buffer_ok(tmp->next->buf)) {
+ goto bail2;
+ }
+ tmp->next->prev = tmp;
+ tmp = tmp->next;
+ }
+
+ if (hfs_get_ns(th->bthNodeSize) != htons(HFS_SECTOR_SIZE)) {
+ hfs_warn("hfs_btree_init: bthNodeSize!=512 not supported\n");
+ goto bail2;
+ }
+
+ /* pick the key-comparison function by tree identity */
+ if (cnid == htonl(HFS_CAT_CNID)) {
+ bt->compare = (hfs_cmpfn)hfs_cat_compare;
+ } else if (cnid == htonl(HFS_EXT_CNID)) {
+ bt->compare = (hfs_cmpfn)hfs_ext_compare;
+ } else {
+ goto bail2;
+ }
+ bt->bthDepth = hfs_get_hs(th->bthDepth);
+ bt->bthRoot = hfs_get_hl(th->bthRoot);
+ bt->bthNRecs = hfs_get_hl(th->bthNRecs);
+ bt->bthFNode = hfs_get_hl(th->bthFNode);
+ bt->bthLNode = hfs_get_hl(th->bthLNode);
+ bt->bthNNodes = hfs_get_hl(th->bthNNodes);
+ bt->bthFree = hfs_get_hl(th->bthFree);
+ bt->bthKeyLen = hfs_get_hs(th->bthKeyLen);
+
+#if defined(DEBUG_HEADER) || defined(DEBUG_ALL)
+ hfs_warn("bthDepth %d\n", bt->bthDepth);
+ hfs_warn("bthRoot %d\n", bt->bthRoot);
+ hfs_warn("bthNRecs %d\n", bt->bthNRecs);
+ hfs_warn("bthFNode %d\n", bt->bthFNode);
+ hfs_warn("bthLNode %d\n", bt->bthLNode);
+ hfs_warn("bthKeyLen %d\n", bt->bthKeyLen);
+ hfs_warn("bthNNodes %d\n", bt->bthNNodes);
+ hfs_warn("bthFree %d\n", bt->bthFree);
+ p = (unsigned char *)hfs_buffer_data(bt->head.buf);
+ q = p + HFS_SECTOR_SIZE;
+ /* dump the header node 16 bytes per line.  Indexed access
+    replaces the old sixteen '*p++'s in a single call: multiple
+    unsequenced modifications of 'p' in one argument list are
+    undefined behavior and printed the bytes in an unspecified
+    order. */
+ while (p < q) {
+ hfs_warn("%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+ p += 16;
+ }
+#endif
+
+ /* Read in the root if it exists.
+    The header always exists, but the root exists only if the
+    tree is non-empty */
+ if (bt->bthDepth && bt->bthRoot) {
+ if (!HFS_NEW(bt->root)) {
+ goto bail2;
+ }
+ hfs_bnode_read(bt->root, bt, bt->bthRoot, HFS_STICKY);
+ if (!hfs_buffer_ok(bt->root->buf)) {
+ goto bail1;
+ }
+ } else {
+ bt->root = NULL;
+ }
+
+ return bt;
+
+ bail1:
+ hfs_bnode_ditch(bt->root);
+ bail2:
+ hfs_bnode_ditch(&bt->head);
+ HFS_DELETE(bt);
+ bail3:
+ return NULL;
+}
+
+/*
+ * hfs_btree_commit()
+ *
+ * Called to write a possibly dirty btree back to disk.
+ */
+void hfs_btree_commit(struct hfs_btree *bt, hfs_byte_t ext[12], hfs_lword_t size)
+{
+ struct BTHdrRec *th;
+
+ /* only dirty trees need writing back */
+ if (!bt->dirt) {
+ return;
+ }
+
+ th = (struct BTHdrRec *)((char *)hfs_buffer_data(bt->head.buf) +
+     sizeof(struct NodeDescriptor));
+
+ /* copy the in-core header fields back to the header record */
+ hfs_put_hs(bt->bthDepth, th->bthDepth);
+ hfs_put_hl(bt->bthRoot, th->bthRoot);
+ hfs_put_hl(bt->bthNRecs, th->bthNRecs);
+ hfs_put_hl(bt->bthFNode, th->bthFNode);
+ hfs_put_hl(bt->bthLNode, th->bthLNode);
+ hfs_put_hl(bt->bthNNodes, th->bthNNodes);
+ hfs_put_hl(bt->bthFree, th->bthFree);
+ hfs_buffer_dirty(bt->head.buf);
+
+ /*
+  * Commit the bnodes which are not cached.
+  * The map nodes don't need to be committed here because
+  * they are committed every time they are changed.
+  */
+ hfs_bnode_commit(&bt->head);
+ if (bt->root) {
+ hfs_bnode_commit(bt->root);
+ }
+
+ /* report the file size and extents back to the caller's MDB */
+ hfs_put_hl(bt->bthNNodes << HFS_SECTOR_SIZE_BITS, size);
+ hfs_extent_out(&bt->entry.u.file.data_fork, ext);
+ /* hfs_buffer_dirty(mdb->buf); (Done by caller) */
+
+ bt->dirt = 0;
+}
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
new file mode 100644
index 000000000..4055012f1
--- /dev/null
+++ b/fs/hfs/catalog.c
@@ -0,0 +1,1674 @@
+/*
+ * linux/fs/hfs/catalog.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the functions related to the catalog B-tree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * Cache code shamelessly stolen from
+ * linux/fs/inode.c Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures by calling
+ * memset(&foo, 0, sizeof(foo)). This produces the desired behavior
+ * only due to the non-ANSI assumption that the machine representation
+ * of NULL pointers and of integer zeros is all-bits-zero.
+ */
+
+#include "hfs.h"
+
+/*================ Variable-like macros ================*/
+
+#define NUM_FREE_ENTRIES 8
+
+/* Number of hash table slots */
+#define CCACHE_NR 128
+
+/* Max number of entries in memory */
+#define CCACHE_MAX 1024
+
+/* Number of entries to fit in a single page on an i386 */
+#define CCACHE_INC ((PAGE_SIZE - sizeof(void *))/sizeof(struct hfs_cat_entry))
+
+/*================ File-local data types ================*/
+
+/* The catalog record for a file */
+typedef struct {
+ hfs_byte_t Flags; /* Flags such as read-only */
+ hfs_byte_t Typ; /* file version number = 0 */
+ hfs_finfo_t UsrWds; /* data used by the Finder */
+ hfs_lword_t FlNum; /* The CNID */
+ hfs_word_t StBlk; /* obsolete */
+ hfs_lword_t LgLen; /* The logical EOF of the data fork*/
+ hfs_lword_t PyLen; /* The physical EOF of the data fork */
+ hfs_word_t RStBlk; /* obsolete */
+ hfs_lword_t RLgLen; /* The logical EOF of the rsrc fork */
+ hfs_lword_t RPyLen; /* The physical EOF of the rsrc fork */
+ hfs_lword_t CrDat; /* The creation date */
+ hfs_lword_t MdDat; /* The modified date */
+ hfs_lword_t BkDat; /* The last backup date */
+ hfs_fxinfo_t FndrInfo; /* more data for the Finder */
+ hfs_word_t ClpSize; /* number of bytes to allocate
+ when extending files */
+ hfs_byte_t ExtRec[12]; /* first extent record
+ for the data fork */
+ hfs_byte_t RExtRec[12]; /* first extent record
+ for the resource fork */
+ hfs_lword_t Resrv; /* reserved by Apple */
+} FIL_REC;
+
+/* the catalog record for a directory */
+typedef struct {
+ hfs_word_t Flags; /* flags */
+ hfs_word_t Val; /* Valence: number of files and
+ dirs in the directory */
+ hfs_lword_t DirID; /* The CNID */
+ hfs_lword_t CrDat; /* The creation date */
+ hfs_lword_t MdDat; /* The modification date */
+ hfs_lword_t BkDat; /* The last backup date */
+ hfs_dinfo_t UsrInfo; /* data used by the Finder */
+ hfs_dxinfo_t FndrInfo; /* more data used by Finder */
+ hfs_byte_t Resrv[16]; /* reserved by Apple */
+} DIR_REC;
+
+/* the catalog record for a thread */
+typedef struct {
+ hfs_byte_t Reserv[8]; /* reserved by Apple */
+ hfs_lword_t ParID; /* CNID of parent directory */
+ struct hfs_name CName; /* The name of this entry */
+} THD_REC;
+
+/* A catalog tree record */
+struct hfs_cat_rec {
+ hfs_byte_t cdrType; /* The type of entry */
+ hfs_byte_t cdrResrv2; /* padding */
+ union {
+ FIL_REC fil;
+ DIR_REC dir;
+ THD_REC thd;
+ } u;
+};
+
+
+struct allocation_unit {
+ struct allocation_unit *next;
+ struct hfs_cat_entry entries[CCACHE_INC];
+};
+
+/*================ File-local variables ================*/
+
+static LIST_HEAD(entry_in_use);
+static LIST_HEAD(entry_dirty); /* all the dirty entries */
+static LIST_HEAD(entry_unused);
+static struct list_head hash_table[CCACHE_NR];
+
+spinlock_t entry_lock = SPIN_LOCK_UNLOCKED;
+
+static struct {
+ int nr_entries;
+ int nr_free_entries;
+} entries_stat;
+
+static struct allocation_unit *allocation = NULL;
+
+/*================ File-local functions ================*/
+
+/*
+ * brec_to_id
+ *
+ * Get the CNID from a brec
+ */
+static inline hfs_u32 brec_to_id(struct hfs_brec *brec)
+{
+	const struct hfs_cat_rec *rec = brec->data;
+
+	/* Files keep their CNID in FlNum, directories in DirID. */
+	if (rec->cdrType == HFS_CDR_FIL) {
+		return hfs_get_nl(rec->u.fil.FlNum);
+	}
+	return hfs_get_nl(rec->u.dir.DirID);
+}
+
+/*
+ * hashfn()
+ *
+ * hash an (struct mdb *) and a (struct hfs_cat_key *) to an integer.
+ */
+static inline unsigned int hashfn(const struct hfs_mdb *mdb,
+				  const struct hfs_cat_key *key)
+{
+	/* Fold together byte 3 of the volume creation date, byte 3 of
+	 * the parent CNID and the hash of the name, then reduce to a
+	 * hash-table slot index. */
+	unsigned int h;
+
+	h = ((const unsigned char *)(&mdb->create_date))[3];
+	h ^= key->ParID[3];
+	h ^= hfs_strhash(&key->CName);
+	return h % CCACHE_NR;
+}
+
+/*
+ * hash()
+ *
+ * hash an (struct mdb *) and a (struct hfs_cat_key *)
+ * to a pointer to a slot in the hash table.
+ */
+static inline struct list_head *hash(struct hfs_mdb *mdb,
+				     const struct hfs_cat_key *key)
+{
+	/* Map an (mdb, key) pair to its slot in the hash table. */
+	return &hash_table[hashfn(mdb, key)];
+}
+
+static inline void insert_hash(struct hfs_cat_entry *ent)
+{
+	/* Chain the entry into the bucket chosen by its mdb and key. */
+	list_add(&ent->hash, hash(ent->mdb, &ent->key));
+}
+
+static inline void remove_hash(struct hfs_cat_entry *ent)
+{
+	/* Unchain, then leave the link self-referencing so a later
+	 * list_empty() check sees the entry as unhashed. */
+	list_del(&ent->hash);
+	INIT_LIST_HEAD(&ent->hash);
+}
+
+/*
+ * wait_on_entry()
+ *
+ * Sleep until a locked entry is unlocked.
+ */
+static inline void wait_on_entry(struct hfs_cat_entry * ent)
+{
+	/* Block until whoever holds HFS_LOCK drops it; re-check after
+	 * every wakeup since another task may have taken it again. */
+	for (;;) {
+		if (!(ent->state & HFS_LOCK)) {
+			return;
+		}
+		hfs_sleep_on(&ent->wait);
+	}
+}
+
+/*
+ * lock_entry()
+ *
+ * Obtain an exclusive lock on an entry.
+ */
+static void lock_entry(struct hfs_cat_entry * ent)
+{
+	/* Wait out the current holder (if any), then take HFS_LOCK
+	 * under the cache spinlock. */
+	wait_on_entry(ent);
+	spin_lock(&entry_lock);
+	ent->state |= HFS_LOCK;
+	spin_unlock(&entry_lock);
+}
+
+/*
+ * unlock_entry()
+ *
+ * Relinquish an exclusive lock on an entry.
+ */
+static void unlock_entry(struct hfs_cat_entry * ent)
+{
+	/* Drop HFS_LOCK under the spinlock, then wake any waiters. */
+	spin_lock(&entry_lock);
+	ent->state &= ~HFS_LOCK;
+	spin_unlock(&entry_lock);
+	hfs_wake_up(&ent->wait);
+}
+
+/*
+ * clear_entry()
+ *
+ * Zero all the fields of an entry and place it on the free list.
+ */
+static void clear_entry(struct hfs_cat_entry * entry)
+{
+	wait_on_entry(entry);
+	/* zero all but the wait queue.  NOTE(review): the memset starts
+	   AT 'wait' and runs to the end of the struct, so 'wait' itself
+	   is zeroed as well and only the fields laid out *before* it
+	   survive -- the code and this historic comment disagree;
+	   confirm against struct hfs_cat_entry's layout (a zeroed 2.2
+	   wait queue head is an empty queue, so this may be intended). */
+	memset(&entry->wait, 0,
+	       sizeof(*entry) - offsetof(struct hfs_cat_entry, wait));
+	/* re-establish valid (self-referencing) list links */
+	INIT_LIST_HEAD(&entry->hash);
+	INIT_LIST_HEAD(&entry->list);
+	INIT_LIST_HEAD(&entry->dirty);
+}
+
+/* put entry on mdb dirty list. this only does it if it's on the hash
+ * list. we also add it to the global dirty list as well. */
+void hfs_cat_mark_dirty(struct hfs_cat_entry *entry)
+{
+	struct hfs_mdb *mdb = entry->mdb;
+
+	spin_lock(&entry_lock);
+	if (!(entry->state & HFS_DIRTY)) {
+		entry->state |= HFS_DIRTY;
+
+		/* Only add valid (ie hashed) entries to the
+		 * dirty list */
+		if (!list_empty(&entry->hash)) {
+			/* move from the in-use list to the mdb's
+			   per-volume dirty list ... */
+			list_del(&entry->list);
+			list_add(&entry->list, &mdb->entry_dirty);
+			/* ... and chain it onto the global dirty list
+			   as well (via the separate 'dirty' link) */
+			INIT_LIST_HEAD(&entry->dirty);
+			list_add(&entry->dirty, &entry_dirty);
+		}
+	}
+	spin_unlock(&entry_lock);
+}
+
+/* prune all entries */
+static void dispose_list(struct list_head *head)
+{
+	struct list_head *next;
+
+	/* Prune every (already unhashed) entry on 'head'.  The list
+	 * links live inside the entries themselves, so fetch the
+	 * successor before hfs_cat_prune() can recycle the node.
+	 * (The old 'count' local was incremented but never read, so it
+	 * has been removed.) */
+	next = head->next;
+	for (;;) {
+		struct list_head * tmp = next;
+
+		next = next->next;
+		if (tmp == head)
+			break;
+		hfs_cat_prune(list_entry(tmp, struct hfs_cat_entry, list));
+	}
+}
+
+/*
+ * try_to_free_entries works by getting the underlying
+ * cache system to release entries. it gets called with the entry lock
+ * held.
+ *
+ * count can be up to 2 due to both a resource and data fork being
+ * listed. we can unuse dirty entries as well.
+ */
+/* An entry is freeable when it has at most 2 references (a file may be
+ * held by both a data-fork and a resource-fork user) and its state has
+ * no bits beyond -- presumably -- HFS_DIRTY set (i.e. not locked or
+ * deleted; depends on the HFS_* flag values, TODO confirm in hfs.h). */
+#define CAN_UNUSE(tmp) (((tmp)->count < 3) && ((tmp)->state <= HFS_DIRTY))
+static int try_to_free_entries(const int goal)
+{
+	struct list_head *tmp, *head = &entry_in_use;
+	LIST_HEAD(freeable);
+	int found = 0, depth = goal << 1;
+
+	/* try freeing from entry_in_use: walk from the tail, examining
+	 * at most 2*goal entries; unfreeable ones rotate to the head. */
+	while ((tmp = head->prev) != head && depth--) {
+		struct hfs_cat_entry *entry =
+			list_entry(tmp, struct hfs_cat_entry, list);
+		list_del(tmp);
+		if (CAN_UNUSE(entry)) {
+			/* unhash and collect on the local freeable list */
+			list_del(&entry->hash);
+			INIT_LIST_HEAD(&entry->hash);
+			list_add(tmp, &freeable);
+			if (++found < goal)
+				continue;
+			break;
+		}
+		list_add(tmp, head);
+	}
+
+	if (found < goal) { /* try freeing from global dirty list */
+		head = &entry_dirty;
+		depth = goal << 1;
+		while ((tmp = head->prev) != head && depth--) {
+			/* note: this list is threaded through 'dirty',
+			 * not 'list' */
+			struct hfs_cat_entry *entry =
+				list_entry(tmp, struct hfs_cat_entry, dirty);
+			list_del(tmp);
+			if (CAN_UNUSE(entry)) {
+				list_del(&entry->hash);
+				INIT_LIST_HEAD(&entry->hash);
+				list_del(&entry->list);
+				INIT_LIST_HEAD(&entry->list);
+				list_add(&entry->list, &freeable);
+				if (++found < goal)
+					continue;
+				break;
+			}
+			list_add(tmp, head);
+		}
+	}
+
+	if (found) {
+		/* dispose_list() may sleep, so drop the spinlock around
+		 * it; caller expects the lock held on return. */
+		spin_unlock(&entry_lock);
+		dispose_list(&freeable);
+		spin_lock(&entry_lock);
+	}
+
+	return found;
+}
+
+/* init_once */
+static inline void init_once(struct hfs_cat_entry *entry)
+{
+ init_waitqueue(&entry->wait);
+ INIT_LIST_HEAD(&entry->hash);
+ INIT_LIST_HEAD(&entry->list);
+ INIT_LIST_HEAD(&entry->dirty);
+}
+
+/*
+ * grow_entries()
+ *
+ * Try to allocate more entries, adding them to the free list. this returns
+ * with the spinlock held if successful
+ */
+static struct hfs_cat_entry *grow_entries(struct hfs_mdb *mdb)
+{
+	struct allocation_unit *tmp;
+	struct hfs_cat_entry * entry;
+	int i;
+
+	/* called with entry_lock held; drop it for the allocation */
+	spin_unlock(&entry_lock);
+	if ((entries_stat.nr_entries < CCACHE_MAX) &&
+	    HFS_NEW(tmp)) {
+		spin_lock(&entry_lock);
+		memset(tmp, 0, sizeof(*tmp));
+		tmp->next = allocation;
+		allocation = tmp;
+		/* entries[1..] go onto the free list; entries[0] is
+		   reserved for the caller and returned below. */
+		entry = tmp->entries;
+		for (i = 1; i < CCACHE_INC; i++) {
+			entry++;
+			init_once(entry);
+			list_add(&entry->list, &entry_unused);
+		}
+		init_once(tmp->entries);
+
+		entries_stat.nr_entries += CCACHE_INC;
+		entries_stat.nr_free_entries += CCACHE_INC - 1;
+		/* success: return with the spinlock held */
+		return tmp->entries;
+	}
+
+	/* allocation failed. do some pruning and try again */
+	spin_lock(&entry_lock);
+	try_to_free_entries(entries_stat.nr_entries >> 2);
+	{
+		struct list_head *tmp = entry_unused.next;
+		if (tmp != &entry_unused) {
+			/* pruning freed something; hand one back
+			   (spinlock still held) */
+			entries_stat.nr_free_entries--;
+			list_del(tmp);
+			entry = list_entry(tmp, struct hfs_cat_entry, list);
+			return entry;
+		}
+	}
+	/* total failure: return NULL with the spinlock released */
+	spin_unlock(&entry_lock);
+
+	return NULL;
+}
+
+/*
+ * __read_entry()
+ *
+ * Convert a (struct hfs_cat_rec) to a (struct hfs_cat_entry).
+ */
+static void __read_entry(struct hfs_cat_entry *entry,
+			 const struct hfs_cat_rec *cat)
+{
+	entry->type = cat->cdrType;
+
+	if (cat->cdrType == HFS_CDR_DIR) {
+		struct hfs_dir *dir = &entry->u.dir;
+
+		entry->cnid = hfs_get_nl(cat->u.dir.DirID);
+
+		dir->magic = HFS_DIR_MAGIC;
+		dir->flags = hfs_get_ns(cat->u.dir.Flags);
+		memcpy(&entry->info.dir.dinfo, &cat->u.dir.UsrInfo, 16);
+		memcpy(&entry->info.dir.dxinfo, &cat->u.dir.FndrInfo, 16);
+		entry->create_date = hfs_get_nl(cat->u.dir.CrDat);
+		entry->modify_date = hfs_get_nl(cat->u.dir.MdDat);
+		entry->backup_date = hfs_get_nl(cat->u.dir.BkDat);
+		/* valence is recounted later by count_dir_entries() */
+		dir->dirs = dir->files = 0;
+	} else if (cat->cdrType == HFS_CDR_FIL) {
+		struct hfs_file *fil = &entry->u.file;
+
+		entry->cnid = hfs_get_nl(cat->u.fil.FlNum);
+
+		fil->magic = HFS_FILE_MAGIC;
+
+		/* data fork: on-disk physical length is in bytes,
+		   in-core psize is kept in sectors */
+		fil->data_fork.fork = HFS_FK_DATA;
+		fil->data_fork.entry = entry;
+		fil->data_fork.lsize = hfs_get_hl(cat->u.fil.LgLen);
+		fil->data_fork.psize = hfs_get_hl(cat->u.fil.PyLen) >>
+						 HFS_SECTOR_SIZE_BITS;
+		hfs_extent_in(&fil->data_fork, cat->u.fil.ExtRec);
+
+		/* resource fork: same conversions as the data fork */
+		fil->rsrc_fork.fork = HFS_FK_RSRC;
+		fil->rsrc_fork.entry = entry;
+		fil->rsrc_fork.lsize = hfs_get_hl(cat->u.fil.RLgLen);
+		fil->rsrc_fork.psize = hfs_get_hl(cat->u.fil.RPyLen) >>
+						 HFS_SECTOR_SIZE_BITS;
+		hfs_extent_in(&fil->rsrc_fork, cat->u.fil.RExtRec);
+
+		memcpy(&entry->info.file.finfo, &cat->u.fil.UsrWds, 16);
+		memcpy(&entry->info.file.fxinfo, &cat->u.fil.FndrInfo, 16);
+
+		entry->create_date = hfs_get_nl(cat->u.fil.CrDat);
+		entry->modify_date = hfs_get_nl(cat->u.fil.MdDat);
+		entry->backup_date = hfs_get_nl(cat->u.fil.BkDat);
+		/* clump size in allocation blocks -- presumably
+		   alloc_blksz is in sectors here (bytes / sectors,
+		   then / bytes-per-sector); __write_entry() applies the
+		   inverse conversion.  TODO confirm units in hfs.h. */
+		fil->clumpablks = (hfs_get_hs(cat->u.fil.ClpSize)
+				   / entry->mdb->alloc_blksz)
+						>> HFS_SECTOR_SIZE_BITS;
+		fil->flags = cat->u.fil.Flags;
+	} else {
+		hfs_warn("hfs_fs: entry is neither file nor directory!\n");
+	}
+}
+
+/*
+ * count_dir_entries()
+ *
+ * Count the number of files and directories in a given directory.
+ */
+static inline void count_dir_entries(struct hfs_cat_entry *entry,
+				     struct hfs_brec *brec)
+{
+	int error = 0;
+	hfs_u32 cnid;
+	hfs_u8 type;
+
+	/* Iterate the directory's records, tallying files vs. dirs
+	 * into the in-core entry. */
+	if (!hfs_cat_open(entry, brec)) {
+		while (!(error = hfs_cat_next(entry, brec, 1, &cnid, &type))) {
+			if (type == HFS_CDR_FIL) {
+				++entry->u.dir.files;
+			} else if (type == HFS_CDR_DIR) {
+				++entry->u.dir.dirs;
+			}
+		} /* -ENOENT is normal termination */
+	}
+	/* any other error invalidates the entry (cnid 0 flags failure
+	 * to the caller) */
+	if (error != -ENOENT) {
+		entry->cnid = 0;
+	}
+}
+
+/*
+ * read_entry()
+ *
+ * Convert a (struct hfs_brec) to a (struct hfs_cat_entry).
+ */
+static inline void read_entry(struct hfs_cat_entry *entry,
+			      struct hfs_brec *brec)
+{
+	int need_count;
+	struct hfs_cat_rec *rec = brec->data;
+
+	__read_entry(entry, rec);
+
+	/* a non-empty directory needs its valence recounted */
+	need_count = (rec->cdrType == HFS_CDR_DIR) && rec->u.dir.Val;
+
+	/* release the brec first: count_dir_entries() reuses it for
+	 * its own iteration */
+	hfs_brec_relse(brec, NULL);
+
+	if (need_count) {
+		count_dir_entries(entry, brec);
+	}
+}
+
+/*
+ * __write_entry()
+ *
+ * Convert a (struct hfs_cat_entry) to a (struct hfs_cat_rec).
+ */
+static void __write_entry(const struct hfs_cat_entry *entry,
+			  struct hfs_cat_rec *cat)
+{
+	if (entry->type == HFS_CDR_DIR) {
+		const struct hfs_dir *dir = &entry->u.dir;
+
+		hfs_put_ns(dir->flags, cat->u.dir.Flags);
+		/* valence = files + subdirectories */
+		hfs_put_hs(dir->dirs + dir->files, cat->u.dir.Val);
+		hfs_put_nl(entry->cnid, cat->u.dir.DirID);
+		hfs_put_nl(entry->create_date, cat->u.dir.CrDat);
+		hfs_put_nl(entry->modify_date, cat->u.dir.MdDat);
+		hfs_put_nl(entry->backup_date, cat->u.dir.BkDat);
+		memcpy(&cat->u.dir.UsrInfo, &entry->info.dir.dinfo, 16);
+		memcpy(&cat->u.dir.FndrInfo, &entry->info.dir.dxinfo, 16);
+	} else if (entry->type == HFS_CDR_FIL) {
+		const struct hfs_file *fil = &entry->u.file;
+
+		cat->u.fil.Flags = fil->flags;
+		hfs_put_nl(entry->cnid, cat->u.fil.FlNum);
+		memcpy(&cat->u.fil.UsrWds, &entry->info.file.finfo, 16);
+		/* in-core psize is in sectors; on-disk PyLen is bytes */
+		hfs_put_hl(fil->data_fork.lsize, cat->u.fil.LgLen);
+		hfs_put_hl(fil->data_fork.psize << HFS_SECTOR_SIZE_BITS,
+			   cat->u.fil.PyLen);
+		hfs_put_hl(fil->rsrc_fork.lsize, cat->u.fil.RLgLen);
+		hfs_put_hl(fil->rsrc_fork.psize << HFS_SECTOR_SIZE_BITS,
+			   cat->u.fil.RPyLen);
+		hfs_put_nl(entry->create_date, cat->u.fil.CrDat);
+		hfs_put_nl(entry->modify_date, cat->u.fil.MdDat);
+		hfs_put_nl(entry->backup_date, cat->u.fil.BkDat);
+		memcpy(&cat->u.fil.FndrInfo, &entry->info.file.fxinfo, 16);
+		/* inverse of the clumpablks conversion in __read_entry() */
+		hfs_put_hs((fil->clumpablks * entry->mdb->alloc_blksz)
+			   << HFS_SECTOR_SIZE_BITS, cat->u.fil.ClpSize);
+		hfs_extent_out(&fil->data_fork, cat->u.fil.ExtRec);
+		hfs_extent_out(&fil->rsrc_fork, cat->u.fil.RExtRec);
+	} else {
+		hfs_warn("__write_entry: invalid entry\n");
+	}
+}
+
+/*
+ * write_entry()
+ *
+ * Write a modified entry back to the catalog B-tree.
+ */
+static void write_entry(struct hfs_cat_entry * entry)
+{
+	struct hfs_brec brec;
+	int error;
+
+	/* deleted entries have nothing on disk to update */
+	if (!(entry->state & HFS_DELETED)) {
+		error = hfs_bfind(&brec, entry->mdb->cat_tree,
+				  HFS_BKEY(&entry->key), HFS_BFIND_WRITE);
+		if (!error) {
+			if ((entry->state & HFS_KEYDIRTY)) {
+				/* key may have changed case due to a rename */
+				entry->state &= ~HFS_KEYDIRTY;
+				if (brec.key->KeyLen != entry->key.KeyLen) {
+					/* a case-only rename can't change
+					   the key length */
+					hfs_warn("hfs_write_entry: key length "
+						 "changed!\n");
+					error = 1;
+				} else {
+					memcpy(brec.key, &entry->key,
+					       entry->key.KeyLen);
+				}
+			} else if (entry->cnid != brec_to_id(&brec)) {
+				/* sanity check: the record found for our
+				   key must still carry our CNID */
+				hfs_warn("hfs_write_entry: CNID "
+					 "changed unexpectedly!\n");
+				error = 1;
+			}
+			if (!error) {
+				__write_entry(entry, brec.data);
+			}
+			hfs_brec_relse(&brec, NULL);
+		}
+		if (error) {
+			hfs_warn("hfs_write_entry: unable to write "
+				 "entry %08x\n", entry->cnid);
+		}
+	}
+}
+
+
+static struct hfs_cat_entry *find_entry(struct hfs_mdb *mdb,
+					const struct hfs_cat_key *key)
+{
+	struct list_head *pos, *head = hash(mdb, key);
+	struct hfs_cat_entry * entry;
+
+	/* Scan the hash chain for an entry matching both the volume
+	 * and the key; take a reference on a hit.  Caller holds
+	 * entry_lock. */
+	for (pos = head->next; pos != head; pos = pos->next) {
+		entry = list_entry(pos, struct hfs_cat_entry, hash);
+		if (entry->mdb != mdb) {
+			continue;
+		}
+		if (hfs_cat_compare(&entry->key, key)) {
+			continue;
+		}
+		entry->count++;
+		return entry;
+	}
+	return NULL;
+}
+
+
+/* be careful. this gets called with the spinlock held. */
+static struct hfs_cat_entry *get_new_entry(struct hfs_mdb *mdb,
+ const struct hfs_cat_key *key,
+ const int read)
+{
+ struct hfs_cat_entry *entry;
+ struct list_head *head = hash(mdb, key);
+ struct list_head *tmp = entry_unused.next;
+
+ if (tmp != &entry_unused) {
+ list_del(tmp);
+ entries_stat.nr_free_entries--;
+ entry = list_entry(tmp, struct hfs_cat_entry, list);
+add_new_entry:
+ list_add(&entry->list, &entry_in_use);
+ list_add(&entry->hash, head);
+ entry->mdb = mdb;
+ entry->count = 1;
+ memcpy(&entry->key, key, sizeof(*key));
+ entry->state = HFS_LOCK;
+ spin_unlock(&entry_lock);
+
+ if (read) {
+ struct hfs_brec brec;
+
+ if (hfs_bfind(&brec, mdb->cat_tree,
+ HFS_BKEY(key), HFS_BFIND_READ_EQ)) {
+ /* uh oh. we failed to read the record */
+ entry->state |= HFS_DELETED;
+ goto read_fail;
+ }
+
+ read_entry(entry, &brec);
+
+ /* error */
+ if (!entry->cnid) {
+ goto read_fail;
+ }
+
+ /* we don't have to acquire a spinlock here or
+ * below for the unlocking bits as we're the first
+ * user of this entry. */
+ entry->state &= ~HFS_LOCK;
+ hfs_wake_up(&entry->wait);
+ }
+
+ return entry;
+ }
+
+ /*
+ * Uhhuh.. We need to expand. Note that "grow_entries()" will
+ * release the spinlock, but will return with the lock held
+ * again if the allocation succeeded.
+ */
+ entry = grow_entries(mdb);
+ if (entry) {
+ /* We released the lock, so.. */
+ struct hfs_cat_entry * old = find_entry(mdb, key);
+ if (!old)
+ goto add_new_entry;
+ list_add(&entry->list, &entry_unused);
+ entries_stat.nr_free_entries++;
+ spin_unlock(&entry_lock);
+ wait_on_entry(old);
+ return old;
+ }
+
+ return entry;
+
+
+read_fail:
+ remove_hash(entry);
+ entry->state &= ~HFS_LOCK;
+ hfs_wake_up(&entry->wait);
+ hfs_cat_put(entry);
+ return NULL;
+}
+
+/*
+ * get_entry()
+ *
+ * Try to return an entry for the indicated file or directory.
+ * If ('read' == 0) then no attempt will be made to read it from disk
+ * and a locked, but uninitialized, entry is returned.
+ */
+static struct hfs_cat_entry *get_entry(struct hfs_mdb *mdb,
+				       const struct hfs_cat_key *key,
+				       const int read)
+{
+	struct hfs_cat_entry * entry;
+
+	spin_lock(&entry_lock);
+	/* at the cache ceiling with nothing free: prune first */
+	if (!entries_stat.nr_free_entries &&
+	    (entries_stat.nr_entries >= CCACHE_MAX))
+		goto restock;
+
+search:
+	entry = find_entry(mdb, key);
+	if (!entry) {
+		/* get_new_entry() is entered with (and consumes) the
+		   spinlock */
+		return get_new_entry(mdb, key, read);
+	}
+	/* cache hit: wait until any in-progress read/write finishes */
+	spin_unlock(&entry_lock);
+	wait_on_entry(entry);
+	return entry;
+
+restock:
+	try_to_free_entries(8);
+	goto search;
+}
+
+/*
+ * new_cnid()
+ *
+ * Allocate a CNID to use for a new file or directory.
+ */
+static inline hfs_u32 new_cnid(struct hfs_mdb *mdb)
+{
+	/* Hand out the next unused CNID in on-disk (big-endian) order.
+	 * If the create succeeds then the mdb will get dirtied. */
+	hfs_u32 cnid = mdb->next_id++;
+
+	return htonl(cnid);
+}
+
+/*
+ * update_dir()
+ *
+ * Update counts, times and dirt on a changed directory
+ */
+static void update_dir(struct hfs_mdb *mdb, struct hfs_cat_entry *dir,
+		       int is_dir, int count)
+{
+	/* update counts ('count' may be negative for deletions) */
+	if (is_dir) {
+		mdb->dir_count += count;
+		dir->u.dir.dirs += count;
+		/* the MDB tracks the root's valence separately */
+		if (dir->cnid == htonl(HFS_ROOT_CNID)) {
+			mdb->root_dirs += count;
+		}
+	} else {
+		mdb->file_count += count;
+		dir->u.dir.files += count;
+		if (dir->cnid == htonl(HFS_ROOT_CNID)) {
+			mdb->root_files += count;
+		}
+	}
+
+	/* update times and dirt */
+	dir->modify_date = hfs_time();
+	hfs_cat_mark_dirty(dir);
+}
+
+/*
+ * Add a writer to dir, excluding readers.
+ */
+static inline void start_write(struct hfs_cat_entry *dir)
+{
+	/* A writer must wait while readers are active or queued. */
+	if (dir->u.dir.read_wait || dir->u.dir.readers) {
+		hfs_sleep_on(&dir->u.dir.write_wait);
+	}
+	dir->u.dir.writers += 1;
+}
+
+/*
+ * Add a reader to dir, excluding writers.
+ */
+static inline void start_read(struct hfs_cat_entry *dir)
+{
+	/* A reader must wait while a writer is active or queued. */
+	if (dir->u.dir.write_wait || dir->u.dir.writers) {
+		hfs_sleep_on(&dir->u.dir.read_wait);
+	}
+	dir->u.dir.readers += 1;
+}
+
+/*
+ * Remove a writer from dir, possibly admitting readers.
+ */
+static inline void end_write(struct hfs_cat_entry *dir)
+{
+	/* The last writer out wakes any queued readers. */
+	dir->u.dir.writers -= 1;
+	if (dir->u.dir.writers == 0) {
+		hfs_wake_up(&dir->u.dir.read_wait);
+	}
+}
+
+/*
+ * Remove a reader from dir, possibly admitting writers.
+ */
+static inline void end_read(struct hfs_cat_entry *dir)
+{
+	/* The last reader out wakes any queued writers. */
+	dir->u.dir.readers -= 1;
+	if (dir->u.dir.readers == 0) {
+		hfs_wake_up(&dir->u.dir.write_wait);
+	}
+}
+
+/*
+ * create_entry()
+ *
+ * Add a new file or directory to the catalog B-tree and
+ * return a (struct hfs_cat_entry) for it in '*result'.
+ */
+static int create_entry(struct hfs_cat_entry *parent, struct hfs_cat_key *key,
+			const struct hfs_cat_rec *record, int is_dir,
+			hfs_u32 cnid, struct hfs_cat_entry **result)
+{
+	struct hfs_mdb *mdb = parent->mdb;
+	struct hfs_cat_entry *entry;
+	struct hfs_cat_key thd_key;
+	struct hfs_cat_rec thd_rec;
+	int error, has_thread;
+
+	if (result) {
+		*result = NULL;
+	}
+
+	/* keep readers from getting confused by changing dir size */
+	start_write(parent);
+
+	/* create a locked entry in the cache ('read'==0: the record
+	   doesn't exist on disk yet) */
+	entry = get_entry(mdb, key, 0);
+	if (!entry) {
+		/* The entry exists but can't be read */
+		error = -EIO;
+		goto done;
+	}
+
+	if (entry->cnid) {
+		/* The (unlocked) entry exists in the cache */
+		error = -EEXIST;
+		goto bail2;
+	}
+
+	/* limit directory valence to signed 16-bit integer */
+	if ((parent->u.dir.dirs + parent->u.dir.files) >= HFS_MAX_VALENCE) {
+		error = -ENOSPC;
+		goto bail1;
+	}
+
+	/* directories always get a thread record; files only when
+	   flagged as threaded */
+	has_thread = is_dir || (record->u.fil.Flags & HFS_FIL_THD);
+
+	if (has_thread) {
+		/* init some fields for the thread record */
+		memset(&thd_rec, 0, sizeof(thd_rec));
+		thd_rec.cdrType = is_dir ? HFS_CDR_THD : HFS_CDR_FTH;
+		/* ParID and CName are adjacent in both key and record,
+		   so one memcpy covers them */
+		memcpy(&thd_rec.u.thd.ParID, &key->ParID,
+		       sizeof(hfs_u32) + sizeof(struct hfs_name));
+
+		/* insert the thread record (keyed by the new CNID with
+		   an empty name) */
+		hfs_cat_build_key(cnid, NULL, &thd_key);
+		error = hfs_binsert(mdb->cat_tree, HFS_BKEY(&thd_key),
+				    &thd_rec, 2 + sizeof(THD_REC));
+		if (error) {
+			goto bail1;
+		}
+	}
+
+	/* insert the record */
+	error = hfs_binsert(mdb->cat_tree, HFS_BKEY(key), record,
+			    is_dir ?  2 + sizeof(DIR_REC) :
+				      2 + sizeof(FIL_REC));
+	if (error) {
+		if (has_thread && (error != -EIO)) {
+			/* at least TRY to remove the thread record */
+			(void)hfs_bdelete(mdb->cat_tree, HFS_BKEY(&thd_key));
+		}
+		goto bail1;
+	}
+
+	/* update the parent directory */
+	update_dir(mdb, parent, is_dir, 1);
+
+	/* complete the cache entry and return success */
+	__read_entry(entry, record);
+	unlock_entry(entry);
+	if (result) {
+		*result = entry;
+	} else {
+		hfs_cat_put(entry);
+	}
+	goto done;
+
+bail1:
+	/* mark the half-built cache entry deleted so hfs_cat_put()
+	   discards rather than writes it */
+	entry->state |= HFS_DELETED;
+	unlock_entry(entry);
+bail2:
+	hfs_cat_put(entry);
+done:
+	end_write(parent);
+	return error;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_cat_put()
+ *
+ * Release an entry we aren't using anymore.
+ *
+ * NOTE: We must be careful any time we sleep on a non-deleted
+ * entry that the entry is in a consistent state, since another
+ * process may get the entry while we sleep. That is why we
+ * 'goto repeat' after each operation that might sleep.
+ */
+void hfs_cat_put(struct hfs_cat_entry * entry)
+{
+	if (entry) {
+		wait_on_entry(entry);
+
+		/* NOTE(review): this check reads 'count' before taking
+		   entry_lock -- racy, but matches the style of the era;
+		   it is only a diagnostic. */
+		if (!entry->count) {/* just in case */
+			hfs_warn("hfs_cat_put: trying to free free entry: %p\n",
+				 entry);
+			return;
+		}
+
+		spin_lock(&entry_lock);
+		if (!--entry->count) {
+			/* last reference dropped: tear the entry down.
+			   Each step that sleeps drops the lock and
+			   restarts here, since the state may have
+			   changed while we slept. */
+repeat:
+			if ((entry->state & HFS_DELETED)) {
+				if (entry->type == HFS_CDR_FIL) {
+					/* free all extents */
+					entry->u.file.data_fork.lsize = 0;
+					hfs_extent_adj(&entry->u.file.data_fork);
+					entry->u.file.rsrc_fork.lsize = 0;
+					hfs_extent_adj(&entry->u.file.rsrc_fork);
+				}
+				/* deleted entries are never written back */
+				entry->state = 0;
+			} else if (entry->type == HFS_CDR_FIL) {
+				/* clear out any cached extents */
+				if (entry->u.file.data_fork.first.next) {
+					hfs_extent_free(&entry->u.file.data_fork);
+					spin_unlock(&entry_lock);
+					wait_on_entry(entry);
+					spin_lock(&entry_lock);
+					goto repeat;
+				}
+				if (entry->u.file.rsrc_fork.first.next) {
+					hfs_extent_free(&entry->u.file.rsrc_fork);
+					spin_unlock(&entry_lock);
+					wait_on_entry(entry);
+					spin_lock(&entry_lock);
+					goto repeat;
+				}
+			}
+
+			/* if we put a dirty entry, write it out. */
+			if ((entry->state & HFS_DIRTY)) {
+				list_del(&entry->dirty);
+				INIT_LIST_HEAD(&entry->dirty);
+				spin_unlock(&entry_lock);
+				write_entry(entry);
+				spin_lock(&entry_lock);
+				entry->state &= ~HFS_DIRTY;
+				goto repeat;
+			}
+
+			/* fully clean: unhash, wipe, and return the
+			   entry to the free pool */
+			list_del(&entry->hash);
+			list_del(&entry->list);
+			spin_unlock(&entry_lock);
+			clear_entry(entry);
+			spin_lock(&entry_lock);
+			list_add(&entry->list, &entry_unused);
+			entries_stat.nr_free_entries++;
+		}
+		spin_unlock(&entry_lock);
+	}
+}
+
+/*
+ * hfs_cat_get()
+ *
+ * Wrapper for get_entry() which always calls with ('read'==1).
+ * Used for access to get_entry() from outside this file.
+ */
+struct hfs_cat_entry *hfs_cat_get(struct hfs_mdb *mdb,
+				  const struct hfs_cat_key *key)
+{
+	/* Public lookup: always read the entry in from disk if it is
+	 * not already cached. */
+	return get_entry(mdb, key, 1);
+}
+
+/* invalidate all entries for a device */
+static void invalidate_list(struct list_head *head, struct hfs_mdb *mdb,
+ struct list_head *dispose)
+{
+ struct list_head *next;
+
+ next = head->next;
+ for (;;) {
+ struct list_head *tmp = next;
+ struct hfs_cat_entry * entry;
+
+ next = next->next;
+ if (tmp == head)
+ break;
+ entry = list_entry(tmp, struct hfs_cat_entry, list);
+ if (entry->mdb != mdb) {
+ continue;
+ }
+ if (!entry->count) {
+ list_del(&entry->hash);
+ INIT_LIST_HEAD(&entry->hash);
+ list_del(&entry->dirty);
+ INIT_LIST_HEAD(&entry->dirty);
+ list_del(&entry->list);
+ list_add(&entry->list, dispose);
+ continue;
+ }
+ hfs_warn("hfs_fs: entry %p(%u:%lu) busy on removed device %s.\n",
+ entry, entry->count, entry->state,
+ hfs_mdb_name(entry->mdb->sys_mdb));
+ }
+
+}
+
+/*
+ * hfs_cat_invalidate()
+ *
+ * Called by hfs_mdb_put() to remove all the entries
+ * in the cache which are associated with a given MDB.
+ */
+void hfs_cat_invalidate(struct hfs_mdb *mdb)
+{
+	LIST_HEAD(throw_away);
+
+	/* collect this volume's entries from both the in-use and the
+	   per-volume dirty lists under the lock ... */
+	spin_lock(&entry_lock);
+	invalidate_list(&entry_in_use, mdb, &throw_away);
+	invalidate_list(&mdb->entry_dirty, mdb, &throw_away);
+	spin_unlock(&entry_lock);
+
+	/* ... then prune them without the lock (may sleep) */
+	dispose_list(&throw_away);
+}
+
+/*
+ * hfs_cat_commit()
+ *
+ * Called by hfs_mdb_commit() to write dirty entries to the disk buffers.
+ */
+void hfs_cat_commit(struct hfs_mdb *mdb)
+{
+	struct list_head *tmp, *head = &mdb->entry_dirty;
+	struct hfs_cat_entry * entry;
+
+	spin_lock(&entry_lock);
+	/* drain the volume's dirty list from the tail */
+	while ((tmp = head->prev) != head) {
+		entry = list_entry(tmp, struct hfs_cat_entry, list);
+
+		if ((entry->state & HFS_LOCK)) {
+			/* someone else holds it; wait and rescan */
+			spin_unlock(&entry_lock);
+			wait_on_entry(entry);
+			spin_lock(&entry_lock);
+		} else {
+			struct list_head *insert = &entry_in_use;
+
+			/* unreferenced entries go to the tail of the
+			   in-use list so they are pruned first */
+			if (!entry->count)
+				insert = entry_in_use.prev;
+			/* remove from global dirty list */
+			list_del(&entry->dirty);
+			INIT_LIST_HEAD(&entry->dirty);
+
+			/* add to in_use list */
+			list_del(&entry->list);
+			list_add(&entry->list, insert);
+
+			/* reset DIRTY, set LOCK (XOR flips both bits:
+			   DIRTY is known set, LOCK known clear here) */
+			entry->state ^= HFS_DIRTY | HFS_LOCK;
+			spin_unlock(&entry_lock);
+			write_entry(entry);
+			spin_lock(&entry_lock);
+			entry->state &= ~HFS_LOCK;
+			hfs_wake_up(&entry->wait);
+		}
+	}
+	spin_unlock(&entry_lock);
+}
+
+/*
+ * hfs_cat_free()
+ *
+ * Releases all the memory allocated in grow_entries().
+ * Must call hfs_cat_invalidate() on all MDBs before calling this.
+ */
+void hfs_cat_free(void)
+{
+	struct allocation_unit *next;
+
+	/* Walk the chain of allocation units, releasing each one. */
+	while (allocation != NULL) {
+		next = allocation->next;
+		HFS_DELETE(allocation);
+		allocation = next;
+	}
+}
+
+/*
+ * hfs_cat_compare()
+ *
+ * Description:
+ * This is the comparison function used for the catalog B-tree. In
+ * comparing catalog B-tree entries, the parent id is the most
+ * significant field (compared as unsigned ints). The name field is
+ * the least significant (compared in "Macintosh lexical order",
+ * see hfs_strcmp() in string.c)
+ * Input Variable(s):
+ * struct hfs_cat_key *key1: pointer to the first key to compare
+ * struct hfs_cat_key *key2: pointer to the second key to compare
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
+ * Preconditions:
+ * key1 and key2 point to "valid" (struct hfs_cat_key)s.
+ * Postconditions:
+ * This function has no side-effects
+ */
+int hfs_cat_compare(const struct hfs_cat_key *key1,
+		    const struct hfs_cat_key *key2)
+{
+	/* The parent ID is the most significant field, compared as an
+	 * unsigned int (see the contract above).  Note: the obvious
+	 * "(int)(p1 - p2)" is wrong here -- the unsigned difference
+	 * can wrap past INT_MAX and come back with the wrong sign --
+	 * so compare the values directly instead. */
+	unsigned int p1 = hfs_get_hl(key1->ParID);
+	unsigned int p2 = hfs_get_hl(key2->ParID);
+
+	if (p1 != p2) {
+		return (p1 < p2) ? -1 : 1;
+	}
+	/* Same parent: order by name in Macintosh lexical order. */
+	return hfs_strcmp(&key1->CName, &key2->CName);
+}
+
+/*
+ * hfs_cat_build_key()
+ *
+ * Given the ID of the parent and the name build a search key.
+ */
+void hfs_cat_build_key(hfs_u32 parent, const struct hfs_name *cname,
+		       struct hfs_cat_key *key)
+{
+	/* The fixed part of a catalog key is 6 bytes; a name adds its
+	 * length on top.  A NULL cname yields a zeroed (empty) name,
+	 * as used for thread-record lookups. */
+	hfs_put_nl(parent, key->ParID);
+
+	if (cname == NULL) {
+		key->KeyLen = 6;
+		memset(&key->CName, 0, sizeof(*cname));
+	} else {
+		key->KeyLen = 6 + cname->Len;
+		memcpy(&key->CName, cname, sizeof(*cname));
+	}
+}
+
+/*
+ * hfs_cat_open()
+ *
+ * Given a directory on an HFS filesystem get its thread and
+ * lock the directory against insertions and deletions.
+ * Return 0 on success or an error code on failure.
+ */
+int hfs_cat_open(struct hfs_cat_entry *dir, struct hfs_brec *brec)
+{
+	struct hfs_cat_key key;
+	int error;
+
+	if (dir->type != HFS_CDR_DIR) {
+		return -EINVAL;
+	}
+
+	/* Block writers */
+	start_read(dir);
+
+	/* Find the directory's thread record (its CNID with an empty
+	   name); iteration starts from there via hfs_cat_next() */
+	hfs_cat_build_key(dir->cnid, NULL, &key);
+	error = hfs_bfind(brec, dir->mdb->cat_tree,
+			  HFS_BKEY(&key), HFS_BFIND_READ_EQ);
+
+	/* on failure the directory is unlocked again; on success the
+	   read lock is held until hfs_cat_close()/error in
+	   hfs_cat_next() */
+	if (error) {
+		end_read(dir);
+	}
+
+	return error;
+}
+
+/*
+ * hfs_cat_next()
+ *
+ * Given a catalog brec structure, replace it with the count'th next brec
+ * in the same directory.
+ * Return an error code if there is a problem, 0 if OK.
+ * Note that an error code of -ENOENT means there are no more entries
+ * in this directory.
+ * The directory is "closed" on an error.
+ */
+int hfs_cat_next(struct hfs_cat_entry *dir, struct hfs_brec *brec,
+		 hfs_u16 count, hfs_u32 *cnid, hfs_u8 *type)
+{
+	int error;
+
+	if (!dir || !brec) {
+		return -EINVAL;
+	}
+
+	/* Get the count'th next catalog tree entry */
+	error = hfs_bsucc(brec, count);
+	if (!error) {
+		struct hfs_cat_key *key = (struct hfs_cat_key *)brec->key;
+		/* records are ordered by parent ID, so a different
+		   ParID means we've walked off the end of 'dir' */
+		if (hfs_get_nl(key->ParID) != dir->cnid) {
+			hfs_brec_relse(brec, NULL);
+			error = -ENOENT;
+		}
+	}
+	if (!error) {
+		*type = ((struct hfs_cat_rec *)brec->data)->cdrType;
+		*cnid = brec_to_id(brec);
+	} else {
+		/* any error "closes" the directory: drop the read lock
+		   taken by hfs_cat_open() */
+		end_read(dir);
+	}
+	return error;
+}
+
+/*
+ * hfs_cat_close()
+ *
+ * Given a directory and the brec used to enumerate it, release the
+ * brec and unlock the directory against insertions and deletions,
+ * undoing hfs_cat_open().  NULL arguments are tolerated.
+ */
+void hfs_cat_close(struct hfs_cat_entry *dir, struct hfs_brec *brec)
+{
+	/* Release the brec and drop the read lock taken by
+	 * hfs_cat_open(); NULL arguments are tolerated. */
+	if (!dir || !brec) {
+		return;
+	}
+	hfs_brec_relse(brec, NULL);
+	end_read(dir);
+}
+
+/*
+ * hfs_cat_parent()
+ *
+ * Given a catalog entry, return the entry for its parent.
+ * Uses catalog key for the entry to get its parent's ID
+ * and then uses the parent's thread record to locate the
+ * parent's actual catalog entry.
+ */
+struct hfs_cat_entry *hfs_cat_parent(struct hfs_cat_entry *entry)
+{
+	struct hfs_cat_entry *retval = NULL;
+	struct hfs_mdb *mdb = entry->mdb;
+	struct hfs_brec brec;
+	struct hfs_cat_key key;
+	int error;
+
+	/* hold the entry so its key can't change under us */
+	lock_entry(entry);
+	if (!(entry->state & HFS_DELETED)) {
+		/* look up the parent's thread record: keyed by the
+		   parent's CNID with an empty name */
+		hfs_cat_build_key(hfs_get_nl(entry->key.ParID), NULL, &key);
+		error = hfs_bfind(&brec, mdb->cat_tree,
+				  HFS_BKEY(&key), HFS_BFIND_READ_EQ);
+		if (!error) {
+			/* convert thread record to key */
+			struct hfs_cat_rec *rec = brec.data;
+			key.KeyLen = 6 + rec->u.thd.CName.Len;
+			/* ParID and CName are adjacent in both layouts */
+			memcpy(&key.ParID, &rec->u.thd.ParID,
+			       sizeof(hfs_u32) + sizeof(struct hfs_name));
+
+			hfs_brec_relse(&brec, NULL);
+
+			retval = hfs_cat_get(mdb, &key);
+		}
+	}
+	unlock_entry(entry);
+	return retval;
+}
+
+/*
+ * hfs_cat_create()
+ *
+ * Create a new file with the indicated name in the indicated directory.
+ * The file will have the indicated flags, type and creator.
+ * If successful an (struct hfs_cat_entry) is returned in '*result'.
+ */
+int hfs_cat_create(struct hfs_cat_entry *parent, struct hfs_cat_key *key,
+ hfs_u8 flags, hfs_u32 type, hfs_u32 creator,
+ struct hfs_cat_entry **result)
+{
+ struct hfs_cat_rec record;
+ hfs_u32 id = new_cnid(parent->mdb);
+ hfs_u32 mtime = hfs_time();
+
+ /* init some fields for the file record */
+ memset(&record, 0, sizeof(record));
+ record.cdrType = HFS_CDR_FIL;
+ record.u.fil.Flags = flags | HFS_FIL_USED;
+ hfs_put_nl(id, record.u.fil.FlNum);
+ hfs_put_nl(mtime, record.u.fil.CrDat);
+ hfs_put_nl(mtime, record.u.fil.MdDat);
+ hfs_put_nl(0, record.u.fil.BkDat);
+ hfs_put_nl(type, record.u.fil.UsrWds.fdType);
+ hfs_put_nl(creator, record.u.fil.UsrWds.fdCreator);
+
+ return create_entry(parent, key, &record, 0, id, result);
+}
+
+/*
+ * hfs_cat_mkdir()
+ *
+ * Create a new directory with the indicated name in the indicated directory.
+ * If successful an (struct hfs_cat_entry) is returned in '*result'.
+ */
+int hfs_cat_mkdir(struct hfs_cat_entry *parent, struct hfs_cat_key *key,
+ struct hfs_cat_entry **result)
+{
+ struct hfs_cat_rec record;
+ hfs_u32 id = new_cnid(parent->mdb);
+ hfs_u32 mtime = hfs_time();
+
+ /* init some fields for the directory record */
+ memset(&record, 0, sizeof(record));
+ record.cdrType = HFS_CDR_DIR;
+ hfs_put_nl(id, record.u.dir.DirID);
+ hfs_put_nl(mtime, record.u.dir.CrDat);
+ hfs_put_nl(mtime, record.u.dir.MdDat);
+ hfs_put_nl(0, record.u.dir.BkDat);
+ hfs_put_hs(0xff, record.u.dir.UsrInfo.frView);
+
+ return create_entry(parent, key, &record, 1, id, result);
+}
+
+/*
+ * hfs_cat_delete()
+ *
+ * Delete the indicated file or directory.
+ * The associated thread is also removed unless ('with_thread'==0).
+ */
+int hfs_cat_delete(struct hfs_cat_entry *parent, struct hfs_cat_entry *entry,
+ int with_thread)
+{
+ struct hfs_cat_key key;
+ struct hfs_mdb *mdb = parent->mdb;
+ int is_dir, error = 0;
+
+ if (parent->mdb != entry->mdb) {
+ return -EINVAL;
+ }
+
+ if (entry->type == HFS_CDR_FIL) {
+ with_thread = (entry->u.file.flags&HFS_FIL_THD) && with_thread;
+ is_dir = 0;
+ } else {
+ is_dir = 1;
+ }
+
+ /* keep readers from getting confused by changing dir size */
+ start_write(parent);
+
+ /* don't delete a busy directory */
+ if (entry->type == HFS_CDR_DIR) {
+ start_read(entry);
+
+ if (entry->u.dir.files || entry->u.dir.dirs) {
+ error = -ENOTEMPTY;
+ }
+ }
+
+ /* try to delete the file or directory */
+ if (!error) {
+ lock_entry(entry);
+ if ((entry->state & HFS_DELETED)) {
+ /* somebody beat us to it */
+ error = -ENOENT;
+ } else {
+ error = hfs_bdelete(mdb->cat_tree,
+ HFS_BKEY(&entry->key));
+ }
+ unlock_entry(entry);
+ }
+
+ if (!error) {
+ /* Mark the entry deleted and remove it from the cache */
+ entry->state |= HFS_DELETED;
+ remove_hash(entry);
+
+ /* try to delete the thread entry if it exists */
+ if (with_thread) {
+ hfs_cat_build_key(entry->cnid, NULL, &key);
+ (void)hfs_bdelete(mdb->cat_tree, HFS_BKEY(&key));
+ }
+
+ update_dir(mdb, parent, is_dir, -1);
+ }
+
+ if (entry->type == HFS_CDR_DIR) {
+ end_read(entry);
+ }
+ end_write(parent);
+ return error;
+}
+
+/*
+ * hfs_cat_move()
+ *
+ * Rename a file or directory, possibly to a new directory.
+ * If the destination exists it is removed and a
+ * (struct hfs_cat_entry) for it is returned in '*result'.
+ */
+int hfs_cat_move(struct hfs_cat_entry *old_dir, struct hfs_cat_entry *new_dir,
+ struct hfs_cat_entry *entry, struct hfs_cat_key *new_key,
+ struct hfs_cat_entry **removed)
+{
+ struct hfs_cat_entry *dest;
+ struct hfs_mdb *mdb;
+ int error = 0;
+ int is_dir, has_thread;
+
+ if (removed) {
+ *removed = NULL;
+ }
+
+ /* sanity checks */
+ if (!old_dir || !new_dir) {
+ return -EINVAL;
+ }
+ mdb = old_dir->mdb;
+ if (mdb != new_dir->mdb) {
+ return -EXDEV;
+ }
+
+ /* precompute a few things */
+ if (entry->type == HFS_CDR_DIR) {
+ is_dir = 1;
+ has_thread = 1;
+ } else if (entry->type == HFS_CDR_FIL) {
+ is_dir = 0;
+ has_thread = entry->u.file.flags & HFS_FIL_THD;
+ } else {
+ return -EINVAL;
+ }
+
+ while (mdb->rename_lock) {
+ hfs_sleep_on(&mdb->rename_wait);
+ }
+ mdb->rename_lock = 1;
+
+ /* keep readers from getting confused by changing dir size */
+ start_write(new_dir);
+ if (old_dir != new_dir) {
+ start_write(old_dir);
+ }
+
+ /* Don't move a directory inside itself */
+ if (is_dir) {
+ struct hfs_cat_key thd_key;
+ struct hfs_brec brec;
+
+ hfs_u32 id = new_dir->cnid;
+ while (id != htonl(HFS_ROOT_CNID)) {
+ if (id == entry->cnid) {
+ error = -EINVAL;
+ } else {
+ hfs_cat_build_key(id, NULL, &thd_key);
+ error = hfs_bfind(&brec, mdb->cat_tree,
+ HFS_BKEY(&thd_key),
+ HFS_BFIND_READ_EQ);
+ }
+ if (error) {
+ goto done;
+ } else {
+ struct hfs_cat_rec *rec = brec.data;
+ id = hfs_get_nl(rec->u.thd.ParID);
+ hfs_brec_relse(&brec, NULL);
+ }
+ }
+ }
+
+restart:
+ /* see if the destination exists, getting it if it does */
+ dest = hfs_cat_get(mdb, new_key);
+
+ if (!dest) {
+ /* destination doesn't exist, so create it */
+ struct hfs_cat_rec new_record;
+
+ /* create a locked entry in the cache */
+ dest = get_entry(mdb, new_key, 0);
+ if (!dest) {
+ error = -EIO;
+ goto done;
+ }
+ if (dest->cnid) {
+ /* The (unlocked) entry exists in the cache */
+ goto have_distinct;
+ }
+
+ /* limit directory valence to signed 16-bit integer */
+ if ((new_dir->u.dir.dirs + new_dir->u.dir.files) >=
+ HFS_MAX_VALENCE) {
+ error = -ENOSPC;
+ goto bail3;
+ }
+
+ /* build the new record */
+ new_record.cdrType = entry->type;
+ __write_entry(entry, &new_record);
+
+ /* insert the new record */
+ error = hfs_binsert(mdb->cat_tree, HFS_BKEY(new_key),
+ &new_record, is_dir ? 2 + sizeof(DIR_REC) :
+ 2 + sizeof(FIL_REC));
+ if (error == -EEXIST) {
+ dest->state |= HFS_DELETED;
+ unlock_entry(dest);
+ hfs_cat_put(dest);
+ goto restart;
+ } else if (error) {
+ goto bail3;
+ }
+
+ /* update the destination directory */
+ update_dir(mdb, new_dir, is_dir, 1);
+ } else if (entry != dest) {
+have_distinct:
+ /* The destination exists and is not same as source */
+ lock_entry(dest);
+ if ((dest->state & HFS_DELETED)) {
+ unlock_entry(dest);
+ hfs_cat_put(dest);
+ goto restart;
+ }
+ if (dest->type != entry->type) {
+ /* can't move a file on top
+ of a dir nor vice versa. */
+ error = is_dir ? -ENOTDIR : -EISDIR;
+ } else if (is_dir && (dest->u.dir.dirs || dest->u.dir.files)) {
+ /* directory to replace is not empty */
+ error = -ENOTEMPTY;
+ }
+
+ if (error) {
+ goto bail2;
+ }
+ } else {
+ /* The destination exists but is same as source */
+ --entry->count;
+ dest = NULL;
+ }
+
+ /* lock the entry */
+ lock_entry(entry);
+ if ((entry->state & HFS_DELETED)) {
+ error = -ENOENT;
+ goto bail1;
+ }
+
+ if (dest) {
+ /* remove the old entry */
+ error = hfs_bdelete(mdb->cat_tree, HFS_BKEY(&entry->key));
+
+ if (error) {
+ /* We couldn't remove the entry for the
+ original file, so nothing has changed. */
+ goto bail1;
+ }
+ update_dir(mdb, old_dir, is_dir, -1);
+ }
+
+ /* update the thread of the dir/file we're moving */
+ if (has_thread) {
+ struct hfs_cat_key thd_key;
+ struct hfs_brec brec;
+
+ hfs_cat_build_key(entry->cnid, NULL, &thd_key);
+ error = hfs_bfind(&brec, mdb->cat_tree,
+ HFS_BKEY(&thd_key), HFS_BFIND_WRITE);
+ if (error == -ENOENT) {
+ if (is_dir) {
+ /* directory w/o a thread! */
+ error = -EIO;
+ } else {
+ /* We were lied to! */
+ entry->u.file.flags &= ~HFS_FIL_THD;
+ hfs_cat_mark_dirty(entry);
+ }
+ }
+ if (!error) {
+ struct hfs_cat_rec *rec = brec.data;
+ memcpy(&rec->u.thd.ParID, &new_key->ParID,
+ sizeof(hfs_u32) + sizeof(struct hfs_name));
+ hfs_brec_relse(&brec, NULL);
+ } else if (error == -ENOENT) {
+ error = 0;
+ } else if (!dest) {
+ /* Nothing was changed */
+ unlock_entry(entry);
+ goto done;
+ } else {
+ /* Something went seriously wrong.
+ The dir/file has been deleted. */
+ /* XXX try some recovery? */
+ entry->state |= HFS_DELETED;
+ remove_hash(entry);
+ goto bail1;
+ }
+ }
+
+ /* TRY to remove the thread for the pre-existing entry */
+ if (dest && dest->cnid &&
+ (is_dir || (dest->u.file.flags & HFS_FIL_THD))) {
+ struct hfs_cat_key thd_key;
+
+ hfs_cat_build_key(dest->cnid, NULL, &thd_key);
+ (void)hfs_bdelete(mdb->cat_tree, HFS_BKEY(&thd_key));
+ }
+
+ /* update directories */
+ new_dir->modify_date = hfs_time();
+ hfs_cat_mark_dirty(new_dir);
+
+ /* update key */
+ remove_hash(entry);
+ memcpy(&entry->key, new_key, sizeof(*new_key));
+ /* KEYDIRTY as case might differ */
+ entry->state |= HFS_KEYDIRTY;
+ insert_hash(entry);
+ hfs_cat_mark_dirty(entry);
+ unlock_entry(entry);
+
+ /* delete any pre-existing or place-holder entry */
+ if (dest) {
+ dest->state |= HFS_DELETED;
+ unlock_entry(dest);
+ if (removed && dest->cnid) {
+ *removed = dest;
+ } else {
+ hfs_cat_put(dest);
+ }
+ }
+ goto done;
+
+bail1:
+ unlock_entry(entry);
+bail2:
+ if (dest) {
+ if (!dest->cnid) {
+ /* TRY to remove the new entry */
+ (void)hfs_bdelete(mdb->cat_tree, HFS_BKEY(new_key));
+ update_dir(mdb, new_dir, is_dir, -1);
+bail3:
+ dest->state |= HFS_DELETED;
+ }
+ unlock_entry(dest);
+ hfs_cat_put(dest);
+ }
+done:
+ if (new_dir != old_dir) {
+ end_write(old_dir);
+ }
+ end_write(new_dir);
+ mdb->rename_lock = 0;
+ hfs_wake_up(&mdb->rename_wait);
+
+ return error;
+}
+
+/*
+ * Initialize the hash tables
+ */
+void hfs_cat_init(void)
+{
+ int i;
+ struct list_head *head = hash_table;
+
+ i = CCACHE_NR;
+ do {
+ INIT_LIST_HEAD(head);
+ head++;
+ i--;
+ } while (i);
+}
+
+
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
new file mode 100644
index 000000000..144d9d42d
--- /dev/null
+++ b/fs/hfs/dir.c
@@ -0,0 +1,400 @@
+/*
+ * linux/fs/hfs/dir.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains directory-related functions independent of which
+ * scheme is being used to represent forks.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ File-local functions ================*/
+
+/*
+ * build_key()
+ *
+ * Build a key for a file by the given name in the given directory.
+ * If the name matches one of the reserved names, returns 1; otherwise 0.
+ */
+static int build_key(struct hfs_cat_key *key, struct inode *dir,
+ const char *name, int len)
+{
+ struct hfs_name cname;
+ const struct hfs_name *reserved;
+
+ /* mangle the name */
+ hfs_nameout(dir, &cname, name, len);
+
+ /* check against reserved names */
+ reserved = HFS_SB(dir->i_sb)->s_reserved1;
+ while (reserved->Len) {
+ if (hfs_streq(reserved, &cname)) {
+ return 1;
+ }
+ ++reserved;
+ }
+
+ /* check against the names reserved only in the root directory */
+ if (HFS_I(dir)->entry->cnid == htonl(HFS_ROOT_CNID)) {
+ reserved = HFS_SB(dir->i_sb)->s_reserved2;
+ while (reserved->Len) {
+ if (hfs_streq(reserved, &cname)) {
+ return 1;
+ }
+ ++reserved;
+ }
+ }
+
+ /* build the key */
+ hfs_cat_build_key(HFS_I(dir)->entry->cnid, &cname, key);
+
+ return 0;
+}
+
+/*
+ * update_dirs_plus()
+ *
+ * Update the fields 'i_size', 'i_nlink', 'i_ctime', 'i_mtime' and
+ * 'i_version' of the inodes associated with a directory that has
+ * had a file ('is_dir'==0) or directory ('is_dir'!=0) added to it.
+ */
+static inline void update_dirs_plus(struct hfs_cat_entry *dir, int is_dir)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ struct dentry *de = dir->sys_entry[i];
+ if (de) {
+ struct inode *tmp = de->d_inode;
+ if (S_ISDIR(tmp->i_mode)) {
+ if (is_dir &&
+ (i == HFS_ITYPE_TO_INT(HFS_ITYPE_NORM))) {
+ /* In "normal" directory only */
+ ++(tmp->i_nlink);
+ }
+ tmp->i_size += HFS_I(tmp)->dir_size;
+ tmp->i_version = ++event;
+ }
+ tmp->i_ctime = tmp->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(tmp);
+ }
+ }
+}
+
+/*
+ * update_dirs_minus()
+ *
+ * Update the fields 'i_size', 'i_nlink', 'i_ctime', 'i_mtime' and
+ * 'i_version' of the inodes associated with a directory that has
+ * had a file ('is_dir'==0) or directory ('is_dir'!=0) removed.
+ */
+static inline void update_dirs_minus(struct hfs_cat_entry *dir, int is_dir)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ struct dentry *de = dir->sys_entry[i];
+ if (de) {
+ struct inode *tmp = de->d_inode;
+ if (S_ISDIR(tmp->i_mode)) {
+ if (is_dir &&
+ (i == HFS_ITYPE_TO_INT(HFS_ITYPE_NORM))) {
+ /* In "normal" directory only */
+ --(tmp->i_nlink);
+ }
+ tmp->i_size -= HFS_I(tmp)->dir_size;
+ tmp->i_version = ++event;
+ }
+ tmp->i_ctime = tmp->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(tmp);
+ }
+ }
+}
+
+/*
+ * mark_inodes_deleted()
+ *
+ * Update inodes associated with a deleted entry to reflect its deletion.
+ * Well, we really just drop the dentry.
+ */
+static inline void mark_inodes_deleted(struct hfs_cat_entry *entry,
+ struct dentry *dentry)
+{
+ struct dentry *de;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if ((de = entry->sys_entry[i]) && (dentry != de)) {
+ entry->sys_entry[i] = NULL;
+ dget(de);
+ d_delete(de);
+ dput(de);
+ }
+ }
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_dir_read()
+ *
+ * This is the read() entry in the file_operations structure for HFS
+ * directories. It simply returns an error code, since reading is not
+ * supported.
+ */
+hfs_rwret_t hfs_dir_read(struct file * filp, char *buf,
+ hfs_rwarg_t count, loff_t *ppos)
+{
+ return -EISDIR;
+}
+
+/*
+ * hfs_create()
+ *
+ * This is the create() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to create a new file in
+ * a directory and return a corresponding inode, given the inode for
+ * the directory and the name (and its length) of the new file.
+ */
+int hfs_create(struct inode * dir, struct dentry *dentry, int mode)
+{
+ struct hfs_cat_entry *entry = HFS_I(dir)->entry;
+ struct hfs_cat_entry *new;
+ struct hfs_cat_key key;
+ struct inode *inode;
+ int error;
+
+ /* build the key, checking against reserved names */
+ if (build_key(&key, dir, dentry->d_name.name, dentry->d_name.len)) {
+ error = -EEXIST;
+ } else {
+ /* try to create the file */
+ error = hfs_cat_create(entry, &key,
+ (mode & S_IWUSR) ? 0 : HFS_FIL_LOCK,
+ HFS_SB(dir->i_sb)->s_type,
+ HFS_SB(dir->i_sb)->s_creator, &new);
+ }
+
+ if (!error) {
+ update_dirs_plus(entry, 0);
+
+ /* create an inode for the new file */
+ inode = hfs_iget(new, HFS_I(dir)->file_type, dentry);
+ if (!inode) {
+ /* XXX correct error? */
+ error = -EIO;
+ } else {
+ if (HFS_I(dir)->d_drop_op)
+ HFS_I(dir)->d_drop_op(HFS_I(dir)->file_type, dentry);
+ d_instantiate(dentry, inode);
+ }
+ }
+
+ return error;
+}
+
+/*
+ * hfs_mkdir()
+ *
+ * This is the mkdir() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to create a new directory
+ * in a directory, given the inode for the parent directory and the
+ * name (and its length) of the new directory.
+ */
+int hfs_mkdir(struct inode * parent, struct dentry *dentry, int mode)
+{
+ struct hfs_cat_entry *entry = HFS_I(parent)->entry;
+ struct hfs_cat_entry *new;
+ struct hfs_cat_key key;
+ struct inode *inode;
+ int error;
+
+ /* build the key, checking against reserved names */
+ if (build_key(&key, parent, dentry->d_name.name,
+ dentry->d_name.len)) {
+ error = -EEXIST;
+ } else {
+ /* try to create the directory */
+ error = hfs_cat_mkdir(entry, &key, &new);
+ }
+
+ if (!error) {
+ update_dirs_plus(entry, 1);
+ inode = hfs_iget(new, HFS_I(parent)->file_type, dentry);
+ if (!inode) {
+ error = -EIO;
+ } else
+ d_instantiate(dentry, inode);
+ }
+
+ return error;
+}
+
+/*
+ * hfs_mknod()
+ *
+ * This is the mknod() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to create a new entry
+ * in a directory, given the inode for the parent directory and the
+ * name (and its length) and the mode of the new entry (and the device
+ * number if the entry is to be a device special file).
+ *
+ * HFS only supports regular files and directories and Linux disallows
+ * using mknod() to create directories. Thus we just check the arguments
+ * and call hfs_create().
+ */
+int hfs_mknod(struct inode *dir, struct dentry *dentry, int mode, int rdev)
+{
+ int error;
+
+ if (!dir) {
+ error = -ENOENT;
+ } else if (S_ISREG(mode)) {
+ error = hfs_create(dir, dentry, mode);
+ } else {
+ error = -EPERM;
+ }
+ return error;
+}
+
+/*
+ * hfs_unlink()
+ *
+ * This is the unlink() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to delete an existing
+ * file, given the inode for the parent directory and the name
+ * (and its length) of the existing file.
+ */
+int hfs_unlink(struct inode * dir, struct dentry *dentry)
+{
+ struct hfs_cat_entry *entry = HFS_I(dir)->entry;
+ struct hfs_cat_entry *victim = NULL;
+ struct hfs_cat_key key;
+ int error;
+
+ if (build_key(&key, dir, dentry->d_name.name,
+ dentry->d_name.len)) {
+ error = -EPERM;
+ } else if (!(victim = hfs_cat_get(entry->mdb, &key))) {
+ error = -ENOENT;
+ } else if (victim->type != HFS_CDR_FIL) {
+ error = -EPERM;
+ } else if (!(error = hfs_cat_delete(entry, victim, 1))) {
+ mark_inodes_deleted(victim, dentry);
+ d_delete(dentry);
+ update_dirs_minus(entry, 0);
+ }
+
+ hfs_cat_put(victim); /* Note that hfs_cat_put(NULL) is safe. */
+ return error;
+}
+
+/*
+ * hfs_rmdir()
+ *
+ * This is the rmdir() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to delete an existing
+ * directory, given the inode for the parent directory and the name
+ * (and its length) of the existing directory.
+ */
+int hfs_rmdir(struct inode * parent, struct dentry *dentry)
+{
+ struct hfs_cat_entry *entry = HFS_I(parent)->entry;
+ struct hfs_cat_entry *victim = NULL;
+ struct hfs_cat_key key;
+ int error;
+
+ if (build_key(&key, parent, dentry->d_name.name,
+ dentry->d_name.len)) {
+ error = -EPERM;
+ } else if (!(victim = hfs_cat_get(entry->mdb, &key))) {
+ error = -ENOENT;
+ } else if (victim->type != HFS_CDR_DIR) {
+ error = -ENOTDIR;
+ } else if (/* we only have to worry about 2 and 3 for mount points */
+ (victim->sys_entry[2] &&
+ (victim->sys_entry[2] !=
+ victim->sys_entry[2]->d_mounts)) ||
+ (victim->sys_entry[3] &&
+ (victim->sys_entry[3] !=
+ victim->sys_entry[3]->d_mounts))
+ ) {
+ error = -EBUSY;
+ } else if (!(error = hfs_cat_delete(entry, victim, 1))) {
+ mark_inodes_deleted(victim, dentry);
+ d_delete(dentry);
+ update_dirs_minus(entry, 1);
+ }
+
+ hfs_cat_put(victim); /* Note that hfs_cat_put(NULL) is safe. */
+ return error;
+}
+
+/*
+ * hfs_rename()
+ *
+ * This is the rename() entry in the inode_operations structure for
+ * regular HFS directories. The purpose is to rename an existing
+ * file or directory, given the inode for the current directory and
+ * the name (and its length) of the existing file/directory and the
+ * inode for the new directory and the name (and its length) of the
+ * new file/directory.
+ * XXX: how do you handle must_be dir?
+ */
+int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct hfs_cat_entry *old_parent = HFS_I(old_dir)->entry;
+ struct hfs_cat_entry *new_parent = HFS_I(new_dir)->entry;
+ struct hfs_cat_entry *victim = NULL;
+ struct hfs_cat_entry *deleted;
+ struct hfs_cat_key key;
+ int error;
+
+ if (build_key(&key, old_dir, old_dentry->d_name.name,
+ old_dentry->d_name.len) ||
+ (HFS_ITYPE(old_dir->i_ino) != HFS_ITYPE(new_dir->i_ino))) {
+ error = -EPERM;
+ } else if (!(victim = hfs_cat_get(old_parent->mdb, &key))) {
+ error = -ENOENT;
+ } else if (build_key(&key, new_dir, new_dentry->d_name.name,
+ new_dentry->d_name.len)) {
+ error = -EPERM;
+ } else if (!(error = hfs_cat_move(old_parent, new_parent,
+ victim, &key, &deleted))) {
+ int is_dir = (victim->type == HFS_CDR_DIR);
+
+ /* drop the old dentries */
+ mark_inodes_deleted(victim, old_dentry);
+ update_dirs_minus(old_parent, is_dir);
+ if (deleted) {
+ mark_inodes_deleted(deleted, new_dentry);
+ hfs_cat_put(deleted);
+ } else {
+ /* no existing inodes. just drop negative dentries */
+ if (HFS_I(new_dir)->d_drop_op)
+ HFS_I(new_dir)->d_drop_op(HFS_I(new_dir)->file_type,
+ new_dentry);
+ update_dirs_plus(new_parent, is_dir);
+ }
+
+ /* update dcache */
+ d_move(old_dentry, new_dentry);
+ }
+
+ hfs_cat_put(victim); /* Note that hfs_cat_put(NULL) is safe. */
+ return error;
+}
diff --git a/fs/hfs/dir_cap.c b/fs/hfs/dir_cap.c
new file mode 100644
index 000000000..d489c86ca
--- /dev/null
+++ b/fs/hfs/dir_cap.c
@@ -0,0 +1,402 @@
+/* linux/fs/hfs/dir_cap.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the inode_operations and file_operations
+ * structures for HFS directories under the CAP scheme.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * The source code distribution of the Columbia AppleTalk Package for
+ * UNIX, version 6.0, (CAP) was used as a specification of the
+ * location and format of files used by CAP's Aufs. No code from CAP
+ * appears in hfs_fs. hfs_fs is not a work ``derived'' from CAP in
+ * the sense of intellectual property law.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static int cap_lookup(struct inode *, struct dentry *);
+static int cap_readdir(struct file *, void *, filldir_t);
+
+/*================ Global variables ================*/
+
+#define DOT_LEN 1
+#define DOT_DOT_LEN 2
+#define DOT_RESOURCE_LEN 9
+#define DOT_FINDERINFO_LEN 11
+#define DOT_ROOTINFO_LEN 9
+
+const struct hfs_name hfs_cap_reserved1[] = {
+ {DOT_LEN, "."},
+ {DOT_DOT_LEN, ".."},
+ {DOT_RESOURCE_LEN, ".resource"},
+ {DOT_FINDERINFO_LEN, ".finderinfo"},
+ {0, ""},
+};
+
+const struct hfs_name hfs_cap_reserved2[] = {
+ {DOT_ROOTINFO_LEN, ".rootinfo"},
+ {0, ""},
+};
+
+#define DOT (&hfs_cap_reserved1[0])
+#define DOT_DOT (&hfs_cap_reserved1[1])
+#define DOT_RESOURCE (&hfs_cap_reserved1[2])
+#define DOT_FINDERINFO (&hfs_cap_reserved1[3])
+#define DOT_ROOTINFO (&hfs_cap_reserved2[0])
+
+static struct file_operations hfs_cap_dir_operations = {
+ NULL, /* lseek - default */
+ hfs_dir_read, /* read - invalid */
+ NULL, /* write - bad */
+ cap_readdir, /* readdir */
+ NULL, /* select - default */
+ NULL, /* ioctl - default */
+ NULL, /* mmap - none */
+ NULL, /* no special open code */
+ NULL, /* no special release code */
+ file_fsync, /* fsync - default */
+ NULL, /* fasync - default */
+ NULL, /* check_media_change - none */
+ NULL /* revalidate - none */
+};
+
+struct inode_operations hfs_cap_ndir_inode_operations = {
+ &hfs_cap_dir_operations,/* default directory file-ops */
+ hfs_create, /* create */
+ cap_lookup, /* lookup */
+ NULL, /* link */
+ hfs_unlink, /* unlink */
+ NULL, /* symlink */
+ hfs_mkdir, /* mkdir */
+ hfs_rmdir, /* rmdir */
+ hfs_mknod, /* mknod */
+ hfs_rename, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* bmap */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL /* smap */
+};
+
+struct inode_operations hfs_cap_fdir_inode_operations = {
+ &hfs_cap_dir_operations,/* default directory file-ops */
+ NULL, /* create */
+ cap_lookup, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* bmap */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL /* smap */
+};
+
+struct inode_operations hfs_cap_rdir_inode_operations = {
+ &hfs_cap_dir_operations,/* default directory file-ops */
+ hfs_create, /* create */
+ cap_lookup, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* bmap */
+ NULL, /* truncate */
+ NULL, /* permission */
+ NULL /* smap */
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * cap_lookup()
+ *
+ * This is the lookup() entry in the inode_operations structure for
+ * HFS directories in the CAP scheme. The purpose is to generate the
+ * inode corresponding to an entry in a directory, given the inode for
+ * the directory and the name (and its length) of the entry.
+ */
+static int cap_lookup(struct inode * dir, struct dentry *dentry)
+{
+ ino_t dtype;
+ struct hfs_name cname;
+ struct hfs_cat_entry *entry;
+ struct hfs_cat_key key;
+ struct inode *inode = NULL;
+
+ if (!dir || !S_ISDIR(dir->i_mode)) {
+ goto done;
+ }
+
+ entry = HFS_I(dir)->entry;
+ dtype = HFS_ITYPE(dir->i_ino);
+
+ /* Perform name-mangling */
+ hfs_nameout(dir, &cname, dentry->d_name.name,
+ dentry->d_name.len);
+
+ /* Check for "." */
+ if (hfs_streq(&cname, DOT)) {
+ /* this little trick skips the iget and iput */
+ d_add(dentry, dir);
+ return 0;
+ }
+
+ /* Check for "..". */
+ if (hfs_streq(&cname, DOT_DOT)) {
+ struct hfs_cat_entry *parent;
+
+ if (dtype != HFS_CAP_NDIR) {
+ /* Case for ".." in ".finderinfo" or ".resource" */
+ parent = entry;
+ ++entry->count; /* __hfs_iget() eats one */
+ } else {
+ /* Case for ".." in a normal directory */
+ parent = hfs_cat_parent(entry);
+ }
+ inode = hfs_iget(parent, HFS_CAP_NDIR, dentry);
+ goto done;
+ }
+
+ /* Check for special directories if in a normal directory.
+ Note that cap_dupdir() does an iput(dir). */
+ if (dtype==HFS_CAP_NDIR) {
+ /* Check for ".resource", ".finderinfo" and ".rootinfo" */
+ if (hfs_streq(&cname, DOT_RESOURCE)) {
+ ++entry->count; /* __hfs_iget() eats one */
+ inode = hfs_iget(entry, HFS_CAP_RDIR, dentry);
+ goto done;
+ } else if (hfs_streq(&cname, DOT_FINDERINFO)) {
+ ++entry->count; /* __hfs_iget() eats one */
+ inode = hfs_iget(entry, HFS_CAP_FDIR, dentry);
+ goto done;
+ } else if ((entry->cnid == htonl(HFS_ROOT_CNID)) &&
+ hfs_streq(&cname, DOT_ROOTINFO)) {
+ ++entry->count; /* __hfs_iget() eats one */
+ inode = hfs_iget(entry, HFS_CAP_FNDR, dentry);
+ goto done;
+ }
+ }
+
+ /* Do an hfs_iget() on the mangled name. */
+ hfs_cat_build_key(entry->cnid, &cname, &key);
+ inode = hfs_iget(hfs_cat_get(entry->mdb, &key),
+ HFS_I(dir)->file_type, dentry);
+
+ /* Don't return a resource fork for a directory */
+ if (inode && (dtype == HFS_CAP_RDIR) &&
+ (HFS_I(inode)->entry->type == HFS_CDR_DIR)) {
+ inode = NULL;
+ }
+
+done:
+ dentry->d_op = &hfs_dentry_operations;
+ d_add(dentry, inode);
+ return 0;
+}
+
+/*
+ * cap_readdir()
+ *
+ * This is the readdir() entry in the file_operations structure for
+ * HFS directories in the CAP scheme. The purpose is to enumerate the
+ * entries in a directory, given the inode of the directory and a
+ * (struct file *), the 'f_pos' field of which indicates the location
+ * in the directory. The (struct file *) is updated so that the next
+ * call with the same 'dir' and 'filp' arguments will produce the next
+ * directory entry. The entries are returned in 'dirent', which is
+ * "filled-in" by calling filldir(). This allows the same readdir()
+ * function be used for different dirent formats. We try to read in
+ * as many entries as we can before filldir() refuses to take any more.
+ *
+ * XXX: In the future it may be a good idea to consider not generating
+ * metadata files for covered directories since the data doesn't
+ * correspond to the mounted directory. However this requires an
+ * iget() for every directory which could be considered an excessive
+ * amount of overhead. Since the inode for a mount point is always
+ * in-core this is another argument for a call to get an inode if it
+ * is in-core or NULL if it is not.
+ */
+static int cap_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
+{
+ ino_t type;
+ int skip_dirs;
+ struct hfs_brec brec;
+ struct hfs_cat_entry *entry;
+ struct inode *dir = filp->f_dentry->d_inode;
+
+ if (!dir || !dir->i_sb || !S_ISDIR(dir->i_mode)) {
+ return -EBADF;
+ }
+
+ entry = HFS_I(dir)->entry;
+ type = HFS_ITYPE(dir->i_ino);
+ skip_dirs = (type == HFS_CAP_RDIR);
+
+ if (filp->f_pos == 0) {
+ /* Entry 0 is for "." */
+ if (filldir(dirent, DOT->Name, DOT_LEN, 0, dir->i_ino)) {
+ return 0;
+ }
+ filp->f_pos = 1;
+ }
+
+ if (filp->f_pos == 1) {
+ /* Entry 1 is for ".." */
+ hfs_u32 cnid;
+
+ if (type == HFS_CAP_NDIR) {
+ cnid = hfs_get_nl(entry->key.ParID);
+ } else {
+ cnid = entry->cnid;
+ }
+
+ if (filldir(dirent, DOT_DOT->Name,
+ DOT_DOT_LEN, 1, ntohl(cnid))) {
+ return 0;
+ }
+ filp->f_pos = 2;
+ }
+
+ if (filp->f_pos < (dir->i_size - 3)) {
+ hfs_u32 cnid;
+ hfs_u8 type;
+
+ if (hfs_cat_open(entry, &brec) ||
+ hfs_cat_next(entry, &brec, filp->f_pos - 2, &cnid, &type)) {
+ return 0;
+ }
+ while (filp->f_pos < (dir->i_size - 3)) {
+ if (hfs_cat_next(entry, &brec, 1, &cnid, &type)) {
+ return 0;
+ }
+ if (!skip_dirs || (type != HFS_CDR_DIR)) {
+ ino_t ino;
+ unsigned int len;
+ unsigned char tmp_name[HFS_NAMEMAX];
+
+ ino = ntohl(cnid) | HFS_I(dir)->file_type;
+ len = hfs_namein(dir, tmp_name,
+ &((struct hfs_cat_key *)brec.key)->CName);
+ if (filldir(dirent, tmp_name, len,
+ filp->f_pos, ino)) {
+ hfs_cat_close(entry, &brec);
+ return 0;
+ }
+ }
+ ++filp->f_pos;
+ }
+ hfs_cat_close(entry, &brec);
+ }
+
+ if (filp->f_pos == (dir->i_size - 3)) {
+ if ((entry->cnid == htonl(HFS_ROOT_CNID)) &&
+ (type == HFS_CAP_NDIR)) {
+ /* In root dir last-2 entry is for ".rootinfo" */
+ if (filldir(dirent, DOT_ROOTINFO->Name,
+ DOT_ROOTINFO_LEN, filp->f_pos,
+ ntohl(entry->cnid) | HFS_CAP_FNDR)) {
+ return 0;
+ }
+ }
+ ++filp->f_pos;
+ }
+
+ if (filp->f_pos == (dir->i_size - 2)) {
+ if (type == HFS_CAP_NDIR) {
+ /* In normal dirs last-1 entry is for ".finderinfo" */
+ if (filldir(dirent, DOT_FINDERINFO->Name,
+ DOT_FINDERINFO_LEN, filp->f_pos,
+ ntohl(entry->cnid) | HFS_CAP_FDIR)) {
+ return 0;
+ }
+ }
+ ++filp->f_pos;
+ }
+
+ if (filp->f_pos == (dir->i_size - 1)) {
+ if (type == HFS_CAP_NDIR) {
+ /* In normal dirs last entry is for ".resource" */
+ if (filldir(dirent, DOT_RESOURCE->Name,
+ DOT_RESOURCE_LEN, filp->f_pos,
+ ntohl(entry->cnid) | HFS_CAP_RDIR)) {
+ return 0;
+ }
+ }
+ ++filp->f_pos;
+ }
+
+ return 0;
+}
+
+
+/* due to the dcache caching negative dentries for non-existent files,
+ * we need to drop those entries when a file silently gets created.
+ * as far as i can tell, the calls that need to do this are the file
+ * related calls (create, rename, and mknod). the directory calls
+ * should be immune. the relevant calls in dir.c call drop_dentry
+ * upon successful completion. */
+void hfs_cap_drop_dentry(const ino_t type, struct dentry *dentry)
+{
+ if (type == HFS_CAP_DATA) { /* given name */
+ hfs_drop_special(DOT_FINDERINFO, dentry->d_parent, dentry);
+ hfs_drop_special(DOT_RESOURCE, dentry->d_parent, dentry);
+ } else {
+ struct dentry *de;
+
+ /* look for name */
+ if ((de = hfs_lookup_dentry(dentry->d_name.name,
+ dentry->d_name.len,
+ dentry->d_parent->d_parent))) {
+ if (!de->d_inode)
+ d_drop(de);
+ dput(de);
+ }
+
+ switch (type) {
+ case HFS_CAP_RSRC: /* given .resource/name */
+ /* look for .finderinfo/name */
+ hfs_drop_special(DOT_FINDERINFO, dentry->d_parent->d_parent,
+ dentry);
+ break;
+ case HFS_CAP_FNDR: /* given .finderinfo/name. i don't think this
+ * happens. */
+ /* look for .resource/name */
+ hfs_drop_special(DOT_RESOURCE, dentry->d_parent->d_parent,
+ dentry);
+ break;
+ }
+ }
+}
diff --git a/fs/hfs/dir_dbl.c b/fs/hfs/dir_dbl.c
new file mode 100644
index 000000000..c97247dc9
--- /dev/null
+++ b/fs/hfs/dir_dbl.c
@@ -0,0 +1,464 @@
+/*
+ * linux/fs/hfs/dir_dbl.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the inode_operations and file_operations
+ * structures for HFS directories.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static int dbl_lookup(struct inode *, struct dentry *);
+static int dbl_readdir(struct file *, void *, filldir_t);
+static int dbl_create(struct inode *, struct dentry *, int);
+static int dbl_mkdir(struct inode *, struct dentry *, int);
+static int dbl_mknod(struct inode *, struct dentry *, int, int);
+static int dbl_unlink(struct inode *, struct dentry *);
+static int dbl_rmdir(struct inode *, struct dentry *);
+static int dbl_rename(struct inode *, struct dentry *,
+		      struct inode *, struct dentry *);
+
+/*================ Global variables ================*/
+
+/* lengths of the reserved names defined below */
+#define DOT_LEN			1
+#define DOT_DOT_LEN		2
+#define ROOTINFO_LEN		8
+#define PCNT_ROOTINFO_LEN	9
+
+/* names reserved in every directory: "." and ".." */
+const struct hfs_name hfs_dbl_reserved1[] = {
+	{DOT_LEN,	"."},
+	{DOT_DOT_LEN,	".."},
+	{0,		""},
+};
+
+/* names reserved in the root directory only; "%RootInfo" is served as
+ * the volume's AppleDouble header (see dbl_lookup()). */
+const struct hfs_name hfs_dbl_reserved2[] = {
+	{ROOTINFO_LEN,		"RootInfo"},
+	{PCNT_ROOTINFO_LEN,	"%RootInfo"},
+	{0,			""},
+};
+
+#define DOT		(&hfs_dbl_reserved1[0])
+#define DOT_DOT		(&hfs_dbl_reserved1[1])
+#define ROOTINFO	(&hfs_dbl_reserved2[0])
+#define PCNT_ROOTINFO	(&hfs_dbl_reserved2[1])
+
+/* directory file_operations: only read (returns an error via
+ * hfs_dir_read), readdir and fsync are supplied; the rest use the
+ * VFS defaults. */
+static struct file_operations hfs_dbl_dir_operations = {
+	NULL,			/* lseek - default */
+	hfs_dir_read,		/* read - invalid */
+	NULL,			/* write - bad */
+	dbl_readdir,		/* readdir */
+	NULL,			/* select - default */
+	NULL,			/* ioctl - default */
+	NULL,			/* mmap - none */
+	NULL,			/* no special open code */
+	NULL,			/* no special release code */
+	file_fsync,		/* fsync - default */
+	NULL,			/* fasync - default */
+	NULL,			/* check_media_change - none */
+	NULL			/* revalidate - none */
+};
+
+struct inode_operations hfs_dbl_dir_inode_operations = {
+	&hfs_dbl_dir_operations,/* default directory file-ops */
+	dbl_create,		/* create */
+	dbl_lookup,		/* lookup */
+	NULL,			/* link */
+	dbl_unlink,		/* unlink */
+	NULL,			/* symlink */
+	dbl_mkdir,		/* mkdir */
+	dbl_rmdir,		/* rmdir */
+	dbl_mknod,		/* mknod */
+	dbl_rename,		/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	NULL,			/* readpage */
+	NULL,			/* writepage */
+	NULL,			/* bmap */
+	NULL,			/* truncate */
+	NULL,			/* permission */
+	NULL			/* smap */
+};
+
+
+/*================ File-local functions ================*/
+
+/*
+ * is_hdr()
+ *
+ * Decide whether 'name' (of length 'len') denotes an AppleDouble
+ * header file in 'dir': the name must begin with '%' and the
+ * remainder, after name-mangling, must match an existing catalog
+ * entry of 'dir'.  Returns 1 for a header file, 0 otherwise.
+ */
+static int is_hdr(struct inode *dir, const char *name, int len)
+{
+	struct hfs_cat_entry *dir_entry;
+	struct hfs_cat_entry *victim;
+	struct hfs_name cname;
+	struct hfs_cat_key key;
+
+	/* header files are exactly the '%'-prefixed names */
+	if (name[0] != '%') {
+		return 0;
+	}
+
+	dir_entry = HFS_I(dir)->entry;
+	hfs_nameout(dir, &cname, name+1, len-1);
+	hfs_cat_build_key(dir_entry->cnid, &cname, &key);
+	victim = hfs_cat_get(dir_entry->mdb, &key);
+	if (!victim) {
+		return 0;
+	}
+	/* entry exists: release the reference and report a match */
+	hfs_cat_put(victim);
+	return 1;
+}
+
+/*
+ * dbl_lookup()
+ *
+ * This is the lookup() entry in the inode_operations structure for
+ * HFS directories in the AppleDouble scheme.  The purpose is to
+ * generate the inode corresponding to an entry in a directory, given
+ * the inode for the directory and the name (and its length) of the
+ * entry.
+ *
+ * Always returns 0; a failed lookup registers a negative dentry via
+ * d_add(dentry, NULL).
+ */
+static int dbl_lookup(struct inode * dir, struct dentry *dentry)
+{
+	struct hfs_name cname;
+	struct hfs_cat_entry *entry;
+	struct hfs_cat_key key;
+	struct inode *inode = NULL;
+
+	if (!dir || !S_ISDIR(dir->i_mode)) {
+		goto done;
+	}
+
+	entry = HFS_I(dir)->entry;
+
+	/* Perform name-mangling */
+	hfs_nameout(dir, &cname, dentry->d_name.name, dentry->d_name.len);
+
+	/* Check for "." */
+	if (hfs_streq(&cname, DOT)) {
+		/* this little trick skips the iget and iput */
+		d_add(dentry, dir);
+		return 0;
+	}
+
+	/* Check for "..". */
+	if (hfs_streq(&cname, DOT_DOT)) {
+		inode = hfs_iget(hfs_cat_parent(entry), HFS_DBL_DIR, dentry);
+		goto done;
+	}
+
+	/* Check for "%RootInfo" if in the root directory. */
+	if ((entry->cnid == htonl(HFS_ROOT_CNID)) &&
+	    hfs_streq(&cname, PCNT_ROOTINFO)) {
+		++entry->count; /* __hfs_iget() eats one */
+		inode = hfs_iget(entry, HFS_DBL_HDR, dentry);
+		goto done;
+	}
+
+	/* Do an hfs_iget() on the mangled name. */
+	/* NOTE(review): hfs_cat_get() may return NULL on a miss;
+	 * presumably hfs_iget() maps a NULL entry to a NULL inode --
+	 * confirm in inode.c. */
+	hfs_cat_build_key(entry->cnid, &cname, &key);
+	inode = hfs_iget(hfs_cat_get(entry->mdb, &key), HFS_DBL_NORM, dentry);
+
+	/* Try as a header if not found and first character is '%' */
+	if (!inode && (dentry->d_name.name[0] == '%')) {
+		hfs_nameout(dir, &cname, dentry->d_name.name+1,
+			    dentry->d_name.len-1);
+		hfs_cat_build_key(entry->cnid, &cname, &key);
+		inode = hfs_iget(hfs_cat_get(entry->mdb, &key),
+				 HFS_DBL_HDR, dentry);
+	}
+
+done:
+	/* inode == NULL here caches a negative dentry */
+	dentry->d_op = &hfs_dentry_operations;
+	d_add(dentry, inode);
+	return 0;
+}
+
+/*
+ * dbl_readdir()
+ *
+ * This is the readdir() entry in the file_operations structure for
+ * HFS directories in the AppleDouble scheme.  The purpose is to
+ * enumerate the entries in a directory, given the inode of the
+ * directory and a (struct file *), the 'f_pos' field of which
+ * indicates the location in the directory.  The (struct file *) is
+ * updated so that the next call with the same 'dir' and 'filp'
+ * arguments will produce the next directory entry.  The entries are
+ * returned in 'dirent', which is "filled-in" by calling filldir().
+ * This allows the same readdir() function be used for different
+ * formats.  We try to read in as many entries as we can before
+ * filldir() refuses to take any more.
+ *
+ * XXX: In the future it may be a good idea to consider not generating
+ * metadata files for covered directories since the data doesn't
+ * correspond to the mounted directory.  However this requires an
+ * iget() for every directory which could be considered an excessive
+ * amount of overhead.  Since the inode for a mount point is always
+ * in-core this is another argument for a call to get an inode if it
+ * is in-core or NULL if it is not.
+ */
+static int dbl_readdir(struct file * filp,
+		       void * dirent, filldir_t filldir)
+{
+	struct hfs_brec brec;
+	struct hfs_cat_entry *entry;
+	struct inode *dir = filp->f_dentry->d_inode;
+
+	if (!dir || !dir->i_sb || !S_ISDIR(dir->i_mode)) {
+		return -EBADF;
+	}
+
+	entry = HFS_I(dir)->entry;
+
+	if (filp->f_pos == 0) {
+		/* Entry 0 is for "." */
+		if (filldir(dirent, DOT->Name, DOT_LEN, 0, dir->i_ino)) {
+			return 0;
+		}
+		filp->f_pos = 1;
+	}
+
+	if (filp->f_pos == 1) {
+		/* Entry 1 is for ".." */
+		if (filldir(dirent, DOT_DOT->Name, DOT_DOT_LEN, 1,
+			    hfs_get_hl(entry->key.ParID))) {
+			return 0;
+		}
+		filp->f_pos = 2;
+	}
+
+	/* Each catalog record is emitted twice: even f_pos values
+	 * yield the data file, odd values the matching "%name"
+	 * header file. */
+	if (filp->f_pos < (dir->i_size - 1)) {
+		hfs_u32 cnid;
+		hfs_u8 type;
+
+		/* NOTE(review): if hfs_cat_open() succeeded but
+		 * hfs_cat_next() failed we return without calling
+		 * hfs_cat_close() -- presumably hfs_cat_next()
+		 * releases the record on failure; confirm in
+		 * catalog.c. */
+		if (hfs_cat_open(entry, &brec) ||
+		    hfs_cat_next(entry, &brec, (filp->f_pos - 1) >> 1,
+				 &cnid, &type)) {
+			return 0;
+		}
+
+		while (filp->f_pos < (dir->i_size - 1)) {
+			unsigned char tmp_name[HFS_NAMEMAX + 1];
+			ino_t ino;
+			int is_hdr = (filp->f_pos & 1);
+			unsigned int len;
+
+			if (is_hdr) {
+				/* header alias of the record just emitted */
+				ino = ntohl(cnid) | HFS_DBL_HDR;
+				tmp_name[0] = '%';
+				len = 1 + hfs_namein(dir, tmp_name + 1,
+				    &((struct hfs_cat_key *)brec.key)->CName);
+			} else {
+				/* advance to the next catalog record */
+				if (hfs_cat_next(entry, &brec, 1,
+						 &cnid, &type)) {
+					return 0;
+				}
+				ino = ntohl(cnid);
+				len = hfs_namein(dir, tmp_name,
+				    &((struct hfs_cat_key *)brec.key)->CName);
+			}
+
+			if (filldir(dirent, tmp_name, len, filp->f_pos, ino)) {
+				hfs_cat_close(entry, &brec);
+				return 0;
+			}
+			++filp->f_pos;
+		}
+		hfs_cat_close(entry, &brec);
+	}
+
+	if (filp->f_pos == (dir->i_size - 1)) {
+		if (entry->cnid == htonl(HFS_ROOT_CNID)) {
+			/* In root dir last entry is for "%RootInfo" */
+			if (filldir(dirent, PCNT_ROOTINFO->Name,
+				    PCNT_ROOTINFO_LEN, filp->f_pos,
+				    ntohl(entry->cnid) | HFS_DBL_HDR)) {
+				return 0;
+			}
+		}
+		++filp->f_pos;
+	}
+
+	return 0;
+}
+
+/*
+ * dbl_create()
+ *
+ * create() entry for AppleDouble directories: a new file may not
+ * shadow an existing "%name" header file; otherwise defer to the
+ * generic hfs_create().
+ */
+static int dbl_create(struct inode * dir, struct dentry *dentry,
+		      int mode)
+{
+	if (is_hdr(dir, dentry->d_name.name, dentry->d_name.len)) {
+		return -EEXIST;
+	}
+	return hfs_create(dir, dentry, mode);
+}
+
+/*
+ * dbl_mkdir()
+ *
+ * mkdir() entry for AppleDouble directories: a new subdirectory may
+ * not shadow an existing "%name" header file; otherwise defer to the
+ * generic hfs_mkdir().
+ */
+static int dbl_mkdir(struct inode * parent, struct dentry *dentry,
+		     int mode)
+{
+	if (is_hdr(parent, dentry->d_name.name, dentry->d_name.len)) {
+		return -EEXIST;
+	}
+	return hfs_mkdir(parent, dentry, mode);
+}
+
+/*
+ * dbl_mknod()
+ *
+ * mknod() entry for AppleDouble directories: a new entry may not
+ * shadow an existing "%name" header file; otherwise defer to the
+ * generic hfs_mknod().
+ */
+static int dbl_mknod(struct inode *dir, struct dentry *dentry,
+		     int mode, int rdev)
+{
+	if (is_hdr(dir, dentry->d_name.name, dentry->d_name.len)) {
+		return -EEXIST;
+	}
+	return hfs_mknod(dir, dentry, mode, rdev);
+}
+
+/*
+ * dbl_unlink()
+ *
+ * unlink() entry for AppleDouble directories: defer to hfs_unlink(),
+ * but if the name was "not found" yet matches a "%name" header file
+ * report -EPERM, since header files may not be removed here.
+ */
+static int dbl_unlink(struct inode * dir, struct dentry *dentry)
+{
+	int error = hfs_unlink(dir, dentry);
+
+	if ((error == -ENOENT) &&
+	    is_hdr(dir, dentry->d_name.name, dentry->d_name.len)) {
+		error = -EPERM;
+	}
+	return error;
+}
+
+/*
+ * dbl_rmdir()
+ *
+ * rmdir() entry for AppleDouble directories: defer to hfs_rmdir(),
+ * but if the name was "not found" yet matches a "%name" header file
+ * report -ENOTDIR, since a header file is a file, not a directory.
+ */
+static int dbl_rmdir(struct inode * parent, struct dentry *dentry)
+{
+	int error = hfs_rmdir(parent, dentry);
+
+	if ((error == -ENOENT) &&
+	    is_hdr(parent, dentry->d_name.name, dentry->d_name.len)) {
+		error = -ENOTDIR;
+	}
+	return error;
+}
+
+/*
+ * dbl_rename()
+ *
+ * rename() entry for AppleDouble directories.  A "%name" header file
+ * may be neither the destination nor the source of a rename: the
+ * destination is rejected up front, and a source that hfs_rename()
+ * reports as missing but that matches a header file yields -EPERM.
+ *
+ * XXX: how do we handle must_be_dir?
+ */
+static int dbl_rename(struct inode *old_dir, struct dentry *old_dentry,
+		      struct inode *new_dir, struct dentry *new_dentry)
+{
+	int error;
+
+	/* never allow a header file as the destination */
+	if (is_hdr(new_dir, new_dentry->d_name.name,
+		   new_dentry->d_name.len)) {
+		return -EPERM;
+	}
+
+	error = hfs_rename(old_dir, old_dentry, new_dir, new_dentry);
+	if ((error == -ENOENT) /*&& !must_be_dir*/ &&
+	    is_hdr(old_dir, old_dentry->d_name.name,
+		   old_dentry->d_name.len)) {
+		error = -EPERM;
+	}
+	return error;
+}
+
+
+/* due to the dcache caching negative dentries for non-existent files,
+ * we need to drop those entries when a file silently gets created.
+ * as far as i can tell, the calls that need to do this are the file
+ * related calls (create, rename, and mknod). the directory calls
+ * should be immune. the relevant calls in dir.c call drop_dentry
+ * upon successful completion. this allocates an array for %name
+ * on the first attempt to access it. */
+/*
+ * hfs_dbl_drop_dentry()
+ *
+ * Given the itype of a freshly created name, drop the cached negative
+ * dentry for its AppleDouble counterpart ("%name" for a data file,
+ * the plain name for a header file).
+ */
+void hfs_dbl_drop_dentry(const ino_t type, struct dentry *dentry)
+{
+	unsigned char tmp_name[HFS_NAMEMAX + 1];
+	struct dentry *de = NULL;
+
+	switch (type) {
+	case HFS_DBL_HDR:
+		/* given %name, look for name. i don't think this happens. */
+		de = hfs_lookup_dentry(dentry->d_name.name + 1, dentry->d_name.len - 1,
+				       dentry->d_parent);
+		break;
+	case HFS_DBL_DATA:
+		/* given name, look for %name */
+		tmp_name[0] = '%';
+		/* NOTE(review): strncpy() copies at most HFS_NAMELEN-1
+		 * bytes, yet the lookup below uses d_name.len + 1;
+		 * presumably valid HFS names never exceed that bound --
+		 * confirm, or the lookup reads uninitialized bytes. */
+		strncpy(tmp_name + 1, dentry->d_name.name, HFS_NAMELEN - 1);
+		de = hfs_lookup_dentry(tmp_name, dentry->d_name.len + 1,
+				       dentry->d_parent);
+	}
+
+	if (de) {
+		/* drop only negative dentries */
+		if (!de->d_inode)
+			d_drop(de);
+		dput(de);
+	}
+}
diff --git a/fs/hfs/dir_nat.c b/fs/hfs/dir_nat.c
new file mode 100644
index 000000000..62c9ea2cb
--- /dev/null
+++ b/fs/hfs/dir_nat.c
@@ -0,0 +1,487 @@
+/*
+ * linux/fs/hfs/dir_nat.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the inode_operations and file_operations
+ * structures for HFS directories.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * The source code distributions of Netatalk, versions 1.3.3b2 and
+ * 1.4b2, were used as a specification of the location and format of
+ * files used by Netatalk's afpd.  No code from Netatalk appears in
+ * hfs_fs.  hfs_fs is not a work ``derived'' from Netatalk in the
+ * sense of intellectual property law.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static int nat_lookup(struct inode *, struct dentry *);
+static int nat_readdir(struct file *, void *, filldir_t);
+static int nat_rmdir(struct inode *, struct dentry *);
+static int nat_hdr_unlink(struct inode *, struct dentry *);
+static int nat_hdr_rename(struct inode *, struct dentry *,
+			  struct inode *, struct dentry *);
+
+/*================ Global variables ================*/
+
+/* lengths of the reserved names defined below */
+#define DOT_LEN			1
+#define DOT_DOT_LEN		2
+#define DOT_APPLEDOUBLE_LEN	12
+#define DOT_PARENT_LEN		7
+
+/* names reserved in the Netatalk scheme: ".AppleDouble" holds the
+ * metadata files and ".Parent" is a directory's own header file. */
+const struct hfs_name hfs_nat_reserved1[] = {
+	{DOT_LEN,		"."},
+	{DOT_DOT_LEN,		".."},
+	{DOT_APPLEDOUBLE_LEN,	".AppleDouble"},
+	{DOT_PARENT_LEN,	".Parent"},
+	{0,			""},
+};
+
+const struct hfs_name hfs_nat_reserved2[] = {
+	{0, ""},
+};
+
+#define DOT		(&hfs_nat_reserved1[0])
+#define DOT_DOT		(&hfs_nat_reserved1[1])
+#define DOT_APPLEDOUBLE	(&hfs_nat_reserved1[2])
+#define DOT_PARENT	(&hfs_nat_reserved1[3])
+
+/* NOTE(review): this table has one more slot ("lock") than
+ * hfs_dbl_dir_operations in dir_dbl.c -- confirm both match the
+ * struct file_operations layout of the target kernel. */
+static struct file_operations hfs_nat_dir_operations = {
+	NULL,			/* lseek - default */
+	hfs_dir_read,		/* read - invalid */
+	NULL,			/* write - bad */
+	nat_readdir,		/* readdir */
+	NULL,			/* select - default */
+	NULL,			/* ioctl - default */
+	NULL,			/* mmap - none */
+	NULL,			/* no special open code */
+	NULL,			/* no special release code */
+	file_fsync,		/* fsync - default */
+	NULL,			/* fasync - default */
+	NULL,			/* check_media_change - none */
+	NULL,			/* revalidate - none */
+	NULL			/* lock - none */
+};
+
+struct inode_operations hfs_nat_ndir_inode_operations = {
+	&hfs_nat_dir_operations,/* default directory file-ops */
+	hfs_create,		/* create */
+	nat_lookup,		/* lookup */
+	NULL,			/* link */
+	hfs_unlink,		/* unlink */
+	NULL,			/* symlink */
+	hfs_mkdir,		/* mkdir */
+	nat_rmdir,		/* rmdir */
+	hfs_mknod,		/* mknod */
+	hfs_rename,		/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	NULL,			/* readpage */
+	NULL,			/* writepage */
+	NULL,			/* bmap */
+	NULL,			/* truncate */
+	NULL,			/* permission */
+	NULL,			/* smap */
+	NULL,			/* updatepage */
+	NULL			/* revalidate */
+};
+
+/* ".AppleDouble" directories: no link/mkdir/rmdir/mknod; unlink and
+ * rename are intercepted so header files are never really removed. */
+struct inode_operations hfs_nat_hdir_inode_operations = {
+	&hfs_nat_dir_operations,/* default directory file-ops */
+	hfs_create,		/* create */
+	nat_lookup,		/* lookup */
+	NULL,			/* link */
+	nat_hdr_unlink,		/* unlink */
+	NULL,			/* symlink */
+	NULL,			/* mkdir */
+	NULL,			/* rmdir */
+	NULL,			/* mknod */
+	nat_hdr_rename,		/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	NULL,			/* readpage */
+	NULL,			/* writepage */
+	NULL,			/* bmap */
+	NULL,			/* truncate */
+	NULL,			/* permission */
+	NULL,			/* smap */
+	NULL,			/* updatepage */
+	NULL			/* revalidate */
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * nat_lookup()
+ *
+ * This is the lookup() entry in the inode_operations structure for
+ * HFS directories in the Netatalk scheme.  The purpose is to generate
+ * the inode corresponding to an entry in a directory, given the inode
+ * for the directory and the name (and its length) of the entry.
+ *
+ * Always returns 0; a failed lookup registers a negative dentry via
+ * d_add(dentry, NULL).
+ */
+static int nat_lookup(struct inode * dir, struct dentry *dentry)
+{
+	ino_t dtype;
+	struct hfs_name cname;
+	struct hfs_cat_entry *entry;
+	struct hfs_cat_key key;
+	struct inode *inode = NULL;
+
+	if (!dir || !S_ISDIR(dir->i_mode)) {
+		goto done;
+	}
+
+	entry = HFS_I(dir)->entry;
+	/* dtype tells whether this is a normal dir or a .AppleDouble dir */
+	dtype = HFS_ITYPE(dir->i_ino);
+
+	/* Perform name-mangling */
+	hfs_nameout(dir, &cname, dentry->d_name.name, dentry->d_name.len);
+
+	/* Check for "." */
+	if (hfs_streq(&cname, DOT)) {
+		/* this little trick skips the iget and iput */
+		d_add(dentry, dir);
+		return 0;
+	}
+
+	/* Check for "..". */
+	if (hfs_streq(&cname, DOT_DOT)) {
+		struct hfs_cat_entry *parent;
+
+		if (dtype != HFS_NAT_NDIR) {
+			/* Case for ".." in ".AppleDouble" */
+			parent = entry;
+			++entry->count; /* __hfs_iget() eats one */
+		} else {
+			/* Case for ".." in a normal directory */
+			parent = hfs_cat_parent(entry);
+		}
+		inode = hfs_iget(parent, HFS_NAT_NDIR, dentry);
+		goto done;
+	}
+
+	/* Check for ".AppleDouble" if in a normal directory,
+	   and for ".Parent" in ".AppleDouble". */
+	if (dtype==HFS_NAT_NDIR) {
+		/* Check for ".AppleDouble" */
+		if (hfs_streq(&cname, DOT_APPLEDOUBLE)) {
+			++entry->count; /* __hfs_iget() eats one */
+			inode = hfs_iget(entry, HFS_NAT_HDIR, dentry);
+			goto done;
+		}
+	} else if (dtype==HFS_NAT_HDIR) {
+		if (hfs_streq(&cname, DOT_PARENT)) {
+			++entry->count; /* __hfs_iget() eats one */
+			inode = hfs_iget(entry, HFS_NAT_HDR, dentry);
+			goto done;
+		}
+	}
+
+	/* Do an hfs_iget() on the mangled name. */
+	/* NOTE(review): hfs_cat_get() may return NULL on a miss;
+	 * presumably hfs_iget() maps a NULL entry to a NULL inode --
+	 * confirm in inode.c. */
+	hfs_cat_build_key(entry->cnid, &cname, &key);
+	inode = hfs_iget(hfs_cat_get(entry->mdb, &key),
+			 HFS_I(dir)->file_type, dentry);
+
+	/* Don't return a header file for a directory other than .Parent */
+	if (inode && (dtype == HFS_NAT_HDIR) &&
+	    (HFS_I(inode)->entry != entry) &&
+	    (HFS_I(inode)->entry->type == HFS_CDR_DIR)) {
+		iput(inode);
+		inode = NULL;
+	}
+
+done:
+	dentry->d_op = &hfs_dentry_operations;
+	d_add(dentry, inode);
+	return 0;
+}
+
+/*
+ * nat_readdir()
+ *
+ * This is the readdir() entry in the file_operations structure for
+ * HFS directories in the netatalk scheme.  The purpose is to
+ * enumerate the entries in a directory, given the inode of the
+ * directory and a struct file which indicates the location in the
+ * directory.  The struct file is updated so that the next call with
+ * the same dir and filp will produce the next directory entry.  The
+ * entries are returned in dirent, which is "filled-in" by calling
+ * filldir().  This allows the same readdir() function be used for
+ * different dirent formats.  We try to read in as many entries as we
+ * can before filldir() refuses to take any more.
+ *
+ * Note that the Netatalk format doesn't have the problem with
+ * metadata for covered directories that exists in the other formats,
+ * since the metadata is contained within the directory.
+ */
+static int nat_readdir(struct file * filp,
+		       void * dirent, filldir_t filldir)
+{
+	ino_t type;
+	int skip_dirs;
+	struct hfs_brec brec;
+	struct hfs_cat_entry *entry;
+	struct inode *dir = filp->f_dentry->d_inode;
+
+	if (!dir || !dir->i_sb || !S_ISDIR(dir->i_mode)) {
+		return -EBADF;
+	}
+
+	entry = HFS_I(dir)->entry;
+	type = HFS_ITYPE(dir->i_ino);
+	/* inside ".AppleDouble" subdirectories are suppressed */
+	skip_dirs = (type == HFS_NAT_HDIR);
+
+	if (filp->f_pos == 0) {
+		/* Entry 0 is for "." */
+		if (filldir(dirent, DOT->Name, DOT_LEN, 0, dir->i_ino)) {
+			return 0;
+		}
+		filp->f_pos = 1;
+	}
+
+	if (filp->f_pos == 1) {
+		/* Entry 1 is for ".." */
+		hfs_u32 cnid;
+
+		/* ".." of ".AppleDouble" is the directory itself */
+		if (type == HFS_NAT_NDIR) {
+			cnid = hfs_get_nl(entry->key.ParID);
+		} else {
+			cnid = entry->cnid;
+		}
+
+		if (filldir(dirent, DOT_DOT->Name,
+			    DOT_DOT_LEN, 1, ntohl(cnid))) {
+			return 0;
+		}
+		filp->f_pos = 2;
+	}
+
+	if (filp->f_pos < (dir->i_size - 1)) {
+		hfs_u32 cnid;
+		hfs_u8 type;
+
+		/* NOTE(review): if hfs_cat_open() succeeded but
+		 * hfs_cat_next() failed we return without calling
+		 * hfs_cat_close() -- presumably hfs_cat_next()
+		 * releases the record on failure; confirm in
+		 * catalog.c. */
+		if (hfs_cat_open(entry, &brec) ||
+		    hfs_cat_next(entry, &brec, filp->f_pos - 2, &cnid, &type)) {
+			return 0;
+		}
+		while (filp->f_pos < (dir->i_size - 1)) {
+			if (hfs_cat_next(entry, &brec, 1, &cnid, &type)) {
+				return 0;
+			}
+			if (!skip_dirs || (type != HFS_CDR_DIR)) {
+				ino_t ino;
+				unsigned int len;
+				unsigned char tmp_name[HFS_NAMEMAX];
+
+				ino = ntohl(cnid) | HFS_I(dir)->file_type;
+				len = hfs_namein(dir, tmp_name,
+				    &((struct hfs_cat_key *)brec.key)->CName);
+				if (filldir(dirent, tmp_name, len,
+					    filp->f_pos, ino)) {
+					hfs_cat_close(entry, &brec);
+					return 0;
+				}
+			}
+			++filp->f_pos;
+		}
+		hfs_cat_close(entry, &brec);
+	}
+
+	if (filp->f_pos == (dir->i_size - 1)) {
+		if (type == HFS_NAT_NDIR) {
+			/* In normal dirs entry 2 is for ".AppleDouble" */
+			if (filldir(dirent, DOT_APPLEDOUBLE->Name,
+				    DOT_APPLEDOUBLE_LEN, filp->f_pos,
+				    ntohl(entry->cnid) | HFS_NAT_HDIR)) {
+				return 0;
+			}
+		} else if (type == HFS_NAT_HDIR) {
+			/* In .AppleDouble entry 2 is for ".Parent" */
+			if (filldir(dirent, DOT_PARENT->Name,
+				    DOT_PARENT_LEN, filp->f_pos,
+				    ntohl(entry->cnid) | HFS_NAT_HDR)) {
+				return 0;
+			}
+		}
+		++filp->f_pos;
+	}
+
+	return 0;
+}
+
+/* The dcache caches negative dentries for non-existent files, so when
+ * a file is silently created (by create, rename, or mknod -- the
+ * directory calls are immune) the stale negative entry for its
+ * Netatalk alias must be dropped.  Called from dir.c on successful
+ * completion. */
+void hfs_nat_drop_dentry(const ino_t type, struct dentry *dentry)
+{
+	if (type == HFS_NAT_HDR) {
+		/* given .AppleDouble/name: drop plain "name" in the
+		 * real parent directory */
+		struct dentry *de;
+
+		de = hfs_lookup_dentry(dentry->d_name.name,
+				       dentry->d_name.len,
+				       dentry->d_parent->d_parent);
+		if (de) {
+			if (!de->d_inode)
+				d_drop(de);
+			dput(de);
+		}
+	} else if (type == HFS_NAT_DATA) {
+		/* given name: drop ".AppleDouble/name" */
+		hfs_drop_special(DOT_APPLEDOUBLE, dentry->d_parent, dentry);
+	}
+}
+
+/*
+ * nat_rmdir()
+ *
+ * This is the rmdir() entry in the inode_operations structure for
+ * Netatalk directories.  The purpose is to delete an existing
+ * directory, given the inode for the parent directory and the name
+ * (and its length) of the existing directory.
+ *
+ * We handle .AppleDouble and call hfs_rmdir() for all other cases.
+ */
+static int nat_rmdir(struct inode *parent, struct dentry *dentry)
+{
+	struct hfs_cat_entry *entry = HFS_I(parent)->entry;
+	struct hfs_name cname;
+	int error;
+
+	hfs_nameout(parent, &cname, dentry->d_name.name, dentry->d_name.len);
+	/* ".AppleDouble" is virtual and never really removed; in AFPD
+	 * compatibility mode success is faked when the real directory
+	 * holds no files or subdirectories. */
+	if (hfs_streq(&cname, DOT_APPLEDOUBLE)) {
+		if (!HFS_SB(parent->i_sb)->s_afpd) {
+			/* Not in AFPD compatibility mode */
+			error = -EPERM;
+		} else if (entry->u.dir.files || entry->u.dir.dirs) {
+			/* AFPD compatible, but the directory is not empty */
+			error = -ENOTEMPTY;
+		} else {
+			/* AFPD compatible, so pretend to succeed */
+			error = 0;
+		}
+	} else {
+		error = hfs_rmdir(parent, dentry);
+	}
+	return error;
+}
+
+/*
+ * nat_hdr_unlink()
+ *
+ * unlink() entry for Netatalk ".AppleDouble" directories.  The header
+ * file is never actually removed:
+ *   - outside AFPD compatibility mode the call fails with -EPERM;
+ *   - in AFPD mode we report success for ".Parent" and for any name
+ *     with a matching catalog entry, and -ENOENT otherwise.
+ */
+static int nat_hdr_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct hfs_cat_entry *entry = HFS_I(dir)->entry;
+	struct hfs_cat_entry *victim;
+	struct hfs_name cname;
+	struct hfs_cat_key key;
+
+	if (!HFS_SB(dir->i_sb)->s_afpd) {
+		/* Not in AFPD compatibility mode */
+		return -EPERM;
+	}
+
+	hfs_nameout(dir, &cname, dentry->d_name.name,
+		    dentry->d_name.len);
+	if (hfs_streq(&cname, DOT_PARENT)) {
+		/* pretend to succeed without a lookup */
+		return 0;
+	}
+
+	hfs_cat_build_key(entry->cnid, &cname, &key);
+	victim = hfs_cat_get(entry->mdb, &key);
+	if (!victim) {
+		return -ENOENT;
+	}
+
+	/* the entry exists: release it and pretend to succeed */
+	hfs_cat_put(victim);
+	return 0;
+}
+
+/*
+ * nat_hdr_rename()
+ *
+ * This is the rename() entry in the inode_operations structure for
+ * Netatalk header directories.  The purpose is to rename an existing
+ * file given the inode for the current directory and the name
+ * (and its length) of the existing file and the inode for the new
+ * directory and the name (and its length) of the new file/directory.
+ *
+ * WE NEVER MOVE ANYTHING.
+ * In non-afpd-compatible mode:
+ *   We return -EPERM.
+ * In afpd-compatible mode:
+ *   If the source header doesn't exist, we return -ENOENT.
+ *   If the destination is not a header directory we return -EPERM.
+ *   We return success if the destination is also a header directory
+ *    and the header exists or is ".Parent".
+ */
+static int nat_hdr_rename(struct inode *old_dir, struct dentry *old_dentry,
+			  struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct hfs_cat_entry *entry = HFS_I(old_dir)->entry;
+	int error = 0;
+
+	if (!HFS_SB(old_dir->i_sb)->s_afpd) {
+		/* Not in AFPD compatibility mode */
+		error = -EPERM;
+	} else {
+		struct hfs_name cname;
+
+		hfs_nameout(old_dir, &cname, old_dentry->d_name.name,
+			    old_dentry->d_name.len);
+		if (!hfs_streq(&cname, DOT_PARENT)) {
+			struct hfs_cat_entry *victim;
+			struct hfs_cat_key key;
+
+			hfs_cat_build_key(entry->cnid, &cname, &key);
+			victim = hfs_cat_get(entry->mdb, &key);
+
+			if (victim) {
+				/* pretend to succeed */
+				hfs_cat_put(victim);
+			} else {
+				error = -ENOENT;
+			}
+		}
+
+		/* NOTE(review): this check runs after the source
+		 * lookup, so a missing source reports -ENOENT even
+		 * when the destination would be rejected. */
+		if (!error && (HFS_ITYPE(new_dir->i_ino) != HFS_NAT_HDIR)) {
+			error = -EPERM;
+		}
+	}
+	return error;
+}
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
new file mode 100644
index 000000000..03d6b0acd
--- /dev/null
+++ b/fs/hfs/extent.c
@@ -0,0 +1,808 @@
+/*
+ * linux/fs/hfs/extent.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the functions related to the extents B-tree.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+
+/*================ File-local data type ================*/
+
+/* An extent record on disk*/
+/* Three (start, length) runs of allocation blocks, each field a
+   big-endian 16-bit quantity as stored in the extents B-tree and in
+   the catalog file records. */
+struct hfs_raw_extent {
+	hfs_word_t block1;	/* first allocation block of run 1 */
+	hfs_word_t length1;	/* number of blocks in run 1 (0 if unused) */
+	hfs_word_t block2;	/* first allocation block of run 2 */
+	hfs_word_t length2;	/* number of blocks in run 2 (0 if unused) */
+	hfs_word_t block3;	/* first allocation block of run 3 */
+	hfs_word_t length3;	/* number of blocks in run 3 (0 if unused) */
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * build_key
+ */
+static inline void build_key(struct hfs_ext_key *key,
+			     const struct hfs_fork *fork, hfs_u16 block)
+{
+	/* Fill in an extents-tree search key: fixed key length, fork
+	   type (data or resource), the file's CNID and the allocation
+	   block offset within the file, the last two in big-endian
+	   disk byte order. */
+	key->KeyLen = 7;
+	key->FkType = fork->fork;
+	hfs_put_nl(fork->entry->cnid, key->FNum);
+	hfs_put_hs(block, key->FABN);
+}
+
+
+/*
+ * lock_bitmap()
+ *
+ * Get an exclusive lock on the B-tree bitmap.
+ */
+static inline void lock_bitmap(struct hfs_mdb *mdb) {
+	/* Sleep until the flag is clear, then claim it.  The test and
+	   the assignment are not separated by anything that can block,
+	   which is what makes this safe on the non-preemptive kernels
+	   this code targets. */
+	while (mdb->bitmap_lock) {
+		hfs_sleep_on(&mdb->bitmap_wait);
+	}
+	mdb->bitmap_lock = 1;
+}
+
+/*
+ * unlock_bitmap()
+ *
+ * Relinquish an exclusive lock on the B-tree bitmap.
+ */
+static inline void unlock_bitmap(struct hfs_mdb *mdb) {
+	mdb->bitmap_lock = 0;
+	/* Wake anyone sleeping in lock_bitmap(). */
+	hfs_wake_up(&mdb->bitmap_wait);
+}
+
+/*
+ * dump_ext()
+ *
+ * prints the content of a extent for debugging purposes.
+ */
+#if defined(DEBUG_EXTENTS) || defined(DEBUG_ALL)
+static void dump_ext(const char *msg, const struct hfs_extent *e) {
+	if (e) {
+		/* Print the three runs as file-block ranges:
+		   [start, start+len0-1], [.., ..+len1-1], [.., end]. */
+		hfs_warn("%s (%d-%d) (%d-%d) (%d-%d)\n", msg,
+			 e->start,
+			 e->start + e->length[0] - 1,
+			 e->start + e->length[0],
+			 e->start + e->length[0] + e->length[1] - 1,
+			 e->start + e->length[0] + e->length[1],
+			 e->end);
+	} else {
+		hfs_warn("%s NULL\n", msg);
+	}
+}
+#else
+/* Compiled out unless extent debugging is enabled. */
+#define dump_ext(A,B) {}
+#endif
+
+/*
+ * read_extent()
+ *
+ * Initializes a (struct hfs_extent) from a (struct hfs_raw_extent) and
+ * the number of the starting block for the extent.
+ *
+ * Note that the callers must check that to,from != NULL
+ */
+static void read_extent(struct hfs_extent *to,
+			const struct hfs_raw_extent *from,
+			hfs_u16 start)
+{
+	/* Decode the three big-endian on-disk runs into host order. */
+	to->block[0]  = hfs_get_hs(from->block1);
+	to->length[0] = hfs_get_hs(from->length1);
+	to->block[1]  = hfs_get_hs(from->block2);
+	to->length[1] = hfs_get_hs(from->length2);
+	to->block[2]  = hfs_get_hs(from->block3);
+	to->length[2] = hfs_get_hs(from->length3);
+
+	/* The record covers file blocks [start, end]; an all-empty
+	   record yields end == start - 1. */
+	to->start = start;
+	to->end = start - 1 + to->length[0] + to->length[1] + to->length[2];
+
+	/* Fresh in-core record: unlinked and unreferenced. */
+	to->next = NULL;
+	to->prev = NULL;
+	to->count = 0;
+}
+
+/*
+ * write_extent()
+ *
+ * Initializes a (struct hfs_raw_extent) from a (struct hfs_extent).
+ *
+ * Note that the callers must check that to,from != NULL
+ */
+static void write_extent(struct hfs_raw_extent *to,
+			 const struct hfs_extent *from)
+{
+	/* Encode the three runs back into big-endian disk order.
+	   The stores are independent, so grouping blocks before
+	   lengths is equivalent to interleaving them. */
+	hfs_put_hs(from->block[0], to->block1);
+	hfs_put_hs(from->block[1], to->block2);
+	hfs_put_hs(from->block[2], to->block3);
+	hfs_put_hs(from->length[0], to->length1);
+	hfs_put_hs(from->length[1], to->length2);
+	hfs_put_hs(from->length[2], to->length3);
+}
+
+/*
+ * decode_extent()
+ *
+ * Given an extent record and allocation block offset into the file,
+ * return the number of the corresponding allocation block on disk,
+ * or -1 if the desired block is not mapped by the given extent.
+ *
+ * Note that callers must check that extent != NULL
+ */
+static int decode_extent(const struct hfs_extent * extent, int block)
+{
+	int offset;
+
+	/* Reject a missing record, a block outside [start, end], and
+	   the degenerate empty record (end wrapped to start - 1). */
+	if (!extent || (block < extent->start) || (block > extent->end) ||
+	    (extent->end == (hfs_u16)(extent->start - 1))) {
+		return -1;
+	}
+
+	/* Walk the three runs, keeping the offset relative to the run
+	   currently being examined. */
+	offset = block - extent->start;
+	if (offset < extent->length[0]) {
+		return extent->block[0] + offset;
+	}
+	offset -= extent->length[0];
+	if (offset < extent->length[1]) {
+		return extent->block[1] + offset;
+	}
+	offset -= extent->length[1];
+	return extent->block[2] + offset;
+}
+
+/*
+ * relse_ext()
+ *
+ * Reduce the reference count of an in-core extent record by one,
+ * removing it from memory if the count falls to zero.
+ */
+static void relse_ext(struct hfs_extent *ext)
+{
+	/* Keep the record if it is still referenced, or if it is the
+	   'first' extent embedded in the fork (start == 0), which is
+	   never freed separately. */
+	if (--ext->count || !ext->start) {
+		return;
+	}
+	/* Unlink from the fork's extent list.  ext->prev is never NULL
+	   here because the embedded first extent heads every list. */
+	ext->prev->next = ext->next;
+	if (ext->next) {
+		ext->next->prev = ext->prev;
+	}
+	HFS_DELETE(ext);
+}
+
+/*
+ * set_cache()
+ *
+ * Changes the 'cache' field of the fork.
+ */
+static inline void set_cache(struct hfs_fork *fork, struct hfs_extent *ext)
+{
+	struct hfs_extent *tmp = fork->cache;
+
+	/* Take the new reference before dropping the old one so that
+	   re-caching the same extent cannot free it. */
+	++ext->count;
+	fork->cache = ext;
+	relse_ext(tmp);
+}
+
+/*
+ * find_ext()
+ *
+ * Given a pointer to a (struct hfs_file) and an allocation block
+ * number in the file, find the extent record containing that block.
+ * Returns a pointer to the extent record on success or NULL on failure.
+ * The 'cache' field of 'fil' also points to the extent so it has a
+ * reference count of at least 2.
+ *
+ * Callers must check that fil != NULL
+ */
+static struct hfs_extent * find_ext(struct hfs_fork *fork, int alloc_block)
+{
+	struct hfs_cat_entry *entry = fork->entry;
+	struct hfs_btree *tr= entry->mdb->ext_tree;
+	struct hfs_ext_key target, *key;
+	struct hfs_brec brec;
+	struct hfs_extent *ext, *ptr;
+	int tmp;
+
+	/* A negative block number means "give me the first extent". */
+	if (alloc_block < 0) {
+		ext = &fork->first;
+		goto found;
+	}
+
+	/* First search the extents already in memory, starting from
+	   the cached one when it could precede the target block. */
+	ext = fork->cache;
+	if (!ext || (alloc_block < ext->start)) {
+		ext = &fork->first;
+	}
+	while (ext->next && (alloc_block > ext->end)) {
+		ext = ext->next;
+	}
+	if ((alloc_block <= ext->end) && (alloc_block >= ext->start)) {
+		goto found;
+	}
+
+	/* time to read more extents */
+	if (!HFS_NEW(ext)) {
+		goto bail3;
+	}
+
+	build_key(&target, fork, alloc_block);
+
+	/* Locate the record with the largest key <= target. */
+	tmp = hfs_bfind(&brec, tr, HFS_BKEY(&target), HFS_BFIND_READ_LE);
+	if (tmp < 0) {
+		goto bail2;
+	}
+
+	/* Make sure the record belongs to this file and fork. */
+	key = (struct hfs_ext_key *)brec.key;
+	if ((hfs_get_nl(key->FNum) != hfs_get_nl(target.FNum)) ||
+	    (key->FkType != fork->fork)) {
+		goto bail1;
+	}
+
+	read_extent(ext, brec.data, hfs_get_hs(key->FABN));
+	hfs_brec_relse(&brec, NULL);
+
+	if ((alloc_block > ext->end) || (alloc_block < ext->start)) {
+		/* The record we read does not actually map the
+		   requested block.  BUGFIX: this test used '&&',
+		   which can never be true since start <= end, so an
+		   unmapped block used to fall through and return an
+		   extent that does not contain it. */
+		goto bail2;
+	}
+
+	/* Splice the new extent into the fork's sorted list, unless
+	   someone read it in while we slept in hfs_bfind(). */
+	ptr = fork->cache;
+	if (!ptr || (alloc_block < ptr->start)) {
+		ptr = &fork->first;
+	}
+	while (ptr->next && (alloc_block > ptr->end)) {
+		ptr = ptr->next;
+	}
+	if (ext->start == ptr->start) {
+		/* somebody beat us to it. */
+		HFS_DELETE(ext);
+		ext = ptr;
+	} else if (ext->start < ptr->start) {
+		/* insert just before ptr */
+		ptr->prev->next = ext;
+		ext->prev = ptr->prev;
+		ext->next = ptr;
+		ptr->prev = ext;
+	} else {
+		/* insert at end */
+		ptr->next = ext;
+		ext->prev = ptr;
+	}
+ found:
+	++ext->count; /* for return value */
+	set_cache(fork, ext);
+	return ext;
+
+ bail1:
+	hfs_brec_relse(&brec, NULL);
+ bail2:
+	HFS_DELETE(ext);
+ bail3:
+	return NULL;
+}
+
+/*
+ * delete_extent()
+ *
+ * Description:
+ * Deletes an extent record from a fork, reducing its physical length.
+ * Input Variable(s):
+ * struct hfs_fork *fork: the fork
+ * struct hfs_extent *ext: the current last extent for 'fork'
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'fork' points to a valid (struct hfs_fork)
+ * 'ext' point to a valid (struct hfs_extent) which is the last in 'fork'
+ * and which is not also the first extent in 'fork'.
+ * Postconditions:
+ * The extent record has been removed if possible, and a warning has been
+ * printed otherwise.
+ */
+static void delete_extent(struct hfs_fork *fork, struct hfs_extent *ext)
+{
+	struct hfs_mdb *mdb = fork->entry->mdb;
+	struct hfs_ext_key key;
+	int error;
+
+	/* Don't leave the fork's cache pointing at the doomed extent. */
+	if (fork->cache == ext) {
+		set_cache(fork, ext->prev);
+	}
+	ext->prev->next = NULL;
+	if (ext->count != 1) {
+		hfs_warn("hfs_truncate: extent has count %d.\n", ext->count);
+	}
+
+	/* Return all three runs to the volume bitmap; errors are
+	   reported but deliberately non-fatal. */
+	lock_bitmap(mdb);
+	error = hfs_clear_vbm_bits(mdb, ext->block[2], ext->length[2]);
+	if (error) {
+		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
+	}
+	error = hfs_clear_vbm_bits(mdb, ext->block[1], ext->length[1]);
+	if (error) {
+		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
+	}
+	error = hfs_clear_vbm_bits(mdb, ext->block[0], ext->length[0]);
+	if (error) {
+		hfs_warn("hfs_truncate: error %d freeing blocks.\n", error);
+	}
+	unlock_bitmap(mdb);
+
+	/* Remove the record from the extents B-tree, then free the
+	   in-core copy. */
+	build_key(&key, fork, ext->start);
+
+	error = hfs_bdelete(mdb->ext_tree, HFS_BKEY(&key));
+	if (error) {
+		hfs_warn("hfs_truncate: error %d deleting an extent.\n", error);
+	}
+	HFS_DELETE(ext);
+}
+
+/*
+ * new_extent()
+ *
+ * Description:
+ *   Adds a new extent record to a fork, extending its physical length.
+ * Input Variable(s):
+ *   struct hfs_fork *fork: the fork to extend
+ *   struct hfs_extent *ext: the current last extent for 'fork'
+ *   hfs_u16 ablock: the number of allocation blocks in 'fork'.
+ *   hfs_u16 start: first allocation block to add to 'fork'.
+ *   hfs_u16 len: the number of allocation blocks to add to 'fork'.
+ *   hfs_u16 ablksz: number of sectors in an allocation block.
+ * Output Variable(s):
+ *   NONE
+ * Returns:
+ *   (struct hfs_extent *) the new extent or NULL
+ * Preconditions:
+ *   'fork' points to a valid (struct hfs_fork)
+ *   'ext' point to a valid (struct hfs_extent) which is the last in 'fork'
+ *   'ablock', 'start', 'len' and 'ablksz' are what they claim to be.
+ * Postconditions:
+ *   If NULL is returned then no changes have been made to 'fork'.
+ *   If the return value is non-NULL that it is the extent that has been
+ *   added to 'fork' both in memory and on disk.  Note that updating the
+ *   'psize' field of 'fork' is left to the caller (see grow_fork()).
+ */
+static struct hfs_extent *new_extent(struct hfs_fork *fork,
+				     struct hfs_extent *ext,
+				     hfs_u16 ablock, hfs_u16 start,
+				     hfs_u16 len, hfs_u16 ablksz)
+{
+	struct hfs_raw_extent raw;
+	struct hfs_ext_key key;
+	int error;
+
+	if (fork->entry->cnid == htonl(HFS_EXT_CNID)) {
+		/* Limit extents tree to the record in the MDB */
+		return NULL;
+	}
+
+	/* Chain a new in-core record onto the tail of the list and
+	   drop our reference to the old tail. */
+	if (!HFS_NEW(ext->next)) {
+		return NULL;
+	}
+	ext->next->prev = ext;
+	ext->next->next = NULL;
+	ext = ext->next;
+	relse_ext(ext->prev);
+
+	/* The new record begins with a single run of 'len' blocks. */
+	ext->start = ablock;
+	ext->block[0] = start;
+	ext->length[0] = len;
+	ext->block[1] = 0;
+	ext->length[1] = 0;
+	ext->block[2] = 0;
+	ext->length[2] = 0;
+	ext->end = ablock + len - 1;
+	ext->count = 1;
+
+	write_extent(&raw, ext);
+
+	build_key(&key, fork, ablock);
+
+	/* Insert the record into the extents B-tree; undo the in-core
+	   linkage on failure. */
+	error = hfs_binsert(fork->entry->mdb->ext_tree,
+			    HFS_BKEY(&key), &raw, sizeof(raw));
+	if (error) {
+		ext->prev->next = NULL;
+		HFS_DELETE(ext);
+		return NULL;
+	}
+	set_cache(fork, ext);
+	return ext;
+}
+
+/*
+ * update_ext()
+ *
+ * Given a (struct hfs_fork) write an extent record back to disk.
+ */
+static void update_ext(struct hfs_fork *fork, struct hfs_extent *ext)
+{
+	struct hfs_ext_key target;
+	struct hfs_brec brec;
+
+	/* The extent with start == 0 lives in the catalog record (or
+	   MDB), not in the extents tree, so there is nothing to write
+	   back here for it. */
+	if (ext->start) {
+		build_key(&target, fork, ext->start);
+
+		/* Find the matching tree record and overwrite it in
+		   place; a failed lookup is silently ignored. */
+		if (!hfs_bfind(&brec, fork->entry->mdb->ext_tree,
+			       HFS_BKEY(&target), HFS_BFIND_WRITE)) {
+			write_extent(brec.data, ext);
+			hfs_brec_relse(&brec, NULL);
+		}
+	}
+}
+
+/*
+ * zero_blocks()
+ *
+ * Zeros-out 'num' allocation blocks beginning with 'start'.
+ */
+static int zero_blocks(struct hfs_mdb *mdb, int start, int num) {
+	hfs_buffer buf;
+	int end;
+	int j;
+
+	/* Convert from allocation blocks to sector numbers on the
+	   underlying device. */
+	start = mdb->fs_start + start * mdb->alloc_blksz;
+	end = start + num * mdb->alloc_blksz;
+
+	for (j=start; j<end; ++j) {
+		/* Best effort: sectors we can't get a buffer for are
+		   silently skipped, and the return value is always 0. */
+		if (hfs_buffer_ok(buf = hfs_buffer_get(mdb->sys_mdb, j, 0))) {
+			memset(hfs_buffer_data(buf), 0, HFS_SECTOR_SIZE);
+			hfs_buffer_dirty(buf);
+			hfs_buffer_put(buf);
+		}
+	}
+	return 0;
+}
+
+/*
+ * shrink_fork()
+ *
+ * Try to remove enough allocation blocks from 'fork'
+ * so that it is 'ablocks' allocation blocks long.
+ */
+static void shrink_fork(struct hfs_fork *fork, int ablocks)
+{
+	struct hfs_mdb *mdb = fork->entry->mdb;
+	struct hfs_extent *ext;
+	int i, error, next, count;
+	hfs_u16 ablksz = mdb->alloc_blksz;
+
+	/* Walk backward from the last allocated block, deleting every
+	   whole extent record that lies at or beyond the new size. */
+	next = (fork->psize / ablksz) - 1;
+	ext = find_ext(fork, next);
+	while (ext && ext->start && (ext->start >= ablocks)) {
+		next = ext->start - 1;
+		delete_extent(fork, ext);
+		ext = find_ext(fork, next);
+	}
+	if (!ext) {
+		/* Couldn't read the next record: stop here and record
+		   the size we actually reached. */
+		fork->psize = (next + 1) * ablksz;
+		return;
+	}
+
+	/* Trim the (now) last record down to 'ablocks' blocks, freeing
+	   runs from the back. */
+	if ((count = next + 1 - ablocks) > 0) {
+		/* Find the last non-empty run. */
+		for (i=2; (i>=0) && !ext->length[i]; --i) {};
+		while (count && (ext->length[i] <= count)) {
+			ext->end -= ext->length[i];
+			count -= ext->length[i];
+			error = hfs_clear_vbm_bits(mdb, ext->block[i],
+						   ext->length[i]);
+			if (error) {
+				hfs_warn("hfs_truncate: error %d freeing "
+					 "blocks.\n", error);
+			}
+			ext->block[i] = ext->length[i] = 0;
+			--i;
+		}
+		/* NOTE(review): bitmap manipulation here is done without
+		   lock_bitmap(), unlike delete_extent() — confirm callers
+		   serialize truncation against allocation. */
+		if (count) {
+			/* Shorten the last partially-freed run. */
+			ext->end -= count;
+			ext->length[i] -= count;
+			error = hfs_clear_vbm_bits(mdb, ext->block[i] +
+						   ext->length[i], count);
+			if (error) {
+				hfs_warn("hfs_truncate: error %d freeing "
+					 "blocks.\n", error);
+			}
+		}
+		update_ext(fork, ext);
+	}
+
+	fork->psize = ablocks * ablksz;
+}
+
+/*
+ * grow_fork()
+ *
+ * Try to add enough allocation blocks to 'fork'
+ * so that it is 'ablock' allocation blocks long.
+ */
+static void grow_fork(struct hfs_fork *fork, int ablocks)
+{
+	struct hfs_cat_entry *entry = fork->entry;
+	struct hfs_mdb *mdb = entry->mdb;
+	struct hfs_extent *ext;
+	int i, start, err;
+	hfs_u16 need, len=0;
+	hfs_u16 ablksz = mdb->alloc_blksz;
+	hfs_u32 blocks, clumpablks;
+
+	/* 'psize' is in sectors; work out how many whole allocation
+	   blocks are still missing. */
+	blocks = fork->psize;
+	need = ablocks - blocks/ablksz;
+	if (need < 1) {
+		return;
+	}
+
+	/* round up to clumpsize */
+	if (entry->u.file.clumpablks) {
+		clumpablks = entry->u.file.clumpablks;
+	} else {
+		clumpablks = mdb->clumpablks;
+	}
+	need = ((need + clumpablks - 1) / clumpablks) * clumpablks;
+
+	/* find last extent record and try to extend it */
+	if (!(ext = find_ext(fork, blocks/ablksz - 1))) {
+		/* somehow we couldn't find the end of the file! */
+		return;
+	}
+
+	/* determine which is the last used extent in the record */
+	/* then try to allocate the blocks immediately following it */
+	for (i=2; (i>=0) && !ext->length[i]; --i) {};
+	if (i>=0) {
+		/* try to extend the last extent */
+		start = ext->block[i] + ext->length[i];
+
+		err = 0;
+		lock_bitmap(mdb);
+		/* Count the free blocks directly after the run; if there
+		   are none we must start a new run elsewhere. */
+		len = hfs_vbm_count_free(mdb, start);
+		if (!len) {
+			unlock_bitmap(mdb);
+			goto more_extents;
+		}
+		if (need < len) {
+			len = need;
+		}
+		err = hfs_set_vbm_bits(mdb, start, len);
+		unlock_bitmap(mdb);
+		if (err) {
+			relse_ext(ext);
+			return;
+		}
+
+		zero_blocks(mdb, start, len);
+
+		/* Grow the run in place and push the change to disk. */
+		ext->length[i] += len;
+		ext->end += len;
+		blocks = (fork->psize += len * ablksz);
+		need -= len;
+		update_ext(fork, ext);
+	}
+
+more_extents:
+	/* add some more extents */
+	while (need) {
+		len = need;
+		err = 0;
+		lock_bitmap(mdb);
+		/* NOTE(review): hfs_set_vbm_bits() is called before the
+		   '!len' check below; presumably it tolerates len == 0
+		   when the search found no space — confirm. */
+		start = hfs_vbm_search_free(mdb, &len);
+		if (need < len) {
+			len = need;
+		}
+		err = hfs_set_vbm_bits(mdb, start, len);
+		unlock_bitmap(mdb);
+		if (!len || err) {
+			relse_ext(ext);
+			return;
+		}
+		zero_blocks(mdb, start, len);
+
+		/* determine which is the first free extent in the record */
+		for (i=0; (i<3) && ext->length[i]; ++i) {};
+		if (i < 3) {
+			/* Room in the current record: add a new run. */
+			ext->block[i] = start;
+			ext->length[i] = len;
+			ext->end += len;
+			update_ext(fork, ext);
+		} else {
+			/* Record full: start a fresh extent record,
+			   giving the blocks back if that fails. */
+			if (!(ext = new_extent(fork, ext, blocks/ablksz,
+					       start, len, ablksz))) {
+				hfs_clear_vbm_bits(mdb, start, len);
+				return;
+			}
+		}
+		blocks = (fork->psize += len * ablksz);
+		need -= len;
+	}
+	set_cache(fork, ext);
+	relse_ext(ext);
+	return;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_ext_compare()
+ *
+ * Description:
+ * This is the comparison function used for the extents B-tree. In
+ * comparing extent B-tree entries, the file id is the most
+ * significant field (compared as unsigned ints); the fork type is
+ * the second most significant field (compared as unsigned chars);
+ * and the allocation block number field is the least significant
+ * (compared as unsigned ints).
+ * Input Variable(s):
+ * struct hfs_ext_key *key1: pointer to the first key to compare
+ * struct hfs_ext_key *key2: pointer to the second key to compare
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
+ * Preconditions:
+ * key1 and key2 point to "valid" (struct hfs_ext_key)s.
+ * Postconditions:
+ * This function has no side-effects */
+int hfs_ext_compare(const struct hfs_ext_key *key1,
+		    const struct hfs_ext_key *key2)
+{
+	unsigned int n1, n2;
+	int retval;
+
+	/* Compare the 32-bit file IDs with explicit '<' rather than by
+	   casting the unsigned difference to int: the difference trick
+	   yields the wrong sign whenever the IDs differ by 2^31 or
+	   more. */
+	n1 = hfs_get_hl(key1->FNum);
+	n2 = hfs_get_hl(key2->FNum);
+	if (n1 != n2) {
+		retval = (n1 < n2) ? -1 : 1;
+	} else if (key1->FkType != key2->FkType) {
+		/* Fork types are single unsigned bytes, so their
+		   difference always fits in an int. */
+		retval = (int)(unsigned char)key1->FkType -
+			 (int)(unsigned char)key2->FkType;
+	} else {
+		/* Allocation block numbers are 16-bit unsigned values:
+		   subtraction is safe here too. */
+		retval = (int)(hfs_get_hs(key1->FABN)
+			       - hfs_get_hs(key2->FABN));
+	}
+	return retval;
+}
+
+/*
+ * hfs_extent_adj()
+ *
+ * Given an hfs_fork shrink or grow the fork to hold the
+ * forks logical size.
+ */
+void hfs_extent_adj(struct hfs_fork *fork)
+{
+	if (fork) {
+		hfs_u32 blks, ablocks;
+		hfs_u16 ablksz;
+
+		/* The logical size can never exceed the fork maximum. */
+		if (fork->lsize > HFS_FORK_MAX) {
+			fork->lsize = HFS_FORK_MAX;
+		}
+
+		/* Convert the logical size (bytes) to sectors, then to
+		   whole allocation blocks, rounding up both times. */
+		blks = (fork->lsize+HFS_SECTOR_SIZE-1) >> HFS_SECTOR_SIZE_BITS;
+		ablksz = fork->entry->mdb->alloc_blksz;
+		ablocks = (blks + ablksz - 1) / ablksz;
+
+		if (blks > fork->psize) {
+			grow_fork(fork, ablocks);
+			if (blks > fork->psize) {
+				/* Allocation fell short: clamp lsize to
+				   the space we actually got.  BUGFIX:
+				   'psize' is in sectors and 'lsize' in
+				   bytes, so converting needs a left
+				   shift; the original '>>' divided by
+				   the sector size a second time. */
+				fork->lsize =
+					fork->psize << HFS_SECTOR_SIZE_BITS;
+			}
+		} else if (blks < fork->psize) {
+			shrink_fork(fork, ablocks);
+		}
+	}
+}
+
+/*
+ * hfs_extent_map()
+ *
+ * Given an hfs_fork and a block number within the fork, return the
+ * number of the corresponding physical block on disk, or zero on
+ * error.
+ */
+int hfs_extent_map(struct hfs_fork *fork, int block, int create)
+{
+	int ablksz, ablock, offset, tmp;
+	struct hfs_extent *ext;
+
+	if (!fork || !fork->entry || !fork->entry->mdb) {
+		return 0;
+	}
+
+#if defined(DEBUG_EXTENTS) || defined(DEBUG_ALL)
+	hfs_warn("hfs_extent_map: ablock %d of file %d, fork %d\n",
+		 block, fork->entry->cnid, fork->fork);
+#endif
+
+	/* Reject blocks that can't belong to any fork. */
+	if (block < 0) {
+		hfs_warn("hfs_extent_map: block < 0\n");
+		return 0;
+	}
+	if (block > (HFS_FORK_MAX >> HFS_SECTOR_SIZE_BITS)) {
+		hfs_warn("hfs_extent_map: block(0x%08x) > big; cnid=%d "
+			 "fork=%d\n", block, fork->entry->cnid, fork->fork);
+		return 0;
+	}
+	/* Split the sector number into an allocation block number plus
+	   a device offset (fs_start + sector within the alloc block). */
+	ablksz = fork->entry->mdb->alloc_blksz;
+	offset = fork->entry->mdb->fs_start + (block % ablksz);
+	ablock = block / ablksz;
+
+	/* Optionally allocate space to cover the requested block. */
+	if (block >= fork->psize) {
+		if (create) {
+			grow_fork(fork, ablock + 1);
+		} else {
+			return 0;
+		}
+	}
+
+#if defined(DEBUG_EXTENTS) || defined(DEBUG_ALL)
+	hfs_warn("(lblock %d offset %d)\n", ablock, offset);
+#endif
+
+	/* Look up the covering extent and translate within it; 0 means
+	   "unmapped" to our callers. */
+	if ((ext = find_ext(fork, ablock))) {
+		dump_ext("trying new: ", ext);
+		tmp = decode_extent(ext, ablock);
+		relse_ext(ext);
+		if (tmp >= 0) {
+			return tmp*ablksz + offset;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * hfs_extent_out()
+ *
+ * Copy the first extent record from a (struct hfs_fork) to a (struct
+ * raw_extent), record (normally the one in the catalog entry).
+ */
+void hfs_extent_out(const struct hfs_fork *fork, hfs_byte_t dummy[12])
+{
+	/* 'dummy' is the 12-byte extent field of an on-disk catalog
+	   (or MDB) record, laid out as a raw extent. */
+	struct hfs_raw_extent *ext = (struct hfs_raw_extent *)dummy;
+
+	if (fork && ext) {
+		write_extent(ext, &fork->first);
+		dump_ext("extent out: ", &fork->first);
+	}
+}
+
+/*
+ * hfs_extent_in()
+ *
+ * Copy an raw_extent to the 'first' and 'cache' fields of an hfs_fork.
+ */
+void hfs_extent_in(struct hfs_fork *fork, const hfs_byte_t dummy[12])
+{
+	const struct hfs_raw_extent *ext =
+		(const struct hfs_raw_extent *)dummy;
+
+	if (fork && ext) {
+		read_extent(&fork->first, ext, 0);
+		/* The embedded first extent starts life as the cache,
+		   so it carries both references (list + cache). */
+		fork->cache = &fork->first;
+		fork->first.count = 2;
+		dump_ext("extent in: ", &fork->first);
+	}
+}
+
+/*
+ * hfs_extent_free()
+ *
+ * Removes from memory all extents associated with 'fil'.
+ */
+void hfs_extent_free(struct hfs_fork *fork)
+{
+	if (fork) {
+		/* Point the cache back at the embedded first extent;
+		   this drops the cache reference to any other extent,
+		   which frees it unless someone still holds it. */
+		set_cache(fork, &fork->first);
+
+		if (fork->first.next) {
+			hfs_warn("hfs_extent_free: extents in use!\n");
+		}
+	}
+}
diff --git a/fs/hfs/file.c b/fs/hfs/file.c
new file mode 100644
index 000000000..26f498305
--- /dev/null
+++ b/fs/hfs/file.c
@@ -0,0 +1,531 @@
+/*
+ * linux/fs/hfs/file.c
+ *
+ * Copyright (C) 1995, 1996 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the file-related functions which are independent of
+ * which scheme is being used to represent forks.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static hfs_rwret_t hfs_file_read(struct file *, char *, hfs_rwarg_t,
+ loff_t *);
+static hfs_rwret_t hfs_file_write(struct file *, const char *, hfs_rwarg_t,
+ loff_t *);
+static void hfs_file_truncate(struct inode *);
+static int hfs_bmap(struct inode *, int);
+
+/*================ Global variables ================*/
+
+/* File operations for regular (non-header) HFS files.  NULL entries
+   fall back to the kernel defaults. */
+static struct file_operations hfs_file_operations = {
+	NULL,			/* lseek - default */
+	hfs_file_read,		/* read */
+	hfs_file_write,		/* write */
+	NULL,			/* readdir - bad */
+	NULL,			/* select - default */
+	NULL,			/* ioctl - default */
+	generic_file_mmap,	/* mmap */
+	NULL,			/* open */
+	NULL,			/* release */
+	file_fsync,		/* fsync - default */
+	NULL,			/* fasync - default */
+	NULL,			/* check_media_change - none */
+	NULL,			/* revalidate - none */
+	NULL			/* lock - none */
+};
+
+/* Inode operations for regular (non-header) HFS files. */
+struct inode_operations hfs_file_inode_operations = {
+	&hfs_file_operations,	/* default file operations */
+	NULL,			/* create */
+	NULL,			/* lookup */
+	NULL,			/* link */
+	NULL,			/* unlink */
+	NULL,			/* symlink */
+	NULL,			/* mkdir */
+	NULL,			/* rmdir */
+	NULL,			/* mknod */
+	NULL,			/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	generic_readpage,	/* readpage */
+	NULL,			/* writepage */
+	hfs_bmap,		/* bmap */
+	hfs_file_truncate,	/* truncate */
+	NULL,			/* permission */
+	NULL,			/* smap */
+	NULL,			/* updatepage */
+	NULL			/* revalidate */
+};
+
+/*================ Variable-like macros ================*/
+
+/* maximum number of blocks to try to read in at once */
+#define NBUF 32
+
+/*================ File-local functions ================*/
+
+/*
+ * hfs_getblk()
+ *
+ * Given an hfs_fork and a block number return the buffer_head for
+ * that block from the fork. If 'create' is non-zero then allocate
+ * the necessary block(s) to the fork.
+ */
+struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
+{
+	int tmp;
+	kdev_t dev = fork->entry->mdb->sys_mdb->s_dev;
+
+	/* Map the fork-relative block to a device sector, allocating
+	   space for it when 'create' is set. */
+	tmp = hfs_extent_map(fork, block, create);
+
+	if (create) {
+		/* If writing the block, then we have exclusive access
+		   to the file until we return, so it can't have moved.
+		*/
+		if (tmp) {
+			/* Allocation may have changed the extents, so
+			   the catalog entry needs writing back. */
+			hfs_cat_mark_dirty(fork->entry);
+			return getblk(dev, tmp, HFS_SECTOR_SIZE);
+		}
+		return NULL;
+
+	} else {
+		/* If reading the block, then retry since the
+		   location on disk could have changed while
+		   we waited on the I/O in getblk to complete.
+		*/
+		do {
+			struct buffer_head *bh =
+					getblk(dev, tmp, HFS_SECTOR_SIZE);
+			int tmp2 = hfs_extent_map(fork, block, 0);
+
+			if (tmp2 == tmp) {
+				return bh;
+			} else {
+				/* The block moved or no longer exists. */
+				brelse(bh);
+				tmp = tmp2;
+			}
+		} while (tmp != 0);
+
+		/* The block no longer exists. */
+		return NULL;
+	}
+}
+
+/*
+ * hfs_bmap()
+ *
+ * This is the bmap() field in the inode_operations structure for
+ * "regular" (non-header) files. The purpose is to translate an inode
+ * and a block number within the corresponding file into a physical
+ * block number. This function just calls hfs_extent_map() to do the
+ * real work.
+ */
+static int hfs_bmap(struct inode * inode, int block)
+{
+	/* A return of 0 means "unmapped"; HFS files have no holes. */
+	return hfs_extent_map(HFS_I(inode)->fork, block, 0);
+}
+
+/*
+ * hfs_file_read()
+ *
+ * This is the read field in the inode_operations structure for
+ * "regular" (non-header) files. The purpose is to transfer up to
+ * 'count' bytes from the file corresponding to 'inode', beginning at
+ * 'filp->offset' bytes into the file. The data is transfered to
+ * user-space at the address 'buf'. Returns the number of bytes
+ * successfully transfered. This function checks the arguments, does
+ * some setup and then calls hfs_do_read() to do the actual transfer.
+ */
+static hfs_rwret_t hfs_file_read(struct file * filp, char * buf,
+				 hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	hfs_s32 read, left, pos, size;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_file_read: mode = %07o\n",inode->i_mode);
+		return -EINVAL;
+	}
+	pos = *ppos;
+	/* Nothing readable at or beyond the maximum fork size. */
+	if (pos >= HFS_FORK_MAX) {
+		return 0;
+	}
+	/* Clip the transfer to what lies between pos and EOF. */
+	size = inode->i_size;
+	if (pos > size) {
+		left = 0;
+	} else {
+		left = size - pos;
+	}
+	if (left > count) {
+		left = count;
+	}
+	if (left <= 0) {
+		return 0;
+	}
+	/* Do the transfer, then advance the file position and flag
+	   sequential access for read-ahead. */
+	if ((read = hfs_do_read(inode, HFS_I(inode)->fork, pos,
+				buf, left, filp->f_reada != 0)) > 0) {
+		*ppos += read;
+		filp->f_reada = 1;
+	}
+
+	return read;
+}
+
+/*
+ * hfs_file_write()
+ *
+ * This is the write() entry in the file_operations structure for
+ * "regular" files. The purpose is to transfer up to 'count' bytes
+ * to the file corresponding to 'inode' beginning at offset
+ * 'file->f_pos' from user-space at the address 'buf'. The return
+ * value is the number of bytes actually transferred.
+ */
+static hfs_rwret_t hfs_file_write(struct file * filp, const char * buf,
+				  hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct hfs_fork *fork = HFS_I(inode)->fork;
+	hfs_s32 written, pos;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_file_write: mode = %07o\n", inode->i_mode);
+		return -EINVAL;
+	}
+
+	/* O_APPEND writes always start at the current end of file. */
+	pos = (filp->f_flags & O_APPEND) ? inode->i_size : *ppos;
+
+	if (pos >= HFS_FORK_MAX) {
+		return 0;
+	}
+	/* BUGFIX: clamp so that pos + count never exceeds the maximum
+	   fork size.  The old test only compared count alone against
+	   HFS_FORK_MAX, letting a write starting at pos > 0 run past
+	   the limit. */
+	if (count > HFS_FORK_MAX - pos) {
+		count = HFS_FORK_MAX - pos;
+	}
+	if ((written = hfs_do_write(inode, fork, pos, buf, count)) > 0)
+		pos += written;
+
+	/* Update the file position and, if we extended it, the size. */
+	*ppos = pos;
+	if (*ppos > inode->i_size)
+		inode->i_size = *ppos;
+
+	return written;
+}
+
+/*
+ * hfs_file_truncate()
+ *
+ * This is the truncate() entry in the file_operations structure for
+ * "regular" files. The purpose is to change the length of the file
+ * corresponding to the given inode. Changes can either lengthen or
+ * shorten the file.
+ */
+static void hfs_file_truncate(struct inode * inode)
+{
+	struct hfs_fork *fork = HFS_I(inode)->fork;
+
+	/* Push the new logical size into the fork and let the extent
+	   code grow or shrink the physical allocation to match. */
+	fork->lsize = inode->i_size;
+	hfs_extent_adj(fork);
+	hfs_cat_mark_dirty(HFS_I(inode)->entry);
+
+	/* hfs_extent_adj() may have clamped lsize; mirror the result
+	   (lsize in bytes, psize in sectors) back into the inode. */
+	inode->i_size = fork->lsize;
+	inode->i_blocks = fork->psize;
+}
+
+/*
+ * xlate_to_user()
+ *
+ * Like copy_to_user() while translating CR->NL.
+ */
+static inline void xlate_to_user(char *buf, const char *data, int count)
+{
+	int i;
+
+	/* Copy to user space a byte at a time, rewriting Mac CR line
+	   endings as Unix NL on the way out. */
+	for (i = 0; i < count; ++i) {
+		char c = data[i];
+
+		put_user((c == '\r') ? '\n' : c, buf + i);
+	}
+}
+
+/*
+ * xlate_from_user()
+ *
+ * Like copy_from_user() while translating NL->CR;
+ */
+static inline void xlate_from_user(char *data, const char *buf, int count)
+{
+	char *p;
+	int i;
+
+	/* Pull the bytes in from user space, then rewrite Unix NL as
+	   Mac CR in place. */
+	copy_from_user(data, buf, count);
+	for (i = 0, p = data; i < count; ++i, ++p) {
+		if (*p == '\n') {
+			*p = '\r';
+		}
+	}
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_do_read()
+ *
+ * This function transfers actual data from disk to user-space memory,
+ * returning the number of bytes successfully transfered. 'fork' tells
+ * which file on the disk to read from. 'pos' gives the offset into
+ * the Linux file at which to begin the transfer. Note that this will
+ * differ from 'filp->offset' in the case of an AppleDouble header file
+ * due to the block of metadata at the beginning of the file, which has
+ * no corresponding place in the HFS file. 'count' tells how many
+ * bytes to transfer. 'buf' gives an address in user-space to transfer
+ * the data to.
+ *
+ * This is based on Linus's minix_file_read().
+ * It has been changed to take into account that HFS files have no holes.
+ */
+hfs_s32 hfs_do_read(struct inode *inode, struct hfs_fork * fork, hfs_u32 pos,
+		    char * buf, hfs_u32 count, int reada)
+{
+	kdev_t dev = inode->i_dev;
+	hfs_s32 size, chars, offset, block, blocks, read = 0;
+	int bhrequest, uptodate;
+	int convert = HFS_I(inode)->convert;
+	struct buffer_head ** bhb, ** bhe;
+	struct buffer_head * bhreq[NBUF];
+	struct buffer_head * buflist[NBUF];
+
+	/* split 'pos' in to block and (byte) offset components */
+	block = pos >> HFS_SECTOR_SIZE_BITS;
+	offset = pos & (HFS_SECTOR_SIZE-1);
+
+	/* compute the logical size of the fork in blocks */
+	size = (fork->lsize + (HFS_SECTOR_SIZE-1)) >> HFS_SECTOR_SIZE_BITS;
+
+	/* compute the number of physical blocks to be transferred */
+	blocks = (count+offset+HFS_SECTOR_SIZE-1) >> HFS_SECTOR_SIZE_BITS;
+
+	/* 'buflist' is a ring buffer: bhb is where the next buffer is
+	   queued, bhe is the next buffer to be consumed. */
+	bhb = bhe = buflist;
+	if (reada) {
+		/* Enlarge the request up to the device's read-ahead
+		   window, but never past end of file. */
+		if (blocks < read_ahead[MAJOR(dev)] / (HFS_SECTOR_SIZE>>9)) {
+			blocks = read_ahead[MAJOR(dev)] / (HFS_SECTOR_SIZE>>9);
+		}
+		if (block + blocks > size) {
+			blocks = size - block;
+		}
+	}
+
+	/* We do this in a two stage process.  We first try and
+	   request as many blocks as we can, then we wait for the
+	   first one to complete, and then we try and wrap up as many
+	   as are actually done.
+
+	   This routine is optimized to make maximum use of the
+	   various buffers and caches. */
+
+	do {
+		bhrequest = 0;
+		uptodate = 1;
+		while (blocks) {
+			--blocks;
+			*bhb = hfs_getblk(fork, block++, 0);
+
+			if (!(*bhb)) {
+				/* Since there are no holes in HFS files
+				   we must have encountered an error.
+				   So, stop adding blocks to the queue. */
+				blocks = 0;
+				break;
+			}
+
+			/* Buffers not already valid need real I/O. */
+			if (!buffer_uptodate(*bhb)) {
+				uptodate = 0;
+				bhreq[bhrequest++] = *bhb;
+			}
+
+			if (++bhb == &buflist[NBUF]) {
+				bhb = buflist;
+			}
+
+			/* If the block we have on hand is uptodate,
+			   go ahead and complete processing. */
+			if (uptodate) {
+				break;
+			}
+			if (bhb == bhe) {
+				break;
+			}
+		}
+
+		/* If the only block in the queue is bad then quit */
+		if (!(*bhe)) {
+			break;
+		}
+
+		/* Now request them all */
+		if (bhrequest) {
+			ll_rw_block(READ, bhrequest, bhreq);
+		}
+
+		do { /* Finish off all I/O that has actually completed */
+			char *p;
+
+			wait_on_buffer(*bhe);
+
+			if (!buffer_uptodate(*bhe)) {
+				/* read error? */
+				brelse(*bhe);
+				if (++bhe == &buflist[NBUF]) {
+					bhe = buflist;
+				}
+				count = 0;
+				break;
+			}
+
+			/* Copy out the part of this sector that the
+			   caller asked for. */
+			if (count < HFS_SECTOR_SIZE - offset) {
+				chars = count;
+			} else {
+				chars = HFS_SECTOR_SIZE - offset;
+			}
+			count -= chars;
+			read += chars;
+			p = (*bhe)->b_data + offset;
+			if (convert) {
+				/* CR -> NL translation for text mode. */
+				xlate_to_user(buf, p, chars);
+			} else {
+				copy_to_user(buf, p, chars);
+			}
+			brelse(*bhe);
+			buf += chars;
+			offset = 0;	/* only the first sector is partial */
+			if (++bhe == &buflist[NBUF]) {
+				bhe = buflist;
+			}
+		} while (count && (bhe != bhb) && !buffer_locked(*bhe));
+	} while (count);
+
+	/* Release the read-ahead blocks */
+	while (bhe != bhb) {
+		brelse(*bhe);
+		if (++bhe == &buflist[NBUF]) {
+			bhe = buflist;
+		}
+	}
+	if (!read) {
+		return -EIO;
+	}
+	return read;
+}
+
+/*
+ * hfs_do_write()
+ *
+ * This function transfers actual data from user-space memory to disk,
+ * returning the number of bytes successfully transfered. 'fork' tells
+ * which file on the disk to write to. 'pos' gives the offset into
+ * the Linux file at which to begin the transfer. Note that this will
+ * differ from 'filp->offset' in the case of an AppleDouble header file
+ * due to the block of metadata at the beginning of the file, which has
+ * no corresponding place in the HFS file. 'count' tells how many
+ * bytes to transfer. 'buf' gives an address in user-space to transfer
+ * the data from.
+ *
+ * This is just a minor edit of Linus's minix_file_write().
+ */
+hfs_s32 hfs_do_write(struct inode *inode, struct hfs_fork * fork, hfs_u32 pos,
+		     const char * buf, hfs_u32 count)
+{
+	hfs_s32 written, c;
+	struct buffer_head * bh;
+	char * p;
+	int convert = HFS_I(inode)->convert;
+
+	written = 0;
+	while (written < count) {
+		/* Get (allocating if needed) the sector covering pos. */
+		bh = hfs_getblk(fork, pos/HFS_SECTOR_SIZE, 1);
+		if (!bh) {
+			if (!written) {
+				written = -ENOSPC;
+			}
+			break;
+		}
+		/* Bytes we can write without leaving this sector. */
+		c = HFS_SECTOR_SIZE - (pos % HFS_SECTOR_SIZE);
+		if (c > count - written) {
+			c = count - written;
+		}
+		/* A partial-sector write must read the sector first so
+		   the untouched bytes are preserved. */
+		if (c != HFS_SECTOR_SIZE && !buffer_uptodate(bh)) {
+			ll_rw_block(READ, 1, &bh);
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh)) {
+				brelse(bh);
+				if (!written) {
+					written = -EIO;
+				}
+				break;
+			}
+		}
+		p = (pos % HFS_SECTOR_SIZE) + bh->b_data;
+		if (convert) {
+			/* NL -> CR translation for text mode. */
+			xlate_from_user(p, buf, c);
+		} else {
+			copy_from_user(p, buf, c);
+		}
+		/* Keep any mapped pages coherent with the new data. */
+		update_vm_cache(inode,pos,p,c);
+		pos += c;
+		written += c;
+		buf += c;
+		mark_buffer_uptodate(bh, 1);
+		mark_buffer_dirty(bh, 0);
+		brelse(bh);
+	}
+	/* On success update the timestamps and (if we extended the
+	   file) the fork's logical size, then dirty the catalog entry. */
+	if (written > 0) {
+		struct hfs_cat_entry *entry = fork->entry;
+
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		if (pos > fork->lsize) {
+			fork->lsize = pos;
+		}
+		entry->modify_date = hfs_u_to_mtime(CURRENT_TIME);
+		hfs_cat_mark_dirty(entry);
+	}
+	return written;
+}
+
+/*
+ * hfs_file_fix_mode()
+ *
+ * Fixes up the permissions on a file after changing the write-inhibit bit.
+ */
+void hfs_file_fix_mode(struct hfs_cat_entry *entry)
+{
+	struct dentry **de = entry->sys_entry;
+	int locked = (entry->u.file.flags & HFS_FIL_LOCK) != 0;
+	int i;
+
+	/* Apply the write-inhibit state to every inode currently
+	   instantiated for this catalog entry (one per fork scheme). */
+	for (i = 0; i < 4; ++i) {
+		struct inode *inode;
+
+		if (!de[i]) {
+			continue;
+		}
+		inode = de[i]->d_inode;
+		if (locked) {
+			/* Locked: strip all write permission bits. */
+			inode->i_mode &= ~S_IWUGO;
+		} else {
+			/* Unlocked: restore write bits, then honor the
+			   mount's umask. */
+			inode->i_mode |= S_IWUGO;
+			inode->i_mode &= ~HFS_SB(inode->i_sb)->s_umask;
+		}
+	}
+}
diff --git a/fs/hfs/file_cap.c b/fs/hfs/file_cap.c
new file mode 100644
index 000000000..7c298264a
--- /dev/null
+++ b/fs/hfs/file_cap.c
@@ -0,0 +1,297 @@
+/*
+ * linux/fs/hfs/file_cap.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the file_ops and inode_ops for the metadata
+ * files under the CAP representation.
+ *
+ * The source code distribution of the Columbia AppleTalk Package for
+ * UNIX, version 6.0, (CAP) was used as a specification of the
+ * location and format of files used by CAP's Aufs. No code from CAP
+ * appears in hfs_fs. hfs_fs is not a work ``derived'' from CAP in
+ * the sense of intellectual property law.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static hfs_rwret_t cap_info_read(struct file *, char *,
+ hfs_rwarg_t, loff_t *);
+static hfs_rwret_t cap_info_write(struct file *, const char *,
+ hfs_rwarg_t, loff_t *);
+static void cap_info_truncate(struct inode *);
+
+/*================ Function-like macros ================*/
+
+/*
+ * OVERLAPS()
+ *
+ * Determines if a given range overlaps the specified structure member
+ */
+#define OVERLAPS(START, END, TYPE, MEMB) \
+ ((END > offsetof(TYPE, MEMB)) && \
+ (START < offsetof(TYPE, MEMB) + sizeof(((TYPE *)0)->MEMB)))
+
+/*================ Global variables ================*/
+
+/* File operations for CAP metadata files: only read, write and the
+ * default fsync are provided; everything else uses VFS defaults. */
+static struct file_operations hfs_cap_info_operations = {
+	NULL,			/* lseek - default */
+	cap_info_read,		/* read */
+	cap_info_write,		/* write */
+	NULL,			/* readdir - bad */
+	NULL,			/* select - default */
+	NULL,			/* ioctl - default */
+	NULL,			/* mmap - not yet */
+	NULL,			/* no special open code */
+	NULL,			/* no special release code */
+	file_fsync,		/* fsync - default */
+	NULL,			/* fasync - default */
+	NULL,			/* check_media_change - none */
+	NULL,			/* revalidate - none */
+	NULL			/* lock - none */
+};
+
+/* Inode operations for CAP metadata files: only truncate is
+ * implemented (to clamp the size); all else uses VFS defaults. */
+struct inode_operations hfs_cap_info_inode_operations = {
+	&hfs_cap_info_operations,	/* default file operations */
+	NULL,			/* create */
+	NULL,			/* lookup */
+	NULL,			/* link */
+	NULL,			/* unlink */
+	NULL,			/* symlink */
+	NULL,			/* mkdir */
+	NULL,			/* rmdir */
+	NULL,			/* mknod */
+	NULL,			/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	NULL,			/* readpage */
+	NULL,			/* writepage */
+	NULL,			/* bmap - none */
+	cap_info_truncate,	/* truncate */
+	NULL,			/* permission */
+	NULL,			/* smap */
+	NULL,			/* updatepage */
+	NULL			/* revalidate */
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * cap_build_meta()
+ *
+ * Build the metadata structure.
+ */
+/* Fill 'meta' with the on-the-wire CAP finder-info image for 'entry':
+ * Finder info, AFP attributes, magic/version fields, the Macintosh
+ * filename, and the create/modify times (current time as fi_utime). */
+static void cap_build_meta(struct hfs_cap_info *meta,
+			   struct hfs_cat_entry *entry)
+{
+	memset(meta, 0, sizeof(*meta));
+	memcpy(meta->fi_fndr, &entry->info, 32);
+	if ((entry->type == HFS_CDR_FIL) &&
+	    (entry->u.file.flags & HFS_FIL_LOCK)) {
+		/* Couple the locked bit of the file to the
+		   AFP {write,rename,delete} inhibit bits. */
+		hfs_put_hs(HFS_AFP_RDONLY, meta->fi_attr);
+	}
+	meta->fi_magic1 = HFS_CAP_MAGIC1;
+	meta->fi_version = HFS_CAP_VERSION;
+	meta->fi_magic = HFS_CAP_MAGIC;
+	meta->fi_bitmap = HFS_CAP_LONGNAME;
+	memcpy(meta->fi_macfilename, entry->key.CName.Name,
+	       entry->key.CName.Len);
+	meta->fi_datemagic = HFS_CAP_DMAGIC;
+	meta->fi_datevalid = HFS_CAP_MDATE | HFS_CAP_CDATE;
+	hfs_put_nl(hfs_m_to_htime(entry->create_date), meta->fi_ctime);
+	hfs_put_nl(hfs_m_to_htime(entry->modify_date), meta->fi_mtime);
+	hfs_put_nl(CURRENT_TIME, meta->fi_utime);
+}
+
+/*
+ * cap_info_read()
+ *
+ * This is the read() entry in the file_operations structure for CAP
+ * metadata files. The purpose is to transfer up to 'count' bytes
+ * from the file corresponding to 'inode' beginning at offset
+ * 'file->f_pos' to user-space at the address 'buf'. The return value
+ * is the number of bytes actually transferred.
+ */
+static hfs_rwret_t cap_info_read(struct file *filp, char *buf,
+				 hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	hfs_s32 left, size, read = 0;
+	hfs_u32 pos;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_cap_info_read: mode = %07o\n", inode->i_mode);
+		return -EINVAL;
+	}
+
+	pos = *ppos;
+	if (pos > HFS_FORK_MAX) {
+		return 0;
+	}
+	/* Clamp the transfer to what lies between pos and i_size. */
+	size = inode->i_size;
+	if (pos > size) {
+		left = 0;
+	} else {
+		left = size - pos;
+	}
+	if (left > count) {
+		left = count;
+	}
+	if (left <= 0) {
+		return 0;
+	}
+
+	/* The leading sizeof(struct hfs_cap_info) bytes are generated
+	   from the catalog entry rather than stored on disk. */
+	if (pos < sizeof(struct hfs_cap_info)) {
+		int memcount = sizeof(struct hfs_cap_info) - pos;
+		struct hfs_cap_info meta;
+
+		if (memcount > left) {
+			memcount = left;
+		}
+		cap_build_meta(&meta, entry);
+		/* is copy_to_user guaranteed to write memcount? */
+		copy_to_user(buf, ((char *)&meta) + pos, memcount);
+		left -= memcount;
+		read += memcount;
+		pos += memcount;
+		buf += memcount;
+	}
+
+	/* Anything past the generated structure reads as zeros. */
+	if (left > 0) {
+		clear_user(buf, left);
+		pos += left;
+	}
+
+	if (read) {
+		inode->i_atime = CURRENT_TIME;
+		*ppos = pos;
+	}
+
+	return read;
+}
+
+/*
+ * cap_info_write()
+ *
+ * This is the write() entry in the file_operations structure for CAP
+ * metadata files. The purpose is to transfer up to 'count' bytes
+ * to the file corresponding to 'inode' beginning at offset
+ * '*ppos' from user-space at the address 'buf'.
+ * The return value is the number of bytes actually transferred.
+ */
+static hfs_rwret_t cap_info_write(struct file *filp, const char *buf,
+				  hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	hfs_u32 pos;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_file_write: mode = %07o\n", inode->i_mode);
+		return -EINVAL;
+	}
+	if (count <= 0) {
+		return 0;
+	}
+
+	/* O_APPEND writes start at the current end of file. */
+	pos = (filp->f_flags & O_APPEND) ? inode->i_size : *ppos;
+
+	if (pos > HFS_FORK_MAX) {
+		return 0;
+	}
+
+	/* Advance the file position, clamping at the fork-size limit. */
+	*ppos += count;
+	if (*ppos > HFS_FORK_MAX) {
+		*ppos = HFS_FORK_MAX;
+		count = HFS_FORK_MAX - pos;
+	}
+
+	if (*ppos > inode->i_size)
+		inode->i_size = *ppos;
+
+	/* Only deal with the part we store in memory */
+	if (pos < sizeof(struct hfs_cap_info)) {
+		int end, mem_count;
+		struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+		struct hfs_cap_info meta;
+
+		mem_count = sizeof(struct hfs_cap_info) - pos;
+		if (mem_count > count) {
+			mem_count = count;
+		}
+		end = pos + mem_count;
+
+		/* Overlay the user's bytes on a freshly generated image,
+		   then push any changed fields back into the catalog. */
+		cap_build_meta(&meta, entry);
+		/* NOTE(review): copy_from_user() result is ignored. */
+		copy_from_user(((char *)&meta) + pos, buf, mem_count);
+
+		/* Update finder attributes if changed */
+		if (OVERLAPS(pos, end, struct hfs_cap_info, fi_fndr)) {
+			memcpy(&entry->info, meta.fi_fndr, 32);
+			hfs_cat_mark_dirty(entry);
+		}
+
+		/* Update file flags if changed */
+		if (OVERLAPS(pos, end, struct hfs_cap_info, fi_attr) &&
+		    (entry->type == HFS_CDR_FIL)) {
+			int locked = hfs_get_ns(&meta.fi_attr) &
+						htons(HFS_AFP_WRI);
+			hfs_u8 new_flags;
+
+			if (locked) {
+				new_flags = entry->u.file.flags | HFS_FIL_LOCK;
+			} else {
+				new_flags = entry->u.file.flags & ~HFS_FIL_LOCK;
+			}
+
+			if (new_flags != entry->u.file.flags) {
+				entry->u.file.flags = new_flags;
+				hfs_cat_mark_dirty(entry);
+				/* Propagate the lock bit to the cached
+				   inodes' permission bits. */
+				hfs_file_fix_mode(entry);
+			}
+		}
+
+		/* Update CrDat if changed */
+		if (OVERLAPS(pos, end, struct hfs_cap_info, fi_ctime)) {
+			entry->create_date =
+				hfs_h_to_mtime(hfs_get_nl(meta.fi_ctime));
+			hfs_cat_mark_dirty(entry);
+		}
+
+		/* Update MdDat if changed */
+		if (OVERLAPS(pos, end, struct hfs_cap_info, fi_mtime)) {
+			entry->modify_date =
+				hfs_h_to_mtime(hfs_get_nl(meta.fi_mtime));
+			hfs_cat_mark_dirty(entry);
+		}
+	}
+
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	return count;
+}
+
+/*
+ * cap_info_truncate()
+ *
+ * This is the truncate field in the inode_operations structure for
+ * CAP metadata files.
+ */
+static void cap_info_truncate(struct inode *inode)
+{
+	/* Nothing is stored on disk; just clamp the size to the
+	   maximum fork length. */
+	if (inode->i_size > HFS_FORK_MAX) {
+		inode->i_size = HFS_FORK_MAX;
+	}
+}
diff --git a/fs/hfs/file_hdr.c b/fs/hfs/file_hdr.c
new file mode 100644
index 000000000..468a3f518
--- /dev/null
+++ b/fs/hfs/file_hdr.c
@@ -0,0 +1,940 @@
+/*
+ * linux/fs/hfs/file_hdr.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the file_ops and inode_ops for the metadata
+ * files under the AppleDouble and Netatalk representations.
+ *
+ * The source code distributions of Netatalk, versions 1.3.3b2 and
+ * 1.4b2, were used as a specification of the location and format of
+ * files used by Netatalk's afpd. No code from Netatalk appears in
+ * hfs_fs. hfs_fs is not a work ``derived'' from Netatalk in the
+ * sense of intellectual property law.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * XXX: Note the reason that there is no bmap() for AppleDouble
+ * header files is that the dynamic nature of their structure makes it
+ * very difficult to safely mmap them. Maybe in the distant future
+ * I'll get bored enough to implement it.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Forward declarations ================*/
+
+static hfs_rwret_t hdr_read(struct file *, char *, hfs_rwarg_t, loff_t *);
+static hfs_rwret_t hdr_write(struct file *, const char *,
+ hfs_rwarg_t, loff_t *);
+static void hdr_truncate(struct inode *);
+
+/*================ Global variables ================*/
+
+/* File operations for AppleDouble/Netatalk header files: read, write
+ * and the default fsync; everything else uses VFS defaults. */
+static struct file_operations hfs_hdr_operations = {
+	NULL,			/* lseek - default */
+	hdr_read,		/* read */
+	hdr_write,		/* write */
+	NULL,			/* readdir - bad */
+	NULL,			/* select - default */
+	NULL,			/* ioctl - default */
+	NULL,			/* mmap - XXX: not yet */
+	NULL,			/* no special open code */
+	NULL,			/* no special release code */
+	file_fsync,		/* fsync - default */
+	NULL,			/* fasync - default */
+	NULL,			/* check_media_change - none */
+	NULL,			/* revalidate - none */
+	NULL			/* lock - none */
+};
+
+/* Inode operations for header files: only truncate is implemented. */
+struct inode_operations hfs_hdr_inode_operations = {
+	&hfs_hdr_operations,	/* default file operations */
+	NULL,			/* create */
+	NULL,			/* lookup */
+	NULL,			/* link */
+	NULL,			/* unlink */
+	NULL,			/* symlink */
+	NULL,			/* mkdir */
+	NULL,			/* rmdir */
+	NULL,			/* mknod */
+	NULL,			/* rename */
+	NULL,			/* readlink */
+	NULL,			/* follow_link */
+	NULL,			/* readpage */
+	NULL,			/* writepage */
+	NULL,			/* bmap - XXX: not available since
+				   header part has no disk block */
+	hdr_truncate,		/* truncate */
+	NULL,			/* permission */
+	NULL,			/* smap */
+	NULL,			/* updatepage */
+	NULL			/* revalidate */
+};
+
+/* AppleDouble v2 layout for files: dates, Finder info, Macintosh
+ * info, real name, then the resource fork after the fixed header.
+ * Entries with length ~0 are auto-sized by dlength(). */
+const struct hfs_hdr_layout hfs_dbl_fil_hdr_layout = {
+	__constant_htonl(HFS_DBL_MAGIC),	/* magic   */
+	__constant_htonl(HFS_HDR_VERSION_2),	/* version */
+	5,					/* entries */
+	{					/* descr[] */
+		{HFS_HDR_DATES, offsetof(struct hfs_dbl_hdr, create_time), 16},
+		{HFS_HDR_FINFO, offsetof(struct hfs_dbl_hdr, finderinfo), 32},
+		{HFS_HDR_MACI,  offsetof(struct hfs_dbl_hdr, fileinfo), 4},
+		{HFS_HDR_FNAME, offsetof(struct hfs_dbl_hdr, real_name), ~0},
+		{HFS_HDR_RSRC,  HFS_DBL_HDR_LEN, ~0},
+	},
+	{					/* order[] (by offset) */
+		(struct hfs_hdr_descr *)&hfs_dbl_fil_hdr_layout.descr[0],
+		(struct hfs_hdr_descr *)&hfs_dbl_fil_hdr_layout.descr[1],
+		(struct hfs_hdr_descr *)&hfs_dbl_fil_hdr_layout.descr[2],
+		(struct hfs_hdr_descr *)&hfs_dbl_fil_hdr_layout.descr[3],
+		(struct hfs_hdr_descr *)&hfs_dbl_fil_hdr_layout.descr[4],
+	}
+};
+
+/* AppleDouble v2 layout for directories: same as the file layout
+ * but without a resource-fork entry. */
+const struct hfs_hdr_layout hfs_dbl_dir_hdr_layout = {
+	__constant_htonl(HFS_DBL_MAGIC),	/* magic   */
+	__constant_htonl(HFS_HDR_VERSION_2),	/* version */
+	4,					/* entries */
+	{					/* descr[] */
+		{HFS_HDR_DATES, offsetof(struct hfs_dbl_hdr, create_time), 16},
+		{HFS_HDR_FINFO, offsetof(struct hfs_dbl_hdr, finderinfo), 32},
+		{HFS_HDR_MACI,  offsetof(struct hfs_dbl_hdr, fileinfo), 4},
+		{HFS_HDR_FNAME, offsetof(struct hfs_dbl_hdr, real_name), ~0},
+	},
+	{					/* order[] (by offset) */
+		(struct hfs_hdr_descr *)&hfs_dbl_dir_hdr_layout.descr[0],
+		(struct hfs_hdr_descr *)&hfs_dbl_dir_hdr_layout.descr[1],
+		(struct hfs_hdr_descr *)&hfs_dbl_dir_hdr_layout.descr[2],
+		(struct hfs_hdr_descr *)&hfs_dbl_dir_hdr_layout.descr[3],
+	}
+};
+
+/* Netatalk (AppleDouble v1) layout.  Note descr[] is not in offset
+ * order here; order[] lists the descriptors sorted by offset, with
+ * the resource fork (descr[0]) last. */
+const struct hfs_hdr_layout hfs_nat_hdr_layout = {
+	__constant_htonl(HFS_DBL_MAGIC),	/* magic   */
+	__constant_htonl(HFS_HDR_VERSION_1),	/* version */
+	5,					/* entries */
+	{					/* descr[] */
+		{HFS_HDR_RSRC,  HFS_NAT_HDR_LEN, ~0},
+		{HFS_HDR_FNAME, offsetof(struct hfs_nat_hdr, real_name), ~0},
+		{HFS_HDR_COMNT, offsetof(struct hfs_nat_hdr, comment), 0},
+		{HFS_HDR_OLDI,  offsetof(struct hfs_nat_hdr, create_time), 16},
+		{HFS_HDR_FINFO, offsetof(struct hfs_nat_hdr, finderinfo), 32},
+	},
+	{					/* order[] (by offset) */
+		(struct hfs_hdr_descr *)&hfs_nat_hdr_layout.descr[1],
+		(struct hfs_hdr_descr *)&hfs_nat_hdr_layout.descr[2],
+		(struct hfs_hdr_descr *)&hfs_nat_hdr_layout.descr[3],
+		(struct hfs_hdr_descr *)&hfs_nat_hdr_layout.descr[4],
+		(struct hfs_hdr_descr *)&hfs_nat_hdr_layout.descr[0],
+	}
+};
+
+/*================ File-local variables ================*/
+
+/* 16-byte, space-padded filesystem name placed in the 'filler' field
+   of version-1 (Netatalk) headers — see hdr_build_meta(). */
+static const char fstype[16] =
+	{'M','a','c','i','n','t','o','s','h',' ',' ',' ',' ',' ',' ',' '};
+
+/*================ File-local data types ================*/
+
+/* In-memory image of the fixed-location part of a header file:
+ * magic, version, 16 filler bytes, entry count, and up to
+ * HFS_HDR_MAX 12-byte (id, offset, length) descriptors. */
+struct hdr_hdr {
+	hfs_lword_t	magic;
+	hfs_lword_t	version;
+	hfs_byte_t	filler[16];
+	hfs_word_t	entries;
+	hfs_byte_t	descrs[12*HFS_HDR_MAX];
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * dlength()
+ */
+/* Return the effective length of a header entry: the stated length,
+ * or — for entries declared with length ~0 — the current size of the
+ * corresponding data (fork size or filename length). */
+static int dlength(const struct hfs_hdr_descr *descr,
+		   const struct hfs_cat_entry *entry)
+{
+	hfs_u32 length = descr->length;
+
+	/* handle auto-sized entries */
+	if (length == ~0) {
+		switch (descr->id) {
+		case HFS_HDR_DATA:
+			if (entry->type == HFS_CDR_FIL) {
+				length = entry->u.file.data_fork.lsize;
+			} else {
+				length = 0;
+			}
+			break;
+
+		case HFS_HDR_RSRC:
+			if (entry->type == HFS_CDR_FIL) {
+				length = entry->u.file.rsrc_fork.lsize;
+			} else {
+				length = 0;
+			}
+			break;
+
+		case HFS_HDR_FNAME:
+			length = entry->key.CName.Len;
+			break;
+
+		default:
+			length = 0;
+		}
+	}
+	return length;
+}
+
+/*
+ * hdr_build_meta()
+ */
+/* Serialize the fixed-location portion of a header file into 'meta':
+ * magic, version, filler ("Macintosh" for version-1 headers), entry
+ * count, and one big-endian 12-byte descriptor per layout entry. */
+static void hdr_build_meta(struct hdr_hdr *meta,
+			   const struct hfs_hdr_layout *layout,
+			   const struct hfs_cat_entry *entry)
+{
+	const struct hfs_hdr_descr *descr;
+	hfs_byte_t *ptr;
+	int lcv;
+
+	hfs_put_nl(layout->magic, meta->magic);
+	hfs_put_nl(layout->version, meta->version);
+	if (layout->version == htonl(HFS_HDR_VERSION_1)) {
+		memcpy(meta->filler, fstype, 16);
+	} else {
+		memset(meta->filler, 0, 16);
+	}
+	hfs_put_hs(layout->entries, meta->entries);
+	memset(meta->descrs, 0, sizeof(meta->descrs));
+	for (lcv = 0, descr = layout->descr, ptr = meta->descrs;
+	     lcv < layout->entries; ++lcv, ++descr, ptr += 12) {
+		hfs_put_hl(descr->id, ptr);
+		hfs_put_hl(descr->offset, ptr + 4);
+		/* Auto-sized entries are resolved at build time. */
+		hfs_put_hl(dlength(descr, entry), ptr + 8);
+	}
+}
+
+/*
+ * dup_layout ()
+ */
+/* Allocate a private copy of a layout, rebasing the internal order[]
+ * pointers so they reference the copy's own descr[] array.
+ * Returns NULL if allocation fails. */
+static struct hfs_hdr_layout *dup_layout(const struct hfs_hdr_layout *old)
+{
+	struct hfs_hdr_layout *new;
+	int lcv;
+
+	if (HFS_NEW(new)) {
+		memcpy(new, old, sizeof(*new));
+		for (lcv = 0; lcv < new->entries; ++lcv) {
+			/* NOTE(review): a cast used as an lvalue is a GCC
+			 * extension, not standard C; the portable form is
+			 * new->order[lcv] = (struct hfs_hdr_descr *)
+			 *	((char *)new->order[lcv] +
+			 *	 ((char *)new - (char *)old));
+			 */
+			(char *)(new->order[lcv]) += (char *)new - (char *)old;
+		}
+	}
+	return new;
+}
+
+/*
+ * init_layout()
+ */
+/* Rebuild a layout from the raw 12-byte descriptors just written by
+ * the user ('descrs'), clearing any unused slots, then re-sort the
+ * order[] pointer array by entry offset. */
+static inline void init_layout(struct hfs_hdr_layout *layout,
+			       const hfs_byte_t *descrs)
+{
+	struct hfs_hdr_descr **base, **p, **q, *tmp;
+	int lcv, entries = layout->entries;
+
+	for (lcv = 0; lcv < entries; ++lcv, descrs += 12) {
+		layout->order[lcv] = &layout->descr[lcv];
+		layout->descr[lcv].id     = hfs_get_hl(descrs);
+		layout->descr[lcv].offset = hfs_get_hl(descrs + 4);
+		layout->descr[lcv].length = hfs_get_hl(descrs + 8);
+	}
+	/* Zero the remaining (unused) slots. */
+	for (lcv = layout->entries; lcv < HFS_HDR_MAX; ++lcv) {
+		layout->order[lcv] = NULL;
+		layout->descr[lcv].id     = 0;
+		layout->descr[lcv].offset = 0;
+		layout->descr[lcv].length = 0;
+	}
+
+	/* Sort the 'order' array using an insertion sort */
+	base = &layout->order[0];
+	for (p = (base+1); p < (base+entries); ++p) {
+		q=p;
+		while ((*q)->offset < (*(q-1))->offset) {
+			tmp = *q;
+			*q = *(q-1);
+			*(--q) = tmp;
+			if (q == base) break;
+		}
+	}
+}
+
+/*
+ * adjust_forks()
+ */
+/* After the descriptor table has been rewritten, resize the data and
+ * resource forks to match any changed DATA/RSRC entry lengths,
+ * adjusting the on-disk extents and dirtying the catalog entry. */
+static inline void adjust_forks(struct hfs_cat_entry *entry,
+				const struct hfs_hdr_layout *layout)
+{
+	int lcv;
+
+	for (lcv = 0; lcv < layout->entries; ++lcv) {
+		const struct hfs_hdr_descr *descr = &layout->descr[lcv];
+
+		if ((descr->id == HFS_HDR_DATA) &&
+		    (descr->length != entry->u.file.data_fork.lsize)) {
+			entry->u.file.data_fork.lsize = descr->length;
+			hfs_extent_adj(&entry->u.file.data_fork);
+			hfs_cat_mark_dirty(entry);
+		} else if ((descr->id == HFS_HDR_RSRC) &&
+			   (descr->length != entry->u.file.rsrc_fork.lsize)) {
+			entry->u.file.rsrc_fork.lsize = descr->length;
+			hfs_extent_adj(&entry->u.file.rsrc_fork);
+			hfs_cat_mark_dirty(entry);
+		}
+	}
+}
+
+/*
+ * get_dates()
+ */
+/* Fill dates[] with {create, modify, backup} times in big-endian
+ * form: Unix epoch seconds when mounted for AFPD compatibility,
+ * otherwise native Macintosh (HFS) time. */
+static void get_dates(const struct hfs_cat_entry *entry,
+		      const struct inode *inode,  hfs_u32 dates[3])
+{
+	if (HFS_SB(inode->i_sb)->s_afpd) {
+		/* AFPD compatible: use un*x times */
+		dates[0] = htonl(hfs_m_to_utime(entry->create_date));
+		dates[1] = htonl(hfs_m_to_utime(entry->modify_date));
+		dates[2] = htonl(hfs_m_to_utime(entry->backup_date));
+	} else {
+		dates[0] = hfs_m_to_htime(entry->create_date);
+		dates[1] = hfs_m_to_htime(entry->modify_date);
+		dates[2] = hfs_m_to_htime(entry->backup_date);
+	}
+}
+
+/*
+ * set_dates()
+ */
+/* Inverse of get_dates(): update the catalog entry's {create, modify,
+ * backup} times from dates[], interpreting them as Unix or Macintosh
+ * time depending on the afpd mount option.  A changed modify time
+ * also updates the inode's c/a/mtime.  Only changed fields dirty the
+ * catalog entry. */
+static void set_dates(struct hfs_cat_entry *entry, struct inode *inode,
+		      const hfs_u32 *dates)
+{
+	hfs_u32 tmp;
+	if (HFS_SB(inode->i_sb)->s_afpd) {
+		/* AFPD compatible: use un*x times */
+		tmp = hfs_u_to_mtime(ntohl(dates[0]));
+		if (entry->create_date != tmp) {
+			entry->create_date = tmp;
+			hfs_cat_mark_dirty(entry);
+		}
+		tmp = hfs_u_to_mtime(ntohl(dates[1]));
+		if (entry->modify_date != tmp) {
+			entry->modify_date = tmp;
+			inode->i_ctime = inode->i_atime = inode->i_mtime =
+				ntohl(dates[1]);
+			hfs_cat_mark_dirty(entry);
+		}
+		tmp = hfs_u_to_mtime(ntohl(dates[2]));
+		if (entry->backup_date != tmp) {
+			entry->backup_date = tmp;
+			hfs_cat_mark_dirty(entry);
+		}
+	} else {
+		tmp = hfs_h_to_mtime(dates[0]);
+		if (entry->create_date != tmp) {
+			entry->create_date = tmp;
+			hfs_cat_mark_dirty(entry);
+		}
+		tmp = hfs_h_to_mtime(dates[1]);
+		if (entry->modify_date != tmp) {
+			entry->modify_date = tmp;
+			inode->i_ctime = inode->i_atime = inode->i_mtime =
+				hfs_h_to_utime(dates[1]);
+			hfs_cat_mark_dirty(entry);
+		}
+		tmp = hfs_h_to_mtime(dates[2]);
+		if (entry->backup_date != tmp) {
+			entry->backup_date = tmp;
+			hfs_cat_mark_dirty(entry);
+		}
+	}
+}
+
+/*
+ * hdr_read()
+ *
+ * This is the read() entry in the file_operations structure for
+ * header files. The purpose is to transfer up to 'count' bytes
+ * from the file corresponding to 'inode', beginning at offset
+ * '*ppos'. The data is transferred to user-space at the address
+ * 'buf'. Returns the number of bytes successfully transferred.
+ */
+/* XXX: what about the entry count changing on us? */
+static hfs_rwret_t hdr_read(struct file * filp, char * buf,
+			    hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	const struct hfs_hdr_layout *layout;
+	off_t start, length, offset;
+	off_t pos = *ppos;
+	int left, lcv, read = 0;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_hdr_read: mode = %07o\n",inode->i_mode);
+		return -EINVAL;
+	}
+
+	/* A per-inode layout (created by hdr_write/hdr_truncate)
+	   overrides the representation's default layout. */
+	if (HFS_I(inode)->layout) {
+		layout = HFS_I(inode)->layout;
+	} else {
+		layout = HFS_I(inode)->default_layout;
+	}
+
+	/* Adjust count to fit within the bounds of the file */
+	if ((pos >= inode->i_size) || (count <= 0)) {
+		return 0;
+	} else if (count > inode->i_size - pos) {
+		count = inode->i_size - pos;
+	}
+
+	/* Handle the fixed-location portion: magic, version, filler,
+	   entry count and the 12-byte descriptors. */
+	length = sizeof(hfs_u32) + sizeof(hfs_u32) + 16 +
+		 sizeof(hfs_u16) + layout->entries * (3 * sizeof(hfs_u32));
+	if (pos < length) {
+		struct hdr_hdr meta;
+
+		left = length - pos;
+		if (left > count) {
+			left = count;
+		}
+
+		hdr_build_meta(&meta, layout, entry);
+		/* NOTE(review): copy_to_user() result is ignored. */
+		copy_to_user(buf, ((char *)&meta) + pos, left);
+		count -= left;
+		read += left;
+		pos += left;
+		buf += left;
+	}
+	if (!count) {
+		goto done;
+	}
+
+	/* Handle the actual data, walking the entries in offset order. */
+	for (lcv = 0; count && (lcv < layout->entries); ++lcv) {
+		const struct hfs_hdr_descr *descr = layout->order[lcv];
+		struct hfs_fork *fork;
+		char tmp[16], *p;
+		off_t limit;
+
+		/* stop reading if we run out of descriptors early */
+		if (!descr) {
+			break;
+		}
+
+		/* find start and length of this entry */
+		start = descr->offset;
+		length = dlength(descr, entry);
+
+		/* Skip to next entry if this one is empty or isn't needed */
+		if (!length || (pos >= start + length)) {
+			continue;
+		}
+
+		/* Pad with zeros to the start of this entry if needed */
+		if (pos < start) {
+			left = start - pos;
+			if (left > count) {
+				left = count;
+			}
+			clear_user(buf, left);
+			count -= left;
+			read += left;
+			pos += left;
+			buf += left;
+		}
+		if (!count) {
+			goto done;
+		}
+
+		/* locate and/or construct the data for this entry:
+		   either a fork read ('fork') or an in-memory image ('p'). */
+		fork = NULL;
+		p = NULL;
+		switch (descr->id) {
+		case HFS_HDR_DATA:
+			fork = &entry->u.file.data_fork;
+			limit = fork->lsize;
+			break;
+
+		case HFS_HDR_RSRC:
+			fork = &entry->u.file.rsrc_fork;
+			limit = fork->lsize;
+			break;
+
+		case HFS_HDR_FNAME:
+			p = entry->key.CName.Name;
+			limit = entry->key.CName.Len;
+			break;
+
+		case HFS_HDR_OLDI:
+		case HFS_HDR_DATES:
+			get_dates(entry, inode, (hfs_u32 *)tmp);
+			if (descr->id == HFS_HDR_DATES) {
+				/* v2: fourth word repeats the backup date. */
+				memcpy(tmp + 12, tmp + 4, 4);
+			} else if ((entry->type == HFS_CDR_FIL) &&
+				   (entry->u.file.flags & HFS_FIL_LOCK)) {
+				/* v1: fourth word holds AFP attributes. */
+				hfs_put_hl(HFS_AFP_RDONLY, tmp + 12);
+			} else {
+				hfs_put_nl(0, tmp + 12);
+			}
+			p = tmp;
+			limit = 16;
+			break;
+
+		case HFS_HDR_FINFO:
+			p = (char *)&entry->info;
+			limit = 32;
+			break;
+
+		case HFS_HDR_MACI:
+			hfs_put_ns(0, tmp);
+			if (entry->type == HFS_CDR_FIL) {
+				hfs_put_hs(entry->u.file.flags, tmp + 2);
+			} else {
+				hfs_put_ns(entry->u.dir.flags, tmp + 2);
+			}
+			p = tmp;
+			limit = 4;
+			break;
+
+		default:
+			limit = 0;
+		}
+
+		/* limit the transfer to the available data
+		   or to the stated length of the entry. */
+		if (length > limit) {
+			length = limit;
+		}
+		offset = pos - start;
+		left = length - offset;
+		if (left > count) {
+			left = count;
+		}
+		if (left <= 0) {
+			continue;
+		}
+
+		/* transfer the data */
+		if (p) {
+			copy_to_user(buf, p + offset, left);
+		} else if (fork) {
+			left = hfs_do_read(inode, fork, offset, buf, left,
+					   filp->f_reada != 0);
+			if (left > 0) {
+				filp->f_reada = 1;
+			} else if (!read) {
+				/* Nothing transferred: propagate the error. */
+				return left;
+			} else {
+				goto done;
+			}
+		}
+		count -= left;
+		read += left;
+		pos += left;
+		buf += left;
+	}
+
+	/* Pad the file out with zeros */
+	if (count) {
+		clear_user(buf, count);
+		read += count;
+		pos += count;
+	}
+
+done:
+	if (read) {
+		inode->i_atime = CURRENT_TIME;
+		*ppos = pos;
+	}
+	return read;
+}
+
+/*
+ * hdr_write()
+ *
+ * This is the write() entry in the file_operations structure for
+ * header files. The purpose is to transfer up to 'count' bytes
+ * to the file corresponding to 'inode' beginning at offset
+ * '*ppos' from user-space at the address 'buf'.
+ * The return value is the number of bytes actually transferred.
+ */
+static hfs_rwret_t hdr_write(struct file *filp, const char *buf,
+			     hfs_rwarg_t count, loff_t *ppos)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	struct hfs_hdr_layout *layout;
+	off_t start, length, offset;
+	int left, lcv, written = 0;
+	struct hdr_hdr meta;
+	int built_meta = 0;
+	off_t pos;
+
+	if (!S_ISREG(inode->i_mode)) {
+		hfs_warn("hfs_hdr_write: mode = %07o\n", inode->i_mode);
+		return -EINVAL;
+	}
+	if (count <= 0) {
+		return 0;
+	}
+
+	pos = (filp->f_flags & O_APPEND) ? inode->i_size : *ppos;
+
+	/* Writing requires a private, mutable copy of the layout.
+	   NOTE(review): dup_layout() can return NULL on allocation
+	   failure, which is not checked before use below. */
+	if (!HFS_I(inode)->layout) {
+		HFS_I(inode)->layout = dup_layout(HFS_I(inode)->default_layout);
+	}
+	layout = HFS_I(inode)->layout;
+
+	/* Handle the 'magic', 'version', 'filler' and 'entries' fields */
+	length = sizeof(hfs_u32) + sizeof(hfs_u32) + 16 + sizeof(hfs_u16);
+	if (pos < length) {
+		hdr_build_meta(&meta, layout, entry);
+		built_meta = 1;
+
+		left = length - pos;
+		if (left > count) {
+			left = count;
+		}
+
+		/* NOTE(review): copy_from_user() result is ignored
+		   (here and below). */
+		copy_from_user(((char *)&meta) + pos, buf, left);
+		layout->magic = hfs_get_nl(meta.magic);
+		layout->version = hfs_get_nl(meta.version);
+		layout->entries = hfs_get_hs(meta.entries);
+		if (layout->entries > HFS_HDR_MAX) {
+			/* XXX: should allocate slots dynamically */
+			hfs_warn("hfs_hdr_write: TRUNCATING TO %d "
+				 "DESCRIPTORS\n", HFS_HDR_MAX);
+			layout->entries = HFS_HDR_MAX;
+		}
+
+		count -= left;
+		written += left;
+		pos += left;
+		buf += left;
+	}
+	if (!count) {
+		goto done;
+	}
+
+	/* We know for certain how many entries we have, so process them */
+	length += layout->entries * 3 * sizeof(hfs_u32);
+	if (pos < length) {
+		if (!built_meta) {
+			hdr_build_meta(&meta, layout, entry);
+		}
+
+		left = length - pos;
+		if (left > count) {
+			left = count;
+		}
+
+		copy_from_user(((char *)&meta) + pos, buf, left);
+		/* Rebuild and re-sort the layout from the new table. */
+		init_layout(layout, meta.descrs);
+
+		count -= left;
+		written += left;
+		pos += left;
+		buf += left;
+
+		/* Handle possible size changes for the forks */
+		if (entry->type == HFS_CDR_FIL) {
+			adjust_forks(entry, layout);
+		}
+	}
+
+	/* Handle the actual data, walking entries in offset order. */
+	for (lcv = 0; count && (lcv < layout->entries); ++lcv) {
+		struct hfs_hdr_descr *descr = layout->order[lcv];
+		struct hfs_fork *fork;
+		char tmp[16], *p;
+		off_t limit;
+
+		/* stop writing if we run out of descriptors early */
+		if (!descr) {
+			break;
+		}
+
+		/* find start and length of this entry */
+		start = descr->offset;
+		if ((descr->id == HFS_HDR_DATA) ||
+		    (descr->id == HFS_HDR_RSRC)) {
+			/* Forks may grow: allow writing to the maximum. */
+			if (entry->type == HFS_CDR_FIL) {
+				length = 0x7fffffff - start;
+			} else {
+				continue;
+			}
+		} else {
+			length = dlength(descr, entry);
+		}
+
+		/* Trim length to avoid overlap with the next entry.
+		   NOTE(review): when lcv == layout->entries-1 and
+		   entries == HFS_HDR_MAX, order[lcv+1] reads one slot
+		   past the end of the order[] array. */
+		if (layout->order[lcv+1] &&
+		    ((start + length) > layout->order[lcv+1]->offset)) {
+			length = layout->order[lcv+1]->offset - start;
+		}
+
+		/* Skip to next entry if this one is empty or isn't needed */
+		if (!length || (pos >= start + length)) {
+			continue;
+		}
+
+		/* Skip any padding that may exist between entries */
+		if (pos < start) {
+			left = start - pos;
+			if (left > count) {
+				left = count;
+			}
+			count -= left;
+			written += left;
+			pos += left;
+			buf += left;
+		}
+		if (!count) {
+			goto done;
+		}
+
+		/* locate and/or construct the data for this entry */
+		fork = NULL;
+		p = NULL;
+		switch (descr->id) {
+		case HFS_HDR_DATA:
+#if 0
+/* Can't yet write to the data fork via a header file, since there is the
+ * possibility to write via the data file, and the only locking is at the
+ * inode level.
+ */
+			fork = &entry->u.file.data_fork;
+			limit = length;
+#else
+			limit = 0;
+#endif
+			break;
+
+		case HFS_HDR_RSRC:
+			fork = &entry->u.file.rsrc_fork;
+			limit = length;
+			break;
+
+		case HFS_HDR_OLDI:
+		case HFS_HDR_DATES:
+			/* Build the current image so a partial write
+			   only modifies the bytes it covers. */
+			get_dates(entry, inode, (hfs_u32 *)tmp);
+			if (descr->id == HFS_HDR_DATES) {
+				memcpy(tmp + 12, tmp + 4, 4);
+			} else if ((entry->type == HFS_CDR_FIL) &&
+				   (entry->u.file.flags & HFS_FIL_LOCK)) {
+				hfs_put_hl(HFS_AFP_RDONLY, tmp + 12);
+			} else {
+				hfs_put_nl(0, tmp + 12);
+			}
+			p = tmp;
+			limit = 16;
+			break;
+
+		case HFS_HDR_FINFO:
+			p = (char *)&entry->info;
+			limit = 32;
+			break;
+
+		case HFS_HDR_MACI:
+			hfs_put_ns(0, tmp);
+			if (entry->type == HFS_CDR_FIL) {
+				hfs_put_hs(entry->u.file.flags, tmp + 2);
+			} else {
+				hfs_put_ns(entry->u.dir.flags, tmp + 2);
+			}
+			p = tmp;
+			limit = 4;
+			break;
+
+		case HFS_HDR_FNAME:	/* Can't rename a file this way */
+		default:
+			limit = 0;
+		}
+
+		/* limit the transfer to the available data
+		   or to the stated length of the entry. */
+		if (length > limit) {
+			length = limit;
+		}
+		offset = pos - start;
+		left = length - offset;
+		if (left > count) {
+			left = count;
+		}
+		if (left <= 0) {
+			continue;
+		}
+
+		/* transfer the data from user space */
+		if (p) {
+			copy_from_user(p + offset, buf, left);
+		} else if (fork) {
+			left = hfs_do_write(inode, fork, offset, buf, left);
+		}
+
+		/* process the data just written */
+		switch (descr->id) {
+		case HFS_HDR_OLDI:
+			set_dates(entry, inode, (hfs_u32 *)tmp);
+			/* v1: fourth word carries the AFP write-inhibit
+			   bit; couple it to the HFS lock flag. */
+			if (entry->type == HFS_CDR_FIL) {
+				hfs_u8 new_flags = entry->u.file.flags;
+
+				if (hfs_get_nl(tmp+12) & htonl(HFS_AFP_WRI)) {
+					new_flags |= HFS_FIL_LOCK;
+				} else {
+					new_flags &= ~HFS_FIL_LOCK;
+				}
+
+				if (new_flags != entry->u.file.flags) {
+					entry->u.file.flags = new_flags;
+					hfs_cat_mark_dirty(entry);
+					hfs_file_fix_mode(entry);
+				}
+			}
+			break;
+
+		case HFS_HDR_DATES:
+			set_dates(entry, inode, (hfs_u32 *)tmp);
+			break;
+
+		case HFS_HDR_FINFO:
+			/* 'p' pointed straight into entry->info. */
+			hfs_cat_mark_dirty(entry);
+			break;
+
+		case HFS_HDR_MACI:
+			if (entry->type == HFS_CDR_DIR) {
+				hfs_u16 new_flags = hfs_get_ns(tmp + 2);
+
+				if (entry->u.dir.flags != new_flags) {
+					entry->u.dir.flags = new_flags;
+					hfs_cat_mark_dirty(entry);
+				}
+			} else {
+				hfs_u8 new_flags = tmp[3];
+				hfs_u8 changed = entry->u.file.flags^new_flags;
+
+				if (changed) {
+					entry->u.file.flags = new_flags;
+					hfs_cat_mark_dirty(entry);
+					if (changed & HFS_FIL_LOCK) {
+						hfs_file_fix_mode(entry);
+					}
+				}
+			}
+			break;
+
+		case HFS_HDR_DATA:
+		case HFS_HDR_RSRC:
+			if (left <= 0) {
+				/* Fork write failed: return the error if
+				   nothing was written, else stop short. */
+				if (!written) {
+					return left;
+				} else {
+					goto done;
+				}
+			} else if (fork->lsize > descr->length) {
+				descr->length = fork->lsize;
+			}
+			break;
+
+		case HFS_HDR_FNAME:	/* Can't rename a file this way */
+		default:
+			break;
+		}
+
+		count -= left;
+		written += left;
+		pos += left;
+		buf += left;
+	}
+
+	/* Skip any padding at the end */
+	if (count) {
+		written += count;
+		pos += count;
+	}
+
+done:
+	*ppos = pos;
+	if (written > 0) {
+		if (pos > inode->i_size)
+			inode->i_size = pos;
+		inode->i_mtime = inode->i_atime = CURRENT_TIME;
+	}
+	return written;
+}
+
+/*
+ * hdr_truncate()
+ *
+ * This is the truncate field in the inode_operations structure for
+ * header files. The purpose is to allocate or release blocks as needed
+ * to satisfy a change in file length.
+ */
+static void hdr_truncate(struct inode *inode)
+{
+	struct hfs_hdr_layout *layout;
+	size_t size = inode->i_size;
+	int lcv, last;
+
+	/* Truncation mutates entry lengths, so a private layout copy
+	   is required.  NOTE(review): dup_layout() can return NULL on
+	   allocation failure, which is not checked before use. */
+	if (!HFS_I(inode)->layout) {
+		HFS_I(inode)->layout = dup_layout(HFS_I(inode)->default_layout);
+	}
+	layout = HFS_I(inode)->layout;
+
+	last = layout->entries - 1;
+	for (lcv = 0; lcv <= last; ++lcv) {
+		struct hfs_hdr_descr *descr = layout->order[lcv];
+		struct hfs_fork *fork;
+		hfs_u32 offset;
+
+		if (!descr) {
+			break;
+		}
+
+		/* Only fork-backed entries consume disk space. */
+		if (descr->id == HFS_HDR_RSRC) {
+			fork = &HFS_I(inode)->entry->u.file.rsrc_fork;
+#if 0
+/* Can't yet truncate the data fork via a header file, since there is the
+ * possibility to truncate via the data file, and the only locking is at
+ * the inode level.
+ */
+		} else if (descr->id == HFS_HDR_DATA) {
+			fork = &HFS_I(inode)->entry->u.file.data_fork;
+#endif
+		} else {
+			continue;
+		}
+
+		offset = descr->offset;
+
+		/* Interior entries that still fit are left alone. */
+		if ((lcv != last) && ((offset + descr->length) <= size)) {
+			continue;
+		}
+
+		if (offset < size) {
+			descr->length = size - offset;
+		} else {
+			descr->length = 0;
+		}
+		/* Resize the fork and its extents to the new length. */
+		if (fork->lsize != descr->length) {
+			fork->lsize = descr->length;
+			hfs_extent_adj(fork);
+			hfs_cat_mark_dirty(HFS_I(inode)->entry);
+		}
+	}
+}
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
new file mode 100644
index 000000000..ccc2f0cae
--- /dev/null
+++ b/fs/hfs/hfs.h
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/hfs/hfs.h
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ */
+
+#ifndef _HFS_H
+#define _HFS_H
+
+#include <linux/hfs_sysdep.h>
+#include <linux/hfs_fs.h>
+
+#define HFS_NEW(X) ((X) = hfs_malloc(sizeof(*(X))))
+#define HFS_DELETE(X) { hfs_free((X), sizeof(*(X))); (X) = NULL; }
+
+/* offsets to various blocks */
+#define HFS_DD_BLK 0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK 1 /* First block of partition map */
+#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
+
+/* magic numbers for various internal structures */
+#define HFS_FILE_MAGIC 0x4801
+#define HFS_DIR_MAGIC 0x4802
+#define HFS_MDB_MAGIC 0x4803
+#define HFS_EXT_MAGIC 0x4804 /* XXX currently unused */
+#define HFS_BREC_MAGIC 0x4811 /* XXX currently unused */
+#define HFS_BTREE_MAGIC 0x4812
+#define HFS_BNODE_MAGIC 0x4813
+
+/* various FIXED size parameters */
+#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
+#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
+#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
+#define HFS_NAMEMAX (3*31) /* max size of ENCODED filename */
+#define HFS_BM_MAXBLOCKS (16) /* max number of bitmap blocks */
+#define HFS_BM_BPB (8*HFS_SECTOR_SIZE) /* number of bits per bitmap block */
+#define HFS_MAX_VALENCE 32767U
+#define HFS_FORK_MAX (0x7FFFFFFF)
+
+/* Meanings of the drAtrb field of the MDB,
+ * Reference: _Inside Macintosh: Files_ p. 2-61
+ */
+#define HFS_SB_ATTRIB_HLOCK 0x0080
+#define HFS_SB_ATTRIB_CLEAN 0x0100
+#define HFS_SB_ATTRIB_SPARED 0x0200
+#define HFS_SB_ATTRIB_SLOCK 0x8000
+
+/* 2**16 - 1 */
+#define HFS_USHRT_MAX 65535
+
+/* Some special File ID numbers */
+#define HFS_POR_CNID 1 /* Parent Of the Root */
+#define HFS_ROOT_CNID 2 /* ROOT directory */
+#define HFS_EXT_CNID 3 /* EXTents B-tree */
+#define HFS_CAT_CNID 4 /* CATalog B-tree */
+#define HFS_BAD_CNID 5 /* BAD blocks file */
+
+/* values for hfs_cat_rec.cdrType */
+#define HFS_CDR_DIR 0x01
+#define HFS_CDR_FIL 0x02
+#define HFS_CDR_THD 0x03
+#define HFS_CDR_FTH 0x04
+
+/* legal values for hfs_ext_key.FkType and hfs_file.fork */
+#define HFS_FK_DATA 0x00
+#define HFS_FK_RSRC 0xFF
+
+/* bits in hfs_fil_entry.Flags */
+#define HFS_FIL_LOCK 0x01
+#define HFS_FIL_THD 0x02
+#define HFS_FIL_USED 0x80
+
+/* Access types used when requesting access to a B-node */
+#define HFS_LOCK_NONE 0x0000 /* Illegal */
+#define HFS_LOCK_READ 0x0001 /* read-only access */
+#define HFS_LOCK_RESRV 0x0002 /* might potentially modify */
+#define HFS_LOCK_WRITE 0x0003 /* will modify now (exclusive access) */
+#define HFS_LOCK_MASK 0x000f
+
+/* Flags field of the hfs_path_elem */
+#define HFS_BPATH_FIRST 0x0100
+#define HFS_BPATH_OVERFLOW 0x0200
+#define HFS_BPATH_UNDERFLOW 0x0400
+#define HFS_BPATH_MASK 0x0f00
+
+/* Flags for hfs_bfind() */
+#define HFS_BFIND_EXACT 0x0010
+#define HFS_BFIND_LOCK 0x0020
+
+/* Modes for hfs_bfind() */
+#define HFS_BFIND_WRITE (HFS_LOCK_RESRV|HFS_BFIND_EXACT|HFS_BFIND_LOCK)
+#define HFS_BFIND_READ_EQ (HFS_LOCK_READ|HFS_BFIND_EXACT)
+#define HFS_BFIND_READ_LE (HFS_LOCK_READ)
+#define HFS_BFIND_INSERT (HFS_LOCK_RESRV|HFS_BPATH_FIRST|HFS_BPATH_OVERFLOW)
+#define HFS_BFIND_DELETE \
+ (HFS_LOCK_RESRV|HFS_BFIND_EXACT|HFS_BPATH_FIRST|HFS_BPATH_UNDERFLOW)
+
+/*======== HFS structures as they appear on the disk ========*/
+
+/* Pascal-style string of up to 31 characters */
+struct hfs_name {
+ hfs_byte_t Len;
+ hfs_byte_t Name[31];
+};
+
+typedef struct {
+ hfs_word_t v;
+ hfs_word_t h;
+} hfs_point_t;
+
+typedef struct {
+ hfs_word_t top;
+ hfs_word_t left;
+ hfs_word_t bottom;
+ hfs_word_t right;
+} hfs_rect_t;
+
+typedef struct {
+ hfs_lword_t fdType;
+ hfs_lword_t fdCreator;
+ hfs_word_t fdFlags;
+ hfs_point_t fdLocation;
+ hfs_word_t fdFldr;
+} hfs_finfo_t;
+
+typedef struct {
+ hfs_word_t fdIconID;
+ hfs_byte_t fdUnused[8];
+ hfs_word_t fdComment;
+ hfs_lword_t fdPutAway;
+} hfs_fxinfo_t;
+
+typedef struct {
+ hfs_rect_t frRect;
+ hfs_word_t frFlags;
+ hfs_point_t frLocation;
+ hfs_word_t frView;
+} hfs_dinfo_t;
+
+typedef struct {
+ hfs_point_t frScroll;
+ hfs_lword_t frOpenChain;
+ hfs_word_t frUnused;
+ hfs_word_t frComment;
+ hfs_lword_t frPutAway;
+} hfs_dxinfo_t;
+
+union hfs_finder_info {
+ struct {
+ hfs_finfo_t finfo;
+ hfs_fxinfo_t fxinfo;
+ } file;
+ struct {
+ hfs_dinfo_t dinfo;
+ hfs_dxinfo_t dxinfo;
+ } dir;
+};
+
+/* A btree record key on disk */
+struct hfs_bkey {
+ hfs_byte_t KeyLen; /* number of bytes in the key */
+ hfs_byte_t value[1]; /* (KeyLen) bytes of key */
+};
+
+/* Cast to a pointer to a generic bkey.  Evaluating (X)->KeyLen (and
+   discarding the result) makes the compiler verify that X points to a
+   structure that actually begins with a KeyLen field before casting. */
+#define HFS_BKEY(X) (((void)((X)->KeyLen)), ((struct hfs_bkey *)(X)))
+
+/* The key used in the catalog b-tree: */
+struct hfs_cat_key {
+ hfs_byte_t KeyLen; /* number of bytes in the key */
+ hfs_byte_t Resrv1; /* padding */
+ hfs_lword_t ParID; /* CNID of the parent dir */
+ struct hfs_name CName; /* The filename of the entry */
+};
+
+/* The key used in the extents b-tree: */
+struct hfs_ext_key {
+ hfs_byte_t KeyLen; /* number of bytes in the key */
+ hfs_byte_t FkType; /* HFS_FK_{DATA,RSRC} */
+ hfs_lword_t FNum; /* The File ID of the file */
+ hfs_word_t FABN; /* allocation blocks number*/
+};
+
+/*======== Data structures kept in memory ========*/
+
+/*
+ * struct hfs_mdb
+ *
+ * The fields from the MDB of an HFS filesystem
+ */
+struct hfs_mdb {
+ int magic; /* A magic number */
+ unsigned char vname[28]; /* The volume name */
+ hfs_sysmdb sys_mdb; /* superblock */
+ hfs_buffer buf; /* The hfs_buffer
+ holding the real
+ superblock (aka VIB
+ or MDB) */
+ hfs_buffer alt_buf; /* The hfs_buffer holding
+ the alternate superblock */
+ hfs_buffer bitmap[16]; /* The hfs_buffer holding the
+ allocation bitmap */
+ struct hfs_btree * ext_tree; /* Information about
+ the extents b-tree */
+ struct hfs_btree * cat_tree; /* Information about
+ the catalog b-tree */
+ hfs_u32 file_count; /* The number of
+ regular files in
+ the filesystem */
+ hfs_u32 dir_count; /* The number of
+ directories in the
+ filesystem */
+ hfs_u32 next_id; /* The next available
+ file id number */
+ hfs_u32 clumpablks; /* The number of allocation
+ blocks to try to add when
+ extending a file */
+ hfs_u32 write_count; /* The number of MDB
+ writes (a sort of
+ version number) */
+ hfs_u32 fs_start; /* The first 512-byte
+ block represented
+ in the bitmap */
+ hfs_u32 create_date; /* In network byte-order */
+ hfs_u32 modify_date; /* In network byte-order */
+ hfs_u32 backup_date; /* In network byte-order */
+ hfs_u16 root_files; /* The number of
+ regular
+ (non-directory)
+ files in the root
+ directory */
+ hfs_u16 root_dirs; /* The number of
+ directories in the
+ root directory */
+ hfs_u16 fs_ablocks; /* The number of
+ allocation blocks
+ in the filesystem */
+ hfs_u16 free_ablocks; /* The number of unused
+ allocation blocks
+ in the filesystem */
+ hfs_u16 alloc_blksz; /* The number of
+ 512-byte blocks per
+ "allocation block" */
+ hfs_u16 attrib; /* Attribute word */
+ hfs_wait_queue rename_wait;
+ int rename_lock;
+ hfs_wait_queue bitmap_wait;
+ int bitmap_lock;
+ struct list_head entry_dirty;
+};
+
+/*
+ * struct hfs_extent
+ *
+ * The offset to allocation block mapping for a given file is
+ * contained in a series of these structures. Each (struct
+ * hfs_extent) records up to three runs of contiguous allocation
+ * blocks. An allocation block is a contiguous group of physical
+ * blocks.
+ */
+struct hfs_extent {
+ int magic; /* A magic number */
+ unsigned short start; /* Where in the file this record
+ begins (in allocation blocks) */
+ unsigned short end; /* Where in the file this record
+ ends (in allocation blocks) */
+ unsigned short block[3]; /* The allocation block on disk which
+ begins this extent */
+ unsigned short length[3]; /* The number of allocation blocks
+ in this extent */
+ struct hfs_extent *next; /* Next extent record for this file */
+ struct hfs_extent *prev; /* Previous extent record for this file */
+ int count; /* Number of times it is used */
+};
+
+/*
+ * struct hfs_dir
+ *
+ * This structure holds information specific
+ * to a directory in an HFS filesystem.
+ */
+struct hfs_dir {
+ int magic; /* A magic number */
+ hfs_u16 flags;
+ hfs_u16 dirs; /* Number of directories in this one */
+ hfs_u16 files; /* Number of files in this directory */
+ int readers;
+ hfs_wait_queue read_wait;
+ int writers;
+ hfs_wait_queue write_wait;
+};
+
+/*
+ * struct hfs_fork
+ *
+ * This structure holds the information
+ * specific to a single fork of a file.
+ */
+struct hfs_fork {
+ struct hfs_cat_entry *entry; /* The file this fork is part of */
+ struct hfs_extent first; /* The first extent record for
+ this fork */
+ struct hfs_extent *cache; /* The most-recently accessed
+ extent record for this fork */
+ hfs_u32 lsize; /* The logical size in bytes */
+ hfs_u32 psize; /* The phys size (512-byte blocks) */
+ hfs_u8 fork; /* Which fork is this? */
+};
+
+/*
+ * struct hfs_file
+ *
+ * This structure holds information specific
+ * to a file in an HFS filesystem.
+ */
+struct hfs_file {
+ int magic;
+ struct hfs_fork data_fork;
+ struct hfs_fork rsrc_fork;
+ hfs_u16 clumpablks;
+ hfs_u8 flags;
+};
+
+/*
+ * struct hfs_cat_entry
+ *
+ * This structure holds information about a
+ * file or directory in an HFS filesystem.
+ *
+ * 'wait' must remain 1st and 'next' 2nd since we do some pointer arithmetic.
+ */
+struct hfs_cat_entry {
+ hfs_wait_queue wait;
+ struct list_head hash;
+ struct list_head list;
+ struct list_head dirty;
+ struct hfs_mdb *mdb;
+ hfs_sysentry sys_entry;
+ struct hfs_cat_key key;
+ union hfs_finder_info info;
+ hfs_u32 cnid; /* In network byte-order */
+ hfs_u32 create_date; /* In network byte-order */
+ hfs_u32 modify_date; /* In network byte-order */
+ hfs_u32 backup_date; /* In network byte-order */
+ unsigned short count;
+ unsigned long state;
+ hfs_u8 type;
+ union {
+ struct hfs_dir dir;
+ struct hfs_file file;
+ } u;
+};
+
+/* hfs entry state bits */
+#define HFS_DIRTY 1
+#define HFS_KEYDIRTY 2
+#define HFS_LOCK 4
+#define HFS_DELETED 8
+#define HFS_SUPERBLK 16
+
+/*
+ * struct hfs_bnode_ref
+ *
+ * A pointer to a (struct hfs_bnode) and the type of lock held on it.
+ */
+struct hfs_bnode_ref {
+ struct hfs_bnode *bn;
+ int lock_type;
+};
+
+/*
+ * struct hfs_belem
+ *
+ * An element of the path from the root of a B-tree to a leaf.
+ * Includes the reference to a (struct hfs_bnode), the index of
+ * the appropriate record in that node, and some flags.
+ */
+struct hfs_belem {
+ struct hfs_bnode_ref bnr;
+ int record;
+ int flags;
+};
+
+/*
+ * struct hfs_brec
+ *
+ * The structure returned by hfs_bfind() to describe the requested record.
+ */
+struct hfs_brec {
+ int keep_flags;
+ struct hfs_btree *tree;
+ struct hfs_belem *top;
+ struct hfs_belem *bottom;
+ struct hfs_belem elem[9];
+ struct hfs_bkey *key;
+ void *data; /* The actual data */
+};
+
+/*================ Function prototypes ================*/
+
+/* bdelete.c */
+extern int hfs_bdelete(struct hfs_btree *, const struct hfs_bkey *);
+
+/* bfind.c */
+extern void hfs_brec_relse(struct hfs_brec *, struct hfs_belem *);
+extern int hfs_bsucc(struct hfs_brec *, int);
+extern int hfs_bfind(struct hfs_brec *, struct hfs_btree *,
+ const struct hfs_bkey *, int);
+
+/* binsert.c */
+extern int hfs_binsert(struct hfs_btree *, const struct hfs_bkey *,
+ const void *, hfs_u16);
+
+/* bitmap.c */
+extern hfs_u16 hfs_vbm_count_free(const struct hfs_mdb *, hfs_u16);
+extern hfs_u16 hfs_vbm_search_free(const struct hfs_mdb *, hfs_u16 *);
+extern int hfs_set_vbm_bits(struct hfs_mdb *, hfs_u16, hfs_u16);
+extern int hfs_clear_vbm_bits(struct hfs_mdb *, hfs_u16, hfs_u16);
+
+/* bitops.c */
+extern hfs_u32 hfs_find_zero_bit(const hfs_u32 *, hfs_u32, hfs_u32);
+extern hfs_u32 hfs_count_zero_bits(const hfs_u32 *, hfs_u32, hfs_u32);
+
+/* btree.c */
+extern struct hfs_btree *hfs_btree_init(struct hfs_mdb *, ino_t,
+ hfs_byte_t *, hfs_u32, hfs_u32);
+extern void hfs_btree_free(struct hfs_btree *);
+extern void hfs_btree_commit(struct hfs_btree *, hfs_byte_t *, hfs_lword_t);
+
+/* catalog.c */
+extern void hfs_cat_init(void);
+extern void hfs_cat_put(struct hfs_cat_entry *);
+extern void hfs_cat_mark_dirty(struct hfs_cat_entry *);
+extern struct hfs_cat_entry *hfs_cat_get(struct hfs_mdb *,
+ const struct hfs_cat_key *);
+
+extern void hfs_cat_invalidate(struct hfs_mdb *);
+extern void hfs_cat_commit(struct hfs_mdb *);
+extern void hfs_cat_free(void);
+
+extern int hfs_cat_compare(const struct hfs_cat_key *,
+ const struct hfs_cat_key *);
+extern void hfs_cat_build_key(hfs_u32, const struct hfs_name *,
+ struct hfs_cat_key *);
+extern struct hfs_cat_entry *hfs_cat_parent(struct hfs_cat_entry *);
+
+extern int hfs_cat_open(struct hfs_cat_entry *, struct hfs_brec *);
+extern int hfs_cat_next(struct hfs_cat_entry *, struct hfs_brec *,
+ hfs_u16, hfs_u32 *, hfs_u8 *);
+extern void hfs_cat_close(struct hfs_cat_entry *, struct hfs_brec *);
+
+extern int hfs_cat_create(struct hfs_cat_entry *, struct hfs_cat_key *,
+ hfs_u8, hfs_u32, hfs_u32, struct hfs_cat_entry **);
+extern int hfs_cat_mkdir(struct hfs_cat_entry *, struct hfs_cat_key *,
+ struct hfs_cat_entry **);
+extern int hfs_cat_delete(struct hfs_cat_entry *, struct hfs_cat_entry *, int);
+extern int hfs_cat_move(struct hfs_cat_entry *, struct hfs_cat_entry *,
+ struct hfs_cat_entry *, struct hfs_cat_key *,
+ struct hfs_cat_entry **);
+
+/* extent.c */
+extern int hfs_ext_compare(const struct hfs_ext_key *,
+ const struct hfs_ext_key *);
+extern void hfs_extent_in(struct hfs_fork *, const hfs_byte_t *);
+extern void hfs_extent_out(const struct hfs_fork *, hfs_byte_t *);
+extern int hfs_extent_map(struct hfs_fork *, int, int);
+extern void hfs_extent_adj(struct hfs_fork *);
+extern void hfs_extent_free(struct hfs_fork *);
+
+/* mdb.c */
+extern struct hfs_mdb *hfs_mdb_get(hfs_sysmdb, int, hfs_s32);
+extern void hfs_mdb_commit(struct hfs_mdb *, int);
+extern void hfs_mdb_put(struct hfs_mdb *, int);
+
+/* part_tbl.c */
+extern int hfs_part_find(hfs_sysmdb, int, int, hfs_s32 *, hfs_s32 *);
+
+/* string.c */
+extern unsigned int hfs_strhash(const struct hfs_name *);
+extern int hfs_strcmp(const struct hfs_name *, const struct hfs_name *);
+extern int hfs_streq(const struct hfs_name *, const struct hfs_name *);
+extern void hfs_tolower(unsigned char *, int);
+
+/* sysdep.c */
+extern void hfs_cat_prune(struct hfs_cat_entry *);
+
+/* Look up an existing dentry for 'name' (of length 'len') under 'base'
+ * in the dentry cache.  Returns a referenced dentry — the caller must
+ * dput() it — or NULL if none is cached.  d_lookup() consults only the
+ * dcache; it never triggers an on-disk lookup.
+ */
+extern __inline__ struct dentry
+*hfs_lookup_dentry(const char *name, const int len,
+		   struct dentry *base)
+{
+	struct qstr this;
+
+	this.name = name;
+	this.len = len;
+	this.hash = full_name_hash(name, len);
+
+	return d_lookup(base, &this);
+}
+
+/* drop a dentry for one of the special subdirectories */
+/* Given the name of a special subdirectory ('name') under 'base' and a
+ * dentry from the normal tree, find the cached dentry for the same file
+ * under that special subdirectory and, if it is negative (no inode),
+ * unhash it so a later lookup is redone.  Both lookups touch only the
+ * dcache; each successful lookup is balanced by a dput() below.
+ */
+extern __inline__ void hfs_drop_special(const struct hfs_name *name,
+					struct dentry *base,
+					struct dentry *dentry)
+{
+	struct dentry *dparent, *de;
+
+	dparent = hfs_lookup_dentry(name->Name, name->Len, base);
+	if (dparent) {
+		de = hfs_lookup_dentry(dentry->d_name.name, dentry->d_name.len,
+				       dparent);
+		dput(dparent);
+
+		if (de) {
+			/* only drop negative dentries */
+			if (!de->d_inode)
+				d_drop(de);
+			dput(de);
+		}
+	}
+}
+
+extern struct dentry_operations hfs_dentry_operations;
+#endif
diff --git a/fs/hfs/hfs_btree.h b/fs/hfs/hfs_btree.h
new file mode 100644
index 000000000..7f7aea600
--- /dev/null
+++ b/fs/hfs/hfs_btree.h
@@ -0,0 +1,268 @@
+/*
+ * linux/fs/hfs/hfs_btree.h
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the declarations of the private B-tree
+ * structures and functions.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ */
+
+#ifndef _HFS_BTREE_H
+#define _HFS_BTREE_H
+
+#include "hfs.h"
+
+/*================ Variable-like macros ================*/
+
+/* The stickiness of a (struct hfs_bnode) */
+#define HFS_NOT_STICKY 0
+#define HFS_STICKY 1
+
+/* The number of hash buckets in a B-tree's bnode cache */
+#define HFS_CACHELEN 17 /* primes are best? */
+
+/*
+ * Legal values for the 'ndType' field of a (struct NodeDescriptor)
+ *
+ * Reference: _Inside Macintosh: Files_ p. 2-65
+ */
+#define ndIndxNode 0x00 /* An internal (index) node */
+#define ndHdrNode 0x01 /* The tree header node (node 0) */
+#define ndMapNode 0x02 /* Holds part of the bitmap of used nodes */
+#define ndLeafNode 0xFF /* A leaf (ndNHeight==1) node */
+
+/*================ Function-like macros ================*/
+
+/* Access the cache slot which should contain the desired node */
+#define bhash(tree, node) ((tree)->cache[(node) % HFS_CACHELEN])
+
+/* round up to multiple of sizeof(hfs_u16); (X) is fully parenthesized
+   so the macro expands correctly for any argument expression */
+#define ROUND(X)	(((X) + sizeof(hfs_u16) - 1) & ~(sizeof(hfs_u16)-1))
+
+/* Refer to the (base-1) array of offsets in a bnode */
+#define RECTBL(X,N) \
+ (((hfs_u16 *)(hfs_buffer_data((X)->buf)+HFS_SECTOR_SIZE))-(N))
+
+/*================ Private data types ================*/
+
+/*
+ * struct BTHdrRec
+ *
+ * The B-tree header record
+ *
+ * This data structure is stored in the first node (512-byte block) of
+ * each B-tree file. It contains important information about the
+ * B-tree. Most fields vary over the life of the tree and are
+ * indicated by a 'V' in the comments. The other fields are fixed for
+ * the life of the tree and are indicated by a 'F'.
+ *
+ * Reference: _Inside Macintosh: Files_ pp. 2-68 through 2-69 */
+struct BTHdrRec {
+ hfs_word_t bthDepth; /* (V) The number of levels in this B-tree */
+ hfs_lword_t bthRoot; /* (V) The node number of the root node */
+ hfs_lword_t bthNRecs; /* (V) The number of leaf records */
+ hfs_lword_t bthFNode; /* (V) The number of the first leaf node */
+ hfs_lword_t bthLNode; /* (V) The number of the last leaf node */
+ hfs_word_t bthNodeSize; /* (F) The number of bytes in a node (=512) */
+ hfs_word_t bthKeyLen; /* (F) The length of a key in an index node */
+ hfs_lword_t bthNNodes; /* (V) The total number of nodes */
+ hfs_lword_t bthFree; /* (V) The number of unused nodes */
+ hfs_byte_t bthResv[76]; /* Reserved */
+};
+
+/*
+ * struct NodeDescriptor
+ *
+ * The B-tree node descriptor.
+ *
+ * This structure begins each node in the B-tree file. It contains
+ * important information about the node's contents. 'V' and 'F' in
+ * the comments indicate fields that are variable or fixed over the
+ * life of a node, where the 'life' of a node is defined as the period
+ * between leaving and reentering the free pool.
+ *
+ * Reference: _Inside Macintosh: Files_ p. 2-64
+ */
+struct NodeDescriptor {
+ hfs_lword_t ndFLink; /* (V) Number of the next node at this level */
+ hfs_lword_t ndBLink; /* (V) Number of the prev node at this level */
+ hfs_byte_t ndType; /* (F) The type of node */
+ hfs_byte_t ndNHeight; /* (F) The level of this node (leaves=1) */
+ hfs_word_t ndNRecs; /* (V) The number of records in this node */
+ hfs_word_t ndResv2; /* Reserved */
+};
+
+/*
+ * typedef hfs_cmpfn
+ *
+ * The type 'hfs_cmpfn' is a comparison function taking 2 keys and
+ * returning a positive, negative or zero integer according to the
+ * ordering of the two keys (just like strcmp() does for strings).
+ */
+typedef int (*hfs_cmpfn)(const void *, const void *);
+
+/*
+ * struct hfs_bnode
+ *
+ * An in-core B-tree node
+ *
+ * This structure holds information from the NodeDescriptor in native
+ * byte-order, a pointer to the buffer which contains the actual
+ * node and fields necessary for locking access to the node during
+ * updates. The use of the locking fields is explained with the
+ * locking functions.
+ */
+struct hfs_bnode {
+ int magic; /* Magic number to guard against
+ wild pointers */
+ hfs_buffer buf; /* The buffer containing the
+ actual node */
+ struct hfs_btree *tree; /* The tree to which this node
+ belongs */
+ struct hfs_bnode *prev; /* Next node in this hash bucket */
+ struct hfs_bnode *next; /* Previous node in this hash
+ bucket */
+ int sticky; /* Boolean: non-zero means keep
+ this node in-core (set for
+ root and head) */
+ hfs_u32 node; /* Node number */
+ /* locking related fields: */
+ hfs_wait_queue wqueue; /* Wait queue for write access */
+ hfs_wait_queue rqueue; /* Wait queue for read or reserve
+ access */
+ int count; /* Number of processes accessing
+ this node */
+ int resrv; /* Boolean, true means a process
+ had placed a 'reservation' on
+ this node */
+ int lock; /* Boolean, true means some
+ process has exclusive access,
+ so KEEP OUT */
+ /* fields from the NodeDescriptor in native byte-order: */
+ hfs_u32 ndFLink;
+ hfs_u32 ndBLink;
+ hfs_u16 ndNRecs;
+ hfs_u8 ndType;
+ hfs_u8 ndNHeight;
+};
+
+/*
+ * struct hfs_btree
+ *
+ * An in-core B-tree.
+ *
+ * This structure holds information from the BTHdrRec, MDB
+ * (superblock) and other information needed to work with the B-tree.
+ */
+struct hfs_btree {
+ int magic; /* Magic number to
+ guard against wild
+ pointers */
+ hfs_cmpfn compare; /* Comparison function
+ for this tree */
+ struct hfs_bnode head; /* in-core copy of node 0 */
+ struct hfs_bnode *root; /* Pointer to the in-core
+ copy of the root node */
+ hfs_sysmdb sys_mdb; /* The "device" holding
+ the filesystem */
+ int reserved; /* bnodes claimed but
+ not yet used */
+ struct hfs_bnode /* The bnode cache */
+ *cache[HFS_CACHELEN];
+ struct hfs_cat_entry entry; /* Fake catalog entry */
+ int lock;
+ hfs_wait_queue wait;
+ int dirt;
+ /* Fields from the BTHdrRec in native byte-order: */
+ hfs_u32 bthRoot;
+ hfs_u32 bthNRecs;
+ hfs_u32 bthFNode;
+ hfs_u32 bthLNode;
+ hfs_u32 bthNNodes;
+ hfs_u32 bthFree;
+ hfs_u16 bthKeyLen;
+ hfs_u16 bthDepth;
+};
+
+/*================ Global functions ================*/
+
+/* Return the offset (within the node's 512-byte block) of the n-th
+   record in the bnode.  Records are numbered from 1; the offset table
+   grows downward from the end of the block (see RECTBL). */
+extern inline hfs_u16 bnode_offset(const struct hfs_bnode *bnode, int n)
+{ return hfs_get_hs(RECTBL(bnode,n)); }
+
+/* Return the size in bytes of the n-th record in the bnode (n >= 1):
+   the gap between consecutive entries of the offset table */
+extern inline hfs_u16 bnode_rsize(const struct hfs_bnode *bnode, int n)
+{ return bnode_offset(bnode, n+1) - bnode_offset(bnode, n); }
+
+/* Return the offset of the empty part of the bnode, i.e. the table
+   entry one past the last record */
+extern inline hfs_u16 bnode_end(const struct hfs_bnode *bnode)
+{ return bnode_offset(bnode, bnode->ndNRecs + 1); }
+
+/* Return the number of free bytes in the bnode: the space between the
+   end of the record data and the offset table, which itself occupies
+   (ndNRecs + 1) 16-bit entries at the end of the block */
+extern inline hfs_u16 bnode_freespace(const struct hfs_bnode *bnode)
+{ return HFS_SECTOR_SIZE - bnode_end(bnode)
+		- (bnode->ndNRecs + 1)*sizeof(hfs_u16); }
+
+/* Return the address of the start of record data: just past the
+   NodeDescriptor at the beginning of the block */
+extern inline void *bnode_datastart(const struct hfs_bnode *bnode)
+{ return (void *)(hfs_buffer_data(bnode->buf)+sizeof(struct NodeDescriptor)); }
+
+/* Return the address of the empty part of the bnode */
+extern inline void *bnode_dataend(const struct hfs_bnode *bnode)
+{ return (void *)(hfs_buffer_data(bnode->buf) + bnode_end(bnode)); }
+
+/* Convert various pointers to the address of a record's key */
+extern inline void *bnode_key(const struct hfs_bnode *bnode, int n)
+{ return (void *)(hfs_buffer_data(bnode->buf) + bnode_offset(bnode, n)); }
+extern inline void *belem_key(const struct hfs_belem *elem)
+{ return bnode_key(elem->bnr.bn, elem->record); }
+extern inline void *brec_key(const struct hfs_brec *brec)
+{ return belem_key(brec->bottom); }
+
+/* Convert various pointers to the address of a record's data, which
+   follows its key rounded up to 16-bit alignment (see ROUND); the
+   KeyLen byte itself is included in the rounded length */
+extern inline void *bkey_record(const struct hfs_bkey *key)
+{ return (void *)key + ROUND(key->KeyLen + 1); }
+extern inline void *bnode_record(const struct hfs_bnode *bnode, int n)
+{ return bkey_record(bnode_key(bnode, n)); }
+extern inline void *belem_record(const struct hfs_belem *elem)
+{ return bkey_record(belem_key(elem)); }
+extern inline void *brec_record(const struct hfs_brec *brec)
+{ return bkey_record(brec_key(brec)); }
+
+/*================ Function Prototypes ================*/
+
+/* balloc.c */
+extern int hfs_bnode_bitop(struct hfs_btree *, hfs_u32, int);
+extern struct hfs_bnode_ref hfs_bnode_alloc(struct hfs_btree *);
+extern int hfs_bnode_free(struct hfs_bnode_ref *);
+extern void hfs_btree_extend(struct hfs_btree *);
+
+/* bins_del.c */
+extern void hfs_bnode_update_key(struct hfs_brec *, struct hfs_belem *,
+ struct hfs_bnode *, int);
+extern void hfs_bnode_shift_right(struct hfs_bnode *, struct hfs_bnode *, int);
+extern void hfs_bnode_shift_left(struct hfs_bnode *, struct hfs_bnode *, int);
+extern int hfs_bnode_in_brec(hfs_u32 node, const struct hfs_brec *brec);
+
+/* bnode.c */
+extern void hfs_bnode_read(struct hfs_bnode *, struct hfs_btree *,
+ hfs_u32, int);
+extern void hfs_bnode_relse(struct hfs_bnode_ref *);
+extern struct hfs_bnode_ref hfs_bnode_find(struct hfs_btree *, hfs_u32, int);
+extern void hfs_bnode_lock(struct hfs_bnode_ref *, int);
+extern void hfs_bnode_delete(struct hfs_bnode *);
+extern void hfs_bnode_commit(struct hfs_bnode *);
+
+/* brec.c */
+extern void hfs_brec_lock(struct hfs_brec *, struct hfs_belem *);
+extern struct hfs_belem *hfs_brec_init(struct hfs_brec *, struct hfs_btree *,
+ int);
+extern struct hfs_belem *hfs_brec_next(struct hfs_brec *);
+
+#endif
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
new file mode 100644
index 000000000..a0bf3d576
--- /dev/null
+++ b/fs/hfs/inode.c
@@ -0,0 +1,427 @@
+/*
+ * linux/fs/hfs/inode.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains inode-related functions which do not depend on
+ * which scheme is being used to represent forks.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ Variable-like macros ================*/
+
+#define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO)
+
+/*================ File-local functions ================*/
+
+/*
+ * init_file_inode()
+ *
+ * Given an HFS catalog entry initialize an inode for a file.
+ */
+static void init_file_inode(struct inode *inode, hfs_u8 fork)
+{
+	struct hfs_fork *fk;
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+
+	/* Only the data fork gets execute bits, and only when the
+	   filesystem is not mounted 'noexec'. */
+	if (!IS_NOEXEC(inode) && (fork == HFS_FK_DATA)) {
+		inode->i_mode = S_IRWXUGO | S_IFREG;
+	} else {
+		inode->i_mode = S_IRUGO | S_IWUGO | S_IFREG;
+	}
+
+	if (fork == HFS_FK_DATA) {
+		/* Text conversion applies only to the data fork:
+		   unconditionally for conv=t, or for Finder types
+		   "TEXT"/"ttro" when mounted conv=a. */
+		hfs_u32 type = hfs_get_nl(entry->info.file.finfo.fdType);
+
+		fk = &entry->u.file.data_fork;
+		HFS_I(inode)->convert =
+			((HFS_SB(inode->i_sb)->s_conv == 't') ||
+			 ((HFS_SB(inode->i_sb)->s_conv == 'a') &&
+			  ((type == htonl(0x54455854)) || /* "TEXT" */
+			   (type == htonl(0x7474726f))))); /* "ttro" */
+	} else {
+		fk = &entry->u.file.rsrc_fork;
+		HFS_I(inode)->convert = 0;
+	}
+	HFS_I(inode)->fork = fk;
+	inode->i_size = fk->lsize;	/* logical size in bytes */
+	inode->i_blocks = fk->psize;	/* physical size; presumably in
+					   512-byte sectors -- see hfs.h */
+	inode->i_nlink = 1;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_put_inode()
+ *
+ * This is the put_inode() entry in the super_operations for HFS
+ * filesystems. The purpose is to perform any filesystem-dependent
+ * cleanup necessary when the use-count of an inode falls to zero.
+ */
+void hfs_put_inode(struct inode * inode)
+{
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+
+	/* Detach this inode from its slot in the catalog entry and
+	   drop the reference taken when the inode was set up. */
+	entry->sys_entry[HFS_ITYPE_TO_INT(HFS_ITYPE(inode->i_ino))] = NULL;
+	hfs_cat_put(entry);
+
+	/* Last use of this inode: free any AppleDouble header layout
+	   that was attached to it. */
+	if (inode->i_count == 1) {
+		struct hfs_hdr_layout *tmp = HFS_I(inode)->layout;
+		if (tmp) {
+			HFS_I(inode)->layout = NULL;
+			HFS_DELETE(tmp);
+		}
+	}
+}
+
+/*
+ * hfs_notify_change()
+ *
+ * Based very closely on fs/msdos/inode.c by Werner Almesberger
+ *
+ * This is the notify_change() field in the super_operations structure
+ * for HFS file systems. The purpose is to take that changes made to
+ * an inode and apply them in a filesystem-dependent manner. In this
+ * case the process has a few tasks to do:
+ * 1) prevent changes to the i_uid and i_gid fields.
+ * 2) map file permissions to the closest allowable permissions
+ * 3) Since multiple Linux files can share the same on-disk inode under
+ * HFS (for instance the data and resource forks of a file) a change
+ * to permissions must be applied to all other in-core inodes which
+ * correspond to the same HFS file.
+ */
+int hfs_notify_change(struct dentry *dentry, struct iattr * attr)
+{
+	struct inode *inode = dentry->d_inode;
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	struct dentry **de = entry->sys_entry;
+	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
+	int error, i;
+
+	error = inode_change_ok(inode, attr); /* basic permission checks */
+	if (error) {
+		/* Let netatalk's afpd think chmod() always succeeds */
+		if (hsb->s_afpd &&
+		    (attr->ia_valid == (ATTR_MODE | ATTR_CTIME))) {
+			return 0;
+		} else {
+			return error;
+		}
+	}
+
+	/* no uid/gid changes and limit which mode bits can be set */
+	if (((attr->ia_valid & ATTR_UID) &&
+	     (attr->ia_uid != hsb->s_uid)) ||
+	    ((attr->ia_valid & ATTR_GID) &&
+	     (attr->ia_gid != hsb->s_gid)) ||
+	    ((attr->ia_valid & ATTR_MODE) &&
+	     (((entry->type == HFS_CDR_DIR) &&
+	       (attr->ia_mode != inode->i_mode))||
+	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
+		/* BUGFIX: 'error' is necessarily 0 here (inode_change_ok()
+		   succeeded above), so returning it reported success for a
+		   rejected change even without the 'quiet' mount option.
+		   Report -EPERM instead unless mounted 'quiet'. */
+		return hsb->s_quiet ? 0 : -EPERM;
+	}
+
+	if (entry->type == HFS_CDR_DIR) {
+		/* directory permissions are fixed; ignore mode changes */
+		attr->ia_valid &= ~ATTR_MODE;
+	} else if (attr->ia_valid & ATTR_MODE) {
+		/* Only the 'w' bits can ever change and only all together. */
+		if (attr->ia_mode & S_IWUSR) {
+			attr->ia_mode = inode->i_mode | S_IWUGO;
+		} else {
+			attr->ia_mode = inode->i_mode & ~S_IWUGO;
+		}
+		attr->ia_mode &= ~hsb->s_umask;
+	}
+	inode_setattr(inode, attr);
+
+	/* We wouldn't want to mess with the sizes of the other fork */
+	attr->ia_valid &= ~ATTR_SIZE;
+
+	/* We must change all in-core inodes corresponding to this file:
+	   one per sys_entry slot (data/rsrc fork, metadata, etc.). */
+	for (i = 0; i < 4; ++i) {
+		if (de[i] && (de[i] != dentry)) {
+			inode_setattr(de[i]->d_inode, attr);
+		}
+	}
+
+	/* Change the catalog entry if needed */
+	if (attr->ia_valid & ATTR_MTIME) {
+		entry->modify_date = hfs_u_to_mtime(inode->i_mtime);
+		hfs_cat_mark_dirty(entry);
+	}
+	if (attr->ia_valid & ATTR_MODE) {
+		/* files only: ATTR_MODE was cleared above for directories.
+		   Map the owner's 'w' bit onto the HFS locked flag. */
+		hfs_u8 new_flags;
+
+		if (inode->i_mode & S_IWUSR) {
+			new_flags = entry->u.file.flags & ~HFS_FIL_LOCK;
+		} else {
+			new_flags = entry->u.file.flags | HFS_FIL_LOCK;
+		}
+
+		if (new_flags != entry->u.file.flags) {
+			entry->u.file.flags = new_flags;
+			hfs_cat_mark_dirty(entry);
+		}
+	}
+	/* size changes handled in hfs_extent_adj() */
+
+	return 0;
+}
+
+/*
+ * __hfs_iget()
+ *
+ * Given the MDB for a HFS filesystem, a 'key' and an 'entry' in
+ * the catalog B-tree and the 'type' of the desired file return the
+ * inode for that file/directory or NULL. Note that 'type' indicates
+ * whether we want the actual file or directory, or the corresponding
+ * metadata (AppleDouble header file or CAP metadata file).
+ *
+ * In an ideal world we could call iget() and would not need this
+ * function. However, since there is no way to even know the inode
+ * number until we've found the file/directory in the catalog B-tree
+ * that simply won't happen.
+ *
+ * The main idea here is to look in the catalog B-tree to get the
+ * vital info about the file or directory (including the file id which
+ * becomes the inode number) and then to call iget() and return the
+ * inode if it is complete. If it is not then we use the catalog
+ * entry to fill in the missing info, by calling the appropriate
+ * 'fillin' function. Note that these fillin functions are
+ * essentially hfs_*_read_inode() functions, but since there is no way
+ * to pass the catalog entry through iget() to such a read_inode()
+ * function, we have to call them after iget() returns an incomplete
+ * inode to us. This is pretty much the same problem faced in the NFS
+ * code, and pretty much the same solution. The SMB filesystem deals
+ * with this in a different way: by using the address of the
+ * kmalloc()'d space which holds the data as the inode number.
+ *
+ * XXX: Both this function and NFS's corresponding nfs_fhget() would
+ * benefit from a way to pass an additional (void *) through iget() to
+ * the VFS read_inode() function.
+ *
+ * hfs_iget no longer touches hfs_cat_entries.
+ */
+struct inode *hfs_iget(struct hfs_cat_entry *entry, ino_t type,
+		       struct dentry *dentry)
+{
+	struct dentry **sys_entry;
+	struct super_block *sb;
+	struct inode *inode;
+
+	if (!entry) {
+		return NULL;
+	}
+
+	/* If there are several processes all calling __iget() for
+	   the same inode then they will all get the same one back.
+	   The first one to return from __iget() will notice that the
+	   i_mode field of the inode is blank and KNOW that it is
+	   the first to return. Therefore, it will set the appropriate
+	   'sys_entry' field in the entry and initialize the inode.
+	   All the initialization must be done without sleeping,
+	   or else other processes could end up using a partially
+	   initialized inode. */
+
+	sb = entry->mdb->sys_mdb;
+	sys_entry = &entry->sys_entry[HFS_ITYPE_TO_INT(type)];
+
+	if (*sys_entry && (inode = (*sys_entry)->d_inode)) {
+		/* There is an existing inode for this file/dir. Use it. */
+		++inode->i_count;
+		return inode;
+	}
+
+	/* inode number = catalog node id (big-endian) with the itype
+	   encoded in the low bits */
+	if (!(inode = iget(sb, ntohl(entry->cnid) | type)))
+		return NULL;
+
+	if (inode->i_dev != sb->s_dev) {
+		/* iget() handed back an inode from some other device */
+		iput(inode);
+		inode = NULL;
+	} else if (inode->i_mode) {
+		/* The inode has been initialized by another process.
+		   Note that if hfs_put_inode() is sleeping in hfs_cat_put()
+		   then we still need to attach it to the entry. */
+		if (!(*sys_entry))
+			*sys_entry = dentry; /* cache dentry */
+	} else {
+		/* Initialize the inode */
+		struct hfs_sb_info *hsb = HFS_SB(sb);
+
+		inode->i_rdev = 0;
+		inode->i_ctime = inode->i_atime = inode->i_mtime =
+					hfs_m_to_utime(entry->modify_date);
+		inode->i_blksize = HFS_SECTOR_SIZE;
+		inode->i_uid = hsb->s_uid;
+		inode->i_gid = hsb->s_gid;
+
+		memset(HFS_I(inode), 0, sizeof(struct hfs_inode_info));
+		HFS_I(inode)->magic = HFS_INO_MAGIC;
+		HFS_I(inode)->entry = entry;
+
+		/* scheme-specific fillin: hfs_cap_ifill(), hfs_dbl_ifill()
+		   or hfs_nat_ifill(), selected at mount time */
+		hsb->s_ifill(inode, type);
+		if (!hsb->s_afpd && (entry->type == HFS_CDR_FIL) &&
+		    (entry->u.file.flags & HFS_FIL_LOCK)) {
+			inode->i_mode &= ~S_IWUGO;
+		}
+		inode->i_mode &= ~hsb->s_umask;
+
+		/* i_mode == 0 means the fillin declined this itype
+		   (or the umask stripped everything): give up */
+		if (!inode->i_mode) {
+			clear_inode(inode);
+			inode = NULL;
+		}
+
+		*sys_entry = dentry; /* cache dentry */
+	}
+
+	return inode;
+}
+
+/*================ Scheme-specific functions ================*/
+
+/*
+ * hfs_cap_ifill()
+ *
+ * This function serves the same purpose as a read_inode() function does
+ * in other filesystems. It is called by __hfs_iget() to fill in
+ * the missing fields of an uninitialized inode under the CAP scheme.
+ */
+void hfs_cap_ifill(struct inode * inode, ino_t type)
+{
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	struct hfs_dir *dir = &entry->u.dir;
+
+	HFS_I(inode)->d_drop_op = hfs_cap_drop_dentry;
+
+	/* the CAP finderinfo metadata file */
+	if (type == HFS_CAP_FNDR) {
+		inode->i_mode = S_IRUGO | S_IWUGO | S_IFREG;
+		inode->i_nlink = 1;
+		inode->i_size = sizeof(struct hfs_cap_info);
+		inode->i_blocks = 0;
+		inode->i_op = &hfs_cap_info_inode_operations;
+		return;
+	}
+
+	/* a regular file: data or resource fork */
+	if (entry->type == HFS_CDR_FIL) {
+		init_file_inode(inode, (type == HFS_CAP_DATA) ?
+				HFS_FK_DATA : HFS_FK_RSRC);
+		inode->i_op = &hfs_file_inode_operations;
+		return;
+	}
+
+	/* a directory; i_size is +5, presumably for ".", ".." and the
+	   CAP special entries */
+	inode->i_blocks = 0;
+	inode->i_size = dir->files + dir->dirs + 5;
+	HFS_I(inode)->dir_size = 1;
+	if (type == HFS_CAP_NDIR) {
+		inode->i_mode = S_IFDIR | S_IRWXUGO;
+		inode->i_nlink = dir->dirs + 4;
+		inode->i_op = &hfs_cap_ndir_inode_operations;
+		HFS_I(inode)->file_type = HFS_CAP_NORM;
+	} else if (type == HFS_CAP_FDIR) {
+		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+		inode->i_nlink = 2;
+		inode->i_op = &hfs_cap_fdir_inode_operations;
+		HFS_I(inode)->file_type = HFS_CAP_FNDR;
+	} else if (type == HFS_CAP_RDIR) {
+		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+		inode->i_nlink = 2;
+		inode->i_op = &hfs_cap_rdir_inode_operations;
+		HFS_I(inode)->file_type = HFS_CAP_RSRC;
+	}
+}
+
+/*
+ * hfs_dbl_ifill()
+ *
+ * This function serves the same purpose as a read_inode() function does
+ * in other filesystems. It is called by __hfs_iget() to fill in
+ * the missing fields of an uninitialized inode under the AppleDouble
+ * scheme.
+ */
+void hfs_dbl_ifill(struct inode * inode, ino_t type)
+{
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	struct hfs_dir *dir;
+
+	HFS_I(inode)->d_drop_op = hfs_dbl_drop_dentry;
+
+	/* the AppleDouble header file */
+	if (type == HFS_DBL_HDR) {
+		if (entry->type == HFS_CDR_FIL) {
+			/* header prepended to the resource fork */
+			init_file_inode(inode, HFS_FK_RSRC);
+			inode->i_size += HFS_DBL_HDR_LEN;
+			HFS_I(inode)->default_layout = &hfs_dbl_fil_hdr_layout;
+		} else {
+			/* directories get a header-only file */
+			inode->i_mode = S_IFREG | S_IRUGO | S_IWUGO;
+			inode->i_nlink = 1;
+			inode->i_size = HFS_DBL_HDR_LEN;
+			HFS_I(inode)->default_layout = &hfs_dbl_dir_hdr_layout;
+		}
+		inode->i_op = &hfs_hdr_inode_operations;
+		return;
+	}
+
+	/* the data fork of a regular file */
+	if (entry->type == HFS_CDR_FIL) {
+		init_file_inode(inode, HFS_FK_DATA);
+		inode->i_op = &hfs_file_inode_operations;
+		return;
+	}
+
+	/* a directory: each child contributes a normal and a "%" entry */
+	dir = &entry->u.dir;
+	inode->i_mode = S_IFDIR | S_IRWXUGO;
+	inode->i_nlink = dir->dirs + 2;
+	inode->i_size = 3 + 2 * (dir->dirs + dir->files);
+	inode->i_blocks = 0;
+	inode->i_op = &hfs_dbl_dir_inode_operations;
+	HFS_I(inode)->file_type = HFS_DBL_NORM;
+	HFS_I(inode)->dir_size = 2;
+}
+
+/*
+ * hfs_nat_ifill()
+ *
+ * This function serves the same purpose as a read_inode() function does
+ * in other filesystems. It is called by __hfs_iget() to fill in
+ * the missing fields of an uninitialized inode under the Netatalk
+ * scheme.
+ */
+void hfs_nat_ifill(struct inode * inode, ino_t type)
+{
+	struct hfs_cat_entry *entry = HFS_I(inode)->entry;
+	struct hfs_dir *dir;
+
+	HFS_I(inode)->d_drop_op = hfs_nat_drop_dentry;
+
+	/* the Netatalk header file */
+	if (type == HFS_NAT_HDR) {
+		if (entry->type == HFS_CDR_FIL) {
+			/* header prepended to the resource fork */
+			init_file_inode(inode, HFS_FK_RSRC);
+			inode->i_size += HFS_NAT_HDR_LEN;
+		} else {
+			/* directories get a header-only file */
+			inode->i_mode = S_IFREG | S_IRUGO | S_IWUGO;
+			inode->i_nlink = 1;
+			inode->i_size = HFS_NAT_HDR_LEN;
+		}
+		HFS_I(inode)->default_layout = &hfs_nat_hdr_layout;
+		inode->i_op = &hfs_hdr_inode_operations;
+		return;
+	}
+
+	/* the data fork of a regular file */
+	if (entry->type == HFS_CDR_FIL) {
+		init_file_inode(inode, HFS_FK_DATA);
+		inode->i_op = &hfs_file_inode_operations;
+		return;
+	}
+
+	/* a directory */
+	dir = &entry->u.dir;
+	inode->i_mode = S_IFDIR | S_IRWXUGO;
+	inode->i_size = dir->files + dir->dirs + 3;
+	inode->i_blocks = 0;
+	HFS_I(inode)->dir_size = 1;
+	if (type == HFS_NAT_NDIR) {
+		inode->i_nlink = dir->dirs + 3;
+		inode->i_op = &hfs_nat_ndir_inode_operations;
+		HFS_I(inode)->file_type = HFS_NAT_NORM;
+	} else if (type == HFS_NAT_HDIR) {
+		inode->i_nlink = 2;
+		inode->i_op = &hfs_nat_hdir_inode_operations;
+		HFS_I(inode)->file_type = HFS_NAT_HDR;
+	}
+}
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
new file mode 100644
index 000000000..45ad05022
--- /dev/null
+++ b/fs/hfs/mdb.c
@@ -0,0 +1,298 @@
+/*
+ * linux/fs/hfs/mdb.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains functions for reading/writing the MDB.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs.h"
+
+/*================ File-local data types ================*/
+
+/*
+ * The HFS Master Directory Block (MDB).
+ *
+ * Also known as the Volume Information Block (VIB), this structure is
+ * the HFS equivalent of a superblock.
+ *
+ * Reference: _Inside Macintosh: Files_ pages 2-59 through 2-62
+ */
+struct raw_mdb {
+	/* NOTE: all multi-byte fields are big-endian on disk; the
+	   hfs_get_*()/hfs_put_*() accessors perform the conversion. */
+	hfs_word_t drSigWord;	/* Signature word indicating fs type */
+	hfs_lword_t drCrDate;	/* fs creation date/time */
+	hfs_lword_t drLsMod;	/* fs modification date/time */
+	hfs_word_t drAtrb;	/* fs attributes */
+	hfs_word_t drNmFls;	/* number of files in root directory */
+	hfs_word_t drVBMSt;	/* location (in 512-byte blocks)
+				   of the volume bitmap */
+	hfs_word_t drAllocPtr;	/* location (in allocation blocks)
+				   to begin next allocation search */
+	hfs_word_t drNmAlBlks;	/* number of allocation blocks */
+	hfs_lword_t drAlBlkSiz;	/* bytes in an allocation block */
+	hfs_lword_t drClpSiz;	/* clumpsize, the number of bytes to
+				   allocate when extending a file */
+	hfs_word_t drAlBlSt;	/* location (in 512-byte blocks)
+				   of the first allocation block */
+	hfs_lword_t drNxtCNID;	/* CNID to assign to the next
+				   file or directory created */
+	hfs_word_t drFreeBks;	/* number of free allocation blocks */
+	hfs_byte_t drVN[28];	/* the volume label */
+	hfs_lword_t drVolBkUp;	/* fs backup date/time */
+	hfs_word_t drVSeqNum;	/* backup sequence number */
+	hfs_lword_t drWrCnt;	/* fs write count */
+	hfs_lword_t drXTClpSiz;	/* clumpsize for the extents B-tree */
+	hfs_lword_t drCTClpSiz;	/* clumpsize for the catalog B-tree */
+	hfs_word_t drNmRtDirs;	/* number of directories in
+				   the root directory */
+	hfs_lword_t drFilCnt;	/* number of files in the fs */
+	hfs_lword_t drDirCnt;	/* number of directories in the fs */
+	hfs_byte_t drFndrInfo[32];	/* data used by the Finder */
+	hfs_word_t drVCSize;	/* MacOS caching parameter */
+	hfs_word_t drVCBMSize;	/* MacOS caching parameter */
+	hfs_word_t drCtlCSize;	/* MacOS caching parameter */
+	hfs_lword_t drXTFlSize;	/* bytes in the extents B-tree */
+	hfs_byte_t drXTExtRec[12];	/* extents B-tree's first 3 extents */
+	hfs_lword_t drCTFlSize;	/* bytes in the catalog B-tree */
+	hfs_byte_t drCTExtRec[12];	/* catalog B-tree's first 3 extents */
+};
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_mdb_get()
+ *
+ * Build the in-core MDB for a filesystem, including
+ * the B-trees and the volume bitmap.
+ */
+struct hfs_mdb *hfs_mdb_get(hfs_sysmdb sys_mdb, int readonly,
+			    hfs_s32 part_start)
+{
+	struct hfs_mdb *mdb;
+	hfs_buffer buf;
+	struct raw_mdb *raw;
+	unsigned int bs, block;
+	int lcv, limit;
+	hfs_buffer *bmbuf;
+
+	if (!HFS_NEW(mdb)) {
+		hfs_warn("hfs_fs: out of memory\n");
+		return NULL;
+	}
+
+	memset(mdb, 0, sizeof(*mdb));
+	mdb->magic = HFS_MDB_MAGIC;
+	mdb->sys_mdb = sys_mdb;
+
+	/* See if this is an HFS filesystem */
+	buf = hfs_buffer_get(sys_mdb, part_start + HFS_MDB_BLK, 1);
+	if (!hfs_buffer_ok(buf)) {
+		hfs_warn("hfs_fs: Unable to read superblock\n");
+		goto bail2;
+	}
+	raw = (struct raw_mdb *)hfs_buffer_data(buf);
+	if (hfs_get_ns(raw->drSigWord) != htons(HFS_SUPER_MAGIC)) {
+		/* not HFS: fail silently so other fs types can be tried */
+		hfs_buffer_put(buf);
+		goto bail2;
+	}
+	mdb->buf = buf;
+
+	/* allocation block size must be a nonzero multiple of 512
+	   bytes that fits in 16 bits */
+	bs = hfs_get_hl(raw->drAlBlkSiz);
+	if (!bs || bs > HFS_USHRT_MAX || (bs & (HFS_SECTOR_SIZE-1))) {
+		hfs_warn("hfs_fs: bad allocation block size %d != 512\n", bs);
+		goto bail1;
+	}
+	/* kept in units of 512-byte sectors from here on */
+	mdb->alloc_blksz = bs >> HFS_SECTOR_SIZE_BITS;
+
+	/* These parameters are read from the MDB, and never written */
+	mdb->create_date = hfs_get_hl(raw->drCrDate);
+	mdb->fs_ablocks = hfs_get_hs(raw->drNmAlBlks);
+	mdb->fs_start = hfs_get_hs(raw->drAlBlSt) + part_start;
+	mdb->backup_date = hfs_get_hl(raw->drVolBkUp);
+	mdb->clumpablks = (hfs_get_hl(raw->drClpSiz) / mdb->alloc_blksz)
+						>> HFS_SECTOR_SIZE_BITS;
+	memcpy(mdb->vname, raw->drVN, 28);
+
+	/* These parameters are read from and written to the MDB */
+	mdb->modify_date = hfs_get_nl(raw->drLsMod);
+	mdb->attrib = hfs_get_ns(raw->drAtrb);
+	mdb->free_ablocks = hfs_get_hs(raw->drFreeBks);
+	mdb->next_id = hfs_get_hl(raw->drNxtCNID);
+	mdb->write_count = hfs_get_hl(raw->drWrCnt);
+	mdb->root_files = hfs_get_hs(raw->drNmFls);
+	mdb->root_dirs = hfs_get_hs(raw->drNmRtDirs);
+	mdb->file_count = hfs_get_hl(raw->drFilCnt);
+	mdb->dir_count = hfs_get_hl(raw->drDirCnt);
+
+	/* TRY to get the alternate (backup) MDB, scanning the sectors
+	   just past the last allocation block.  Failure is tolerated:
+	   we simply won't maintain a backup copy. */
+	lcv = mdb->fs_start + mdb->fs_ablocks * mdb->alloc_blksz;
+	limit = lcv + mdb->alloc_blksz;
+	for (; lcv < limit; ++lcv) {
+		buf = hfs_buffer_get(sys_mdb, lcv, 1);
+		if (hfs_buffer_ok(buf)) {
+			struct raw_mdb *tmp =
+				(struct raw_mdb *)hfs_buffer_data(buf);
+
+			if (hfs_get_ns(tmp->drSigWord) ==
+			    htons(HFS_SUPER_MAGIC)) {
+				mdb->alt_buf = buf;
+				break;
+			}
+			hfs_buffer_put(buf);
+		}
+	}
+	if (mdb->alt_buf == NULL) {
+		hfs_warn("hfs_fs: unable to locate alternate MDB\n");
+		hfs_warn("hfs_fs: continuing without an alternate MDB\n");
+	}
+
+	/* read in the bitmap: one 512-byte buffer covers 4096
+	   allocation blocks (512 * 8 bits) */
+	block = hfs_get_hs(raw->drVBMSt) + part_start;
+	bmbuf = mdb->bitmap;
+	lcv = (mdb->fs_ablocks + 4095) / 4096;
+	for ( ; lcv; --lcv, ++bmbuf, ++block) {
+		if (!hfs_buffer_ok(*bmbuf =
+				   hfs_buffer_get(sys_mdb, block, 1))) {
+			hfs_warn("hfs_fs: unable to read volume bitmap\n");
+			goto bail1;
+		}
+	}
+
+	/* set up the in-core extents and catalog B-trees */
+	if (!(mdb->ext_tree = hfs_btree_init(mdb, htonl(HFS_EXT_CNID),
+					     raw->drXTExtRec,
+					     hfs_get_hl(raw->drXTFlSize),
+					     hfs_get_hl(raw->drXTClpSiz))) ||
+	    !(mdb->cat_tree = hfs_btree_init(mdb, htonl(HFS_CAT_CNID),
+					     raw->drCTExtRec,
+					     hfs_get_hl(raw->drCTFlSize),
+					     hfs_get_hl(raw->drCTClpSiz)))) {
+		hfs_warn("hfs_fs: unable to initialize data structures\n");
+		goto bail1;
+	}
+
+	if (!(mdb->attrib & htons(HFS_SB_ATTRIB_CLEAN))) {
+		hfs_warn("hfs_fs: WARNING: mounting unclean filesystem.\n");
+	} else if (!readonly) {
+		/* Mark the volume uncleanly unmounted in case we crash */
+		hfs_put_ns(mdb->attrib & htons(~HFS_SB_ATTRIB_CLEAN),
+			   raw->drAtrb);
+		hfs_buffer_dirty(mdb->buf);
+		hfs_buffer_sync(mdb->buf);
+	}
+
+	return mdb;
+
+bail1:
+	/* hfs_mdb_put() releases everything acquired so far (buffers,
+	   bitmap, B-trees) and frees 'mdb' itself */
+	hfs_mdb_put(mdb, readonly);
+bail2:
+	return NULL;
+}
+
+/*
+ * hfs_mdb_commit()
+ *
+ * Description:
+ * This updates the MDB on disk (look also at hfs_write_super()).
+ * It does not check, if the superblock has been modified, or
+ * if the filesystem has been mounted read-only. It is mainly
+ * called by hfs_write_super() and hfs_btree_extend().
+ * Input Variable(s):
+ * struct hfs_mdb *mdb: Pointer to the hfs MDB
+ * int backup;
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'mdb' points to a "valid" (struct hfs_mdb).
+ * Postconditions:
+ * The HFS MDB and on disk will be updated, by copying the possibly
+ * modified fields from the in memory MDB (in native byte order) to
+ * the disk block buffer.
+ * If 'backup' is non-zero then the alternate MDB is also written
+ * and the function doesn't return until it is actually on disk.
+ */
+void hfs_mdb_commit(struct hfs_mdb *mdb, int backup)
+{
+	struct raw_mdb *raw = (struct raw_mdb *)hfs_buffer_data(mdb->buf);
+
+	/* Commit catalog entries to buffers */
+	hfs_cat_commit(mdb);
+
+	/* Commit B-tree data to buffers; this also refreshes the
+	   extent records and file sizes stored in the raw MDB,
+	   so it must precede the memcpy to the alternate MDB below */
+	hfs_btree_commit(mdb->cat_tree, raw->drCTExtRec, raw->drCTFlSize);
+	hfs_btree_commit(mdb->ext_tree, raw->drXTExtRec, raw->drXTFlSize);
+
+	/* Update write_count and modify_date */
+	++mdb->write_count;
+	mdb->modify_date = hfs_time();
+
+	/* These parameters may have been modified, so write them back */
+	hfs_put_nl(mdb->modify_date,   raw->drLsMod);
+	hfs_put_hs(mdb->free_ablocks,  raw->drFreeBks);
+	hfs_put_hl(mdb->next_id,       raw->drNxtCNID);
+	hfs_put_hl(mdb->write_count,   raw->drWrCnt);
+	hfs_put_hs(mdb->root_files,    raw->drNmFls);
+	hfs_put_hs(mdb->root_dirs,     raw->drNmRtDirs);
+	hfs_put_hl(mdb->file_count,    raw->drFilCnt);
+	hfs_put_hl(mdb->dir_count,     raw->drDirCnt);
+
+	/* write MDB to disk */
+	hfs_buffer_dirty(mdb->buf);
+
+	/* write the backup MDB, not returning until it is written */
+	if (backup && hfs_buffer_ok(mdb->alt_buf)) {
+		memcpy(hfs_buffer_data(mdb->alt_buf),
+		       hfs_buffer_data(mdb->buf), HFS_SECTOR_SIZE);
+		hfs_buffer_dirty(mdb->alt_buf);
+		hfs_buffer_sync(mdb->alt_buf);
+	}
+}
+
+/*
+ * hfs_mdb_put()
+ *
+ * Release the resources associated with the in-core MDB.
+ */
+void hfs_mdb_put(struct hfs_mdb *mdb, int readonly) {
+	int lcv;
+
+	/* invalidate cached catalog entries (must happen before the
+	   B-trees they reference are freed) */
+	hfs_cat_invalidate(mdb);
+
+	/* free the B-trees */
+	hfs_btree_free(mdb->ext_tree);
+	hfs_btree_free(mdb->cat_tree);
+
+	/* free the volume bitmap; unused slots are NULL (mdb was
+	   zero-filled at allocation) */
+	for (lcv = 0; lcv < HFS_BM_MAXBLOCKS; ++lcv) {
+		hfs_buffer_put(mdb->bitmap[lcv]);
+	}
+
+	/* update volume attributes, e.g. restore the clean-unmount
+	   flag cleared at mount time */
+	if (!readonly) {
+		struct raw_mdb *raw =
+			(struct raw_mdb *)hfs_buffer_data(mdb->buf);
+		hfs_put_ns(mdb->attrib, raw->drAtrb);
+		hfs_buffer_dirty(mdb->buf);
+	}
+
+	/* free the buffers holding the primary and alternate MDBs */
+	hfs_buffer_put(mdb->buf);
+	hfs_buffer_put(mdb->alt_buf);
+
+	/* free the MDB */
+	HFS_DELETE(mdb);
+}
diff --git a/fs/hfs/part_tbl.c b/fs/hfs/part_tbl.c
new file mode 100644
index 000000000..12922c6d7
--- /dev/null
+++ b/fs/hfs/part_tbl.c
@@ -0,0 +1,244 @@
+/*
+ * linux/fs/hfs/part_tbl.c
+ *
+ * Copyright (C) 1996-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * Original code to handle the new style Mac partition table based on
+ * a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs.h"
+
+/*================ File-local data types ================*/
+
+/*
+ * The Macintosh Driver Descriptor Block
+ *
+ * On partitioned Macintosh media this is block 0.
+ * We really only need the "magic number" to check for partitioned media.
+ */
+struct hfs_drvr_desc {
+	hfs_word_t ddSig;	/* The signature word; compared against
+				   HFS_DRVR_DESC_MAGIC in hfs_part_find() */
+	/* a bunch more stuff we don't need */
+};
+
+/*
+ * The new style Mac partition map
+ *
+ * For each partition on the media there is a physical block (512-byte
+ * block) containing one of these structures. These blocks are
+ * contiguous starting at block 1.
+ */
+struct new_pmap {
+	hfs_word_t pmSig;	/* Signature bytes to verify
+				   that this is a partition
+				   map block */
+	hfs_word_t reSigPad;	/* padding */
+				/* NOTE(review): name is presumably a
+				   typo for pmSigPad; harmless since
+				   the field is never referenced */
+	hfs_lword_t pmMapBlkCnt;	/* (At least in block 1) this
+					   is the number of partition
+					   map blocks */
+	hfs_lword_t pmPyPartStart;	/* The physical block number
+					   of the first block in this
+					   partition */
+	hfs_lword_t pmPartBlkCnt;	/* The number of physical
+					   blocks in this partition */
+	hfs_byte_t pmPartName[32];	/* (null terminated?) string
+					   giving the name of this
+					   partition */
+	hfs_byte_t pmPartType[32];	/* (null terminated?) string
+					   giving the type of this
+					   partition */
+	/* a bunch more stuff we don't need */
+};
+
+/*
+ * The old style Mac partition map
+ *
+ * The partition map consists of a 2-byte signature followed by an
+ * array of these structures. The map is terminated with an all-zero
+ * one of these.
+ */
+struct old_pmap {
+	hfs_word_t pdSig;	/* Signature bytes */
+	/* 42 entries is the most that fit in one 512-byte block
+	   (2 + 42*12 = 506 bytes) */
+	struct old_pmap_entry {
+		hfs_lword_t pdStart;	/* first block of partition */
+		hfs_lword_t pdSize;	/* number of blocks */
+		hfs_lword_t pdFSID;	/* filesystem type code */
+	} pdEntry[42];
+};
+
+/*================ File-local functions ================*/
+
+/*
+ * parse_new_part_table()
+ *
+ * Parse a new style partition map looking for the
+ * start and length of the 'part'th HFS partition.
+ */
+/* Takes ownership of 'buf' (the caller's block-1 buffer): it is
+   released at the bottom of the first loop iteration.  Returns 0 on
+   success (with *start/*size set if an HFS partition was found,
+   still zero otherwise) or 1 on read/format error. */
+static int parse_new_part_table(hfs_sysmdb sys_mdb, hfs_buffer buf,
+				int part, hfs_s32 *size, hfs_s32 *start)
+{
+	struct new_pmap *pm = (struct new_pmap *)hfs_buffer_data(buf);
+	hfs_u32 pmap_entries = hfs_get_hl(pm->pmMapBlkCnt);
+	int hfs_part = 0;
+	int entry;
+
+	for (entry = 0; (entry < pmap_entries) && !(*start); ++entry) {
+		if (entry) {
+			/* read the next partition map entry; the
+			   previous buffer was already released at the
+			   bottom of the prior iteration */
+			buf = hfs_buffer_get(sys_mdb, HFS_PMAP_BLK + entry, 1);
+			if (!hfs_buffer_ok(buf)) {
+				hfs_warn("hfs_fs: unable to "
+					 "read partition map.\n");
+				goto bail;
+			}
+			pm = (struct new_pmap *)hfs_buffer_data(buf);
+			if (hfs_get_ns(pm->pmSig) !=
+			    htons(HFS_NEW_PMAP_MAGIC)) {
+				hfs_warn("hfs_fs: invalid "
+					 "entry in partition map\n");
+				hfs_buffer_put(buf);
+				goto bail;
+			}
+		}
+
+		/* look for the 'part'th HFS partition */
+		if (!memcmp(pm->pmPartType,"Apple_HFS",9) &&
+		    ((hfs_part++) == part)) {
+			/* Found it! */
+			*start = hfs_get_hl(pm->pmPyPartStart);
+			*size = hfs_get_hl(pm->pmPartBlkCnt);
+		}
+
+		hfs_buffer_put(buf);
+	}
+
+	return 0;
+
+bail:
+	return 1;
+}
+
+/*
+ * parse_old_part_table()
+ *
+ * Parse an old style partition map looking for the
+ * start and length of the 'part'th HFS partition.
+ */
+/* Takes ownership of 'buf' (the caller's block-1 buffer) and always
+   releases it.  Returns 0; *start/*size are set if the 'part'th HFS
+   partition was found, and remain zero otherwise. */
+static int parse_old_part_table(hfs_sysmdb sys_mdb, hfs_buffer buf,
+				int part, hfs_s32 *size, hfs_s32 *start)
+{
+	struct old_pmap *pm = (struct old_pmap *)hfs_buffer_data(buf);
+	struct old_pmap_entry *p = &pm->pdEntry[0];
+	int hfs_part = 0;
+	int entry;
+
+	/* BUGFIX: bound the scan by the size of pdEntry[] so a map
+	   that lacks the all-zero terminating entry cannot walk off
+	   the end of the 512-byte block */
+	for (entry = 0; entry < 42; ++entry, ++p) {
+		/* stop at the all-zero terminator or once found */
+		if (!(p->pdStart || p->pdSize || p->pdFSID) || *start) {
+			break;
+		}
+		/* look for the 'part'th HFS partition */
+		if ((hfs_get_nl(p->pdFSID) == htonl(0x54465331)/*"TFS1"*/) &&
+		    ((hfs_part++) == part)) {
+			/* Found it! */
+			*start = hfs_get_hl(p->pdStart);
+			*size = hfs_get_hl(p->pdSize);
+		}
+	}
+	hfs_buffer_put(buf);
+
+	return 0;
+}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_part_find()
+ *
+ * Parse the partition map looking for the
+ * start and length of the 'part'th HFS partition.
+ */
+int hfs_part_find(hfs_sysmdb sys_mdb, int part, int silent,
+		  hfs_s32 *size, hfs_s32 *start)
+{
+	hfs_buffer buf;
+	hfs_u16 sig;
+	int dd_found = 0;
+	int retval = 1;
+
+	/* Read block 0 to see if this media is partitioned */
+	buf = hfs_buffer_get(sys_mdb, HFS_DD_BLK, 1);
+	if (!hfs_buffer_ok(buf)) {
+		hfs_warn("hfs_fs: Unable to read block 0.\n");
+		goto done;
+	}
+	sig = hfs_get_ns(((struct hfs_drvr_desc *)hfs_buffer_data(buf))->ddSig);
+	hfs_buffer_put(buf);
+
+	if (sig == htons(HFS_DRVR_DESC_MAGIC)) {
+		/* We are definitely on partitioned media. */
+		dd_found = 1;
+	}
+
+	/* Block 1 holds (the first block of) the partition map.
+	   Ownership of 'buf' passes to the parse_*_part_table()
+	   functions, which release it on every path. */
+	buf = hfs_buffer_get(sys_mdb, HFS_PMAP_BLK, 1);
+	if (!hfs_buffer_ok(buf)) {
+		hfs_warn("hfs_fs: Unable to read block 1.\n");
+		goto done;
+	}
+
+	*size = *start = 0;
+
+	switch (hfs_get_ns(hfs_buffer_data(buf))) {
+	case __constant_htons(HFS_OLD_PMAP_MAGIC):
+		retval = parse_old_part_table(sys_mdb, buf, part, size, start);
+		break;
+
+	case __constant_htons(HFS_NEW_PMAP_MAGIC):
+		retval = parse_new_part_table(sys_mdb, buf, part, size, start);
+		break;
+
+	default:
+		/* BUGFIX: neither parser took ownership of 'buf' on
+		   this path, so release it here (it was leaked). */
+		hfs_buffer_put(buf);
+		if (dd_found) {
+			/* The media claimed to have a partition map */
+			if (!silent) {
+				hfs_warn("hfs_fs: This disk has an "
+					 "unrecognized partition map type.\n");
+			}
+		} else {
+			/* Conclude that the media is not partitioned */
+			retval = 0;
+		}
+		goto done;
+	}
+
+	/* Sanity-check the result of a successful parse. */
+	if (!retval) {
+		if (*start == 0) {
+			if (part) {
+				hfs_warn("hfs_fs: unable to locate "
+					 "HFS partition number %d.\n", part);
+			} else {
+				hfs_warn("hfs_fs: unable to locate any "
+					 "HFS partitions.\n");
+			}
+			retval = 1;
+		} else if (*size < 0) {
+			/* sign bit set: value overflowed hfs_s32 */
+			hfs_warn("hfs_fs: Partition size > 1 Terabyte.\n");
+			retval = 1;
+		} else if (*start < 0) {
+			hfs_warn("hfs_fs: Partition begins beyond 1 "
+				 "Terabyte.\n");
+			retval = 1;
+		}
+	}
+done:
+	return retval;
+}
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
new file mode 100644
index 000000000..cacc0a604
--- /dev/null
+++ b/fs/hfs/string.c
@@ -0,0 +1,152 @@
+/*
+ * linux/fs/hfs/string.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the string comparison function for the
+ * Macintosh character set.
+ *
+ * The code in this file is derived from code which is copyright
+ * 1986, 1989, 1990 by Abacus Research and Development, Inc. (ARDI)
+ * It is used here by the permission of ARDI's president Cliff Matthews.
+ *
+ * If you discover bugs in this code please notify both the author of the
+ * Linux HFS file system: hargrove@sccm.stanford.edu (Paul H. Hargrove)
+ * and the author of ARDI's HFS code: ctm@ardi.com (Clifford T. Matthews)
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ */
+
+#include "hfs.h"
+
+/*================ File-local variables ================*/
+
+/*
+ * unsigned char caseorder[]
+ *
+ * Defines the lexical ordering of characters on the Macintosh
+ *
+ * Composition of the 'casefold' and 'order' tables from ARDI's code
+ * with the entry for 0x20 changed to match that for 0xCA to remove
+ * special case for those two characters.
+ */
static unsigned char caseorder[256] = {
/* caseorder[c] is the collation weight of Mac character code c;
   compare weights, not raw bytes, for HFS name ordering. */
0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
0x20,0x22,0x23,0x28,0x29,0x2A,0x2B,0x2C,0x2F,0x30,0x31,0x32,0x33,0x34,0x35,0x36,
0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,0x40,0x41,0x42,0x43,0x44,0x45,0x46,
0x47,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xA9,0xAA,0xAB,0xAC,0xAD,
0x4E,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xAF,0xB0,0xB1,0xB2,0xB3,
0x4A,0x4C,0x5A,0x60,0x7B,0x7F,0x98,0x4F,0x49,0x51,0x4A,0x4B,0x4C,0x5A,0x60,0x63,
0x64,0x65,0x6E,0x6F,0x70,0x71,0x7B,0x84,0x85,0x86,0x7F,0x80,0x9A,0x9B,0x9C,0x98,
0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0x94,0xBB,0xBC,0xBD,0xBE,0xBF,0xC0,0x4D,0x81,
0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0x55,0x8A,0xCC,0x4D,0x81,
0xCD,0xCE,0xCF,0xD0,0xD1,0xD2,0xD3,0x26,0x27,0xD4,0x20,0x49,0x4B,0x80,0x82,0x82,
0xD5,0xD6,0x24,0x25,0x2D,0x2E,0xD7,0xD8,0xA6,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
};
+
+/*
+ * unsigned char casefold[]
+ *
+ * Defines the mapping to lowercase characters on the Macintosh
+ *
+ * "Inverse" of the 'casefold' from ARDI's code.
+ */
static unsigned char casefold[256] = {
/* casefold[c] is the Macintosh-lowercase equivalent of character
   code c; codes with no lowercase form map to themselves. */
0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,
0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,
0x40,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x5B,0x5C,0x5D,0x5E,0x5F,
0x41,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,
0x8A,0x8C,0x8D,0x8E,0x96,0x9A,0x9F,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,
0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F,
0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xBE,0xBF,
0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF,
0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0x88,0x8B,0x9B,0xCF,0xCF,
0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
};
+
+/*================ Global functions ================*/
+
+/*
+ * Hash a string to an integer in a case-independent way
+ */
+unsigned int hfs_strhash(const struct hfs_name *cname)
+{
+ /* Currently just sum of the 'order' of first and last characters */
+ return ((unsigned int)caseorder[cname->Name[0]] +
+ (unsigned int)caseorder[cname->Name[cname->Len - 1]]);
+}
+
+/*
+ * Compare two strings in the HFS filename character ordering
+ * Returns positive, negative, or zero, not just 0 or (+/-)1
+ *
+ * Equivalent to ARDI's call:
+ * ROMlib_RelString(s1+1, s2+1, true, false, (s1[0]<<16) | s2[0])
+ */
+int hfs_strcmp(const struct hfs_name *s1, const struct hfs_name *s2)
+{
+ int len, tmp;
+ const unsigned char *p1, *p2;
+
+ if (!s1 || !s2) {
+ return 0;
+ }
+
+ len = (s1->Len > s2->Len) ? s2->Len : s1->Len;
+ p1 = s1->Name;
+ p2 = s2->Name;
+
+ while (len--) {
+ if ((tmp = (int)caseorder[*(p1++)]-(int)caseorder[*(p2++)])) {
+ return tmp;
+ }
+ }
+ return s1->Len - s2->Len;
+}
+
+/*
+ * Test for equality of two strings in the HFS filename character ordering.
+ */
+int hfs_streq(const struct hfs_name *s1, const struct hfs_name *s2)
+{
+ int len;
+ const unsigned char *p1, *p2;
+
+ if (!s1 || !s2 || (s1->Len != s2->Len)) {
+ return 0;
+ }
+
+ len = s1->Len;
+ p1 = s1->Name;
+ p2 = s2->Name;
+
+ while (len--) {
+ if (caseorder[*(p1++)] != caseorder[*(p2++)]) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * Convert a string to the Macintosh version of lower case.
+ */
+void hfs_tolower(unsigned char *p, int len)
+{
+ while (len--) {
+ *p = casefold[*p];
+ ++p;
+ }
+}
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
new file mode 100644
index 000000000..897130297
--- /dev/null
+++ b/fs/hfs/super.c
@@ -0,0 +1,527 @@
+/*
+ * linux/fs/hfs/super.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains hfs_read_super(), some of the super_ops and
+ * init_module() and cleanup_module(). The remaining super_ops are in
+ * inode.c since they deal with inodes.
+ *
+ * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ *
+ * The code in this file initializes some structures which contain
+ * pointers by calling memset(&foo, 0, sizeof(foo)).
+ * This produces the desired behavior only due to the non-ANSI
+ * assumption that the machine representation of NULL is all zeros.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+#include <linux/config.h> /* for CONFIG_MAC_PARTITION */
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+/*================ Forward declarations ================*/
+
+static void hfs_read_inode(struct inode *inode);
+static void hfs_put_super(struct super_block *);
+static int hfs_statfs(struct super_block *, struct statfs *, int);
+static void hfs_write_super(struct super_block *);
+
+/*================ Global variables ================*/
+
/* VFS entry points for HFS; slots left NULL fall back to the
   VFS default behavior. */
static struct super_operations hfs_super_operations = {
	hfs_read_inode, /* read_inode */
	NULL, /* write_inode */
	hfs_put_inode, /* put_inode - in inode.c */
	NULL, /* delete inode */
	hfs_notify_change, /* notify_change - in inode.c */
	hfs_put_super, /* put_super */
	hfs_write_super, /* write_super */
	hfs_statfs, /* statfs */
	NULL /* remount_fs */
};
+
+/*================ File-local variables ================*/
+
/* Registration record handed to (un)register_filesystem(). */
static struct file_system_type hfs_fs = {
	"hfs",			/* filesystem name used by mount(2) */
	FS_REQUIRES_DEV,	/* must be mounted from a block device */
	hfs_read_super,		/* mount-time entry point (below) */
	NULL};
+
+/*================ File-local functions ================*/
+
+/*
+ * hfs_read_inode()
+ *
+ * this doesn't actually do much. hfs_iget actually fills in the
+ * necessary inode information.
+ */
static void hfs_read_inode(struct inode *inode)
{
	/* Deliberately minimal: hfs_iget() fills in the real fields
	   later; an i_mode of 0 marks the inode as not yet usable. */
	inode->i_mode = 0;
	inode->i_op = NULL;
}
+
+
+/*
+ * hfs_write_super()
+ *
+ * Description:
+ * This function is called by the VFS only. When the filesystem
+ * is mounted r/w it updates the MDB on disk.
+ * Input Variable(s):
+ * struct super_block *sb: Pointer to the hfs superblock
+ * Output Variable(s):
+ * NONE
+ * Returns:
+ * void
+ * Preconditions:
+ * 'sb' points to a "valid" (struct super_block).
+ * Postconditions:
+ * The MDB is marked 'unsuccessfully unmounted' by clearing bit 8 of drAtrb
+ * (hfs_put_super() must set this flag!). Some MDB fields are updated
+ * and the MDB buffer is written to disk by calling hfs_mdb_commit().
+ */
+static void hfs_write_super(struct super_block *sb)
+{
+ struct hfs_mdb *mdb = HFS_SB(sb)->s_mdb;
+
+ /* is this a valid hfs superblock? */
+ if (!sb || sb->s_magic != HFS_SUPER_MAGIC) {
+ return;
+ }
+
+ if (!(sb->s_flags & MS_RDONLY)) {
+ /* sync everything to the buffers */
+ hfs_mdb_commit(mdb, 0);
+ }
+ sb->s_dirt = 0;
+}
+
+/*
+ * hfs_put_super()
+ *
+ * This is the put_super() entry in the super_operations structure for
+ * HFS filesystems. The purpose is to release the resources
+ * associated with the superblock sb.
+ */
/*
 * Release all resources associated with superblock 'sb' at unmount.
 */
static void hfs_put_super(struct super_block *sb)
{
	struct hfs_mdb *mdb = HFS_SB(sb)->s_mdb;

	lock_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* flush any remaining MDB changes before tearing down */
		hfs_mdb_commit(mdb, 0);
		sb->s_dirt = 0;
	}

	/* release the MDB's resources */
	hfs_mdb_put(mdb, sb->s_flags & MS_RDONLY);

	/* restore default blocksize for the device */
	set_blocksize(sb->s_dev, BLOCK_SIZE);

	/* invalidate the superblock */
	sb->s_dev = 0;

	/* balances the MOD_INC_USE_COUNT in hfs_read_super() */
	MOD_DEC_USE_COUNT;

	unlock_super(sb);
	return;
}
+
+/*
+ * hfs_statfs()
+ *
+ * This is the statfs() entry in the super_operations structure for
+ * HFS filesystems. The purpose is to return various data about the
+ * filesystem.
+ *
+ * XXX: changed f_files/f_ffree to reflect the fs_ablock/free_ablocks.
+ */
+static int hfs_statfs(struct super_block *sb, struct statfs *buf, int len)
+{
+ struct hfs_mdb *mdb = HFS_SB(sb)->s_mdb;
+ struct statfs tmp;
+
+ tmp.f_type = HFS_SUPER_MAGIC;
+ tmp.f_bsize = HFS_SECTOR_SIZE;
+ tmp.f_blocks = mdb->alloc_blksz * mdb->fs_ablocks;
+ tmp.f_bfree = mdb->alloc_blksz * mdb->free_ablocks;
+ tmp.f_bavail = tmp.f_bfree;
+ tmp.f_files = mdb->fs_ablocks; /* According to the statfs manual page, -1 is the */
+ tmp.f_ffree = mdb->free_ablocks; /* correct value when the meaning is undefined. */
+ tmp.f_namelen = HFS_NAMELEN;
+
+ return copy_to_user(buf, &tmp, len) ? -EFAULT : 0;
+}
+
+/*
+ * parse_options()
+ *
+ * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger
+ * This function is called by hfs_read_super() to parse the mount options.
+ */
static int parse_options(char *options, struct hfs_sb_info *hsb, int *part)
{
	/* Returns 1 on success, 0 on any malformed option. */
	char *this_char, *value;
	char names, fork;	/* '?' = option not given; resolved below */

	/* initialize the sb with defaults */
	memset(hsb, 0, sizeof(*hsb));
	hsb->magic = HFS_SB_MAGIC;
	hsb->s_uid = current->uid;
	hsb->s_gid = current->gid;
	hsb->s_umask = current->fs->umask;
	hsb->s_type = 0x3f3f3f3f;	/* == '????' */
	hsb->s_creator = 0x3f3f3f3f;	/* == '????' */
	hsb->s_lowercase = 0;
	hsb->s_quiet = 0;
	hsb->s_afpd = 0;
	hsb->s_conv = 'b';
	names = '?';
	fork = '?';
	*part = 0;

	if (!options) {
		goto done;
	}
	/* NOTE(review): strtok() mutates 'options' in place and keeps
	   static state; assumes no concurrent callers -- confirm. */
	for (this_char = strtok(options,","); this_char;
	     this_char = strtok(NULL,",")) {
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;	/* split "key=value" in place */
		}
		/* Numeric-valued options */
		if (!strcmp(this_char,"uid")) {
			if (!value || !*value) {
				return 0;
			}
			hsb->s_uid = simple_strtoul(value,&value,0);
			if (*value) {
				return 0;	/* trailing junk */
			}
		} else if (!strcmp(this_char,"gid")) {
			if (!value || !*value) {
				return 0;
			}
			hsb->s_gid = simple_strtoul(value,&value,0);
			if (*value) {
				return 0;
			}
		} else if (!strcmp(this_char,"umask")) {
			if (!value || !*value) {
				return 0;
			}
			/* umask is given in octal, like umask(1) */
			hsb->s_umask = simple_strtoul(value,&value,8);
			if (*value) {
				return 0;
			}
		} else if (!strcmp(this_char,"part")) {
			if (!value || !*value) {
				return 0;
			}
			*part = simple_strtoul(value,&value,0);
			if (*value) {
				return 0;
			}
		/* String-valued options */
		} else if (!strcmp(this_char,"type") && value) {
			/* Mac type/creator codes are exactly 4 bytes */
			if (strlen(value) != 4) {
				return 0;
			}
			hsb->s_type = hfs_get_nl(value);
		} else if (!strcmp(this_char,"creator") && value) {
			if (strlen(value) != 4) {
				return 0;
			}
			hsb->s_creator = hfs_get_nl(value);
		/* Boolean-valued options */
		} else if (!strcmp(this_char,"quiet")) {
			if (value) {
				return 0;	/* takes no argument */
			}
			hsb->s_quiet = 1;
		} else if (!strcmp(this_char,"afpd")) {
			if (value) {
				return 0;
			}
			hsb->s_afpd = 1;
		/* Multiple choice options: either the full word or its
		   one-character abbreviation is accepted */
		} else if (!strcmp(this_char,"names") && value) {
			if ((*value && !value[1] && strchr("ntal78c",*value)) ||
			    !strcmp(value,"netatalk") ||
			    !strcmp(value,"trivial") ||
			    !strcmp(value,"alpha") ||
			    !strcmp(value,"latin") ||
			    !strcmp(value,"7bit") ||
			    !strcmp(value,"8bit") ||
			    !strcmp(value,"cap")) {
				names = *value;
			} else {
				return 0;
			}
		} else if (!strcmp(this_char,"fork") && value) {
			if ((*value && !value[1] && strchr("nsdc",*value)) ||
			    !strcmp(value,"netatalk") ||
			    !strcmp(value,"single") ||
			    !strcmp(value,"double") ||
			    !strcmp(value,"cap")) {
				fork = *value;
			} else {
				return 0;
			}
		} else if (!strcmp(this_char,"case") && value) {
			if ((*value && !value[1] && strchr("la",*value)) ||
			    !strcmp(value,"lower") ||
			    !strcmp(value,"asis")) {
				hsb->s_lowercase = (*value == 'l');
			} else {
				return 0;
			}
		} else if (!strcmp(this_char,"conv") && value) {
			if ((*value && !value[1] && strchr("bta",*value)) ||
			    !strcmp(value,"binary") ||
			    !strcmp(value,"text") ||
			    !strcmp(value,"auto")) {
				hsb->s_conv = *value;
			} else {
				return 0;
			}
		} else {
			return 0;	/* unrecognized option */
		}
	}

done:
	/* Parse the "fork" and "names" options: fork defaults to CAP,
	   or to Netatalk when afpd=1 was given */
	if (fork == '?') {
		fork = hsb->s_afpd ? 'n' : 'c';
	}
	switch (fork) {
	default:
	case 'c':
		hsb->s_ifill = hfs_cap_ifill;
		hsb->s_reserved1 = hfs_cap_reserved1;
		hsb->s_reserved2 = hfs_cap_reserved2;
		break;

	case 's':
		hfs_warn("hfs_fs: AppleSingle not yet implemented.\n");
		return 0;
		/* break; */

	case 'd':
		hsb->s_ifill = hfs_dbl_ifill;
		hsb->s_reserved1 = hfs_dbl_reserved1;
		hsb->s_reserved2 = hfs_dbl_reserved2;
		break;

	case 'n':
		hsb->s_ifill = hfs_nat_ifill;
		hsb->s_reserved1 = hfs_nat_reserved1;
		hsb->s_reserved2 = hfs_nat_reserved2;
		break;
	}

	/* the name scheme defaults to whatever matches the fork scheme */
	if (names == '?') {
		names = fork;
	}
	switch (names) {
	default:
	case 'n':
		hsb->s_nameout = hfs_colon2mac;
		hsb->s_namein = hfs_mac2nat;
		break;

	case 'c':
		hsb->s_nameout = hfs_colon2mac;
		hsb->s_namein = hfs_mac2cap;
		break;

	case 't':
		hsb->s_nameout = hfs_triv2mac;
		hsb->s_namein = hfs_mac2triv;
		break;

	case '7':
		hsb->s_nameout = hfs_prcnt2mac;
		hsb->s_namein = hfs_mac2seven;
		break;

	case '8':
		hsb->s_nameout = hfs_prcnt2mac;
		hsb->s_namein = hfs_mac2eight;
		break;

	case 'l':
		hsb->s_nameout = hfs_latin2mac;
		hsb->s_namein = hfs_mac2latin;
		break;

	case 'a': /* 's' and 'd' are unadvertised aliases for 'alpha', */
	case 's': /* since 'alpha' is the default if fork=s or fork=d. */
	case 'd': /* (It is also helpful for poor typists!) */
		hsb->s_nameout = hfs_prcnt2mac;
		hsb->s_namein = hfs_mac2alpha;
		break;
	}

	return 1;
}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_read_super()
+ *
+ * This is the function that is responsible for mounting an HFS
+ * filesystem. It performs all the tasks necessary to get enough data
+ * from the disk to read the root inode. This includes parsing the
+ * mount options, dealing with Macintosh partitions, reading the
+ * superblock and the allocation bitmap blocks, calling
+ * hfs_btree_init() to get the necessary data about the extents and
+ * catalog B-trees and, finally, reading the root inode into memory.
+ */
struct super_block *hfs_read_super(struct super_block *s, void *data,
				   int silent)
{
	struct hfs_mdb *mdb;
	struct hfs_cat_key key;
	kdev_t dev = s->s_dev;
#ifndef CONFIG_MAC_PARTITION
	hfs_s32 part_size, part_start;
#endif
	struct inode *root_inode;
	int part;	/* which HFS partition to mount (part= option) */

	if (!parse_options((char *)data, HFS_SB(s), &part)) {
		hfs_warn("hfs_fs: unable to parse mount options.\n");
		goto bail3;
	}

	/* in case someone tries to unload the module while we wait on I/O */
	MOD_INC_USE_COUNT;

	lock_super(s);

	/* set the device driver to 512-byte blocks */
	set_blocksize(dev, HFS_SECTOR_SIZE);

	/* look for a partition table and find the correct partition.
	   NOTE(review): with CONFIG_MAC_PARTITION the generic partition
	   code presumably already remapped the device, hence the MDB is
	   read from offset 0 in that configuration -- confirm. */
#ifndef CONFIG_MAC_PARTITION
	if (hfs_part_find(s, part, silent, &part_size, &part_start)) {
		goto bail2;
	}

	mdb = hfs_mdb_get(s, s->s_flags & MS_RDONLY, part_start);
#else
	mdb = hfs_mdb_get(s, s->s_flags & MS_RDONLY, 0);
#endif
	if (!mdb) {
		if (!silent) {
			printk("VFS: Can't find a HFS filesystem on dev %s.\n",
			       kdevname(dev));
		}
		goto bail2;
	}
	HFS_SB(s)->s_mdb = mdb;
	INIT_LIST_HEAD(&mdb->entry_dirty);

	/* refuse volumes whose next CNID would collide with the bits
	   used by HFS_ITYPE() to encode the inode type */
	if (HFS_ITYPE(mdb->next_id) != 0) {
		hfs_warn("hfs_fs: too many files.\n");
		goto bail1;
	}

	s->s_magic = HFS_SUPER_MAGIC;
	s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
	s->s_blocksize = HFS_SECTOR_SIZE;
	s->s_op = &hfs_super_operations;

	/* try to get the root inode */
	hfs_cat_build_key(htonl(HFS_POR_CNID),
			  (struct hfs_name *)(mdb->vname), &key);

	root_inode = hfs_iget(hfs_cat_get(mdb, &key), HFS_ITYPE_NORM, NULL);
	if (!root_inode)
		goto bail_no_root;

	/* cache the dentry in the inode */
	s->s_root =
	  HFS_I(root_inode)->entry->sys_entry[HFS_ITYPE_TO_INT(HFS_ITYPE_NORM)] =
	  d_alloc_root(root_inode, NULL);
	if (!s->s_root)
		goto bail_no_root;

	/* HFS_SUPERBLK prevents the root inode from being flushed
	 * inadvertently. */
	HFS_I(root_inode)->entry->state = HFS_SUPERBLK;
	s->s_root->d_op = &hfs_dentry_operations;

	/* everything's okay */
	unlock_super(s);
	return s;

bail_no_root:
	/* root lookup failed after the MDB was set up */
	hfs_warn("hfs_fs: get root inode failed.\n");
	iput(root_inode);
bail1:
	hfs_mdb_put(mdb, s->s_flags & MS_RDONLY);
bail2:
	set_blocksize(dev, BLOCK_SIZE);
	MOD_DEC_USE_COUNT;
	unlock_super(s);
bail3:
	s->s_dev = 0;
	return NULL;
}
+
/* Boot-time (non-module) initialization entry point. */
__initfunc(int init_hfs_fs(void))
{
	hfs_cat_init();		/* initialize the catalog code (catalog.c) */
	return register_filesystem(&hfs_fs);
}
+
+#ifdef MODULE
/* Module load entry point; returns 0 on success, negative errno
   on failure. */
int init_module(void) {
	int error;

#if defined(DEBUG_SIZES) || defined(DEBUG_ALL)
	/* NOTE(review): debug check that the HFS-private inode and
	   superblock info fit within the space ext2's equivalents
	   occupy -- presumably the largest union members; confirm. */
	hfs_warn("HFS inode: %d bytes available\n",
		 sizeof(struct ext2_inode_info)-sizeof(struct hfs_inode_info));
	hfs_warn("HFS super_block: %d bytes available\n",
		 sizeof(struct ext2_sb_info)-sizeof(struct hfs_sb_info));
	if ((sizeof(struct hfs_inode_info)>sizeof(struct ext2_inode_info)) ||
	    (sizeof(struct hfs_sb_info)>sizeof(struct ext2_sb_info))) {
		return -ENOMEM; /* well sort of */
	}
#endif
	error = init_hfs_fs();
	if (!error) {
		/* register_symtab(NULL); */
	}
	return error;
}
+
/* Module unload: free catalog resources, then unregister the fs. */
void cleanup_module(void) {
	hfs_cat_free();
	unregister_filesystem(&hfs_fs);
}
+#endif
+
#if defined(DEBUG_ALL) || defined(DEBUG_MEM)
/* NOTE(review): debug-only allocation counter, updated elsewhere in
   the module; units (bytes vs. objects) not visible here -- confirm. */
long int hfs_alloc = 0;
#endif
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
new file mode 100644
index 000000000..fc7368a75
--- /dev/null
+++ b/fs/hfs/sysdep.c
@@ -0,0 +1,103 @@
+/*
+ * linux/fs/hfs/sysdep.c
+ *
+ * Copyright (C) 1996 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains the code to do various system dependent things.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
static int hfs_hash_dentry(struct dentry *, struct qstr *);
static int hfs_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
/* Dentry callbacks giving the dcache HFS's case-insensitive
   name handling (see hfs_hash_dentry/hfs_compare_dentry below). */
struct dentry_operations hfs_dentry_operations =
{
	NULL,	/* d_validate(struct dentry *) */
	hfs_hash_dentry,	/* d_hash */
	hfs_compare_dentry,	/* d_compare */
	NULL	/* d_delete(struct dentry *) */
};
+
+/*
+ * hfs_buffer_get()
+ *
+ * Return a buffer for the 'block'th block of the media.
+ * If ('read'==0) then the buffer is not read from disk.
+ */
hfs_buffer hfs_buffer_get(hfs_sysmdb sys_mdb, int block, int read) {
	hfs_buffer tmp = HFS_BAD_BUFFER;

	if (read) {
		tmp = bread(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
	} else {
		/* caller intends to overwrite the whole block, so skip
		   the disk read and just claim the buffer is up to date */
		tmp = getblk(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
		if (tmp) {
			mark_buffer_uptodate(tmp, 1);
		}
	}
	if (!tmp) {
		hfs_error("hfs_fs: unable to read block 0x%08x from dev %s\n",
			  block, hfs_mdb_name(sys_mdb));
	}

	return tmp;
}
+
+/* dentry case-handling: just lowercase everything */
+
+/* should we use hfs_strhash? if so, it probably needs to be beefed
+ * up a little. */
static int hfs_hash_dentry(struct dentry *dentry, struct qstr *this)
{
	unsigned char name[HFS_NAMELEN];
	int len = this->len;

	/* over-long names keep the default (case-sensitive) hash */
	if (len > HFS_NAMELEN)
		return 0;

	/* hash a lowercased copy so that names differing only in case
	   land in the same dcache bucket as their d_compare matches */
	strncpy(name, this->name, len);
	hfs_tolower(name, len);
	this->hash = full_name_hash(name, len);
	return 0;
}
+
+static int hfs_compare_dentry(struct dentry *dentry, struct qstr *a,
+ struct qstr *b)
+{
+ struct hfs_name s1, s2;
+
+ if (a->len != b->len) return 1;
+
+ if ((s1.Len = s2.Len = a->len) > HFS_NAMELEN)
+ return 1;
+
+ strncpy(s1.Name, a->name, s1.Len);
+ strncpy(s2.Name, b->name, s2.Len);
+ return hfs_streq(&s1, &s2);
+}
+
+
+/* toss a catalog entry. this does it by dropping the dentry. */
+void hfs_cat_prune(struct hfs_cat_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct dentry *de = entry->sys_entry[i];
+ if (de) {
+ dget(de);
+ d_drop(de);
+ dput(de);
+ }
+ }
+}
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
new file mode 100644
index 000000000..fe8d02ad6
--- /dev/null
+++ b/fs/hfs/trans.c
@@ -0,0 +1,556 @@
+/*
+ * linux/fs/hfs/trans.c
+ *
+ * Copyright (C) 1995-1997 Paul H. Hargrove
+ * This file may be distributed under the terms of the GNU Public License.
+ *
+ * This file contains routines for converting between the Macintosh
+ * character set and various other encodings. This includes dealing
+ * with ':' vs. '/' as the path-element separator.
+ *
+ * Latin-1 translation based on code contributed by Holger Schemel
+ * (aeglos@valinor.owl.de).
+ *
+ * The '8-bit', '7-bit ASCII' and '7-bit alphanumeric' encodings are
+ * implementations of the three encodings recommended by Apple in the
+ * document "AppleSingle/AppleDouble Formats: Developer's Note
+ * (9/94)". This document is available from Apple's Technical
+ * Information Library from the World Wide Web server
+ * www.info.apple.com.
+ *
+ * The 'CAP' encoding is an implementation of the naming scheme used
+ * by the Columbia AppleTalk Package, available for anonymous FTP from
+ * ????.
+ *
+ * "XXX" in a comment is a note to myself to consider changing something.
+ *
+ * In function preconditions the term "valid" applied to a pointer to
+ * a structure means that the pointer is non-NULL and the structure it
+ * points to has all fields initialized to consistent values.
+ */
+
+#include "hfs.h"
+#include <linux/hfs_fs_sb.h>
+#include <linux/hfs_fs_i.h>
+#include <linux/hfs_fs.h>
+
+/*================ File-local variables ================*/
+
+/* int->ASCII map for a single hex digit */
static char hex[16] = {'0','1','2','3','4','5','6','7',
		       '8','9','a','b','c','d','e','f'}; /* lowercase, per the mangling schemes below */
+/*
+ * Latin-1 to Mac character set map
+ *
+ * For the sake of consistency this map is generated from the Mac to
+ * Latin-1 map the first time it is needed. This means there is just
+ * one map to maintain.
+ */
+static unsigned char latin2mac_map[128]; /* initially all zero */
+
+/*
+ * Mac to Latin-1 map for the upper 128 characters (both have ASCII in
+ * the lower 128 positions)
+ */
static unsigned char mac2latin_map[128] = {
	/* a 0x00 entry means the Mac character has no Latin-1 equivalent */
	0xC4, 0xC5, 0xC7, 0xC9, 0xD1, 0xD6, 0xDC, 0xE1,
	0xE0, 0xE2, 0xE4, 0xE3, 0xE5, 0xE7, 0xE9, 0xE8,
	0xEA, 0xEB, 0xED, 0xEC, 0xEE, 0xEF, 0xF1, 0xF3,
	0xF2, 0xF4, 0xF6, 0xF5, 0xFA, 0xF9, 0xFB, 0xFC,
	0x00, 0xB0, 0xA2, 0xA3, 0xA7, 0xB7, 0xB6, 0xDF,
	0xAE, 0xA9, 0x00, 0xB4, 0xA8, 0x00, 0xC6, 0xD8,
	0x00, 0xB1, 0x00, 0x00, 0xA5, 0xB5, 0xF0, 0x00,
	0x00, 0x00, 0x00, 0xAA, 0xBA, 0x00, 0xE6, 0xF8,
	0xBF, 0xA1, 0xAC, 0x00, 0x00, 0x00, 0x00, 0xAB,
	0xBB, 0x00, 0xA0, 0xC0, 0xC3, 0xD5, 0x00, 0x00,
	0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF7, 0x00,
	0xFF, 0x00, 0x00, 0xA4, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xB8, 0x00, 0x00, 0xC2, 0xCA, 0xC1,
	0xCB, 0xC8, 0xCD, 0xCE, 0xCF, 0xCC, 0xD3, 0xD4,
	0x00, 0xD2, 0xDA, 0xDB, 0xD9, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
+
+/*================ File-local functions ================*/
+
+/*
+ * dehex()
+ *
+ * Given a hexadecimal digit in ASCII, return the integer representation.
+ */
/*
 * dehex()
 *
 * Given a hexadecimal digit in ASCII, return the integer it
 * represents, or 0xff if the character is not a hex digit.
 * (The 'const' qualifier on the return type was meaningless
 * for a by-value return and has been dropped.)
 */
static inline unsigned char dehex(char c) {
	if ((c>='0')&&(c<='9')) {
		return c-'0';
	}
	if ((c>='a')&&(c<='f')) {
		return c-'a'+10;
	}
	if ((c>='A')&&(c<='F')) {
		return c-'A'+10;
	}
	return 0xff;
}
+
+/*================ Global functions ================*/
+
+/*
+ * hfs_mac2nat()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the Netatalk name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL terminated.
+ *
+ * The name-mangling works as follows:
+ * Characters 32-126 (' '-'~') except '/' and any initial '.' are passed
+ * unchanged from input to output. The remaining characters are replaced
+ * by three characters: ':xx' where xx is the hexadecimal representation
+ * of the character, using lowercase 'a' through 'f'.
+ */
+int hfs_mac2nat(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ /* Special case for .AppleDesktop which in the
+ distant future may be a pseudodirectory. */
+ if (strncmp(".AppleDesktop", p, len) == 0) {
+ strncpy(out, p, 13);
+ return 13;
+ }
+
+ while (len--) {
+ c = *p++;
+ if ((c<32) || (c=='/') || (c>126) || (!count && (c=='.'))) {
+ *out++ = ':';
+ *out++ = hex[(c>>4) & 0xf];
+ *out++ = hex[c & 0xf];
+ count += 3;
+ } else {
+ *out++ = c;
+ count++;
+ }
+ }
+ return count;
+}
+
+/*
+ * hfs_mac2cap()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the CAP name-mangling scheme, returning the length of the mangled
+ * filename. Note that the output string is not NULL terminated.
+ *
+ * The name-mangling works as follows:
+ * Characters 32-126 (' '-'~') except '/' are passed unchanged from
+ * input to output. The remaining characters are replaced by three
+ * characters: ':xx' where xx is the hexadecimal representation of the
+ * character, using lowercase 'a' through 'f'.
+ */
+int hfs_mac2cap(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ while (len--) {
+ c = *p++;
+ if ((c<32) || (c=='/') || (c>126)) {
+ *out++ = ':';
+ *out++ = hex[(c>>4) & 0xf];
+ *out++ = hex[c & 0xf];
+ count += 3;
+ } else {
+ *out++ = c;
+ count++;
+ }
+ }
+ return count;
+}
+
+/*
+ * hfs_mac2eight()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the '8-bit' name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This is one of the three recommended naming conventions described
+ * in Apple's document "AppleSingle/AppleDouble Formats: Developer's
+ * Note (9/94)"
+ *
+ * The name-mangling works as follows:
+ * Characters 0, '%' and '/' are replaced by three characters: '%xx'
+ * where xx is the hexadecimal representation of the character, using
+ * lowercase 'a' through 'f'. All other characters are passed
+ * unchanged from input to output. Note that this format is mainly
+ * implemented for completeness and is rather hard to read.
+ */
+int hfs_mac2eight(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ while (len--) {
+ c = *p++;
+ if (!c || (c=='/') || (c=='%')) {
+ *out++ = '%';
+ *out++ = hex[(c>>4) & 0xf];
+ *out++ = hex[c & 0xf];
+ count += 3;
+ } else {
+ *out++ = c;
+ count++;
+ }
+ }
+ return count;
+}
+
+/*
+ * hfs_mac2seven()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the '7-bit ASCII' name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This is one of the three recommended naming conventions described
+ * in Apple's document "AppleSingle/AppleDouble Formats: Developer's
+ * Note (9/94)"
+ *
+ * The name-mangling works as follows:
+ * Characters 0, '%', '/' and 128-255 are replaced by three
+ * characters: '%xx' where xx is the hexadecimal representation of the
+ * character, using lowercase 'a' through 'f'. All other characters
+ * are passed unchanged from input to output. Note that control
+ * characters (including newline) and space are unchanged make reading
+ * these filenames difficult.
+ */
+int hfs_mac2seven(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ while (len--) {
+ c = *p++;
+ if (!c || (c=='/') || (c=='%') || (c&0x80)) {
+ *out++ = '%';
+ *out++ = hex[(c>>4) & 0xf];
+ *out++ = hex[c & 0xf];
+ count += 3;
+ } else {
+ *out++ = c;
+ count++;
+ }
+ }
+ return count;
+}
+
+/*
+ * hfs_mac2alpha()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the '7-bit alphanumeric' name-mangling scheme, returning the length
+ * of the mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This is one of the three recommended naming conventions described
+ * in Apple's document "AppleSingle/AppleDouble Formats: Developer's
+ * Note (9/94)"
+ *
+ * The name-mangling works as follows:
+ * The characters 'a'-'z', 'A'-'Z', '0'-'9', '_' and the last '.' in
+ * the filename are passed unchanged from input to output. All
+ * remaining characters (including any '.'s other than the last) are
+ * replaced by three characters: '%xx' where xx is the hexadecimal
+ * representation of the character, using lowercase 'a' through 'f'.
+ */
int hfs_mac2alpha(char *out, const struct hfs_name *in) {
	unsigned char c;
	const unsigned char *p = in->Name;
	int len = in->Len;
	int count = 0;
	const unsigned char *lp; /* last period */

	/* strrchr() would be good here, but 'in' is not null-terminated */
	for (lp=p+len-1; (lp>=p)&&(*lp!='.'); --lp) {}
	/* lp now points one past the last '.', or to Name[0] if there
	   is no '.' at all (so p==lp below can then never be true,
	   since p has always been advanced at least once) */
	++lp;

	while (len--) {
		c = *p++;
		/* (p==lp) holds exactly when 'c' is the last '.' in the
		   name, because p was incremented before the test */
		if ((p==lp) || ((c>='0')&&(c<='9')) || ((c>='A')&&(c<='Z')) ||
		    ((c>='a')&&(c<='z')) || (c=='_')) {
			*out++ = c;
			count++;
		} else {
			*out++ = '%';
			*out++ = hex[(c>>4) & 0xf];
			*out++ = hex[c & 0xf];
			count += 3;
		}
	}
	return count;
}
+
+/*
+ * hfs_mac2triv()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the 'trivial' name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * The name-mangling works as follows:
+ * The character '/', which is illegal in Linux filenames is replaced
+ * by ':' which never appears in HFS filenames. All other characters
+ * are passed unchanged from input to output.
+ */
+int hfs_mac2triv(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ while (len--) {
+ c = *p++;
+ if (c=='/') {
+ *out++ = ':';
+ } else {
+ *out++ = c;
+ }
+ count++;
+ }
+ return count;
+}
+
+/*
+ * hfs_mac2latin()
+ *
+ * Given a 'Pascal String' (a string preceded by a length byte) in
+ * the Macintosh character set produce the corresponding filename using
+ * the 'Latin-1' name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * The Macintosh character set and Latin-1 are both extensions of the
+ * ASCII character set. Some, but certainly not all, of the characters
+ * in the Macintosh character set are also in Latin-1 but not with the
+ * same encoding. This name-mangling scheme replaces the characters in
+ * the Macintosh character set that have Latin-1 equivalents by those
+ * equivalents; the characters 32-126, excluding '/' and '%', are
+ * passed unchanged from input to output. The remaining characters
+ * are replaced by three characters: '%xx' where xx is the hexadecimal
+ * representation of the character, using lowercase 'a' through 'f'.
+ *
+ * The array mac2latin_map[] indicates the correspondence between the
+ * two character sets. The byte in element x-128 gives the Latin-1
+ * encoding of the character with encoding x in the Macintosh
+ * character set. A value of zero indicates Latin-1 has no
+ * corresponding character.
+ */
+int hfs_mac2latin(char *out, const struct hfs_name *in) {
+ unsigned char c;
+ const unsigned char *p = in->Name;
+ int len = in->Len;
+ int count = 0;
+
+ while (len--) {
+ c = *p++;
+
+ if ((c & 0x80) && mac2latin_map[c & 0x7f]) {
+ *out++ = mac2latin_map[c & 0x7f];
+ count++;
+ } else if ((c>=32) && (c<=126) && (c!='/') && (c!='%')) {
+ *out++ = c;
+ count++;
+ } else {
+ *out++ = '%';
+ *out++ = hex[(c>>4) & 0xf];
+ *out++ = hex[c & 0xf];
+ count += 3;
+ }
+ }
+ return count;
+}
+
+/*
+ * hfs_colon2mac()
+ *
+ * Given an ASCII string (not null-terminated) and its length,
+ * generate the corresponding filename in the Macintosh character set
+ * using the 'CAP' name-mangling scheme, returning the length of the
+ * mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This routine is a inverse to hfs_mac2cap() and hfs_mac2nat().
+ * A ':' not followed by a 2-digit hexadecimal number (or followed
+ * by the codes for NULL or ':') is replaced by a '|'.
+ */
+void hfs_colon2mac(struct hfs_name *out, const char *in, int len) {
+ int hi, lo;
+ unsigned char code, c, *count;
+ unsigned char *p = out->Name;
+
+ out->Len = 0;
+ count = &out->Len;
+ while (len-- && (*count < HFS_NAMELEN)) {
+ c = *in++;
+ (*count)++;
+ if (c!=':') {
+ *p++ = c;
+ } else if ((len<2) ||
+ ((hi=dehex(in[0])) & 0xf0) ||
+ ((lo=dehex(in[1])) & 0xf0) ||
+ !(code = (hi << 4) | lo) ||
+ (code == ':')) {
+ *p++ = '|';
+ } else {
+ *p++ = code;
+ len -= 2;
+ in += 2;
+ }
+ }
+}
+
+/*
+ * hfs_prcnt2mac()
+ *
+ * Given an ASCII string (not null-terminated) and its length,
+ * generate the corresponding filename in the Macintosh character set
+ * using Apple's three recommended name-mangling schemes, returning
+ * the length of the mangled filename. Note that the output string is
+ * not NULL terminated.
+ *
+ * This routine is a inverse to hfs_mac2alpha(), hfs_mac2seven() and
+ * hfs_mac2eight().
+ * A '%' not followed by a 2-digit hexadecimal number (or followed
+ * by the code for NULL or ':') is unchanged.
+ * A ':' is replaced by a '|'.
+ */
+void hfs_prcnt2mac(struct hfs_name *out, const char *in, int len) {
+ int hi, lo;
+ unsigned char code, c, *count;
+ unsigned char *p = out->Name;
+
+ out->Len = 0;
+ count = &out->Len;
+ while (len-- && (*count < HFS_NAMELEN)) {
+ c = *in++;
+ (*count)++;
+ if (c==':') {
+ *p++ = '|';
+ } else if (c!='%') {
+ *p++ = c;
+ } else if ((len<2) ||
+ ((hi=dehex(in[0])) & 0xf0) ||
+ ((lo=dehex(in[1])) & 0xf0) ||
+ !(code = (hi << 4) | lo) ||
+ (code == ':')) {
+ *p++ = '%';
+ } else {
+ *p++ = code;
+ len -= 2;
+ in += 2;
+ }
+ }
+}
+
+/*
+ * hfs_triv2mac()
+ *
+ * Given an ASCII string (not null-terminated) and its length,
+ * generate the corresponding filename in the Macintosh character set
+ * using the 'trivial' name-mangling scheme, returning the length of
+ * the mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This routine is a inverse to hfs_mac2triv().
+ * A ':' is replaced by a '/'.
+ */
+void hfs_triv2mac(struct hfs_name *out, const char *in, int len) {
+ unsigned char c, *count;
+ unsigned char *p = out->Name;
+
+ out->Len = 0;
+ count = &out->Len;
+ while (len-- && (*count < HFS_NAMELEN)) {
+ c = *in++;
+ (*count)++;
+ if (c==':') {
+ *p++ = '/';
+ } else {
+ *p++ = c;
+ }
+ }
+}
+
+/*
+ * hfs_latin2mac()
+ *
+ * Given an Latin-1 string (not null-terminated) and its length,
+ * generate the corresponding filename in the Macintosh character set
+ * using the 'Latin-1' name-mangling scheme, returning the length of
+ * the mangled filename. Note that the output string is not NULL
+ * terminated.
+ *
+ * This routine is a inverse to hfs_latin2cap().
+ * A '%' not followed by a 2-digit hexadecimal number (or followed
+ * by the code for NULL or ':') is unchanged.
+ * A ':' is replaced by a '|'.
+ *
+ * Note that the character map is built the first time it is needed.
+ */
+void hfs_latin2mac(struct hfs_name *out, const char *in, int len)
+{
+ int hi, lo;
+ unsigned char code, c, *count;
+ unsigned char *p = out->Name;
+ static int map_initialized = 0;
+
+ if (!map_initialized) {
+ int i;
+
+ /* build the inverse mapping at run time */
+ for (i = 0; i < 128; i++) {
+ if ((c = mac2latin_map[i])) {
+ latin2mac_map[(int)c - 128] = i + 128;
+ }
+ }
+ map_initialized = 1;
+ }
+
+ out->Len = 0;
+ count = &out->Len;
+ while (len-- && (*count < HFS_NAMELEN)) {
+ c = *in++;
+ (*count)++;
+
+ if (c==':') {
+ *p++ = '|';
+ } else if (c!='%') {
+ if (c<128 || !(*p = latin2mac_map[c-128])) {
+ *p = c;
+ }
+ p++;
+ } else if ((len<2) ||
+ ((hi=dehex(in[0])) & 0xf0) ||
+ ((lo=dehex(in[1])) & 0xf0) ||
+ !(code = (hi << 4) | lo) ||
+ (code == ':')) {
+ *p++ = '%';
+ } else {
+ *p++ = code;
+ len -= 2;
+ in += 2;
+ }
+ }
+}
diff --git a/fs/hfs/version.c b/fs/hfs/version.c
new file mode 100644
index 000000000..8eb74084d
--- /dev/null
+++ b/fs/hfs/version.c
@@ -0,0 +1,10 @@
/*
 * linux/fs/hfs/version.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * This file may be distributed under the terms of the GNU Public License.
 *
 * This file contains the version string for this release.
 */

/* External linkage: other parts of the driver can reference this string. */
const char hfs_version[]="0.95+asun2";