Diffstat (limited to 'fs/locks.c')
-rw-r--r--  fs/locks.c | 435
1 file changed, 230 insertions, 205 deletions
diff --git a/fs/locks.c b/fs/locks.c
index 50f3709c7..3aa530897 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -97,6 +97,9 @@
  * Made mandatory locking a mount option. Default is not to allow mandatory
  * locking.
  * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
+ *
+ * Some adaptations for NFS support.
+ * Olaf Kirch (okir@monad.swb.de), Dec 1996,
  */
 
 #include <linux/malloc.h>
@@ -105,6 +108,8 @@
 #include <linux/errno.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 
@@ -121,12 +126,8 @@ static int posix_locks_conflict(struct file_lock *caller_fl,
 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
 static int flock_lock_file(struct file *filp, struct file_lock *caller,
                            unsigned int wait);
-static int posix_lock_file(struct file *filp, struct file_lock *caller,
-                           unsigned int wait);
-static int posix_locks_deadlock(struct task_struct *my_task,
-                                struct task_struct *blocked_task);
-static void posix_remove_locks(struct file_lock **before, struct task_struct *task);
-static void flock_remove_locks(struct file_lock **before, struct file *filp);
+static int posix_locks_deadlock(struct file_lock *caller,
+                                struct file_lock *blocker);
 
 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
@@ -137,7 +138,7 @@ static void locks_insert_block(struct file_lock *blocker, struct file_lock *wait
 static void locks_delete_block(struct file_lock *blocker, struct file_lock *waiter);
 static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait);
 
-static struct file_lock *file_lock_table = NULL;
+struct file_lock *file_lock_table = NULL;
 
 /* Free lock not inserted in any queue.
  */
@@ -161,6 +162,15 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
         (fl2->fl_end >= fl1->fl_start));
 }
 
+/* Check whether two locks have the same owner
+ */
+static inline int
+locks_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+{
+    return (fl1->fl_owner == fl2->fl_owner) &&
+           (fl1->fl_pid == fl2->fl_pid);
+}
+
 /* Insert waiter into blocker's block list.
  * We use a circular list so that processes can be easily woken up in
  * the order they blocked. The documentation doesn't require this but
@@ -211,6 +221,21 @@ static void locks_delete_block(struct file_lock *blocker, struct file_lock *waiter)
     return;
 }
 
+/* The following two are for the benefit of lockd.
+ */
+void
+posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
+{
+    locks_insert_block(blocker, waiter);
+}
+
+void
+posix_unblock_lock(struct file_lock *waiter)
+{
+    if (waiter->fl_prevblock)
+        locks_delete_block(waiter->fl_prevblock, waiter);
+}
+
 /* Wake up processes blocked waiting for blocker.
  * If told to wait then schedule the processes until the block list
  * is empty, otherwise empty the block list ourselves.
@@ -220,6 +245,8 @@ static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait)
     struct file_lock *waiter;
 
     while ((waiter = blocker->fl_nextblock) != NULL) {
+        if (waiter->fl_notify)
+            waiter->fl_notify(waiter);
         wake_up(&waiter->fl_wait);
         if (wait)
             /* Let the blocked process remove waiter from the
@@ -242,17 +269,20 @@ asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
 {
     struct file_lock file_lock;
     struct file *filp;
+    int err;
 
+    lock_kernel();
     if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
-        return (-EBADF);
-
-    if (!flock_make_lock(filp, &file_lock, cmd))
-        return (-EINVAL);
-
-    if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
-        return (-EBADF);
-
-    return (flock_lock_file(filp, &file_lock, (cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1));
+        err = -EBADF;
+    else if (!flock_make_lock(filp, &file_lock, cmd))
+        err = -EINVAL;
+    else if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
+        err = -EBADF;
+    else
+        err = flock_lock_file(filp, &file_lock,
+                              (cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1);
+    unlock_kernel();
+    return err;
 }
 
 /* Report the first existing lock that would conflict with l.
@@ -260,58 +290,57 @@ asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
  */
 int fcntl_getlk(unsigned int fd, struct flock *l)
 {
-    int error;
     struct flock flock;
     struct file *filp;
     struct file_lock *fl,file_lock;
+    int error;
 
     if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
         return (-EBADF);
 
-    error = verify_area(VERIFY_WRITE, l, sizeof(*l));
-    if (error)
-        return (error);
+    if (copy_from_user(&flock, l, sizeof(flock)))
+        return -EFAULT;
 
-    copy_from_user(&flock, l, sizeof(flock));
-    if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
-        (flock.l_type == F_SHLCK))
+    if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
         return (-EINVAL);
 
     if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
         return (-EINVAL);
 
-    if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) {
-        while (fl != NULL) {
-            if (posix_locks_conflict(&file_lock, fl)) {
-                flock.l_pid = fl->fl_owner->pid;
-                flock.l_start = fl->fl_start;
-                flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
-                    fl->fl_end - fl->fl_start + 1;
-                flock.l_whence = 0;
-                flock.l_type = fl->fl_type;
-                copy_to_user(l, &flock, sizeof(flock));
-                return (0);
-            }
-            fl = fl->fl_next;
-        }
+    if (filp->f_op->lock) {
+        error = filp->f_op->lock(filp->f_inode, filp,
+                                 F_GETLK, &file_lock);
+        if (error < 0)
+            return error;
+        fl = &file_lock;
+    } else {
+        fl = posix_test_lock(filp, &file_lock);
     }
-
-    flock.l_type = F_UNLCK;            /* no conflict found */
-    copy_to_user(l, &flock, sizeof(flock));
-    return (0);
+
+    if (fl != NULL) {
+        flock.l_pid = fl->fl_pid;
+        flock.l_start = fl->fl_start;
+        flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
+            fl->fl_end - fl->fl_start + 1;
+        flock.l_whence = 0;
+        flock.l_type = fl->fl_type;
+        return copy_to_user(l, &flock, sizeof(flock)) ? -EFAULT : 0;
+    } else {
+        flock.l_type = F_UNLCK;
+    }
+
+    return copy_to_user(l, &flock, sizeof(flock)) ? -EFAULT : 0;
 }
 
 /* Apply the lock described by l to an open file descriptor.
  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
- * It also emulates flock() in a pretty broken way for older C
- * libraries.
  */
 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
 {
-    int error;
     struct file *filp;
     struct file_lock file_lock;
     struct flock flock;
     struct inode *inode;
+    int error;
 
     /* Get arguments and validate them ...
      */
@@ -319,10 +348,6 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
     if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
         return (-EBADF);
 
-    error = verify_area(VERIFY_READ, l, sizeof(*l));
-    if (error)
-        return (error);
-
     if (!(inode = filp->f_inode))
         return (-EINVAL);
 
@@ -340,22 +365,25 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
         } while (vma != inode->i_mmap);
     }
 
-    copy_from_user(&flock, l, sizeof(flock));
+    if (copy_from_user(&flock, l, sizeof(flock)))
+        return -EFAULT;
     if (!posix_make_lock(filp, &file_lock, &flock))
         return (-EINVAL);
 
     switch (flock.l_type) {
-    case F_RDLCK :
+    case F_RDLCK:
         if (!(filp->f_mode & 1))
             return (-EBADF);
         break;
-    case F_WRLCK :
+    case F_WRLCK:
         if (!(filp->f_mode & 2))
             return (-EBADF);
         break;
-    case F_SHLCK :
-    case F_EXLCK :
-#if 1
+    case F_UNLCK:
+        break;
+    case F_SHLCK:
+    case F_EXLCK:
+#ifdef __sparc__
         /* warn a bit for now, but don't overdo it */
         {
             static int count = 0;
@@ -366,16 +394,20 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
                     current->pid, current->comm);
             }
         }
-#endif
         if (!(filp->f_mode & 3))
             return (-EBADF);
         break;
-    case F_UNLCK :
-        break;
+#endif
     default:
        return -EINVAL;
     }
-
+
+    if (filp->f_op->lock != NULL) {
+        error = filp->f_op->lock(filp->f_inode, filp, cmd, &file_lock);
+        if (error < 0)
+            return error;
+    }
+
     return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
 }
 
@@ -383,48 +415,49 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
  */
 void locks_remove_locks(struct task_struct *task, struct file *filp)
 {
-    struct file_lock *fl;
+    struct file_lock file_lock, *fl;
+    struct file_lock **before;
 
     /* For POSIX locks we free all locks on this file for the given task.
      * For FLOCK we only free locks on this *open* file if it is the last
      * close on that file.
      */
-    if ((fl = filp->f_inode->i_flock) != NULL) {
-        if (fl->fl_flags & FL_POSIX)
-            posix_remove_locks(&filp->f_inode->i_flock, task);
-        else
-            flock_remove_locks(&filp->f_inode->i_flock, filp);
-    }
-
-    return;
-}
-
-static void posix_remove_locks(struct file_lock **before, struct task_struct *task)
-{
-    struct file_lock *fl;
+    before = &filp->f_inode->i_flock;
     while ((fl = *before) != NULL) {
-        if (fl->fl_owner == task)
+        if (((fl->fl_flags & FL_POSIX) && (fl->fl_owner == task)) ||
+            ((fl->fl_flags & FL_FLOCK) && (fl->fl_file == filp) &&
+             (filp->f_count == 1))) {
+            file_lock = *fl;
             locks_delete_lock(before, 0);
-        else
+            if (filp->f_op->lock) {
+                file_lock.fl_type = F_UNLCK;
+                filp->f_op->lock(filp->f_inode, filp,
+                                 F_SETLK, &file_lock);
+                /* List may have changed: */
+                before = &filp->f_inode->i_flock;
+            }
+        } else {
             before = &fl->fl_next;
+        }
     }
     return;
 }
 
-static void flock_remove_locks(struct file_lock **before, struct file *filp)
+struct file_lock *
+posix_test_lock(struct file *filp, struct file_lock *fl)
 {
-    struct file_lock *fl;
+    struct file_lock *cfl;
 
-    while ((fl = *before) != NULL) {
-        if ((fl->fl_file == filp) && (filp->f_count == 1))
-            locks_delete_lock(before, 0);
-        else
-            before = &fl->fl_next;
+    for (cfl = filp->f_inode->i_flock; cfl; cfl = cfl->fl_next) {
+        if (!(cfl->fl_flags & FL_POSIX))
+            continue;
+        if (posix_locks_conflict(cfl, fl))
+            break;
     }
-    return;
+    return cfl;
 }
 
 int locks_verify_locked(struct inode *inode)
@@ -457,13 +490,11 @@ int locks_mandatory_locked(struct inode *inode)
 
     /* Search the lock list for this inode for any POSIX locks.
      */
-    if ((fl = inode->i_flock) == NULL || (fl->fl_flags & FL_FLOCK))
-        return (0);
-
-    while (fl != NULL) {
+    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+        if (!(fl->fl_flags & FL_POSIX))
+            continue;
        if (fl->fl_owner != current)
            return (-EAGAIN);
-        fl = fl->fl_next;
     }
     return (0);
 }
@@ -475,29 +506,23 @@ int locks_mandatory_area(int read_write, struct inode *inode,
     struct file_lock *fl;
     struct file_lock tfl;
 
+    memset(&tfl, 0, sizeof(tfl));
+
     tfl.fl_file = filp;
-    tfl.fl_nextlink = NULL;
-    tfl.fl_prevlink = NULL;
-    tfl.fl_next = NULL;
-    tfl.fl_nextblock = NULL;
-    tfl.fl_prevblock = NULL;
     tfl.fl_flags = FL_POSIX | FL_ACCESS;
     tfl.fl_owner = current;
-    tfl.fl_wait = NULL;
+    tfl.fl_pid = current->pid;
     tfl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
     tfl.fl_start = offset;
     tfl.fl_end = offset + count - 1;
 
 repeat:
-    /* Check that there are locks, and that they're not FL_FLOCK locks.
-     */
-    if ((fl = inode->i_flock) == NULL || (fl->fl_flags & FL_FLOCK))
-        return (0);
-
     /* Search the lock list for this inode for locks that conflict with
      * the proposed read/write.
      */
-    while (fl != NULL) {
+    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+        if (!(fl->fl_flags & FL_POSIX))
+            continue;
         /* Block for writes against a "read" lock,
          * and both reads and writes against a "write" lock.
          */
@@ -506,7 +531,7 @@ repeat:
                 return (-EAGAIN);
             if (current->signal & ~current->blocked)
                 return (-ERESTARTSYS);
-            if (posix_locks_deadlock(current, fl->fl_owner))
+            if (posix_locks_deadlock(&tfl, fl))
                 return (-EDEADLK);
 
             locks_insert_block(fl, &tfl);
@@ -522,7 +547,6 @@ repeat:
                 break;
             goto repeat;
         }
-        fl = fl->fl_next;
     }
     return (0);
 }
@@ -535,37 +559,31 @@ static int posix_make_lock(struct file *filp, struct file_lock *fl,
 {
     off_t start;
 
+    memset(fl, 0, sizeof(*fl));
+
     fl->fl_flags = FL_POSIX;
     switch (l->l_type) {
-    case F_RDLCK :
-    case F_WRLCK :
-    case F_UNLCK :
+    case F_RDLCK:
+    case F_WRLCK:
+    case F_UNLCK:
         fl->fl_type = l->l_type;
         break;
-    case F_SHLCK :
-        fl->fl_type = F_RDLCK;
-        fl->fl_flags |= FL_BROKEN;
-        break;
-    case F_EXLCK :
-        fl->fl_type = F_WRLCK;
-        fl->fl_flags |= FL_BROKEN;
-        break;
-    default :
+    default:
         return (0);
     }
 
     switch (l->l_whence) {
-    case 0 : /*SEEK_SET*/
+    case 0: /*SEEK_SET*/
         start = 0;
         break;
-    case 1 : /*SEEK_CUR*/
+    case 1: /*SEEK_CUR*/
         start = filp->f_pos;
         break;
-    case 2 : /*SEEK_END*/
+    case 2: /*SEEK_END*/
         start = filp->f_inode->i_size;
         break;
-    default :
+    default:
         return (0);
     }
@@ -577,7 +595,7 @@ static int posix_make_lock(struct file *filp, struct file_lock *fl,
 
     fl->fl_file = filp;
     fl->fl_owner = current;
-    fl->fl_wait = NULL;        /* just for cleanliness */
+    fl->fl_pid = current->pid;
 
     return (1);
 }
@@ -588,20 +606,22 @@ static int posix_make_lock(struct file *filp, struct file_lock *fl,
 static int flock_make_lock(struct file *filp, struct file_lock *fl,
                            unsigned int cmd)
 {
+    memset(fl, 0, sizeof(*fl));
+
     if (!filp->f_inode)    /* just in case */
         return (0);
 
     switch (cmd & ~LOCK_NB) {
-    case LOCK_SH :
+    case LOCK_SH:
         fl->fl_type = F_RDLCK;
         break;
-    case LOCK_EX :
+    case LOCK_EX:
         fl->fl_type = F_WRLCK;
         break;
-    case LOCK_UN :
+    case LOCK_UN:
         fl->fl_type = F_UNLCK;
         break;
-    default :
+    default:
         return (0);
     }
@@ -610,7 +630,6 @@ static int flock_make_lock(struct file *filp, struct file_lock *fl,
     fl->fl_end = OFFSET_MAX;
     fl->fl_file = filp;
     fl->fl_owner = NULL;
-    fl->fl_wait = NULL;        /* just for cleanliness */
 
     return (1);
 }
@@ -623,7 +642,8 @@ static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
     /* POSIX locks owned by the same process do not conflict with
      * each other.
      */
-    if (caller_fl->fl_owner == sys_fl->fl_owner)
+    if (!(sys_fl->fl_flags & FL_POSIX) ||
+        locks_same_owner(caller_fl, sys_fl))
         return (0);
 
     return (locks_conflict(caller_fl, sys_fl));
@@ -637,7 +657,8 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
     /* FLOCK locks referring to the same filp do not conflict with
     * each other.
     */
-    if (caller_fl->fl_file == sys_fl->fl_file)
+    if (!(sys_fl->fl_flags & FL_FLOCK) ||
+        (caller_fl->fl_file == sys_fl->fl_file))
        return (0);
 
     return (locks_conflict(caller_fl, sys_fl));
@@ -652,10 +673,10 @@ static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
         return (0);
 
     switch (caller_fl->fl_type) {
-    case F_RDLCK :
+    case F_RDLCK:
         return (sys_fl->fl_type == F_WRLCK);
 
-    case F_WRLCK :
+    case F_WRLCK:
         return (1);
 
     default:
@@ -675,25 +696,39 @@ static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
  * at a time. When we find blocked_task on a wait queue we can re-search
  * with blocked_task equal to that queue's owner, until either blocked_task
  * isn't found, or blocked_task is found on a queue owned by my_task.
+ *
+ * Note: the above assumption may not be true when handling lock requests
+ * from a broken NFS client. But broken NFS clients have a lot more to
+ * worry about than proper deadlock detection anyway... --okir
  */
-static int posix_locks_deadlock(struct task_struct *my_task,
-                                struct task_struct *blocked_task)
+static int posix_locks_deadlock(struct file_lock *caller_fl,
+                                struct file_lock *block_fl)
 {
     struct file_lock *fl;
     struct file_lock *bfl;
+    void *caller_owner, *blocked_owner;
+    unsigned int caller_pid, blocked_pid;
+
+    caller_owner = caller_fl->fl_owner;
+    caller_pid = caller_fl->fl_pid;
+    blocked_owner = block_fl->fl_owner;
+    blocked_pid = block_fl->fl_pid;
 
 next_task:
-    if (my_task == blocked_task)
+    if (caller_owner == blocked_owner && caller_pid == blocked_pid)
         return (1);
     for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
         if (fl->fl_owner == NULL || fl->fl_nextblock == NULL)
             continue;
         for (bfl = fl->fl_nextblock; bfl != fl; bfl = bfl->fl_nextblock) {
-            if (bfl->fl_owner == blocked_task) {
-                if (fl->fl_owner == my_task) {
+            if (bfl->fl_owner == blocked_owner &&
+                bfl->fl_pid == blocked_pid) {
+                if (fl->fl_owner == caller_owner &&
+                    fl->fl_pid == caller_pid) {
                     return (1);
                 }
-                blocked_task = fl->fl_owner;
+                blocked_owner = fl->fl_owner;
+                blocked_pid = fl->fl_pid;
                 goto next_task;
             }
         }
@@ -701,8 +736,9 @@ next_task:
     return (0);
 }
 
-/* Try to create a FLOCK lock on filp. We always insert new locks at
- * the head of the list.
+/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks at
+ * the head of the list, but that's secret knowledge known only to the next
+ * two functions.
  */
 static int flock_lock_file(struct file *filp, struct file_lock *caller,
                            unsigned int wait)
@@ -713,11 +749,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *caller,
     int change = 0;
 
     before = &filp->f_inode->i_flock;
-
-    if ((fl = *before) && (fl->fl_flags & FL_POSIX))
-        return (-EBUSY);
-
-    while ((fl = *before) != NULL) {
+    while (((fl = *before) != NULL) && (fl->fl_flags & FL_FLOCK)) {
         if (caller->fl_file == fl->fl_file) {
             if (caller->fl_type == fl->fl_type)
                 return (0);
@@ -736,39 +768,34 @@ static int flock_lock_file(struct file *filp, struct file_lock *caller,
     if ((new_fl = locks_alloc_lock(caller)) == NULL)
         return (-ENOLCK);
 repeat:
-    if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) {
-        locks_free_lock(new_fl);
-        return (-EBUSY);
-    }
-
-    while (fl != NULL) {
-        if (flock_locks_conflict(new_fl, fl)) {
-            if (!wait) {
-                locks_free_lock(new_fl);
-                return (-EAGAIN);
-            }
-            if (current->signal & ~current->blocked) {
-                /* Note: new_fl is not in any queue at this
-                 * point, so we must use locks_free_lock()
-                 * instead of locks_delete_lock()
-                 *     Dmitry Gorodchanin 09/02/96.
-                 */
-                locks_free_lock(new_fl);
-                return (-ERESTARTSYS);
-            }
-            locks_insert_block(fl, new_fl);
-            interruptible_sleep_on(&new_fl->fl_wait);
-            locks_delete_block(fl, new_fl);
-            if (current->signal & ~current->blocked) {
-                /* Awakened by a signal. Free the new
-                 * lock and return an error.
-                 */
-                locks_free_lock(new_fl);
-                return (-ERESTARTSYS);
-            }
-            goto repeat;
+    for (fl = filp->f_inode->i_flock; (fl != NULL) && (fl->fl_flags & FL_FLOCK);
+         fl = fl->fl_next) {
+        if (!flock_locks_conflict(new_fl, fl))
+            continue;
+        if (!wait) {
+            locks_free_lock(new_fl);
+            return (-EAGAIN);
+        }
+        if (current->signal & ~current->blocked) {
+            /* Note: new_fl is not in any queue at this
+             * point, so we must use locks_free_lock()
+             * instead of locks_delete_lock()
+             *     Dmitry Gorodchanin 09/02/96.
+             */
+            locks_free_lock(new_fl);
+            return (-ERESTARTSYS);
+        }
+        locks_insert_block(fl, new_fl);
+        interruptible_sleep_on(&new_fl->fl_wait);
+        locks_delete_block(fl, new_fl);
+        if (current->signal & ~current->blocked) {
+            /* Awakened by a signal. Free the new
+             * lock and return an error.
+             */
+            locks_free_lock(new_fl);
+            return (-ERESTARTSYS);
         }
-        fl = fl->fl_next;
+        goto repeat;
     }
     locks_insert_lock(&filp->f_inode->i_flock, new_fl);
     return (0);
@@ -786,7 +813,7 @@ repeat:
  * To all purists: Yes, I use a few goto's. Just pass on to the next function.
  */
-static int posix_lock_file(struct file *filp, struct file_lock *caller,
+int posix_lock_file(struct file *filp, struct file_lock *caller,
                     unsigned int wait)
 {
     struct file_lock *fl;
@@ -796,27 +823,25 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
     struct file_lock **before;
     int added = 0;
 
-repeat:
-    if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_FLOCK))
-        return (-EBUSY);
-
     if (caller->fl_type != F_UNLCK) {
-        while (fl != NULL) {
-            if (posix_locks_conflict(caller, fl)) {
-                if (!wait)
-                    return (-EAGAIN);
-                if (current->signal & ~current->blocked)
-                    return (-ERESTARTSYS);
-                if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
-                    return (-EDEADLK);
-                locks_insert_block(fl, caller);
-                interruptible_sleep_on(&caller->fl_wait);
-                locks_delete_block(fl, caller);
-                if (current->signal & ~current->blocked)
-                    return (-ERESTARTSYS);
-                goto repeat;
-            }
-            fl = fl->fl_next;
+  repeat:
+        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
+            if (!(fl->fl_flags & FL_POSIX))
+                continue;
+            if (!posix_locks_conflict(caller, fl))
+                continue;
+            if (!wait)
+                return (-EAGAIN);
+            if (current->signal & ~current->blocked)
+                return (-ERESTARTSYS);
+            if (posix_locks_deadlock(caller, fl))
+                return (-EDEADLK);
+            locks_insert_block(fl, caller);
+            interruptible_sleep_on(&caller->fl_wait);
+            locks_delete_block(fl, caller);
+            if (current->signal & ~current->blocked)
+                return (-ERESTARTSYS);
+            goto repeat;
         }
     }
@@ -827,13 +852,14 @@ repeat:
 
     /* First skip locks owned by other processes.
      */
-    while ((fl = *before) && (caller->fl_owner != fl->fl_owner)) {
+    while ((fl = *before) && (!(fl->fl_flags & FL_POSIX) ||
+                              !locks_same_owner(caller, fl))) {
         before = &fl->fl_next;
     }
 
     /* Process locks with this owner.
     */
-    while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {
+    while ((fl = *before) && locks_same_owner(caller, fl)) {
         /* Detect adjacent or overlapping regions (if same lock type)
          */
         if (caller->fl_type == fl->fl_type) {
@@ -901,6 +927,7 @@ repeat:
             fl->fl_start = caller->fl_start;
             fl->fl_end = caller->fl_end;
             fl->fl_type = caller->fl_type;
+            fl->fl_u = caller->fl_u;
             caller = fl;
             added = 1;
         }
@@ -954,18 +981,17 @@ static struct file_lock *locks_alloc_lock(struct file_lock *fl)
                           GFP_ATOMIC)) == NULL)
         return (tmp);
 
-    tmp->fl_nextlink = NULL;
-    tmp->fl_prevlink = NULL;
-    tmp->fl_next = NULL;
-    tmp->fl_nextblock = NULL;
-    tmp->fl_prevblock = NULL;
+    memset(tmp, 0, sizeof(*tmp));
+
    tmp->fl_flags = fl->fl_flags;
     tmp->fl_owner = fl->fl_owner;
+    tmp->fl_pid = fl->fl_pid;
     tmp->fl_file = fl->fl_file;
-    tmp->fl_wait = NULL;
     tmp->fl_type = fl->fl_type;
     tmp->fl_start = fl->fl_start;
     tmp->fl_end = fl->fl_end;
+    tmp->fl_notify = fl->fl_notify;
+    tmp->fl_u = fl->fl_u;
 
     return (tmp);
 }
@@ -1027,8 +1053,7 @@ static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx)
     p += sprintf(p, "%d:%s ", id, pfx);
     if (fl->fl_flags & FL_POSIX) {
         p += sprintf(p, "%s %s ",
-                 (fl->fl_flags & FL_ACCESS) ? "ACCESS" :
-                 ((fl->fl_flags & FL_BROKEN) ? "BROKEN" : "POSIX "),
+                 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX",
                  (IS_MANDLOCK(inode) &&
                   (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
                  "MANDATORY" : "ADVISORY ");
@@ -1038,7 +1063,7 @@ static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx)
     }
     p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
     p += sprintf(p, "%d %s:%ld %ld %ld ",
-             fl->fl_owner ? fl->fl_owner->pid : 0,
+             fl->fl_pid,
             kdevname(inode->i_dev), inode->i_ino, fl->fl_start,
             fl->fl_end);
     p += sprintf(p, "%08lx %08lx %08lx %08lx %08lx\n",
@@ -1063,6 +1088,6 @@ int get_locks_status(char *buf)
                 p = lock_get_status(bfl, p, i, " ->");
             } while ((bfl = bfl->fl_nextblock) != fl);
         }
-        return (p - buf);
+    return (p - buf);
 }
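
For readers following the fcntl() paths that this patch reworks (fcntl_getlk() and fcntl_setlk()), here is a minimal userspace sketch of the interface they implement. The file name is invented for illustration and error handling is abbreviated; note that the l_pid reported by F_GETLK is now taken from the conflicting lock's fl_pid field rather than from fl_owner->pid.

/* Minimal sketch of the fcntl() byte-range locking API served by
 * fcntl_getlk()/fcntl_setlk().  "/tmp/lockdemo" is a made-up path. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	/* Try to take a write lock on the first 100 bytes (F_SETLK path). */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;	/* whence 0, handled in posix_make_lock() */
	fl.l_start = 0;
	fl.l_len = 100;
	if (fcntl(fd, F_SETLK, &fl) < 0) {
		/* Conflict: ask who holds it (F_GETLK path).  The kernel
		 * overwrites fl with the first conflicting lock, or sets
		 * l_type to F_UNLCK if the conflict has gone away since. */
		fcntl(fd, F_GETLK, &fl);
		if (fl.l_type != F_UNLCK)
			printf("conflict with pid %d\n", (int) fl.l_pid);
	}

	/* Release the region again. */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}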
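The interfaces this patch exports (file_lock_table made non-static, posix_lock_file(), posix_test_lock(), posix_block_lock(), posix_unblock_lock(), and the fl_notify callback fired from locks_wake_up_blocks()) are there so that lockd can manage POSIX locks for remote NFS clients. The sketch below shows one plausible way an in-kernel lock manager could drive them; it is not taken from lockd itself. grant_blocked_lock() and demo_apply_remote_lock() are hypothetical names, the request is assumed to arrive with fl_owner, fl_pid, type and range already filled in, and the exported functions are assumed to be declared in a visible header.

/* Sketch only: an in-kernel consumer of the lock helpers added here. */
#include <linux/fs.h>
#include <linux/errno.h>

/* Hypothetical callback: invoked from locks_wake_up_blocks() through
 * fl_notify when the blocking lock is released, instead of waking a
 * sleeping process. */
static void grant_blocked_lock(struct file_lock *waiter)
{
	/* A real lock manager would retry the request here and then
	 * tell the remote client that its lock has been granted. */
}

/* Hypothetical entry point for a remote lock request. */
int demo_apply_remote_lock(struct file *filp, struct file_lock *req)
{
	struct file_lock *conflict;
	int error;

	req->fl_flags = FL_POSIX;
	req->fl_notify = grant_blocked_lock;

	/* Non-blocking attempt: wait == 0 makes posix_lock_file()
	 * return -EAGAIN on conflict instead of sleeping. */
	error = posix_lock_file(filp, req, 0);
	if (error != -EAGAIN)
		return error;

	/* Park the request on the blocker's circular block list so that
	 * fl_notify fires once the blocker goes away.  If the client
	 * cancels later, posix_unblock_lock(req) removes it again. */
	conflict = posix_test_lock(filp, req);
	if (conflict != NULL)
		posix_block_lock(conflict, req);
	return -EAGAIN;
}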
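The other half of the patch, the sys_flock()/flock_lock_file() path, serves BSD-style whole-file locks that belong to the open file rather than to the owning process. A small userspace illustration of that behaviour (path name invented):

/* flock() sketch: LOCK_NB turns a conflicting request into an
 * immediate EWOULDBLOCK instead of sleeping on the block list. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/flockdemo", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	if (flock(fd, LOCK_EX | LOCK_NB) < 0)
		perror("flock");	/* another open file holds the lock */
	else
		flock(fd, LOCK_UN);

	close(fd);
	return 0;
}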