path: root/ipc
author     Ralf Baechle <ralf@linux-mips.org>    2000-02-16 01:07:24 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-02-16 01:07:24 +0000
commit     95db6b748fc86297827fbd9c9ef174d491c9ad89 (patch)
tree       27a92a942821cde1edda9a1b088718d436b3efe4 /ipc
parent     45b27b0a0652331d104c953a5b192d843fff88f8 (diff)
Merge with Linux 2.3.40.
Diffstat (limited to 'ipc')
-rw-r--r--   ipc/msg.c    167
-rw-r--r--   ipc/sem.c    160
-rw-r--r--   ipc/shm.c    245
-rw-r--r--   ipc/util.c    48
-rw-r--r--   ipc/util.h    21
5 files changed, 465 insertions, 176 deletions
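
The common thread of the changes below is the split between the kernel's internal bookkeeping (kern_ipc_perm, sem_array, a flattened shmid_kernel) and the user-visible ABI, which is now selected per call by a version flag or-ed into the msgctl/semctl/shmctl command and stripped by ipc_parse_version() (see the ipc/util.c hunk). The stand-alone sketch below mirrors that dispatch in plain C; the numeric IPC_OLD/IPC_64 values follow the usual <linux/ipc.h> convention and should be treated as assumptions here, and the main() driver is purely illustrative.

#include <stdio.h>
#include <sys/ipc.h>             /* IPC_STAT and friends */

/* Assumed mirrors of the kernel's version flags (cf. ipc_parse_version below). */
#define IPC_OLD 0                /* legacy msqid_ds / semid_ds / shmid_ds layout */
#define IPC_64  0x0100           /* new layout, or-ed into the command word */

/* Same shape as ipc_parse_version(): strip the flag, report which ABI to use. */
static int parse_version(int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	}
	return IPC_OLD;
}

int main(void)
{
	int cmd = IPC_STAT | IPC_64;
	int version = parse_version(&cmd);

	printf("cmd=%d version=%s\n", cmd,
	       version == IPC_64 ? "IPC_64 (msqid64_ds)" : "IPC_OLD (msqid_ds)");
	return 0;
}
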
diff --git a/ipc/msg.c b/ipc/msg.c
index 2cb913925..de2c406b9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -66,15 +66,15 @@ struct msg_msg {
/* one msq_queue structure for each present queue on the system */
struct msg_queue {
- struct ipc_perm q_perm;
- __kernel_time_t q_stime; /* last msgsnd time */
- __kernel_time_t q_rtime; /* last msgrcv time */
- __kernel_time_t q_ctime; /* last change time */
- unsigned int q_cbytes; /* current number of bytes on queue */
- unsigned int q_qnum; /* number of messages in queue */
- unsigned int q_qbytes; /* max number of bytes on queue */
- __kernel_ipc_pid_t q_lspid; /* pid of last msgsnd */
- __kernel_ipc_pid_t q_lrpid; /* last receive pid */
+ struct kern_ipc_perm q_perm;
+ time_t q_stime; /* last msgsnd time */
+ time_t q_rtime; /* last msgrcv time */
+ time_t q_ctime; /* last change time */
+ unsigned long q_cbytes; /* current number of bytes on queue */
+ unsigned long q_qnum; /* number of messages in queue */
+ unsigned long q_qbytes; /* max number of bytes on queue */
+ pid_t q_lspid; /* pid of last msgsnd */
+ pid_t q_lrpid; /* last receive pid */
struct list_head q_messages;
struct list_head q_receivers;
@@ -329,16 +329,109 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
return ret;
}
+static inline unsigned long copy_msqid_to_user(void *buf, struct msqid64_ds *in, int version)
+{
+ switch(version) {
+ case IPC_64:
+ return copy_to_user (buf, in, sizeof(*in));
+ case IPC_OLD:
+ {
+ struct msqid_ds out;
+
+ memset(&out,0,sizeof(out));
+
+ ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
+
+ out.msg_stime = in->msg_stime;
+ out.msg_rtime = in->msg_rtime;
+ out.msg_ctime = in->msg_ctime;
+
+ if(in->msg_cbytes > USHRT_MAX)
+ out.msg_cbytes = USHRT_MAX;
+ else
+ out.msg_cbytes = in->msg_cbytes;
+ out.msg_lcbytes = in->msg_cbytes;
+
+ if(in->msg_qnum > USHRT_MAX)
+ out.msg_qnum = USHRT_MAX;
+ else
+ out.msg_qnum = in->msg_qnum;
+
+ if(in->msg_qbytes > USHRT_MAX)
+ out.msg_qbytes = USHRT_MAX;
+ else
+ out.msg_qbytes = in->msg_qbytes;
+ out.msg_lqbytes = in->msg_qbytes;
+
+ out.msg_lspid = in->msg_lspid;
+ out.msg_lrpid = in->msg_lrpid;
+
+ return copy_to_user (buf, &out, sizeof(out));
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+struct msq_setbuf {
+ unsigned long qbytes;
+ uid_t uid;
+ gid_t gid;
+ mode_t mode;
+};
+
+static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void *buf, int version)
+{
+ switch(version) {
+ case IPC_64:
+ {
+ struct msqid64_ds tbuf;
+
+ if (copy_from_user (&tbuf, buf, sizeof (tbuf)))
+ return -EFAULT;
+
+ out->qbytes = tbuf.msg_qbytes;
+ out->uid = tbuf.msg_perm.uid;
+ out->gid = tbuf.msg_perm.gid;
+ out->mode = tbuf.msg_perm.mode;
+
+ return 0;
+ }
+ case IPC_OLD:
+ {
+ struct msqid_ds tbuf_old;
+
+ if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old)))
+ return -EFAULT;
+
+ out->uid = tbuf_old.msg_perm.uid;
+ out->gid = tbuf_old.msg_perm.gid;
+ out->mode = tbuf_old.msg_perm.mode;
+
+ if(tbuf_old.msg_qbytes == 0)
+ out->qbytes = tbuf_old.msg_lqbytes;
+ else
+ out->qbytes = tbuf_old.msg_qbytes;
+
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
- int err;
+ int err, version;
struct msg_queue *msq;
- struct msqid_ds tbuf;
- struct ipc_perm *ipcp;
+ struct msq_setbuf setbuf;
+ struct kern_ipc_perm *ipcp;
if (msqid < 0 || cmd < 0)
return -EINVAL;
+ version = ipc_parse_version(&cmd);
+
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
@@ -376,12 +469,15 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
case MSG_STAT:
case IPC_STAT:
{
+ struct msqid64_ds tbuf;
int success_return;
if (!buf)
return -EFAULT;
if(cmd == MSG_STAT && msqid > msg_ids.size)
return -EINVAL;
+ memset(&tbuf,0,sizeof(tbuf));
+
msq = msg_lock(msqid);
if (msq == NULL)
return -EINVAL;
@@ -398,40 +494,24 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
if (ipcperms (&msq->q_perm, S_IRUGO))
goto out_unlock;
- memset(&tbuf,0,sizeof(tbuf));
- tbuf.msg_perm = msq->q_perm;
- /* tbuf.msg_{first,last}: not reported.*/
+ kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
tbuf.msg_stime = msq->q_stime;
tbuf.msg_rtime = msq->q_rtime;
tbuf.msg_ctime = msq->q_ctime;
- if(msq->q_cbytes > USHRT_MAX)
- tbuf.msg_cbytes = USHRT_MAX;
- else
- tbuf.msg_cbytes = msq->q_cbytes;
- tbuf.msg_lcbytes = msq->q_cbytes;
-
- if(msq->q_qnum > USHRT_MAX)
- tbuf.msg_qnum = USHRT_MAX;
- else
- tbuf.msg_qnum = msq->q_qnum;
-
- if(msq->q_qbytes > USHRT_MAX)
- tbuf.msg_qbytes = USHRT_MAX;
- else
- tbuf.msg_qbytes = msq->q_qbytes;
- tbuf.msg_lqbytes = msq->q_qbytes;
-
+ tbuf.msg_cbytes = msq->q_cbytes;
+ tbuf.msg_qnum = msq->q_qnum;
+ tbuf.msg_qbytes = msq->q_qbytes;
tbuf.msg_lspid = msq->q_lspid;
tbuf.msg_lrpid = msq->q_lrpid;
msg_unlock(msqid);
- if (copy_to_user (buf, &tbuf, sizeof(*buf)))
+ if (copy_msqid_to_user(buf, &tbuf, version))
return -EFAULT;
return success_return;
}
case IPC_SET:
if (!buf)
return -EFAULT;
- if (copy_from_user (&tbuf, buf, sizeof (*buf)))
+ if (copy_msqid_from_user (&setbuf, buf, version))
return -EFAULT;
break;
case IPC_RMID:
@@ -459,19 +539,14 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
switch (cmd) {
case IPC_SET:
{
- int newqbytes;
- if(tbuf.msg_qbytes == 0)
- newqbytes = tbuf.msg_lqbytes;
- else
- newqbytes = tbuf.msg_qbytes;
- if (newqbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
+ if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
goto out_unlock_up;
- msq->q_qbytes = newqbytes;
+ msq->q_qbytes = setbuf.qbytes;
- ipcp->uid = tbuf.msg_perm.uid;
- ipcp->gid = tbuf.msg_perm.gid;
+ ipcp->uid = setbuf.uid;
+ ipcp->gid = setbuf.gid;
ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
- (S_IRWXUGO & tbuf.msg_perm.mode);
+ (S_IRWXUGO & setbuf.mode);
msq->q_ctime = CURRENT_TIME;
/* sleeping receivers might be excluded by
* stricter permissions.
@@ -776,13 +851,13 @@ static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int l
int i, len = 0;
down(&msg_ids.sem);
- len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");
+ len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");
for(i = 0; i <= msg_ids.max_id; i++) {
struct msg_queue * msq;
msq = msg_lock(i);
if(msq != NULL) {
- len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
+ len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
msq->q_perm.key,
msg_buildid(i,msq->q_perm.seq),
msq->q_perm.mode,
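
When reporting back through the old ABI, the byte and message counts no longer fit the 16-bit fields of struct msqid_ds, so copy_msqid_to_user() above clamps them to USHRT_MAX and carries the full values in the msg_lcbytes/msg_lqbytes longs. A minimal sketch of that clamping, with simplified stand-in struct and field names rather than the kernel's:

#include <stdio.h>
#include <limits.h>   /* USHRT_MAX */

/* Stand-in for the legacy count fields of struct msqid_ds (names simplified). */
struct old_msq_counts {
	unsigned short msg_cbytes;    /* clamped 16-bit copy */
	unsigned long  msg_lcbytes;   /* full value, like the kernel's "l" field */
};

/* Clamp a kernel-sized count into the 16-bit field, keep the long copy intact. */
static void report_old_abi(unsigned long cbytes, struct old_msq_counts *out)
{
	out->msg_cbytes  = cbytes > USHRT_MAX ? USHRT_MAX : (unsigned short)cbytes;
	out->msg_lcbytes = cbytes;
}

int main(void)
{
	struct old_msq_counts out;

	report_old_abi(100000UL, &out);   /* larger than 65535, so it clamps */
	printf("msg_cbytes=%u msg_lcbytes=%lu\n", out.msg_cbytes, out.msg_lcbytes);
	return 0;
}
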
diff --git a/ipc/sem.c b/ipc/sem.c
index 103761a2b..1dfdc92c0 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -64,9 +64,9 @@
#include "util.h"
-#define sem_lock(id) ((struct semid_ds*)ipc_lock(&sem_ids,id))
+#define sem_lock(id) ((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(id) ipc_unlock(&sem_ids,id)
-#define sem_rmid(id) ((struct semid_ds*)ipc_rmid(&sem_ids,id))
+#define sem_rmid(id) ((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid) \
ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
@@ -85,8 +85,8 @@ static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int l
/*
* linked list protection:
* sem_undo.id_next,
- * semid_ds.sem_pending{,last},
- * semid_ds.sem_undo: sem_lock() for read/write
+ * sem_array.sem_pending{,last},
+ * sem_array.sem_undo: sem_lock() for read/write
* sem_undo.proc_next: only "current" is allowed to read/write that field.
*
*/
@@ -112,7 +112,7 @@ void __init sem_init (void)
static int newary (key_t key, int nsems, int semflg)
{
int id;
- struct semid_ds *sma;
+ struct sem_array *sma;
int size;
if (!nsems)
@@ -121,7 +121,7 @@ static int newary (key_t key, int nsems, int semflg)
return -ENOSPC;
size = sizeof (*sma) + nsems * sizeof (struct sem);
- sma = (struct semid_ds *) ipc_alloc(size);
+ sma = (struct sem_array *) ipc_alloc(size);
if (!sma) {
return -ENOMEM;
}
@@ -150,7 +150,7 @@ static int newary (key_t key, int nsems, int semflg)
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
int id, err = -EINVAL;
- struct semid_ds *sma;
+ struct sem_array *sma;
if (nsems < 0 || nsems > sc_semmsl)
return -EINVAL;
@@ -183,9 +183,9 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
}
/* doesn't acquire the sem_lock on error! */
-static int sem_revalidate(int semid, struct semid_ds* sma, int nsems, short flg)
+static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
{
- struct semid_ds* smanew;
+ struct sem_array* smanew;
smanew = sem_lock(semid);
if(smanew==NULL)
@@ -204,14 +204,14 @@ static int sem_revalidate(int semid, struct semid_ds* sma, int nsems, short flg)
/* Manage the doubly linked list sma->sem_pending as a FIFO:
* insert new queue elements at the tail sma->sem_pending_last.
*/
-static inline void append_to_queue (struct semid_ds * sma,
+static inline void append_to_queue (struct sem_array * sma,
struct sem_queue * q)
{
*(q->prev = sma->sem_pending_last) = q;
*(sma->sem_pending_last = &q->next) = NULL;
}
-static inline void prepend_to_queue (struct semid_ds * sma,
+static inline void prepend_to_queue (struct sem_array * sma,
struct sem_queue * q)
{
q->next = sma->sem_pending;
@@ -222,7 +222,7 @@ static inline void prepend_to_queue (struct semid_ds * sma,
sma->sem_pending_last = &q->next;
}
-static inline void remove_from_queue (struct semid_ds * sma,
+static inline void remove_from_queue (struct sem_array * sma,
struct sem_queue * q)
{
*(q->prev) = q->next;
@@ -238,7 +238,7 @@ static inline void remove_from_queue (struct semid_ds * sma,
* all at once. Return 0 if yes, 1 if need to sleep, else return error code.
*/
-static int try_atomic_semop (struct semid_ds * sma, struct sembuf * sops,
+static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
int nsops, struct sem_undo *un, int pid,
int do_undo)
{
@@ -301,7 +301,7 @@ undo:
/* Go through the pending queue for the indicated semaphore
* looking for tasks that can be completed.
*/
-static void update_queue (struct semid_ds * sma)
+static void update_queue (struct sem_array * sma)
{
int error;
struct sem_queue * q;
@@ -338,7 +338,7 @@ static void update_queue (struct semid_ds * sma)
* The counts we return here are a rough approximation, but still
* warrant that semncnt+semzcnt>0 if the task is on the pending queue.
*/
-static int count_semncnt (struct semid_ds * sma, ushort semnum)
+static int count_semncnt (struct sem_array * sma, ushort semnum)
{
int semncnt;
struct sem_queue * q;
@@ -356,7 +356,7 @@ static int count_semncnt (struct semid_ds * sma, ushort semnum)
}
return semncnt;
}
-static int count_semzcnt (struct semid_ds * sma, ushort semnum)
+static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
int semzcnt;
struct sem_queue * q;
@@ -378,7 +378,7 @@ static int count_semzcnt (struct semid_ds * sma, ushort semnum)
/* Free a semaphore set. */
static void freeary (int id)
{
- struct semid_ds *sma;
+ struct sem_array *sma;
struct sem_undo *un;
struct sem_queue *q;
int size;
@@ -405,7 +405,29 @@ static void freeary (int id)
ipc_free(sma, size);
}
-int semctl_nolock(int semid, int semnum, int cmd, union semun arg)
+static unsigned long copy_semid_to_user(void *buf, struct semid64_ds *in, int version)
+{
+ switch(version) {
+ case IPC_64:
+ return copy_to_user(buf, in, sizeof(*in));
+ case IPC_OLD:
+ {
+ struct semid_ds out;
+
+ ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
+
+ out.sem_otime = in->sem_otime;
+ out.sem_ctime = in->sem_ctime;
+ out.sem_nsems = in->sem_nsems;
+
+ return copy_to_user(buf, &out, sizeof(out));
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
int err = -EINVAL;
@@ -441,13 +463,15 @@ int semctl_nolock(int semid, int semnum, int cmd, union semun arg)
}
case SEM_STAT:
{
- struct semid_ds *sma;
- struct semid_ds tbuf;
+ struct sem_array *sma;
+ struct semid64_ds tbuf;
int id;
if(semid > sem_ids.size)
return -EINVAL;
+ memset(&tbuf,0,sizeof(tbuf));
+
sma = sem_lock(semid);
if(sma == NULL)
return -EINVAL;
@@ -457,13 +481,12 @@ int semctl_nolock(int semid, int semnum, int cmd, union semun arg)
goto out_unlock;
id = sem_buildid(semid, sma->sem_perm.seq);
- memset(&tbuf,0,sizeof(tbuf));
- tbuf.sem_perm = sma->sem_perm;
+ kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
tbuf.sem_otime = sma->sem_otime;
tbuf.sem_ctime = sma->sem_ctime;
tbuf.sem_nsems = sma->sem_nsems;
sem_unlock(semid);
- if (copy_to_user (arg.buf, &tbuf, sizeof(tbuf)))
+ if (copy_semid_to_user (arg.buf, &tbuf, version))
return -EFAULT;
return id;
}
@@ -476,9 +499,9 @@ out_unlock:
return err;
}
-int semctl_main(int semid, int semnum, int cmd, union semun arg)
+int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
- struct semid_ds *sma;
+ struct sem_array *sma;
struct sem* curr;
int err;
ushort fast_sem_io[SEMMSL_FAST];
@@ -564,14 +587,14 @@ int semctl_main(int semid, int semnum, int cmd, union semun arg)
}
case IPC_STAT:
{
- struct semid_ds tbuf;
+ struct semid64_ds tbuf;
memset(&tbuf,0,sizeof(tbuf));
- tbuf.sem_perm = sma->sem_perm;
+ kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
tbuf.sem_otime = sma->sem_otime;
tbuf.sem_ctime = sma->sem_ctime;
tbuf.sem_nsems = sma->sem_nsems;
sem_unlock(semid);
- if (copy_to_user (arg.buf, &tbuf, sizeof(tbuf)))
+ if (copy_semid_to_user (arg.buf, &tbuf, version))
return -EFAULT;
return 0;
}
@@ -622,15 +645,55 @@ out_free:
return err;
}
-int semctl_down(int semid, int semnum, int cmd, union semun arg)
+struct sem_setbuf {
+ uid_t uid;
+ gid_t gid;
+ mode_t mode;
+};
+
+static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void *buf, int version)
+{
+ switch(version) {
+ case IPC_64:
+ {
+ struct semid64_ds tbuf;
+
+ if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
+ return -EFAULT;
+
+ out->uid = tbuf.sem_perm.uid;
+ out->gid = tbuf.sem_perm.gid;
+ out->mode = tbuf.sem_perm.mode;
+
+ return 0;
+ }
+ case IPC_OLD:
+ {
+ struct semid_ds tbuf_old;
+
+ if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
+ return -EFAULT;
+
+ out->uid = tbuf_old.sem_perm.uid;
+ out->gid = tbuf_old.sem_perm.gid;
+ out->mode = tbuf_old.sem_perm.mode;
+
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
- struct semid_ds *sma;
+ struct sem_array *sma;
int err;
- struct semid_ds tbuf;
- struct ipc_perm *ipcp;
+ struct sem_setbuf setbuf;
+ struct kern_ipc_perm *ipcp;
if(cmd == IPC_SET) {
- if(copy_from_user (&tbuf, arg.buf, sizeof (tbuf)))
+ if(copy_semid_from_user (&setbuf, arg.buf, version))
return -EFAULT;
}
sma = sem_lock(semid);
@@ -655,10 +718,10 @@ int semctl_down(int semid, int semnum, int cmd, union semun arg)
err = 0;
break;
case IPC_SET:
- ipcp->uid = tbuf.sem_perm.uid;
- ipcp->gid = tbuf.sem_perm.gid;
+ ipcp->uid = setbuf.uid;
+ ipcp->gid = setbuf.gid;
ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
- | (tbuf.sem_perm.mode & S_IRWXUGO);
+ | (setbuf.mode & S_IRWXUGO);
sma->sem_ctime = CURRENT_TIME;
sem_unlock(semid);
err = 0;
@@ -678,15 +741,18 @@ out_unlock:
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
int err = -EINVAL;
+ int version;
if (semid < 0)
return -EINVAL;
+ version = ipc_parse_version(&cmd);
+
switch(cmd) {
case IPC_INFO:
case SEM_INFO:
case SEM_STAT:
- err = semctl_nolock(semid,semnum,cmd,arg);
+ err = semctl_nolock(semid,semnum,cmd,version,arg);
return err;
case GETALL:
case GETVAL:
@@ -696,12 +762,12 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
case IPC_STAT:
case SETVAL:
case SETALL:
- err = semctl_main(semid,semnum,cmd,arg);
+ err = semctl_main(semid,semnum,cmd,version,arg);
return err;
case IPC_RMID:
case IPC_SET:
down(&sem_ids.sem);
- err = semctl_down(semid,semnum,cmd,arg);
+ err = semctl_down(semid,semnum,cmd,version,arg);
up(&sem_ids.sem);
return err;
default:
@@ -709,7 +775,7 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
}
}
-static struct sem_undo* freeundos(struct semid_ds *sma, struct sem_undo* un)
+static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
{
struct sem_undo* u;
struct sem_undo** up;
@@ -727,7 +793,7 @@ static struct sem_undo* freeundos(struct semid_ds *sma, struct sem_undo* un)
}
/* returns without sem_lock on error! */
-static int alloc_undo(struct semid_ds *sma, struct sem_undo** unp, int semid, int alter)
+static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
{
int size, nsems, error;
struct sem_undo *un;
@@ -760,7 +826,7 @@ static int alloc_undo(struct semid_ds *sma, struct sem_undo** unp, int semid, in
asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
int error = -EINVAL;
- struct semid_ds *sma;
+ struct sem_array *sma;
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
@@ -846,7 +912,7 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
current->semsleeping = &queue;
for (;;) {
- struct semid_ds* tmp;
+ struct sem_array* tmp;
queue.status = -EINTR;
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
@@ -913,7 +979,7 @@ void sem_exit (void)
{
struct sem_queue *q;
struct sem_undo *u, *un = NULL, **up, **unp;
- struct semid_ds *sma;
+ struct sem_array *sma;
int nsems, i;
/* If the current process was sleeping for a semaphore,
@@ -981,14 +1047,14 @@ static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int l
off_t begin = 0;
int i, len = 0;
- len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n");
+ len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n");
down(&sem_ids.sem);
for(i = 0; i <= sem_ids.max_id; i++) {
- struct semid_ds *sma;
+ struct sem_array *sma;
sma = sem_lock(i);
if(sma) {
- len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %10lu %10lu\n",
+ len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sem_buildid(i,sma->sem_perm.seq),
sma->sem_perm.mode,
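
sys_semctl() above still takes the caller-supplied union semun, and on the user side that union is, per the usual semctl(2) convention, defined by the application itself. A small usage sketch under that assumption, with error handling trimmed to the essentials:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* By convention the calling program defines union semun itself. */
union semun {
	int              val;
	struct semid_ds *buf;
	unsigned short  *array;
};

int main(void)
{
	union semun arg;
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) {
		perror("semget");
		return 1;
	}
	arg.val = 3;
	semctl(id, 0, SETVAL, arg);                   /* set semaphore 0 to 3 */
	printf("value=%d\n", semctl(id, 0, GETVAL));  /* read it back */
	semctl(id, 0, IPC_RMID);                      /* remove the set */
	return 0;
}
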
diff --git a/ipc/shm.c b/ipc/shm.c
index 4d70545ab..8b4976195 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -29,9 +29,16 @@
#include "util.h"
-struct shmid_kernel /* extend struct shmis_ds with private fields */
+struct shmid_kernel /* private to the kernel */
{
- struct shmid_ds u;
+ struct kern_ipc_perm shm_perm;
+ size_t shm_segsz;
+ time_t shm_atime;
+ time_t shm_dtime;
+ time_t shm_ctime;
+ pid_t shm_cpid;
+ pid_t shm_lpid;
+ unsigned long shm_nattch;
unsigned long shm_npages; /* size of segment (pages) */
pte_t **shm_dir; /* ptr to array of ptrs to frames -> SHMMAX */
struct vm_area_struct *attaches; /* descriptors for attaches */
@@ -48,7 +55,7 @@ static struct ipc_ids shm_ids;
#define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_rmid(id) ((struct shmid_kernel*)ipc_rmid(&shm_ids,id))
#define shm_checkid(s, id) \
- ipc_checkid(&shm_ids,&s->u.shm_perm,id)
+ ipc_checkid(&shm_ids,&s->shm_perm,id)
#define shm_buildid(id, seq) \
ipc_buildid(&shm_ids, id, seq)
@@ -89,7 +96,6 @@ static int shm_swp = 0; /* number of shared memory pages that are in swap */
/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
-static ulong used_segs = 0;
void __init shm_init (void)
{
@@ -166,7 +172,7 @@ static int shm_revalidate(struct shmid_kernel* shp, int shmid, int pagecount, in
shm_unlock(shmid);
return -EIDRM;
}
- if (ipcperms(&shp->u.shm_perm, flg)) {
+ if (ipcperms(&shp->shm_perm, flg)) {
shm_unlock(shmid);
return -EACCES;
}
@@ -196,28 +202,28 @@ static int newseg (key_t key, int shmflg, size_t size)
kfree(shp);
return -ENOMEM;
}
- id = ipc_addid(&shm_ids, &shp->u.shm_perm, shm_ctlmni);
+ id = ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
if(id == -1) {
shm_free(shp->shm_dir,numpages);
kfree(shp);
return -ENOSPC;
}
- shp->u.shm_perm.key = key;
- shp->u.shm_perm.mode = (shmflg & S_IRWXUGO);
- shp->u.shm_segsz = size;
- shp->u.shm_cpid = current->pid;
+ shp->shm_perm.key = key;
+ shp->shm_perm.mode = (shmflg & S_IRWXUGO);
+ shp->shm_segsz = size;
+ shp->shm_cpid = current->pid;
shp->attaches = NULL;
- shp->u.shm_lpid = shp->u.shm_nattch = 0;
- shp->u.shm_atime = shp->u.shm_dtime = 0;
- shp->u.shm_ctime = CURRENT_TIME;
+ shp->shm_lpid = shp->shm_nattch = 0;
+ shp->shm_atime = shp->shm_dtime = 0;
+ shp->shm_ctime = CURRENT_TIME;
shp->shm_npages = numpages;
- shp->id = shm_buildid(id,shp->u.shm_perm.seq);
+ shp->id = shm_buildid(id,shp->shm_perm.seq);
init_MUTEX(&shp->sem);
shm_tot += numpages;
shm_unlock(id);
- return shm_buildid(id,shp->u.shm_perm.seq);
+ return shm_buildid(id,shp->shm_perm.seq);
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
@@ -239,10 +245,10 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
shp = shm_lock(id);
if(shp==NULL)
BUG();
- if (ipcperms(&shp->u.shm_perm, shmflg))
+ if (ipcperms(&shp->shm_perm, shmflg))
err = -EACCES;
else
- err = shm_buildid(id, shp->u.shm_perm.seq);
+ err = shm_buildid(id, shp->shm_perm.seq);
shm_unlock(id);
}
up(&shm_ids.sem);
@@ -266,8 +272,8 @@ out_up:
up(&shm_ids.sem);
return;
}
- if(shm_checkid(shp,shmid) || shp->u.shm_nattch > 0 ||
- !(shp->u.shm_perm.mode & SHM_DEST)) {
+ if(shm_checkid(shp,shmid) || shp->shm_nattch > 0 ||
+ !(shp->shm_perm.mode & SHM_DEST)) {
shm_unlock(shmid);
goto out_up;
}
@@ -303,19 +309,112 @@ out_up:
return;
}
+static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version)
+{
+ switch(version) {
+ case IPC_64:
+ return copy_to_user(buf, in, sizeof(*in));
+ case IPC_OLD:
+ {
+ struct shmid_ds out;
+
+ ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
+ out.shm_segsz = in->shm_segsz;
+ out.shm_atime = in->shm_atime;
+ out.shm_dtime = in->shm_dtime;
+ out.shm_ctime = in->shm_ctime;
+ out.shm_cpid = in->shm_cpid;
+ out.shm_lpid = in->shm_lpid;
+ out.shm_nattch = in->shm_nattch;
+
+ return copy_to_user(buf, &out, sizeof(out));
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+struct shm_setbuf {
+ uid_t uid;
+ gid_t gid;
+ mode_t mode;
+};
+
+static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
+{
+ switch(version) {
+ case IPC_64:
+ {
+ struct shmid64_ds tbuf;
+
+ if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
+ return -EFAULT;
+
+ out->uid = tbuf.shm_perm.uid;
+ out->gid = tbuf.shm_perm.gid;
+ out->mode = tbuf.shm_perm.mode;
+
+ return 0;
+ }
+ case IPC_OLD:
+ {
+ struct shmid_ds tbuf_old;
+
+ if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
+ return -EFAULT;
+
+ out->uid = tbuf_old.shm_perm.uid;
+ out->gid = tbuf_old.shm_perm.gid;
+ out->mode = tbuf_old.shm_perm.mode;
+
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
+{
+ switch(version) {
+ case IPC_64:
+ return copy_to_user(buf, in, sizeof(*in));
+ case IPC_OLD:
+ {
+ struct shminfo out;
+
+ if(in->shmmax > INT_MAX)
+ out.shmmax = INT_MAX;
+ else
+ out.shmmax = (int)in->shmmax;
+
+ out.shmmin = in->shmmin;
+ out.shmmni = in->shmmni;
+ out.shmseg = in->shmseg;
+ out.shmall = in->shmall;
+
+ return copy_to_user(buf, &out, sizeof(out));
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
- struct shmid_ds tbuf;
+ struct shm_setbuf setbuf;
struct shmid_kernel *shp;
- int err;
+ int err, version;
if (cmd < 0 || shmid < 0)
return -EINVAL;
+ version = ipc_parse_version(&cmd);
+
switch (cmd) { /* replace with proc interface ? */
case IPC_INFO:
{
- struct shminfo shminfo;
+ struct shminfo64 shminfo;
memset(&shminfo,0,sizeof(shminfo));
shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
@@ -323,7 +422,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
shminfo.shmall = shm_ctlall;
shminfo.shmmin = SHMMIN;
- if(copy_to_user (buf, &shminfo, sizeof(struct shminfo)))
+ if(copy_shminfo_to_user (buf, &shminfo, version))
return -EFAULT;
/* reading a integer is always atomic */
err= shm_ids.max_id;
@@ -337,7 +436,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
memset(&shm_info,0,sizeof(shm_info));
shm_lockall();
- shm_info.used_ids = used_segs;
+ shm_info.used_ids = shm_ids.in_use;
shm_info.shm_rss = shm_rss;
shm_info.shm_tot = shm_tot;
shm_info.shm_swp = shm_swp;
@@ -353,8 +452,9 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
case SHM_STAT:
case IPC_STAT:
{
- struct shmid_ds tmp;
+ struct shmid64_ds tbuf;
int result;
+ memset(&tbuf, 0, sizeof(tbuf));
shp = shm_lock(shmid);
if(shp==NULL)
return -EINVAL;
@@ -362,7 +462,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
err = -EINVAL;
if (shmid > shm_ids.max_id)
goto out_unlock;
- result = shm_buildid(shmid, shp->u.shm_perm.seq);
+ result = shm_buildid(shmid, shp->shm_perm.seq);
} else {
err = -EIDRM;
if(shm_checkid(shp,shmid))
@@ -370,11 +470,18 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
result = 0;
}
err=-EACCES;
- if (ipcperms (&shp->u.shm_perm, S_IRUGO))
+ if (ipcperms (&shp->shm_perm, S_IRUGO))
goto out_unlock;
- memcpy(&tmp,&shp->u,sizeof(tmp));
+ kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
+ tbuf.shm_segsz = shp->shm_segsz;
+ tbuf.shm_atime = shp->shm_atime;
+ tbuf.shm_dtime = shp->shm_dtime;
+ tbuf.shm_ctime = shp->shm_ctime;
+ tbuf.shm_cpid = shp->shm_cpid;
+ tbuf.shm_lpid = shp->shm_lpid;
+ tbuf.shm_nattch = shp->shm_nattch;
shm_unlock(shmid);
- if(copy_to_user (buf, &tmp, sizeof(tmp)))
+ if(copy_shmid_to_user (buf, &tbuf, version))
return -EFAULT;
return result;
}
@@ -384,7 +491,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
- struct ipc_perm *ipcp;
+ struct kern_ipc_perm *ipcp;
if (!capable(CAP_IPC_LOCK))
return -EPERM;
@@ -394,7 +501,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
err=-EIDRM;
if(shm_checkid(shp,shmid))
goto out_unlock;
- ipcp = &shp->u.shm_perm;
+ ipcp = &shp->shm_perm;
if(cmd==SHM_LOCK) {
if (!(ipcp->mode & SHM_LOCKED)) {
ipcp->mode |= SHM_LOCKED;
@@ -417,7 +524,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
}
if (cmd == IPC_SET) {
- if(copy_from_user (&tbuf, buf, sizeof (*buf)))
+ if(copy_shmid_from_user (&setbuf, buf, version))
return -EFAULT;
}
down(&shm_ids.sem);
@@ -429,23 +536,23 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
if(shm_checkid(shp,shmid))
goto out_unlock_up;
err=-EPERM;
- if (current->euid != shp->u.shm_perm.uid &&
- current->euid != shp->u.shm_perm.cuid &&
+ if (current->euid != shp->shm_perm.uid &&
+ current->euid != shp->shm_perm.cuid &&
!capable(CAP_SYS_ADMIN)) {
goto out_unlock_up;
}
switch (cmd) {
case IPC_SET:
- shp->u.shm_perm.uid = tbuf.shm_perm.uid;
- shp->u.shm_perm.gid = tbuf.shm_perm.gid;
- shp->u.shm_perm.mode = (shp->u.shm_perm.mode & ~S_IRWXUGO)
- | (tbuf.shm_perm.mode & S_IRWXUGO);
- shp->u.shm_ctime = CURRENT_TIME;
+ shp->shm_perm.uid = setbuf.uid;
+ shp->shm_perm.gid = setbuf.gid;
+ shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
+ | (setbuf.mode & S_IRWXUGO);
+ shp->shm_ctime = CURRENT_TIME;
break;
case IPC_RMID:
- shp->u.shm_perm.mode |= SHM_DEST;
- if (shp->u.shm_nattch <= 0) {
+ shp->shm_perm.mode |= SHM_DEST;
+ if (shp->shm_nattch <= 0) {
shm_unlock(shmid);
up(&shm_ids.sem);
killseg (shmid);
@@ -553,7 +660,7 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
goto out_up;
err = -EACCES;
- if (ipcperms(&shp->u.shm_perm, flg))
+ if (ipcperms(&shp->shm_perm, flg))
goto out_unlock_up;
err = -EIDRM;
@@ -566,7 +673,7 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
err = -ENOMEM;
addr = 0;
again:
- if (!(addr = get_unmapped_area(addr, (unsigned long)shp->u.shm_segsz)))
+ if (!(addr = get_unmapped_area(addr, (unsigned long)shp->shm_segsz)))
goto out_unlock_up;
if(addr & (SHMLBA - 1)) {
addr = (addr + (SHMLBA - 1)) & ~(SHMLBA - 1);
@@ -593,7 +700,7 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
if (addr < current->mm->start_stack &&
addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
goto out_unlock_up;
- if (!(shmflg & SHM_REMAP) && find_vma_intersection(current->mm, addr, addr + (unsigned long)shp->u.shm_segsz))
+ if (!(shmflg & SHM_REMAP) && find_vma_intersection(current->mm, addr, addr + (unsigned long)shp->shm_segsz))
goto out_unlock_up;
shm_unlock(shmid);
@@ -617,7 +724,7 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
shmd->vm_pgoff = 0;
shmd->vm_ops = &shm_vm_ops;
- shp->u.shm_nattch++; /* prevent destruction */
+ shp->shm_nattch++; /* prevent destruction */
shm_unlock(shp->id);
err = shm_map (shmd);
shm_lock(shmid); /* cannot fail */
@@ -626,8 +733,8 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
insert_attach(shp,shmd); /* insert shmd into shp->attaches */
- shp->u.shm_lpid = current->pid;
- shp->u.shm_atime = CURRENT_TIME;
+ shp->shm_lpid = current->pid;
+ shp->shm_atime = CURRENT_TIME;
*raddr = addr;
err = 0;
@@ -640,7 +747,7 @@ out_up:
failed_shm_map:
{
int delete = 0;
- if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
+ if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
delete = 1;
shm_unlock(shmid);
up(&current->mm->mmap_sem);
@@ -660,9 +767,9 @@ static void shm_open (struct vm_area_struct *shmd)
if(shp != shm_lock(shp->id))
BUG();
insert_attach(shp,shmd); /* insert shmd into shp->attaches */
- shp->u.shm_nattch++;
- shp->u.shm_atime = CURRENT_TIME;
- shp->u.shm_lpid = current->pid;
+ shp->shm_nattch++;
+ shp->shm_atime = CURRENT_TIME;
+ shp->shm_lpid = current->pid;
shm_unlock(shp->id);
}
@@ -682,10 +789,10 @@ static void shm_close (struct vm_area_struct *shmd)
if(shp != shm_lock(shp->id))
BUG();
remove_attach(shp,shmd); /* remove from shp->attaches */
- shp->u.shm_lpid = current->pid;
- shp->u.shm_dtime = CURRENT_TIME;
+ shp->shm_lpid = current->pid;
+ shp->shm_dtime = CURRENT_TIME;
id=-1;
- if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
+ if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
id=shp->id;
shm_unlock(shp->id);
if(id!=-1)
@@ -827,7 +934,7 @@ int shm_swap (int prio, int gfp_mask, zone_t *zone)
shm_lockall();
check_id:
shp = shm_get(swap_id);
- if(shp==NULL || shp->u.shm_perm.mode & SHM_LOCKED) {
+ if(shp==NULL || shp->shm_perm.mode & SHM_LOCKED) {
next_id:
swap_idx = 0;
if (++swap_id > shm_ids.max_id) {
@@ -952,20 +1059,20 @@ static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int l
else
format = BIG_STRING;
len += sprintf(buffer + len, format,
- shp->u.shm_perm.key,
- shm_buildid(i, shp->u.shm_perm.seq),
- shp->u.shm_perm.mode,
- shp->u.shm_segsz,
- shp->u.shm_cpid,
- shp->u.shm_lpid,
- shp->u.shm_nattch,
- shp->u.shm_perm.uid,
- shp->u.shm_perm.gid,
- shp->u.shm_perm.cuid,
- shp->u.shm_perm.cgid,
- shp->u.shm_atime,
- shp->u.shm_dtime,
- shp->u.shm_ctime);
+ shp->shm_perm.key,
+ shm_buildid(i, shp->shm_perm.seq),
+ shp->shm_perm.mode,
+ shp->shm_segsz,
+ shp->shm_cpid,
+ shp->shm_lpid,
+ shp->shm_nattch,
+ shp->shm_perm.uid,
+ shp->shm_perm.gid,
+ shp->shm_perm.cuid,
+ shp->shm_perm.cgid,
+ shp->shm_atime,
+ shp->shm_dtime,
+ shp->shm_ctime);
shm_unlock(i);
pos += len;
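
The shm_nattch/SHM_DEST handling above is what makes deferred removal work: IPC_RMID only marks the segment with SHM_DEST, and killseg() runs once the last attachment goes away. A plain user-space attach/detach/remove sequence that exercises that path (error handling kept minimal):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p;

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	p = shmat(id, NULL, 0);              /* attach at a kernel-chosen address */
	if (p == (char *)-1) {
		perror("shmat");
	} else {
		strcpy(p, "hello");          /* the segment is ordinary memory */
		printf("%s\n", p);
		shmdt(p);                    /* detach; shm_nattch drops back */
	}
	shmctl(id, IPC_RMID, NULL);          /* mark for destruction */
	return 0;
}
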
diff --git a/ipc/util.c b/ipc/util.c
index 6008b0d99..b5b5431d8 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -18,6 +18,7 @@
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/malloc.h>
+#include <linux/highuid.h>
#if defined(CONFIG_SYSVIPC)
@@ -67,7 +68,7 @@ void __init ipc_init_ids(struct ipc_ids* ids, int size)
int ipc_findkey(struct ipc_ids* ids, key_t key)
{
int id;
- struct ipc_perm* p;
+ struct kern_ipc_perm* p;
for (id = 0; id <= ids->max_id; id++) {
p = ids->entries[id].p;
@@ -108,7 +109,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
return ids->size;
}
-int ipc_addid(struct ipc_ids* ids, struct ipc_perm* new, int size)
+int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
int id;
@@ -135,9 +136,9 @@ found:
return id;
}
-struct ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
+struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
- struct ipc_perm* p;
+ struct kern_ipc_perm* p;
int lid = id % SEQ_MULTIPLIER;
if(lid > ids->size)
BUG();
@@ -186,7 +187,7 @@ void ipc_free(void* ptr, int size)
* Check user, group, other permissions for access
* to ipc resources. return 0 if allowed
*/
-int ipcperms (struct ipc_perm *ipcp, short flag)
+int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{ /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
int requested_mode, granted_mode;
@@ -204,6 +205,43 @@ int ipcperms (struct ipc_perm *ipcp, short flag)
return 0;
}
+/*
+ * Functions to convert between the kern_ipc_perm structure and the
+ * old/new ipc_perm structures
+ */
+
+void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
+{
+ out->key = in->key;
+ out->uid = in->uid;
+ out->gid = in->gid;
+ out->cuid = in->cuid;
+ out->cgid = in->cgid;
+ out->mode = in->mode;
+ out->seq = in->seq;
+}
+
+void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
+{
+ out->key = in->key;
+ out->uid = NEW_TO_OLD_UID(in->uid);
+ out->gid = NEW_TO_OLD_GID(in->gid);
+ out->cuid = NEW_TO_OLD_UID(in->cuid);
+ out->cgid = NEW_TO_OLD_GID(in->cgid);
+ out->mode = in->mode;
+ out->seq = in->seq;
+}
+
+int ipc_parse_version (int *cmd)
+{
+ if (*cmd & IPC_64) {
+ *cmd ^= IPC_64;
+ return IPC_64;
+ } else {
+ return IPC_OLD;
+ }
+}
+
#else
/*
* Dummy functions when SYSV IPC isn't configured
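
ipc64_perm_to_ipc_perm() above narrows the 32-bit uid/gid fields through NEW_TO_OLD_UID/NEW_TO_OLD_GID from <linux/highuid.h> when filling a legacy ipc_perm. A user-space sketch of that narrowing, assuming the usual convention that IDs which do not fit in 16 bits collapse to an overflow ID (the 65534 below is only an illustrative choice):

#include <stdio.h>

typedef unsigned int   new_uid_t;   /* 32-bit IDs, as in ipc64_perm */
typedef unsigned short old_uid_t;   /* 16-bit IDs, as in the old ipc_perm */

/* Illustrative overflow ID; the kernel's actual value is configurable. */
static const old_uid_t overflow_uid = 65534;

/* Sketch of the NEW_TO_OLD_UID idea: keep the ID if it fits 16 bits, else overflow. */
static old_uid_t new_to_old_uid(new_uid_t uid)
{
	return (uid & ~0xFFFFu) ? overflow_uid : (old_uid_t)uid;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, new_to_old_uid(1000u));    /* fits, unchanged */
	printf("%u -> %u\n", 70000u, new_to_old_uid(70000u));  /* too big, overflows */
	return 0;
}
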
diff --git a/ipc/util.h b/ipc/util.h
index 2b9ef66d0..ffc6de0d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -24,7 +24,7 @@ struct ipc_ids {
};
struct ipc_id {
- struct ipc_perm* p;
+ struct kern_ipc_perm* p;
};
@@ -32,12 +32,12 @@ void __init ipc_init_ids(struct ipc_ids* ids, int size);
/* must be called with ids->sem acquired.*/
int ipc_findkey(struct ipc_ids* ids, key_t key);
-int ipc_addid(struct ipc_ids* ids, struct ipc_perm* new, int size);
+int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
/* must be called with both locks acquired. */
-struct ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);
+struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);
-int ipcperms (struct ipc_perm *ipcp, short flg);
+int ipcperms (struct kern_ipc_perm *ipcp, short flg);
/* for rare, potentially huge allocations.
* both function can sleep
@@ -50,9 +50,9 @@ extern inline void ipc_lockall(struct ipc_ids* ids)
spin_lock(&ids->ary);
}
-extern inline struct ipc_perm* ipc_get(struct ipc_ids* ids, int id)
+extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
- struct ipc_perm* out;
+ struct kern_ipc_perm* out;
int lid = id % SEQ_MULTIPLIER;
if(lid > ids->size)
return NULL;
@@ -65,9 +65,9 @@ extern inline void ipc_unlockall(struct ipc_ids* ids)
{
spin_unlock(&ids->ary);
}
-extern inline struct ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
+extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
- struct ipc_perm* out;
+ struct kern_ipc_perm* out;
int lid = id % SEQ_MULTIPLIER;
if(lid > ids->size)
return NULL;
@@ -89,10 +89,13 @@ extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)
return SEQ_MULTIPLIER*seq + id;
}
-extern inline int ipc_checkid(struct ipc_ids* ids, struct ipc_perm* ipcp, int uid)
+extern inline int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
if(uid/SEQ_MULTIPLIER != ipcp->seq)
return 1;
return 0;
}
+void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
+void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
+int ipc_parse_version (int *cmd);