author    Ralf Baechle <ralf@linux-mips.org>  1994-11-28 11:59:19 +0000
committer <ralf@linux-mips.org>               1994-11-28 11:59:19 +0000
commit    1513ff9b7899ab588401c89db0e99903dbf5f886 (patch)
tree      f69cc81a940a502ea23d664c3ffb2d215a479667 /ipc
Import of Linus's Linux 1.1.68
Diffstat (limited to 'ipc')
-rw-r--r--  ipc/Makefile   38
-rw-r--r--  ipc/msg.c     441
-rw-r--r--  ipc/sem.c     513
-rw-r--r--  ipc/shm.c     761
-rw-r--r--  ipc/util.c    150
5 files changed, 1903 insertions, 0 deletions
diff --git a/ipc/Makefile b/ipc/Makefile
new file mode 100644
index 000000000..a3a18a7ae
--- /dev/null
+++ b/ipc/Makefile
@@ -0,0 +1,38 @@
+#
+# Makefile for the linux ipc.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (i.e. not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+.c.o:
+ $(CC) $(CFLAGS) -c $<
+.s.o:
+ $(AS) -o $*.o $<
+.c.s:
+ $(CC) $(CFLAGS) -S $<
+
+OBJS = util.o
+SRCS = util.c
+
+ifdef CONFIG_SYSVIPC
+OBJS := $(OBJS) msg.o sem.o shm.o
+SRCS := $(SRCS) msg.c sem.c shm.c
+endif
+
+ipc.o: $(OBJS)
+ $(LD) -r -o ipc.o $(OBJS)
+
+dep:
+ $(CPP) -M $(SRCS) > .depend
+
+dummy:
+
+#
+# include a dependency file if one exists
+#
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/ipc/msg.c b/ipc/msg.c
new file mode 100644
index 000000000..04aa12328
--- /dev/null
+++ b/ipc/msg.c
@@ -0,0 +1,441 @@
+/*
+ * linux/ipc/msg.c
+ * Copyright (C) 1992 Krishna Balasubramanian
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/msg.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+
+#include <asm/segment.h>
+
+extern int ipcperms (struct ipc_perm *ipcp, short msgflg);
+
+static void freeque (int id);
+static int newque (key_t key, int msgflg);
+static int findkey (key_t key);
+
+static struct msqid_ds *msgque[MSGMNI];
+static int msgbytes = 0;
+static int msghdrs = 0;
+static unsigned short msg_seq = 0;
+static int used_queues = 0;
+static int max_msqid = 0;
+static struct wait_queue *msg_lock = NULL;
+
+void msg_init (void)
+{
+ int id;
+
+ for (id = 0; id < MSGMNI; id++)
+ msgque[id] = (struct msqid_ds *) IPC_UNUSED;
+ msgbytes = msghdrs = msg_seq = max_msqid = used_queues = 0;
+ msg_lock = NULL;
+ return;
+}
+
+int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg)
+{
+ int id, err;
+ struct msqid_ds *msq;
+ struct ipc_perm *ipcp;
+ struct msg *msgh;
+ long mtype;
+
+ if (msgsz > MSGMAX || msgsz < 0 || msqid < 0)
+ return -EINVAL;
+ if (!msgp)
+ return -EFAULT;
+ err = verify_area (VERIFY_READ, msgp->mtext, msgsz);
+ if (err)
+ return err;
+ if ((mtype = get_fs_long (&msgp->mtype)) < 1)
+ return -EINVAL;
+ id = (unsigned int) msqid % MSGMNI;
+ msq = msgque [id];
+ if (msq == IPC_UNUSED || msq == IPC_NOID)
+ return -EINVAL;
+ ipcp = &msq->msg_perm;
+
+ slept:
+ if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
+ return -EIDRM;
+ if (ipcperms(ipcp, S_IWUGO))
+ return -EACCES;
+
+ if (msgsz + msq->msg_cbytes > msq->msg_qbytes) {
+ /* no space in queue */
+ if (msgflg & IPC_NOWAIT)
+ return -EAGAIN;
+ if (current->signal & ~current->blocked)
+ return -EINTR;
+ interruptible_sleep_on (&msq->wwait);
+ goto slept;
+ }
+
+ /* allocate message header and text space */
+ msgh = (struct msg *) kmalloc (sizeof(*msgh) + msgsz, GFP_USER);
+ if (!msgh)
+ return -ENOMEM;
+ msgh->msg_spot = (char *) (msgh + 1);
+ memcpy_fromfs (msgh->msg_spot, msgp->mtext, msgsz);
+
+ if (msgque[id] == IPC_UNUSED || msgque[id] == IPC_NOID
+ || msq->msg_perm.seq != (unsigned int) msqid / MSGMNI) {
+ kfree(msgh);
+ return -EIDRM;
+ }
+
+ msgh->msg_next = NULL;
+ if (!msq->msg_first)
+ msq->msg_first = msq->msg_last = msgh;
+ else {
+ msq->msg_last->msg_next = msgh;
+ msq->msg_last = msgh;
+ }
+ msgh->msg_ts = msgsz;
+ msgh->msg_type = mtype;
+ msq->msg_cbytes += msgsz;
+ msgbytes += msgsz;
+ msghdrs++;
+ msq->msg_qnum++;
+ msq->msg_lspid = current->pid;
+ msq->msg_stime = CURRENT_TIME;
+ if (msq->rwait)
+ wake_up (&msq->rwait);
+ return msgsz;
+}
+
+int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
+ int msgflg)
+{
+ struct msqid_ds *msq;
+ struct ipc_perm *ipcp;
+ struct msg *tmsg, *leastp = NULL;
+ struct msg *nmsg = NULL;
+ int id, err;
+
+ if (msqid < 0 || msgsz < 0)
+ return -EINVAL;
+ if (!msgp || !msgp->mtext)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, msgp->mtext, msgsz);
+ if (err)
+ return err;
+
+ id = (unsigned int) msqid % MSGMNI;
+ msq = msgque [id];
+ if (msq == IPC_NOID || msq == IPC_UNUSED)
+ return -EINVAL;
+ ipcp = &msq->msg_perm;
+
+ /*
+ * find message of correct type.
+ * msgtyp = 0 => get first.
+ * msgtyp > 0 => get first message of matching type.
+ * msgtyp < 0 => get message with least type, which must be <= abs(msgtyp).
+ */
+ while (!nmsg) {
+ if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
+ return -EIDRM;
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ if (msgtyp == 0)
+ nmsg = msq->msg_first;
+ else if (msgtyp > 0) {
+ if (msgflg & MSG_EXCEPT) {
+ for (tmsg = msq->msg_first; tmsg;
+ tmsg = tmsg->msg_next)
+ if (tmsg->msg_type != msgtyp)
+ break;
+ nmsg = tmsg;
+ } else {
+ for (tmsg = msq->msg_first; tmsg;
+ tmsg = tmsg->msg_next)
+ if (tmsg->msg_type == msgtyp)
+ break;
+ nmsg = tmsg;
+ }
+ } else {
+ for (leastp = tmsg = msq->msg_first; tmsg;
+ tmsg = tmsg->msg_next)
+ if (tmsg->msg_type < leastp->msg_type)
+ leastp = tmsg;
+ if (leastp && leastp->msg_type <= - msgtyp)
+ nmsg = leastp;
+ }
+
+ if (nmsg) { /* done finding a message */
+ if ((msgsz < nmsg->msg_ts) && !(msgflg & MSG_NOERROR))
+ return -E2BIG;
+ msgsz = (msgsz > nmsg->msg_ts)? nmsg->msg_ts : msgsz;
+ if (nmsg == msq->msg_first)
+ msq->msg_first = nmsg->msg_next;
+ else {
+ for (tmsg = msq->msg_first; tmsg;
+ tmsg = tmsg->msg_next)
+ if (tmsg->msg_next == nmsg)
+ break;
+ tmsg->msg_next = nmsg->msg_next;
+ if (nmsg == msq->msg_last)
+ msq->msg_last = tmsg;
+ }
+ if (!(--msq->msg_qnum))
+ msq->msg_last = msq->msg_first = NULL;
+
+ msq->msg_rtime = CURRENT_TIME;
+ msq->msg_lrpid = current->pid;
+ msgbytes -= nmsg->msg_ts;
+ msghdrs--;
+ msq->msg_cbytes -= nmsg->msg_ts;
+ if (msq->wwait)
+ wake_up (&msq->wwait);
+ put_fs_long (nmsg->msg_type, &msgp->mtype);
+ memcpy_tofs (msgp->mtext, nmsg->msg_spot, msgsz);
+ kfree(nmsg);
+ return msgsz;
+ } else { /* did not find a message */
+ if (msgflg & IPC_NOWAIT)
+ return -ENOMSG;
+ if (current->signal & ~current->blocked)
+ return -EINTR;
+ interruptible_sleep_on (&msq->rwait);
+ }
+ } /* end while */
+ return -1;
+}
+
+
+static int findkey (key_t key)
+{
+ int id;
+ struct msqid_ds *msq;
+
+ for (id = 0; id <= max_msqid; id++) {
+ while ((msq = msgque[id]) == IPC_NOID)
+ interruptible_sleep_on (&msg_lock);
+ if (msq == IPC_UNUSED)
+ continue;
+ if (key == msq->msg_perm.key)
+ return id;
+ }
+ return -1;
+}
+
+static int newque (key_t key, int msgflg)
+{
+ int id;
+ struct msqid_ds *msq;
+ struct ipc_perm *ipcp;
+
+ for (id = 0; id < MSGMNI; id++)
+ if (msgque[id] == IPC_UNUSED) {
+ msgque[id] = (struct msqid_ds *) IPC_NOID;
+ goto found;
+ }
+ return -ENOSPC;
+
+found:
+ msq = (struct msqid_ds *) kmalloc (sizeof (*msq), GFP_KERNEL);
+ if (!msq) {
+ msgque[id] = (struct msqid_ds *) IPC_UNUSED;
+ if (msg_lock)
+ wake_up (&msg_lock);
+ return -ENOMEM;
+ }
+ ipcp = &msq->msg_perm;
+ ipcp->mode = (msgflg & S_IRWXUGO);
+ ipcp->key = key;
+ ipcp->cuid = ipcp->uid = current->euid;
+ ipcp->gid = ipcp->cgid = current->egid;
+ msq->msg_perm.seq = msg_seq;
+ msq->msg_first = msq->msg_last = NULL;
+ msq->rwait = msq->wwait = NULL;
+ msq->msg_cbytes = msq->msg_qnum = 0;
+ msq->msg_lspid = msq->msg_lrpid = 0;
+ msq->msg_stime = msq->msg_rtime = 0;
+ msq->msg_qbytes = MSGMNB;
+ msq->msg_ctime = CURRENT_TIME;
+ if (id > max_msqid)
+ max_msqid = id;
+ msgque[id] = msq;
+ used_queues++;
+ if (msg_lock)
+ wake_up (&msg_lock);
+ return (unsigned int) msq->msg_perm.seq * MSGMNI + id;
+}
+
+int sys_msgget (key_t key, int msgflg)
+{
+ int id;
+ struct msqid_ds *msq;
+
+ if (key == IPC_PRIVATE)
+ return newque(key, msgflg);
+ if ((id = findkey (key)) == -1) { /* key not used */
+ if (!(msgflg & IPC_CREAT))
+ return -ENOENT;
+ return newque(key, msgflg);
+ }
+ if (msgflg & IPC_CREAT && msgflg & IPC_EXCL)
+ return -EEXIST;
+ msq = msgque[id];
+ if (msq == IPC_UNUSED || msq == IPC_NOID)
+ return -EIDRM;
+ if (ipcperms(&msq->msg_perm, msgflg))
+ return -EACCES;
+ return (unsigned int) msq->msg_perm.seq * MSGMNI + id;
+}
+
+static void freeque (int id)
+{
+ struct msqid_ds *msq = msgque[id];
+ struct msg *msgp, *msgh;
+
+ msq->msg_perm.seq++;
+ msg_seq = (msg_seq+1) % ((unsigned)(1<<31)/MSGMNI); /* increment, but avoid overflow */
+ msgbytes -= msq->msg_cbytes;
+ if (id == max_msqid)
+ while (max_msqid && (msgque[--max_msqid] == IPC_UNUSED));
+ msgque[id] = (struct msqid_ds *) IPC_UNUSED;
+ used_queues--;
+ while (msq->rwait || msq->wwait) {
+ if (msq->rwait)
+ wake_up (&msq->rwait);
+ if (msq->wwait)
+ wake_up (&msq->wwait);
+ schedule();
+ }
+ for (msgp = msq->msg_first; msgp; msgp = msgh ) {
+ msgh = msgp->msg_next;
+ msghdrs--;
+ kfree(msgp);
+ }
+ kfree(msq);
+}
+
+int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
+{
+ int id, err;
+ struct msqid_ds *msq;
+ struct msqid_ds tbuf;
+ struct ipc_perm *ipcp;
+
+ if (msqid < 0 || cmd < 0)
+ return -EINVAL;
+ switch (cmd) {
+ case IPC_INFO:
+ case MSG_INFO:
+ if (!buf)
+ return -EFAULT;
+ {
+ struct msginfo msginfo;
+ msginfo.msgmni = MSGMNI;
+ msginfo.msgmax = MSGMAX;
+ msginfo.msgmnb = MSGMNB;
+ msginfo.msgmap = MSGMAP;
+ msginfo.msgpool = MSGPOOL;
+ msginfo.msgtql = MSGTQL;
+ msginfo.msgssz = MSGSSZ;
+ msginfo.msgseg = MSGSEG;
+ if (cmd == MSG_INFO) {
+ msginfo.msgpool = used_queues;
+ msginfo.msgmap = msghdrs;
+ msginfo.msgtql = msgbytes;
+ }
+ err = verify_area (VERIFY_WRITE, buf, sizeof (struct msginfo));
+ if (err)
+ return err;
+ memcpy_tofs (buf, &msginfo, sizeof(struct msginfo));
+ return max_msqid;
+ }
+ case MSG_STAT:
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
+ if (err)
+ return err;
+ if (msqid > max_msqid)
+ return -EINVAL;
+ msq = msgque[msqid];
+ if (msq == IPC_UNUSED || msq == IPC_NOID)
+ return -EINVAL;
+ if (ipcperms (&msq->msg_perm, S_IRUGO))
+ return -EACCES;
+ id = (unsigned int) msq->msg_perm.seq * MSGMNI + msqid;
+ tbuf.msg_perm = msq->msg_perm;
+ tbuf.msg_stime = msq->msg_stime;
+ tbuf.msg_rtime = msq->msg_rtime;
+ tbuf.msg_ctime = msq->msg_ctime;
+ tbuf.msg_cbytes = msq->msg_cbytes;
+ tbuf.msg_qnum = msq->msg_qnum;
+ tbuf.msg_qbytes = msq->msg_qbytes;
+ tbuf.msg_lspid = msq->msg_lspid;
+ tbuf.msg_lrpid = msq->msg_lrpid;
+ memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ return id;
+ case IPC_SET:
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_READ, buf, sizeof (*buf));
+ if (err)
+ return err;
+ memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ break;
+ case IPC_STAT:
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, buf, sizeof(*buf));
+ if (err)
+ return err;
+ break;
+ }
+
+ id = (unsigned int) msqid % MSGMNI;
+ msq = msgque [id];
+ if (msq == IPC_UNUSED || msq == IPC_NOID)
+ return -EINVAL;
+ if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
+ return -EIDRM;
+ ipcp = &msq->msg_perm;
+
+ switch (cmd) {
+ case IPC_STAT:
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ tbuf.msg_perm = msq->msg_perm;
+ tbuf.msg_stime = msq->msg_stime;
+ tbuf.msg_rtime = msq->msg_rtime;
+ tbuf.msg_ctime = msq->msg_ctime;
+ tbuf.msg_cbytes = msq->msg_cbytes;
+ tbuf.msg_qnum = msq->msg_qnum;
+ tbuf.msg_qbytes = msq->msg_qbytes;
+ tbuf.msg_lspid = msq->msg_lspid;
+ tbuf.msg_lrpid = msq->msg_lrpid;
+ memcpy_tofs (buf, &tbuf, sizeof (*buf));
+ return 0;
+ case IPC_SET:
+ if (!suser() && current->euid != ipcp->cuid &&
+ current->euid != ipcp->uid)
+ return -EPERM;
+ if (tbuf.msg_qbytes > MSGMNB && !suser())
+ return -EPERM;
+ msq->msg_qbytes = tbuf.msg_qbytes;
+ ipcp->uid = tbuf.msg_perm.uid;
+ ipcp->gid = tbuf.msg_perm.gid;
+ ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
+ (S_IRWXUGO & tbuf.msg_perm.mode);
+ msq->msg_ctime = CURRENT_TIME;
+ return 0;
+ case IPC_RMID:
+ if (!suser() && current->euid != ipcp->cuid &&
+ current->euid != ipcp->uid)
+ return -EPERM;
+ freeque (id);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
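
The file above implements all four message-queue entry points. Note the identifier scheme, which sem.c and shm.c share: the value returned to user space is seq * MSGMNI + slot, every entry point recovers the slot with % MSGMNI and checks the sequence number with / MSGMNI, so a stale id for a deleted-and-recreated queue fails with -EIDRM. A minimal user-space sketch of the call sequence these handlers serve (not part of this commit; assumes a libc that wraps the SysV IPC calls, with error handling abbreviated):

/*
 * Hypothetical demo: create a private queue, send one message of
 * type 1, receive it back, then remove the queue.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {
	long mtype;		/* must be > 0; sys_msgsnd() rejects mtype < 1 */
	char mtext[64];
};

int main(void)
{
	struct my_msg m = { 1, "hello" };
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);	/* -> newque() */

	if (id < 0)
		return 1;
	msgsnd(id, &m, strlen(m.mtext) + 1, 0);
	msgrcv(id, &m, sizeof(m.mtext), 1, 0);	/* msgtyp > 0: first of type 1 */
	printf("got: %s\n", m.mtext);
	msgctl(id, IPC_RMID, NULL);		/* -> freeque() */
	return 0;
}
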
diff --git a/ipc/sem.c b/ipc/sem.c
new file mode 100644
index 000000000..0aeaf588b
--- /dev/null
+++ b/ipc/sem.c
@@ -0,0 +1,513 @@
+/*
+ * linux/ipc/sem.c
+ * Copyright (C) 1992 Krishna Balasubramanian
+ */
+
+#include <linux/errno.h>
+#include <asm/segment.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/sem.h>
+#include <linux/ipc.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+
+extern int ipcperms (struct ipc_perm *ipcp, short semflg);
+static int newary (key_t, int, int);
+static int findkey (key_t key);
+static void freeary (int id);
+
+static struct semid_ds *semary[SEMMNI];
+static int used_sems = 0, used_semids = 0;
+static struct wait_queue *sem_lock = NULL;
+static int max_semid = 0;
+
+static unsigned short sem_seq = 0;
+
+void sem_init (void)
+{
+ int i;
+
+ sem_lock = NULL;
+ used_sems = used_semids = max_semid = sem_seq = 0;
+ for (i = 0; i < SEMMNI; i++)
+ semary[i] = (struct semid_ds *) IPC_UNUSED;
+ return;
+}
+
+static int findkey (key_t key)
+{
+ int id;
+ struct semid_ds *sma;
+
+ for (id = 0; id <= max_semid; id++) {
+ while ((sma = semary[id]) == IPC_NOID)
+ interruptible_sleep_on (&sem_lock);
+ if (sma == IPC_UNUSED)
+ continue;
+ if (key == sma->sem_perm.key)
+ return id;
+ }
+ return -1;
+}
+
+static int newary (key_t key, int nsems, int semflg)
+{
+ int id;
+ struct semid_ds *sma;
+ struct ipc_perm *ipcp;
+ int size;
+
+ if (!nsems)
+ return -EINVAL;
+ if (used_sems + nsems > SEMMNS)
+ return -ENOSPC;
+ for (id = 0; id < SEMMNI; id++)
+ if (semary[id] == IPC_UNUSED) {
+ semary[id] = (struct semid_ds *) IPC_NOID;
+ goto found;
+ }
+ return -ENOSPC;
+found:
+ size = sizeof (*sma) + nsems * sizeof (struct sem);
+ used_sems += nsems;
+ sma = (struct semid_ds *) kmalloc (size, GFP_KERNEL);
+ if (!sma) {
+ semary[id] = (struct semid_ds *) IPC_UNUSED;
+ used_sems -= nsems;
+ if (sem_lock)
+ wake_up (&sem_lock);
+ return -ENOMEM;
+ }
+ memset (sma, 0, size);
+ sma->sem_base = (struct sem *) &sma[1];
+ ipcp = &sma->sem_perm;
+ ipcp->mode = (semflg & S_IRWXUGO);
+ ipcp->key = key;
+ ipcp->cuid = ipcp->uid = current->euid;
+ ipcp->gid = ipcp->cgid = current->egid;
+ sma->sem_perm.seq = sem_seq;
+ sma->eventn = sma->eventz = NULL;
+ sma->sem_nsems = nsems;
+ sma->sem_ctime = CURRENT_TIME;
+ if (id > max_semid)
+ max_semid = id;
+ used_semids++;
+ semary[id] = sma;
+ if (sem_lock)
+ wake_up (&sem_lock);
+ return (unsigned int) sma->sem_perm.seq * SEMMNI + id;
+}
+
+int sys_semget (key_t key, int nsems, int semflg)
+{
+ int id;
+ struct semid_ds *sma;
+
+ if (nsems < 0 || nsems > SEMMSL)
+ return -EINVAL;
+ if (key == IPC_PRIVATE)
+ return newary(key, nsems, semflg);
+ if ((id = findkey (key)) == -1) { /* key not used */
+ if (!(semflg & IPC_CREAT))
+ return -ENOENT;
+ return newary(key, nsems, semflg);
+ }
+ if (semflg & IPC_CREAT && semflg & IPC_EXCL)
+ return -EEXIST;
+ sma = semary[id];
+ if (nsems > sma->sem_nsems)
+ return -EINVAL;
+ if (ipcperms(&sma->sem_perm, semflg))
+ return -EACCES;
+ return (unsigned int) sma->sem_perm.seq * SEMMNI + id;
+}
+
+static void freeary (int id)
+{
+ struct semid_ds *sma = semary[id];
+ struct sem_undo *un;
+
+ sma->sem_perm.seq++;
+ sem_seq = (sem_seq+1) % ((unsigned)(1<<31)/SEMMNI); /* increment, but avoid overflow */
+ used_sems -= sma->sem_nsems;
+ if (id == max_semid)
+ while (max_semid && (semary[--max_semid] == IPC_UNUSED));
+ semary[id] = (struct semid_ds *) IPC_UNUSED;
+ used_semids--;
+ for (un = sma->undo; un; un = un->id_next)
+ un->semadj = 0;
+ while (sma->eventz || sma->eventn) {
+ if (sma->eventz)
+ wake_up (&sma->eventz);
+ if (sma->eventn)
+ wake_up (&sma->eventn);
+ schedule();
+ }
+ kfree(sma);
+ return;
+}
+
+int sys_semctl (int semid, int semnum, int cmd, union semun arg)
+{
+ struct semid_ds *buf = NULL;
+ struct semid_ds tbuf;
+ int i, id, val = 0;
+ struct semid_ds *sma;
+ struct ipc_perm *ipcp;
+ struct sem *curr;
+ struct sem_undo *un;
+ unsigned int nsems;
+ ushort *array = NULL;
+ ushort sem_io[SEMMSL];
+
+ if (semid < 0 || semnum < 0 || cmd < 0)
+ return -EINVAL;
+
+ switch (cmd) {
+ case IPC_INFO:
+ case SEM_INFO:
+ {
+ struct seminfo seminfo, *tmp = arg.__buf;
+ seminfo.semmni = SEMMNI;
+ seminfo.semmns = SEMMNS;
+ seminfo.semmsl = SEMMSL;
+ seminfo.semopm = SEMOPM;
+ seminfo.semvmx = SEMVMX;
+ seminfo.semmnu = SEMMNU;
+ seminfo.semmap = SEMMAP;
+ seminfo.semume = SEMUME;
+ seminfo.semusz = SEMUSZ;
+ seminfo.semaem = SEMAEM;
+ if (cmd == SEM_INFO) {
+ seminfo.semusz = used_semids;
+ seminfo.semaem = used_sems;
+ }
+ i = verify_area(VERIFY_WRITE, tmp, sizeof(struct seminfo));
+ if (i)
+ return i;
+ memcpy_tofs (tmp, &seminfo, sizeof(struct seminfo));
+ return max_semid;
+ }
+
+ case SEM_STAT:
+ buf = arg.buf;
+ i = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
+ if (i)
+ return i;
+ if (semid > max_semid)
+ return -EINVAL;
+ sma = semary[semid];
+ if (sma == IPC_UNUSED || sma == IPC_NOID)
+ return -EINVAL;
+ if (ipcperms (&sma->sem_perm, S_IRUGO))
+ return -EACCES;
+ id = (unsigned int) sma->sem_perm.seq * SEMMNI + semid;
+ tbuf.sem_perm = sma->sem_perm;
+ tbuf.sem_otime = sma->sem_otime;
+ tbuf.sem_ctime = sma->sem_ctime;
+ tbuf.sem_nsems = sma->sem_nsems;
+ memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ return id;
+ }
+
+ id = (unsigned int) semid % SEMMNI;
+ sma = semary [id];
+ if (sma == IPC_UNUSED || sma == IPC_NOID)
+ return -EINVAL;
+ ipcp = &sma->sem_perm;
+ nsems = sma->sem_nsems;
+ if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
+ return -EIDRM;
+ if (semnum >= nsems)
+ return -EINVAL;
+ curr = &sma->sem_base[semnum];
+
+ switch (cmd) {
+ case GETVAL:
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETALL:
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ switch (cmd) {
+ case GETVAL : return curr->semval;
+ case GETPID : return curr->sempid;
+ case GETNCNT: return curr->semncnt;
+ case GETZCNT: return curr->semzcnt;
+ case GETALL:
+ array = arg.array;
+ i = verify_area (VERIFY_WRITE, array, nsems*sizeof(ushort));
+ if (i)
+ return i;
+ }
+ break;
+ case SETVAL:
+ val = arg.val;
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
+ break;
+ case IPC_RMID:
+ if (suser() || current->euid == ipcp->cuid ||
+ current->euid == ipcp->uid) {
+ freeary (id);
+ return 0;
+ }
+ return -EPERM;
+ case SETALL: /* arg is a pointer to an array of ushort */
+ array = arg.array;
+ if ((i = verify_area (VERIFY_READ, array, nsems*sizeof(ushort))))
+ return i;
+ memcpy_fromfs (sem_io, array, nsems*sizeof(ushort));
+ for (i = 0; i < nsems; i++)
+ if (sem_io[i] > SEMVMX)
+ return -ERANGE;
+ break;
+ case IPC_STAT:
+ buf = arg.buf;
+ if ((i = verify_area (VERIFY_WRITE, buf, sizeof(*buf))))
+ return i;
+ break;
+ case IPC_SET:
+ buf = arg.buf;
+ if ((i = verify_area (VERIFY_READ, buf, sizeof (*buf))))
+ return i;
+ memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ break;
+ }
+
+ if (semary[id] == IPC_UNUSED || semary[id] == IPC_NOID)
+ return -EIDRM;
+ if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
+ return -EIDRM;
+
+ switch (cmd) {
+ case GETALL:
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ for (i = 0; i < sma->sem_nsems; i++)
+ sem_io[i] = sma->sem_base[i].semval;
+ memcpy_tofs (array, sem_io, nsems*sizeof(ushort));
+ break;
+ case SETVAL:
+ if (ipcperms (ipcp, S_IWUGO))
+ return -EACCES;
+ for (un = sma->undo; un; un = un->id_next)
+ if (semnum == un->sem_num)
+ un->semadj = 0;
+ sma->sem_ctime = CURRENT_TIME;
+ curr->semval = val;
+ if (sma->eventn)
+ wake_up (&sma->eventn);
+ if (sma->eventz)
+ wake_up (&sma->eventz);
+ break;
+ case IPC_SET:
+ if (suser() || current->euid == ipcp->cuid ||
+ current->euid == ipcp->uid) {
+ ipcp->uid = tbuf.sem_perm.uid;
+ ipcp->gid = tbuf.sem_perm.gid;
+ ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
+ | (tbuf.sem_perm.mode & S_IRWXUGO);
+ sma->sem_ctime = CURRENT_TIME;
+ return 0;
+ }
+ return -EPERM;
+ case IPC_STAT:
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ tbuf.sem_perm = sma->sem_perm;
+ tbuf.sem_otime = sma->sem_otime;
+ tbuf.sem_ctime = sma->sem_ctime;
+ tbuf.sem_nsems = sma->sem_nsems;
+ memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ break;
+ case SETALL:
+ if (ipcperms (ipcp, S_IWUGO))
+ return -EACCES;
+ for (i = 0; i < nsems; i++)
+ sma->sem_base[i].semval = sem_io[i];
+ for (un = sma->undo; un; un = un->id_next)
+ un->semadj = 0;
+ if (sma->eventn)
+ wake_up (&sma->eventn);
+ if (sma->eventz)
+ wake_up (&sma->eventz);
+ sma->sem_ctime = CURRENT_TIME;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
+{
+ int i, id;
+ struct semid_ds *sma;
+ struct sem *curr = NULL;
+ struct sembuf sops[SEMOPM], *sop;
+ struct sem_undo *un;
+ int undos = 0, alter = 0, semncnt = 0, semzcnt = 0;
+
+ if (nsops < 1 || semid < 0)
+ return -EINVAL;
+ if (nsops > SEMOPM)
+ return -E2BIG;
+ if (!tsops)
+ return -EFAULT;
+ if ((i = verify_area (VERIFY_READ, tsops, nsops * sizeof(*tsops))))
+ return i;
+ memcpy_fromfs (sops, tsops, nsops * sizeof(*tsops));
+ id = (unsigned int) semid % SEMMNI;
+ if ((sma = semary[id]) == IPC_UNUSED || sma == IPC_NOID)
+ return -EINVAL;
+ for (i = 0; i < nsops; i++) {
+ sop = &sops[i];
+ if (sop->sem_num >= sma->sem_nsems)
+ return -EFBIG;
+ if (sop->sem_flg & SEM_UNDO)
+ undos++;
+ if (sop->sem_op) {
+ alter++;
+ if (sop->sem_op > 0)
+ semncnt ++;
+ }
+ }
+ if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
+ return -EACCES;
+ /*
+ * ensure every sop with undo gets an undo structure
+ */
+ if (undos) {
+ for (i = 0; i < nsops; i++) {
+ if (!(sops[i].sem_flg & SEM_UNDO))
+ continue;
+ for (un = current->semundo; un; un = un->proc_next)
+ if ((un->semid == semid) &&
+ (un->sem_num == sops[i].sem_num))
+ break;
+ if (un)
+ continue;
+ un = (struct sem_undo *)
+ kmalloc (sizeof(*un), GFP_ATOMIC);
+ if (!un)
+ return -ENOMEM; /* freed on exit */
+ un->semid = semid;
+ un->semadj = 0;
+ un->sem_num = sops[i].sem_num;
+ un->proc_next = current->semundo;
+ current->semundo = un;
+ un->id_next = sma->undo;
+ sma->undo = un;
+ }
+ }
+
+ slept:
+ if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
+ return -EIDRM;
+ for (i = 0; i < nsops; i++) {
+ sop = &sops[i];
+ curr = &sma->sem_base[sop->sem_num];
+ if (sop->sem_op + curr->semval > SEMVMX)
+ return -ERANGE;
+ if (!sop->sem_op && curr->semval) {
+ if (sop->sem_flg & IPC_NOWAIT)
+ return -EAGAIN;
+ if (current->signal & ~current->blocked)
+ return -EINTR;
+ curr->semzcnt++;
+ interruptible_sleep_on (&sma->eventz);
+ curr->semzcnt--;
+ goto slept;
+ }
+ if ((sop->sem_op + curr->semval < 0) ) {
+ if (sop->sem_flg & IPC_NOWAIT)
+ return -EAGAIN;
+ if (current->signal & ~current->blocked)
+ return -EINTR;
+ curr->semncnt++;
+ interruptible_sleep_on (&sma->eventn);
+ curr->semncnt--;
+ goto slept;
+ }
+ }
+
+ for (i = 0; i < nsops; i++) {
+ sop = &sops[i];
+ curr = &sma->sem_base[sop->sem_num];
+ curr->sempid = current->pid;
+ if (!(curr->semval += sop->sem_op))
+ semzcnt++;
+ if (!(sop->sem_flg & SEM_UNDO))
+ continue;
+ for (un = current->semundo; un; un = un->proc_next)
+ if ((un->semid == semid) &&
+ (un->sem_num == sop->sem_num))
+ break;
+ if (!un) {
+ printk ("semop : no undo for op %d\n", i);
+ continue;
+ }
+ un->semadj -= sop->sem_op;
+ }
+ sma->sem_otime = CURRENT_TIME;
+ if (semncnt && sma->eventn)
+ wake_up(&sma->eventn);
+ if (semzcnt && sma->eventz)
+ wake_up(&sma->eventz);
+ return curr->semval;
+}
+
+/*
+ * add semadj values to semaphores, free undo structures.
+ * undo structures are not freed when semaphore arrays are destroyed
+ * so some of them may be out of date.
+ */
+void sem_exit (void)
+{
+ struct sem_undo *u, *un = NULL, **up, **unp;
+ struct semid_ds *sma;
+ struct sem *sem = NULL;
+
+ for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
+ sma = semary[(unsigned int) u->semid % SEMMNI];
+ if (sma == IPC_UNUSED || sma == IPC_NOID)
+ continue;
+ if (sma->sem_perm.seq != (unsigned int) u->semid / SEMMNI)
+ continue;
+ for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
+ if (u == un)
+ goto found;
+ }
+ printk ("sem_exit undo list error id=%d\n", u->semid);
+ break;
+found:
+ *unp = un->id_next;
+ if (!un->semadj)
+ continue;
+ while (1) {
+ if (sma->sem_perm.seq != (unsigned int) un->semid / SEMMNI)
+ break;
+ sem = &sma->sem_base[un->sem_num];
+ if (sem->semval + un->semadj >= 0) {
+ sem->semval += un->semadj;
+ sem->sempid = current->pid;
+ sma->sem_otime = CURRENT_TIME;
+ if (un->semadj > 0 && sma->eventn)
+ wake_up (&sma->eventn);
+ if (!sem->semval && sma->eventz)
+ wake_up (&sma->eventz);
+ break;
+ }
+ if (current->signal & ~current->blocked)
+ break;
+ sem->semncnt++;
+ interruptible_sleep_on (&sma->eventn);
+ sem->semncnt--;
+ }
+ }
+ current->semundo = NULL;
+ return;
+}
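
sys_semop() above sleeps on sma->eventn (waiting for a value to rise) or sma->eventz (waiting for a value to reach zero) and restarts the whole operation scan from the slept: label, while SEM_UNDO operations accumulate per-process adjustments that sem_exit() replays when the task dies. A minimal user-space sketch of that interface (not part of this commit; union semun must be declared by the caller on most libcs):

/*
 * Hypothetical demo: one private semaphore used as a mutex, with
 * SEM_UNDO so the kernel releases it if the process dies inside
 * the critical section.
 */
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	union semun arg = { .val = 1 };
	struct sembuf down = { 0, -1, SEM_UNDO };	/* P() */
	struct sembuf up   = { 0, +1, SEM_UNDO };	/* V() */

	if (id < 0)
		return 1;
	semctl(id, 0, SETVAL, arg);	/* the SETVAL case above */
	semop(id, &down, 1);		/* may sleep on sma->eventn */
	/* ... critical section ... */
	semop(id, &up, 1);		/* wakes sleepers via eventn */
	semctl(id, 0, IPC_RMID, arg);	/* -> freeary() */
	return 0;
}
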
diff --git a/ipc/shm.c b/ipc/shm.c
new file mode 100644
index 000000000..562539a90
--- /dev/null
+++ b/ipc/shm.c
@@ -0,0 +1,761 @@
+/*
+ * linux/ipc/shm.c
+ * Copyright (C) 1992, 1993 Krishna Balasubramanian
+ * Many improvements/fixes by Bruno Haible.
+ * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
+ */
+
+#include <linux/errno.h>
+#include <asm/segment.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/ipc.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+
+extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
+extern unsigned int get_swap_page (void);
+static int findkey (key_t key);
+static int newseg (key_t key, int shmflg, int size);
+static int shm_map (struct vm_area_struct *shmd, int remap);
+static void killseg (int id);
+static void shm_open (struct vm_area_struct *shmd);
+static void shm_close (struct vm_area_struct *shmd);
+static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long);
+
+static int shm_tot = 0; /* total number of shared memory pages */
+static int shm_rss = 0; /* number of shared memory pages that are in memory */
+static int shm_swp = 0; /* number of shared memory pages that are in swap */
+static int max_shmid = 0; /* every used id is <= max_shmid */
+static struct wait_queue *shm_lock = NULL; /* calling findkey() may need to wait */
+static struct shmid_ds *shm_segs[SHMMNI];
+
+static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */
+
+/* some statistics */
+static ulong swap_attempts = 0;
+static ulong swap_successes = 0;
+static ulong used_segs = 0;
+
+void shm_init (void)
+{
+ int id;
+
+ for (id = 0; id < SHMMNI; id++)
+ shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
+ shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
+ shm_lock = NULL;
+ return;
+}
+
+static int findkey (key_t key)
+{
+ int id;
+ struct shmid_ds *shp;
+
+ for (id = 0; id <= max_shmid; id++) {
+ while ((shp = shm_segs[id]) == IPC_NOID)
+ sleep_on (&shm_lock);
+ if (shp == IPC_UNUSED)
+ continue;
+ if (key == shp->shm_perm.key)
+ return id;
+ }
+ return -1;
+}
+
+/*
+ * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
+ */
+static int newseg (key_t key, int shmflg, int size)
+{
+ struct shmid_ds *shp;
+ int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int id, i;
+
+ if (size < SHMMIN)
+ return -EINVAL;
+ if (shm_tot + numpages >= SHMALL)
+ return -ENOSPC;
+ for (id = 0; id < SHMMNI; id++)
+ if (shm_segs[id] == IPC_UNUSED) {
+ shm_segs[id] = (struct shmid_ds *) IPC_NOID;
+ goto found;
+ }
+ return -ENOSPC;
+
+found:
+ shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
+ if (!shp) {
+ shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
+ if (shm_lock)
+ wake_up (&shm_lock);
+ return -ENOMEM;
+ }
+
+ shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
+ if (!shp->shm_pages) {
+ shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
+ if (shm_lock)
+ wake_up (&shm_lock);
+ kfree(shp);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
+ shm_tot += numpages;
+ shp->shm_perm.key = key;
+ shp->shm_perm.mode = (shmflg & S_IRWXUGO);
+ shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
+ shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
+ shp->shm_perm.seq = shm_seq;
+ shp->shm_segsz = size;
+ shp->shm_cpid = current->pid;
+ shp->attaches = NULL;
+ shp->shm_lpid = shp->shm_nattch = 0;
+ shp->shm_atime = shp->shm_dtime = 0;
+ shp->shm_ctime = CURRENT_TIME;
+ shp->shm_npages = numpages;
+
+ if (id > max_shmid)
+ max_shmid = id;
+ shm_segs[id] = shp;
+ used_segs++;
+ if (shm_lock)
+ wake_up (&shm_lock);
+ return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
+}
+
+int sys_shmget (key_t key, int size, int shmflg)
+{
+ struct shmid_ds *shp;
+ int id = 0;
+
+ if (size < 0 || size > SHMMAX)
+ return -EINVAL;
+ if (key == IPC_PRIVATE)
+ return newseg(key, shmflg, size);
+ if ((id = findkey (key)) == -1) {
+ if (!(shmflg & IPC_CREAT))
+ return -ENOENT;
+ return newseg(key, shmflg, size);
+ }
+ if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
+ return -EEXIST;
+ shp = shm_segs[id];
+ if (shp->shm_perm.mode & SHM_DEST)
+ return -EIDRM;
+ if (size > shp->shm_segsz)
+ return -EINVAL;
+ if (ipcperms (&shp->shm_perm, shmflg))
+ return -EACCES;
+ return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
+}
+
+/*
+ * Only called after testing nattch and SHM_DEST.
+ * Here pages, pgtable and shmid_ds are freed.
+ */
+static void killseg (int id)
+{
+ struct shmid_ds *shp;
+ int i, numpages;
+ ulong page;
+
+ shp = shm_segs[id];
+ if (shp == IPC_NOID || shp == IPC_UNUSED) {
+ printk ("shm nono: killseg called on unused seg id=%d\n", id);
+ return;
+ }
+ shp->shm_perm.seq++; /* for shmat */
+ shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
+ shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
+ used_segs--;
+ if (id == max_shmid)
+ while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
+ if (!shp->shm_pages) {
+ printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
+ return;
+ }
+ numpages = shp->shm_npages;
+ for (i = 0; i < numpages ; i++) {
+ if (!(page = shp->shm_pages[i]))
+ continue;
+ if (page & PAGE_PRESENT) {
+ free_page (page & PAGE_MASK);
+ shm_rss--;
+ } else {
+ swap_free (page);
+ shm_swp--;
+ }
+ }
+ kfree(shp->shm_pages);
+ shm_tot -= numpages;
+ kfree(shp);
+ return;
+}
+
+int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
+{
+ struct shmid_ds tbuf;
+ struct shmid_ds *shp;
+ struct ipc_perm *ipcp;
+ int id, err;
+
+ if (cmd < 0 || shmid < 0)
+ return -EINVAL;
+ if (cmd == IPC_SET) {
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_READ, buf, sizeof (*buf));
+ if (err)
+ return err;
+ memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ }
+
+ switch (cmd) { /* replace with proc interface ? */
+ case IPC_INFO:
+ {
+ struct shminfo shminfo;
+ if (!buf)
+ return -EFAULT;
+ shminfo.shmmni = SHMMNI;
+ shminfo.shmmax = SHMMAX;
+ shminfo.shmmin = SHMMIN;
+ shminfo.shmall = SHMALL;
+ shminfo.shmseg = SHMSEG;
+ err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
+ if (err)
+ return err;
+ memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
+ return max_shmid;
+ }
+ case SHM_INFO:
+ {
+ struct shm_info shm_info;
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
+ if (err)
+ return err;
+ shm_info.used_ids = used_segs;
+ shm_info.shm_rss = shm_rss;
+ shm_info.shm_tot = shm_tot;
+ shm_info.shm_swp = shm_swp;
+ shm_info.swap_attempts = swap_attempts;
+ shm_info.swap_successes = swap_successes;
+ memcpy_tofs (buf, &shm_info, sizeof(shm_info));
+ return max_shmid;
+ }
+ case SHM_STAT:
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
+ if (err)
+ return err;
+ if (shmid > max_shmid)
+ return -EINVAL;
+ shp = shm_segs[shmid];
+ if (shp == IPC_UNUSED || shp == IPC_NOID)
+ return -EINVAL;
+ if (ipcperms (&shp->shm_perm, S_IRUGO))
+ return -EACCES;
+ id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
+ tbuf.shm_perm = shp->shm_perm;
+ tbuf.shm_segsz = shp->shm_segsz;
+ tbuf.shm_atime = shp->shm_atime;
+ tbuf.shm_dtime = shp->shm_dtime;
+ tbuf.shm_ctime = shp->shm_ctime;
+ tbuf.shm_cpid = shp->shm_cpid;
+ tbuf.shm_lpid = shp->shm_lpid;
+ tbuf.shm_nattch = shp->shm_nattch;
+ memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ return id;
+ }
+
+ shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
+ if (shp == IPC_UNUSED || shp == IPC_NOID)
+ return -EINVAL;
+ if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
+ return -EIDRM;
+ ipcp = &shp->shm_perm;
+
+ switch (cmd) {
+ case SHM_UNLOCK:
+ if (!suser())
+ return -EPERM;
+ if (!(ipcp->mode & SHM_LOCKED))
+ return -EINVAL;
+ ipcp->mode &= ~SHM_LOCKED;
+ break;
+ case SHM_LOCK:
+/* Allow superuser to lock segment in memory */
+/* Should the pages be faulted in here or leave it to user? */
+/* need to determine interaction with current->swappable */
+ if (!suser())
+ return -EPERM;
+ if (ipcp->mode & SHM_LOCKED)
+ return -EINVAL;
+ ipcp->mode |= SHM_LOCKED;
+ break;
+ case IPC_STAT:
+ if (ipcperms (ipcp, S_IRUGO))
+ return -EACCES;
+ if (!buf)
+ return -EFAULT;
+ err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
+ if (err)
+ return err;
+ tbuf.shm_perm = shp->shm_perm;
+ tbuf.shm_segsz = shp->shm_segsz;
+ tbuf.shm_atime = shp->shm_atime;
+ tbuf.shm_dtime = shp->shm_dtime;
+ tbuf.shm_ctime = shp->shm_ctime;
+ tbuf.shm_cpid = shp->shm_cpid;
+ tbuf.shm_lpid = shp->shm_lpid;
+ tbuf.shm_nattch = shp->shm_nattch;
+ memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ break;
+ case IPC_SET:
+ if (suser() || current->euid == shp->shm_perm.uid ||
+ current->euid == shp->shm_perm.cuid) {
+ ipcp->uid = tbuf.shm_perm.uid;
+ ipcp->gid = tbuf.shm_perm.gid;
+ ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
+ | (tbuf.shm_perm.mode & S_IRWXUGO);
+ shp->shm_ctime = CURRENT_TIME;
+ break;
+ }
+ return -EPERM;
+ case IPC_RMID:
+ if (suser() || current->euid == shp->shm_perm.uid ||
+ current->euid == shp->shm_perm.cuid) {
+ shp->shm_perm.mode |= SHM_DEST;
+ if (shp->shm_nattch <= 0)
+ killseg (id);
+ break;
+ }
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * The per process internal structure for managing segments is
+ * `struct vm_area_struct'.
+ * A shmat will add to and shmdt will remove from the list.
+ * shmd->vm_task the attacher
+ * shmd->vm_start virt addr of attach, multiple of SHMLBA
+ * shmd->vm_end multiple of SHMLBA
+ * shmd->vm_next next attach for task
+ * shmd->vm_share next attach for segment
+ * shmd->vm_offset offset into segment
+ * shmd->vm_pte signature for this attach
+ */
+
+static struct vm_operations_struct shm_vm_ops = {
+ shm_open, /* open */
+ shm_close, /* close */
+ NULL, /* nopage (done with swapin) */
+ NULL, /* wppage */
+ NULL, /* share */
+ NULL, /* unmap */
+ NULL, /* swapout (hardcoded right now) */
+ shm_swap_in /* swapin */
+};
+
+/*
+ * check range is unmapped, ensure page tables exist
+ * mark page table entries with shm_sgn.
+ * if remap != 0 the range is remapped.
+ */
+static int shm_map (struct vm_area_struct *shmd, int remap)
+{
+ unsigned long *page_table;
+ unsigned long tmp, shm_sgn;
+ unsigned long page_dir = shmd->vm_task->tss.cr3;
+
+ /* check that the range is unmapped */
+ if (!remap)
+ for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
+ page_table = PAGE_DIR_OFFSET(page_dir,tmp);
+ if (*page_table & PAGE_PRESENT) {
+ page_table = (ulong *) (PAGE_MASK & *page_table);
+ page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
+ if (*page_table) {
+ /* printk("shmat() -> EINVAL because address 0x%lx is already mapped.\n",tmp); */
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* clear old mappings */
+ do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
+
+ /* add new mapping */
+ insert_vm_struct(current, shmd);
+ merge_segments(current->mm->mmap);
+
+ /* check that the range has page_tables */
+ for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
+ page_table = PAGE_DIR_OFFSET(page_dir,tmp);
+ if (*page_table & PAGE_PRESENT) {
+ page_table = (ulong *) (PAGE_MASK & *page_table);
+ page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
+ if (*page_table) {
+ if (*page_table & PAGE_PRESENT) {
+ --current->mm->rss;
+ free_page (*page_table & PAGE_MASK);
+ }
+ else
+ swap_free (*page_table);
+ *page_table = 0;
+ }
+ } else {
+ unsigned long new_pt;
+ if (!(new_pt = get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ *page_table = new_pt | PAGE_TABLE;
+ tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
+ }
+ }
+
+ /* map page range */
+ shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
+ for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
+ shm_sgn += (1 << SHM_IDX_SHIFT)) {
+ page_table = PAGE_DIR_OFFSET(page_dir,tmp);
+ page_table = (ulong *) (PAGE_MASK & *page_table);
+ page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
+ *page_table = shm_sgn;
+ }
+ invalidate();
+ return 0;
+}
+
+/*
+ * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
+ * raddr is needed to return addresses above 2Gig.
+ */
+int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
+{
+ struct shmid_ds *shp;
+ struct vm_area_struct *shmd;
+ int err;
+ unsigned int id;
+ unsigned long addr;
+
+ if (shmid < 0) {
+ /* printk("shmat() -> EINVAL because shmid = %d < 0\n",shmid); */
+ return -EINVAL;
+ }
+
+ if (raddr) {
+ err = verify_area(VERIFY_WRITE, raddr, sizeof(ulong));
+ if (err)
+ return err;
+ }
+
+ shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
+ if (shp == IPC_UNUSED || shp == IPC_NOID) {
+ /* printk("shmat() -> EINVAL because shmid = %d is invalid\n",shmid); */
+ return -EINVAL;
+ }
+
+ if (!(addr = (ulong) shmaddr)) {
+ if (shmflg & SHM_REMAP)
+ return -EINVAL;
+ if (!(addr = get_unmapped_area(shp->shm_segsz)))
+ return -ENOMEM;
+ } else if (addr & (SHMLBA-1)) {
+ if (shmflg & SHM_RND)
+ addr &= ~(SHMLBA-1); /* round down */
+ else
+ return -EINVAL;
+ }
+ if ((addr > current->mm->start_stack - 16384 - PAGE_SIZE*shp->shm_npages)) {
+ /* printk("shmat() -> EINVAL because segment intersects stack\n"); */
+ return -EINVAL;
+ }
+ if (!(shmflg & SHM_REMAP))
+ for (shmd = current->mm->mmap; shmd; shmd = shmd->vm_next)
+ if (!(addr >= shmd->vm_end || addr + shp->shm_segsz <= shmd->vm_start)) {
+ /* printk("shmat() -> EINVAL because the interval [0x%lx,0x%lx) intersects an already mapped interval [0x%lx,0x%lx).\n",
+ addr, addr + shp->shm_segsz, shmd->vm_start, shmd->vm_end); */
+ return -EINVAL;
+ }
+
+ if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
+ return -EACCES;
+ if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
+ return -EIDRM;
+
+ shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
+ if (!shmd)
+ return -ENOMEM;
+ if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
+ kfree(shmd);
+ return -EIDRM;
+ }
+
+ shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
+ (shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
+ shmd->vm_start = addr;
+ shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
+ shmd->vm_task = current;
+ shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
+ shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
+ | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
+ | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
+ shmd->vm_share = NULL;
+ shmd->vm_inode = NULL;
+ shmd->vm_offset = 0;
+ shmd->vm_ops = &shm_vm_ops;
+
+ shp->shm_nattch++; /* prevent destruction */
+ if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
+ if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
+ killseg(id);
+ kfree(shmd);
+ return err;
+ }
+
+ shmd->vm_share = shp->attaches;
+ shp->attaches = shmd;
+ shp->shm_lpid = current->pid;
+ shp->shm_atime = CURRENT_TIME;
+
+ if (!raddr)
+ return addr;
+ put_fs_long (addr, raddr);
+ return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open (struct vm_area_struct *shmd)
+{
+ unsigned int id;
+ struct shmid_ds *shp;
+
+ id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
+ shp = shm_segs[id];
+ if (shp == IPC_UNUSED) {
+ printk("shm_open: unused id=%d PANIC\n", id);
+ return;
+ }
+ shmd->vm_share = shp->attaches;
+ shp->attaches = shmd;
+ shp->shm_nattch++;
+ shp->shm_atime = CURRENT_TIME;
+ shp->shm_lpid = current->pid;
+}
+
+/*
+ * remove the attach descriptor shmd.
+ * free memory for segment if it is marked destroyed.
+ * The descriptor has already been removed from the current->mm->mmap list
+ * and will later be kfree()d.
+ */
+static void shm_close (struct vm_area_struct *shmd)
+{
+ struct vm_area_struct **shmdp;
+ struct shmid_ds *shp;
+ int id;
+
+ unmap_page_range (shmd->vm_start, shmd->vm_end - shmd->vm_start);
+
+ /* remove from the list of attaches of the shm segment */
+ id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
+ shp = shm_segs[id];
+ for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->vm_share)
+ if (*shmdp == shmd) {
+ *shmdp = shmd->vm_share;
+ goto found;
+ }
+ printk("shm_close: shm segment (id=%d) attach list inconsistent\n",id);
+ printk("shm_close: %d %08lx-%08lx %c%c%c%c %08lx %08lx\n",
+ shmd->vm_task->pid, shmd->vm_start, shmd->vm_end,
+ shmd->vm_flags & VM_READ ? 'r' : '-',
+ shmd->vm_flags & VM_WRITE ? 'w' : '-',
+ shmd->vm_flags & VM_EXEC ? 'x' : '-',
+ shmd->vm_flags & VM_SHARED ? 's' : 'p',
+ shmd->vm_offset, shmd->vm_pte);
+
+ found:
+ shp->shm_lpid = current->pid;
+ shp->shm_dtime = CURRENT_TIME;
+ if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
+ killseg (id);
+}
+
+/*
+ * detach and kill segment if marked destroyed.
+ * The work is done in shm_close.
+ */
+int sys_shmdt (char *shmaddr)
+{
+ struct vm_area_struct *shmd, *shmdnext;
+
+ for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
+ shmdnext = shmd->vm_next;
+ if (shmd->vm_ops == &shm_vm_ops
+ && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
+ do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
+ }
+ return 0;
+}
+
+/*
+ * page not present ... go through shm_pages
+ */
+static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code)
+{
+ unsigned long page;
+ struct shmid_ds *shp;
+ unsigned int id, idx;
+
+ id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
+ if (id > max_shmid) {
+ printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
+ return BAD_PAGE | PAGE_SHARED;
+ }
+ shp = shm_segs[id];
+ if (shp == IPC_UNUSED || shp == IPC_NOID) {
+ printk ("shm_no_page: id=%d invalid. Race.\n", id);
+ return BAD_PAGE | PAGE_SHARED;
+ }
+ idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
+ if (idx >= shp->shm_npages) {
+ printk ("shm_no_page : too large page index. id=%d\n", id);
+ return BAD_PAGE | PAGE_SHARED;
+ }
+
+ if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
+ if(!(page = get_free_page(GFP_KERNEL))) {
+ oom(current);
+ return BAD_PAGE | PAGE_SHARED;
+ }
+ if (shp->shm_pages[idx] & PAGE_PRESENT) {
+ free_page (page);
+ goto done;
+ }
+ if (shp->shm_pages[idx]) {
+ read_swap_page (shp->shm_pages[idx], (char *) page);
+ if (shp->shm_pages[idx] & PAGE_PRESENT) {
+ free_page (page);
+ goto done;
+ }
+ swap_free (shp->shm_pages[idx]);
+ shm_swp--;
+ }
+ shm_rss++;
+ shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
+ } else
+ --current->mm->maj_flt; /* was incremented in do_no_page */
+
+done:
+ current->mm->min_flt++;
+ page = shp->shm_pages[idx];
+ if (code & SHM_READ_ONLY) /* write-protect */
+ page &= ~PAGE_RW;
+ mem_map[MAP_NR(page)]++;
+ return page;
+}
+
+/*
+ * Goes through counter = (shm_rss << prio) present shm pages.
+ */
+static unsigned long swap_id = 0; /* currently being swapped */
+static unsigned long swap_idx = 0; /* next to swap */
+
+int shm_swap (int prio)
+{
+ unsigned long page;
+ struct shmid_ds *shp;
+ struct vm_area_struct *shmd;
+ unsigned int swap_nr;
+ unsigned long id, idx, invalid = 0;
+ int counter;
+
+ counter = shm_rss >> prio;
+ if (!counter || !(swap_nr = get_swap_page()))
+ return 0;
+
+ check_id:
+ shp = shm_segs[swap_id];
+ if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
+ swap_idx = 0;
+ if (++swap_id > max_shmid)
+ swap_id = 0;
+ goto check_id;
+ }
+ id = swap_id;
+
+ check_table:
+ idx = swap_idx++;
+ if (idx >= shp->shm_npages) {
+ swap_idx = 0;
+ if (++swap_id > max_shmid)
+ swap_id = 0;
+ goto check_id;
+ }
+
+ page = shp->shm_pages[idx];
+ if (!(page & PAGE_PRESENT))
+ goto check_table;
+ swap_attempts++;
+
+ if (--counter < 0) { /* failed */
+ if (invalid)
+ invalidate();
+ swap_free (swap_nr);
+ return 0;
+ }
+ for (shmd = shp->attaches; shmd; shmd = shmd->vm_share) {
+ unsigned long tmp, *pte;
+ if ((shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
+ printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n", id, shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK);
+ continue;
+ }
+ tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
+ if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
+ continue;
+ pte = PAGE_DIR_OFFSET(shmd->vm_task->tss.cr3,tmp);
+ if (!(*pte & PAGE_PRESENT)) {
+ printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
+ id, shmd->vm_start, idx);
+ *pte = 0;
+ continue;
+ }
+ pte = (ulong *) (PAGE_MASK & *pte);
+ pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
+ tmp = *pte;
+ if (!(tmp & PAGE_PRESENT))
+ continue;
+ if (tmp & PAGE_ACCESSED) {
+ *pte &= ~PAGE_ACCESSED;
+ continue;
+ }
+ tmp = shmd->vm_pte | idx << SHM_IDX_SHIFT;
+ *pte = tmp;
+ mem_map[MAP_NR(page)]--;
+ shmd->vm_task->mm->rss--;
+ invalid++;
+ }
+
+ if (mem_map[MAP_NR(page)] != 1)
+ goto check_table;
+ page &= PAGE_MASK;
+ shp->shm_pages[idx] = swap_nr;
+ if (invalid)
+ invalidate();
+ write_swap_page (swap_nr, (char *) page);
+ free_page (page);
+ swap_successes++;
+ shm_swp++;
+ shm_rss--;
+ return 1;
+}
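
Attaches are ordinary struct vm_area_struct entries whose page-table slots hold a shm signature (segment id plus page index) rather than a real pte; shm_swap_in() materializes pages on first fault, and IPC_RMID only marks the segment SHM_DEST, deferring killseg() until the last detach drops shm_nattch to zero. A minimal user-space sketch of the attach/detach cycle (not part of this commit):

/*
 * Hypothetical demo: create a one-page private segment, let the
 * kernel choose the attach address, touch it, detach, remove.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);	/* -> newseg() */
	char *p;

	if (id < 0)
		return 1;
	p = shmat(id, NULL, 0);		/* sys_shmat() picks an unmapped range */
	if (p == (char *) -1)
		return 1;
	strcpy(p, "shared");		/* first touch faults in via shm_swap_in() */
	printf("%s at %p\n", p, (void *) p);
	shmdt(p);			/* sys_shmdt() -> shm_close() */
	shmctl(id, IPC_RMID, NULL);	/* SHM_DEST; killseg() at last detach */
	return 0;
}
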
diff --git a/ipc/util.c b/ipc/util.c
new file mode 100644
index 000000000..fb0e6970d
--- /dev/null
+++ b/ipc/util.c
@@ -0,0 +1,150 @@
+/*
+ * linux/ipc/util.c
+ * Copyright (C) 1992 Krishna Balasubramanian
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <asm/segment.h>
+#include <linux/sched.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+
+void ipc_init (void);
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr);
+
+#ifdef CONFIG_SYSVIPC
+
+int ipcperms (struct ipc_perm *ipcp, short flag);
+extern void sem_init (void), msg_init (void), shm_init (void);
+extern int sys_semget (key_t key, int nsems, int semflg);
+extern int sys_semop (int semid, struct sembuf *sops, unsigned nsops);
+extern int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+extern int sys_msgget (key_t key, int msgflg);
+extern int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg);
+extern int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
+ int msgflg);
+extern int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf);
+extern int sys_shmget (key_t key, int size, int flag);
+extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *addr);
+extern int sys_shmdt (char *shmaddr);
+extern int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf);
+
+void ipc_init (void)
+{
+ sem_init();
+ msg_init();
+ shm_init();
+ return;
+}
+
+/*
+ * Check user, group, other permissions for access
+ * to ipc resources. return 0 if allowed
+ */
+int ipcperms (struct ipc_perm *ipcp, short flag)
+{ /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
+ int requested_mode, granted_mode;
+
+ if (suser())
+ return 0;
+ requested_mode = (flag >> 6) | (flag >> 3) | flag;
+ granted_mode = ipcp->mode;
+ if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
+ granted_mode >>= 6;
+ else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
+ granted_mode >>= 3;
+ /* is there some bit set in requested_mode but not in granted_mode? */
+ if (requested_mode & ~granted_mode & 0007)
+ return -1;
+ return 0;
+}
+
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr)
+{
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ int err;
+ if (!ptr)
+ return -EINVAL;
+ if ((err = verify_area (VERIFY_READ, ptr, sizeof(long))))
+ return err;
+ fourth.__pad = (void *) get_fs_long(ptr);
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV: {
+ struct ipc_kludge tmp;
+ int err;
+ if (!ptr)
+ return -EINVAL;
+ if ((err = verify_area (VERIFY_READ, ptr, sizeof(tmp))))
+ return err;
+ memcpy_fromfs (&tmp,(struct ipc_kludge *) ptr,
+ sizeof (tmp));
+ return sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
+ third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ return sys_shmat (first, (char *) ptr, second,
+ (ulong *) third);
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+#else /* not CONFIG_SYSVIPC */
+
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr)
+{
+ return -ENOSYS;
+}
+
+void sem_exit (void)
+{
+ return;
+}
+
+int shm_swap (int prio)
+{
+ return 0;
+}
+
+void shm_no_page (unsigned long *ptent)
+{
+ return;
+}
+
+#endif /* CONFIG_SYSVIPC */
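
The bit arithmetic in ipcperms() folds the requested owner/group/other rwx bits into the low three bits, shifts the granted mode so the caller's class lands in the same place, and denies access iff some requested bit is absent from the grant. A user-space re-derivation of that check (not kernel code; check() and its arguments are hypothetical):

#include <stdio.h>

static int check(short flag, int mode, int shift)
{
	/* fold the S_I...USR/GRP/OTH request bits into the low three bits */
	int requested = ((flag >> 6) | (flag >> 3) | flag) & 0007;
	/* shift = 6 for owner, 3 for group, 0 for other */
	int granted = (mode >> shift) & 0007;

	return (requested & ~granted) ? -1 : 0;
}

int main(void)
{
	printf("%d\n", check(0200, 0644, 6));	/* owner writes a 0644 queue: 0 (allowed) */
	printf("%d\n", check(0222, 0644, 0));	/* other writes a 0644 queue: -1 (EACCES) */
	return 0;
}
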