author     Ralf Baechle <ralf@linux-mips.org>   1997-01-07 02:33:00 +0000
committer  <ralf@linux-mips.org>                1997-01-07 02:33:00 +0000
commit     beb116954b9b7f3bb56412b2494b562f02b864b1 (patch)
tree       120e997879884e1b9d93b265221b939d2ef1ade1 /ipc
parent     908d4681a1dc3792ecafbe64265783a86c4cccb6 (diff)

Import of Linux/MIPS 2.1.14
Diffstat (limited to 'ipc')

 -rw-r--r--  ipc/Makefile |  32
 -rw-r--r--  ipc/msg.c    | 437
 -rw-r--r--  ipc/sem.c    |  29
 -rw-r--r--  ipc/shm.c    | 161
 -rw-r--r--  ipc/util.c   | 175

 5 files changed, 559 insertions(+), 275 deletions(-)
diff --git a/ipc/Makefile b/ipc/Makefile
index 936d1cf50..424052017 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -7,33 +7,15 @@
#
# Note 2! The CFLAGS definition is now in the main makefile...
-.c.o:
- $(CC) $(CFLAGS) -c $<
-.s.o:
- $(AS) -o $*.o $<
-.c.s:
- $(CC) $(CFLAGS) -S $<
+O_TARGET := ipc.o
+O_OBJS := util.o
-OBJS = util.o
-SRCS = util.c
+ifdef CONFIG_KERNELD
+CONFIG_SYSVIPC=1
+endif
ifdef CONFIG_SYSVIPC
-OBJS := $(OBJS) msg.o sem.o shm.o
-SRCS := $(SRCS) msg.c sem.c shm.c
+O_OBJS += msg.o sem.o shm.o
endif
-ipc.o: $(OBJS)
- $(LD) -r -o ipc.o $(OBJS)
-
-dep:
- $(CPP) -M $(SRCS) > .depend
-
-modules:
-dummy:
-
-#
-# include a dependency file if one exists
-#
-ifeq (.depend,$(wildcard .depend))
-include .depend
-endif
+include $(TOPDIR)/Rules.make
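
For readability, the added lines above assemble into the following new-style subdirectory Makefile (a reconstruction of the post-patch file from the hunk; the unchanged header comments are omitted). The per-directory suffix rules, the explicit ipc.o link step, and the local dep/.depend handling are all replaced by variables consumed by the shared Rules.make:

    # ipc/Makefile after this patch (reconstructed from the hunk above)

    O_TARGET := ipc.o              # everything in this directory links into ipc.o
    O_OBJS   := util.o             # always built

    ifdef CONFIG_KERNELD           # kerneld rides on the SysV message queues
    CONFIG_SYSVIPC=1
    endif

    ifdef CONFIG_SYSVIPC
    O_OBJS += msg.o sem.o shm.o
    endif

    include $(TOPDIR)/Rules.make   # shared rules replace the local .c.o/.s.o/dep targets
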
diff --git a/ipc/msg.c b/ipc/msg.c
index 04aa12328..0ed1eb6a6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -1,15 +1,22 @@
/*
* linux/ipc/msg.c
* Copyright (C) 1992 Krishna Balasubramanian
+ *
+ * Kerneld extensions by Bjorn Ekwall <bj0rn@blox.se> in May 1995, and May 1996
+ *
+ * See <linux/kerneld.h> for the (optional) new kerneld protocol
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/msg.h>
#include <linux/stat.h>
#include <linux/malloc.h>
+#include <linux/kerneld.h>
+#include <linux/interrupt.h>
-#include <asm/segment.h>
+#include <asm/uaccess.h>
extern int ipcperms (struct ipc_perm *ipcp, short msgflg);
@@ -24,6 +31,11 @@ static unsigned short msg_seq = 0;
static int used_queues = 0;
static int max_msqid = 0;
static struct wait_queue *msg_lock = NULL;
+static int kerneld_msqid = -1;
+
+#define MAX_KERNELDS 20
+static int kerneld_arr[MAX_KERNELDS];
+static int n_kernelds = 0;
void msg_init (void)
{
@@ -36,23 +48,66 @@ void msg_init (void)
return;
}
-int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg)
+/*
+ * If the send queue is full, try to free any old messages.
+ * These are most probably unwanted, since no one has picked them up...
+ */
+#define MSG_FLUSH_TIME 10 /* seconds */
+static void flush_msg(struct msqid_ds *msq)
+{
+ struct msg *nmsg;
+ unsigned long flags;
+ int flushed = 0;
+
+ save_flags(flags);
+ cli();
+
+ /* messages were put on the queue in time order */
+ while ( (nmsg = msq->msg_first) &&
+ ((CURRENT_TIME - nmsg->msg_stime) > MSG_FLUSH_TIME)) {
+ msgbytes -= nmsg->msg_ts;
+ msghdrs--;
+ msq->msg_cbytes -= nmsg->msg_ts;
+ msq->msg_qnum--;
+ msq->msg_first = nmsg->msg_next;
+ ++flushed;
+ kfree(nmsg);
+ }
+
+ if (msq->msg_qnum == 0)
+ msq->msg_first = msq->msg_last = NULL;
+ restore_flags(flags);
+ if (flushed)
+ printk(KERN_WARNING "flushed %d old SYSVIPC messages", flushed);
+}
+
+static int real_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
int id, err;
struct msqid_ds *msq;
struct ipc_perm *ipcp;
struct msg *msgh;
long mtype;
+ unsigned long flags;
- if (msgsz > MSGMAX || msgsz < 0 || msqid < 0)
+ if (msgsz > MSGMAX || (long) msgsz < 0 || msqid < 0)
return -EINVAL;
if (!msgp)
return -EFAULT;
- err = verify_area (VERIFY_READ, msgp->mtext, msgsz);
- if (err)
- return err;
- if ((mtype = get_fs_long (&msgp->mtype)) < 1)
- return -EINVAL;
+ /*
+ * Calls from kernel level (IPC_KERNELD set)
+ * have the message somewhere in kernel space already!
+ */
+ if ((msgflg & IPC_KERNELD))
+ mtype = msgp->mtype;
+ else {
+ err = verify_area (VERIFY_READ, msgp->mtext, msgsz);
+ if (err)
+ return err;
+ get_user(mtype, &msgp->mtype);
+ if (mtype < 1)
+ return -EINVAL;
+ }
id = (unsigned int) msqid % MSGMNI;
msq = msgque [id];
if (msq == IPC_UNUSED || msq == IPC_NOID)
@@ -62,25 +117,56 @@ int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg)
slept:
if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
return -EIDRM;
- if (ipcperms(ipcp, S_IWUGO))
- return -EACCES;
+ /*
+ * Non-root kernel level processes may send to kerneld!
+ * i.e. no permission check if called from the kernel
+ * otoh we don't want user level non-root snoopers...
+ */
+ if ((msgflg & IPC_KERNELD) == 0)
+ if (ipcperms(ipcp, S_IWUGO))
+ return -EACCES;
if (msgsz + msq->msg_cbytes > msq->msg_qbytes) {
- /* no space in queue */
- if (msgflg & IPC_NOWAIT)
- return -EAGAIN;
- if (current->signal & ~current->blocked)
- return -EINTR;
- interruptible_sleep_on (&msq->wwait);
- goto slept;
+ if ((kerneld_msqid != -1) && (kerneld_msqid == msqid))
+ flush_msg(msq); /* flush the kerneld channel only */
+ if (msgsz + msq->msg_cbytes > msq->msg_qbytes) {
+ /* still no space in queue */
+ if (msgflg & IPC_NOWAIT)
+ return -EAGAIN;
+ if (current->signal & ~current->blocked)
+ return -EINTR;
+ if (intr_count) {
+ /* Very unlikely, but better safe than sorry */
+ printk(KERN_WARNING "Ouch, kerneld:msgsnd buffers full!\n");
+ return -EINTR;
+ }
+ interruptible_sleep_on (&msq->wwait);
+ goto slept;
+ }
}
/* allocate message header and text space*/
- msgh = (struct msg *) kmalloc (sizeof(*msgh) + msgsz, GFP_USER);
+ msgh = (struct msg *) kmalloc (sizeof(*msgh) + msgsz, GFP_ATOMIC);
if (!msgh)
return -ENOMEM;
msgh->msg_spot = (char *) (msgh + 1);
- memcpy_fromfs (msgh->msg_spot, msgp->mtext, msgsz);
+
+ /*
+ * Calls from kernel level (IPC_KERNELD set)
+ * have the message somewhere in kernel space already!
+ */
+ if (msgflg & IPC_KERNELD) {
+ struct kerneld_msg *kdmp = (struct kerneld_msg *)msgp;
+
+ /*
+ * Note that the kernel supplies a pointer
+ * but the user-level kerneld uses a char array...
+ */
+ memcpy(msgh->msg_spot, (char *)(&(kdmp->id)), KDHDR);
+ memcpy(msgh->msg_spot + KDHDR, kdmp->text, msgsz - KDHDR);
+ }
+ else
+ copy_from_user (msgh->msg_spot, msgp->mtext, msgsz);
if (msgque[id] == IPC_UNUSED || msgque[id] == IPC_NOID
|| msq->msg_perm.seq != (unsigned int) msqid / MSGMNI) {
@@ -89,41 +175,85 @@ int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg)
}
msgh->msg_next = NULL;
+ msgh->msg_ts = msgsz;
+ msgh->msg_type = mtype;
+ msgh->msg_stime = CURRENT_TIME;
+
+ save_flags(flags);
+ cli();
if (!msq->msg_first)
msq->msg_first = msq->msg_last = msgh;
else {
msq->msg_last->msg_next = msgh;
msq->msg_last = msgh;
}
- msgh->msg_ts = msgsz;
- msgh->msg_type = mtype;
msq->msg_cbytes += msgsz;
msgbytes += msgsz;
msghdrs++;
msq->msg_qnum++;
msq->msg_lspid = current->pid;
msq->msg_stime = CURRENT_TIME;
- if (msq->rwait)
- wake_up (&msq->rwait);
- return msgsz;
+ restore_flags(flags);
+ wake_up (&msq->rwait);
+ return 0;
+}
+
+/*
+ * Take care of missing kerneld, especially in case of multiple daemons
+ */
+#define KERNELD_TIMEOUT 1 * (HZ)
+#define DROP_TIMER del_timer(&kd_timer)
+/*#define DROP_TIMER if ((msgflg & IPC_KERNELD) && kd_timer.next && kd_timer.prev) del_timer(&kd_timer)*/
+
+static void kd_timeout(unsigned long msgid)
+{
+ struct msqid_ds *msq;
+ struct msg *tmsg;
+ unsigned long flags;
+
+ msq = msgque [ (unsigned int) kerneld_msqid % MSGMNI ];
+ if (msq == IPC_NOID || msq == IPC_UNUSED)
+ return;
+
+ save_flags(flags);
+ cli();
+ for (tmsg = msq->msg_first; tmsg; tmsg = tmsg->msg_next)
+ if (*(long *)(tmsg->msg_spot) == msgid)
+ break;
+ restore_flags(flags);
+ if (tmsg) { /* still there! */
+ struct kerneld_msg kmsp = { msgid, NULL_KDHDR, "" };
+
+ printk(KERN_ALERT "Ouch, no kerneld for message %ld\n", msgid);
+ kmsp.id = -ENODEV;
+ real_msgsnd(kerneld_msqid, (struct msgbuf *)&kmsp, KDHDR,
+ S_IRUSR | S_IWUSR | IPC_KERNELD | MSG_NOERROR);
+ }
}
-int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
- int msgflg)
+static int real_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz, long msgtyp, int msgflg)
{
+ struct timer_list kd_timer = { NULL, NULL, 0, 0, 0};
struct msqid_ds *msq;
struct ipc_perm *ipcp;
struct msg *tmsg, *leastp = NULL;
struct msg *nmsg = NULL;
int id, err;
+ unsigned long flags;
- if (msqid < 0 || msgsz < 0)
+ if (msqid < 0 || (long) msgsz < 0)
return -EINVAL;
if (!msgp || !msgp->mtext)
return -EFAULT;
- err = verify_area (VERIFY_WRITE, msgp->mtext, msgsz);
- if (err)
- return err;
+ /*
+ * Calls from kernel level (IPC_KERNELD set)
+ * wants the message put in kernel space!
+ */
+ if ((msgflg & IPC_KERNELD) == 0) {
+ err = verify_area (VERIFY_WRITE, msgp->mtext, msgsz);
+ if (err)
+ return err;
+ }
id = (unsigned int) msqid % MSGMNI;
msq = msgque [id];
@@ -131,6 +261,16 @@ int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
return -EINVAL;
ipcp = &msq->msg_perm;
+ /*
+ * Start timer for missing kerneld
+ */
+ if (msgflg & IPC_KERNELD) {
+ kd_timer.data = (unsigned long)msgtyp;
+ kd_timer.expires = jiffies + KERNELD_TIMEOUT;
+ kd_timer.function = kd_timeout;
+ add_timer(&kd_timer);
+ }
+
/*
* find message of correct type.
* msgtyp = 0 => get first.
@@ -138,10 +278,24 @@ int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
* msgtyp < 0 => get message with least type must be < abs(msgtype).
*/
while (!nmsg) {
- if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
+ if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI) {
+ DROP_TIMER;
return -EIDRM;
- if (ipcperms (ipcp, S_IRUGO))
- return -EACCES;
+ }
+ if ((msgflg & IPC_KERNELD) == 0) {
+ /*
+ * All kernel level processes may receive from kerneld!
+ * i.e. no permission check if called from the kernel
+ * otoh we don't want user level non-root snoopers...
+ */
+ if (ipcperms (ipcp, S_IRUGO)) {
+ DROP_TIMER; /* Not needed, but doesn't hurt */
+ return -EACCES;
+ }
+ }
+
+ save_flags(flags);
+ cli();
if (msgtyp == 0)
nmsg = msq->msg_first;
else if (msgtyp > 0) {
@@ -166,11 +320,16 @@ int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
if (leastp && leastp->msg_type <= - msgtyp)
nmsg = leastp;
}
+ restore_flags(flags);
if (nmsg) { /* done finding a message */
- if ((msgsz < nmsg->msg_ts) && !(msgflg & MSG_NOERROR))
+ DROP_TIMER;
+ if ((msgsz < nmsg->msg_ts) && !(msgflg & MSG_NOERROR)) {
return -E2BIG;
+ }
msgsz = (msgsz > nmsg->msg_ts)? nmsg->msg_ts : msgsz;
+ save_flags(flags);
+ cli();
if (nmsg == msq->msg_first)
msq->msg_first = nmsg->msg_next;
else {
@@ -190,23 +349,60 @@ int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
msgbytes -= nmsg->msg_ts;
msghdrs--;
msq->msg_cbytes -= nmsg->msg_ts;
- if (msq->wwait)
- wake_up (&msq->wwait);
- put_fs_long (nmsg->msg_type, &msgp->mtype);
- memcpy_tofs (msgp->mtext, nmsg->msg_spot, msgsz);
+ restore_flags(flags);
+ wake_up (&msq->wwait);
+ /*
+ * Calls from kernel level (IPC_KERNELD set)
+ * wants the message copied to kernel space!
+ */
+ if (msgflg & IPC_KERNELD) {
+ struct kerneld_msg *kdmp = (struct kerneld_msg *) msgp;
+
+ memcpy((char *)(&(kdmp->id)),
+ nmsg->msg_spot, KDHDR);
+ /*
+ * Note that kdmp->text is a pointer
+ * when called from kernel space!
+ */
+ if ((msgsz > KDHDR) && kdmp->text)
+ memcpy(kdmp->text,
+ nmsg->msg_spot + KDHDR,
+ msgsz - KDHDR);
+ }
+ else {
+ put_user (nmsg->msg_type, &msgp->mtype);
+ copy_to_user (msgp->mtext, nmsg->msg_spot, msgsz);
+ }
kfree(nmsg);
return msgsz;
} else { /* did not find a message */
- if (msgflg & IPC_NOWAIT)
+ if (msgflg & IPC_NOWAIT) {
+ DROP_TIMER;
return -ENOMSG;
- if (current->signal & ~current->blocked)
+ }
+ if (current->signal & ~current->blocked) {
+ DROP_TIMER;
return -EINTR;
+ }
interruptible_sleep_on (&msq->rwait);
}
} /* end while */
+ DROP_TIMER;
return -1;
}
+asmlinkage int sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
+{
+ /* IPC_KERNELD is used as a marker for kernel level calls */
+ return real_msgsnd(msqid, msgp, msgsz, msgflg & ~IPC_KERNELD);
+}
+
+asmlinkage int sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
+ long msgtyp, int msgflg)
+{
+ /* IPC_KERNELD is used as a marker for kernel level calls */
+ return real_msgrcv (msqid, msgp, msgsz, msgtyp, msgflg & ~IPC_KERNELD);
+}
static int findkey (key_t key)
{
@@ -241,8 +437,7 @@ found:
msq = (struct msqid_ds *) kmalloc (sizeof (*msq), GFP_KERNEL);
if (!msq) {
msgque[id] = (struct msqid_ds *) IPC_UNUSED;
- if (msg_lock)
- wake_up (&msg_lock);
+ wake_up (&msg_lock);
return -ENOMEM;
}
ipcp = &msq->msg_perm;
@@ -262,16 +457,42 @@ found:
max_msqid = id;
msgque[id] = msq;
used_queues++;
- if (msg_lock)
- wake_up (&msg_lock);
+ wake_up (&msg_lock);
return (unsigned int) msq->msg_perm.seq * MSGMNI + id;
}
-int sys_msgget (key_t key, int msgflg)
+asmlinkage int sys_msgget (key_t key, int msgflg)
{
int id;
struct msqid_ds *msq;
+ /*
+ * If the IPC_KERNELD flag is set, the key is forced to IPC_PRIVATE,
+ * and a designated kerneld message queue is created/referred to
+ */
+ if ((msgflg & IPC_KERNELD)) {
+ int i;
+ if (!suser())
+ return -EPERM;
+#ifdef NEW_KERNELD_PROTOCOL
+ if ((msgflg & IPC_KERNELD) == OLDIPC_KERNELD) {
+ printk(KERN_ALERT "Please recompile your kerneld daemons!\n");
+ return -EPERM;
+ }
+#endif
+ if ((kerneld_msqid == -1) && (kerneld_msqid =
+ newque(IPC_PRIVATE, msgflg & S_IRWXU)) < 0)
+ return -ENOSPC;
+ for (i = 0; i < MAX_KERNELDS; ++i) {
+ if (kerneld_arr[i] == 0) {
+ kerneld_arr[i] = current->pid;
+ ++n_kernelds;
+ return kerneld_msqid;
+ }
+ }
+ return -ENOSPC;
+ }
+ /* else it is a "normal" request */
if (key == IPC_PRIVATE)
return newque(key, msgflg);
if ((id = findkey (key)) == -1) { /* key not used */
@@ -301,11 +522,9 @@ static void freeque (int id)
while (max_msqid && (msgque[--max_msqid] == IPC_UNUSED));
msgque[id] = (struct msqid_ds *) IPC_UNUSED;
used_queues--;
- while (msq->rwait || msq->wwait) {
- if (msq->rwait)
- wake_up (&msq->rwait);
- if (msq->wwait)
- wake_up (&msq->wwait);
+ while (waitqueue_active(&msq->rwait) || waitqueue_active(&msq->wwait)) {
+ wake_up (&msq->rwait);
+ wake_up (&msq->wwait);
schedule();
}
for (msgp = msq->msg_first; msgp; msgp = msgh ) {
@@ -316,7 +535,7 @@ static void freeque (int id)
kfree(msq);
}
-int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
+asmlinkage int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
int id, err;
struct msqid_ds *msq;
@@ -348,7 +567,7 @@ int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
err = verify_area (VERIFY_WRITE, buf, sizeof (struct msginfo));
if (err)
return err;
- memcpy_tofs (buf, &msginfo, sizeof(struct msginfo));
+ copy_to_user (buf, &msginfo, sizeof(struct msginfo));
return max_msqid;
}
case MSG_STAT:
@@ -374,7 +593,7 @@ int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
tbuf.msg_qbytes = msq->msg_qbytes;
tbuf.msg_lspid = msq->msg_lspid;
tbuf.msg_lrpid = msq->msg_lrpid;
- memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ copy_to_user (buf, &tbuf, sizeof(*buf));
return id;
case IPC_SET:
if (!buf)
@@ -382,7 +601,7 @@ int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
err = verify_area (VERIFY_READ, buf, sizeof (*buf));
if (err)
return err;
- memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ copy_from_user (&tbuf, buf, sizeof (*buf));
break;
case IPC_STAT:
if (!buf)
@@ -414,7 +633,7 @@ int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
tbuf.msg_qbytes = msq->msg_qbytes;
tbuf.msg_lspid = msq->msg_lspid;
tbuf.msg_lrpid = msq->msg_lrpid;
- memcpy_tofs (buf, &tbuf, sizeof (*buf));
+ copy_to_user (buf, &tbuf, sizeof (*buf));
return 0;
case IPC_SET:
if (!suser() && current->euid != ipcp->cuid &&
@@ -433,9 +652,117 @@ int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
if (!suser() && current->euid != ipcp->cuid &&
current->euid != ipcp->uid)
return -EPERM;
+ /*
+ * There is only one kerneld message queue,
+ * mark it as non-existent
+ */
+ if ((kerneld_msqid >= 0) && (msqid == kerneld_msqid))
+ kerneld_msqid = -1;
freeque (id);
return 0;
default:
return -EINVAL;
}
}
+
+/*
+ * We do perhaps need a "flush" for waiting processes,
+ * so that if they are terminated, a call from do_exit
+ * will minimize the possibility of orphaned received
+ * messages in the queue. For now we just make sure
+ * that the queue is shut down whenever all kernelds have died.
+ */
+void kerneld_exit(void)
+{
+ int i;
+
+ if (kerneld_msqid == -1)
+ return;
+ for (i = 0; i < MAX_KERNELDS; ++i) {
+ if (kerneld_arr[i] == current->pid) {
+ kerneld_arr[i] = 0;
+ --n_kernelds;
+ if (n_kernelds == 0)
+ sys_msgctl(kerneld_msqid, IPC_RMID, NULL);
+ break;
+ }
+ }
+}
+
+/*
+ * Kerneld internal message format/syntax:
+ *
+ * The message type from the kernel to kerneld is used to specify _what_
+ * function we want kerneld to perform.
+ *
+ * The "normal" message area is divided into a header, followed by a char array.
+ * The header is used to hold the sequence number of the request, which will
+ * be used as the return message type from kerneld back to the kernel.
+ * In the return message, the header will be used to store the exit status
+ * of the kerneld "job", or task.
+ * The character array is used to pass parameters to kerneld and (optional)
+ * return information from kerneld back to the kernel.
+ * It is the responsibility of kerneld and the kernel level caller
+ * to set usable sizes on the parameter/return value array, since
+ * that information is _not_ included in the message format
+ */
+
+/*
+ * The basic kernel level entry point to kerneld.
+ * msgtype should correspond to a task type for (a) kerneld
+ * ret_size is the size of the (optional) return _value,
+ * OR-ed with KERNELD_WAIT if we want an answer
+ * msgsize is the size (in bytes) of the message, not including
+ * the header that is always sent first in a kerneld message
+ * text is the parameter for the kerneld specific task
+ * ret_val is NULL or the kernel address where an expected answer
+ * from kerneld should be placed.
+ *
+ * See <linux/kerneld.h> for usage (inline convenience functions)
+ *
+ */
+int kerneld_send(int msgtype, int ret_size, int msgsz,
+ const char *text, const char *ret_val)
+{
+ int status = -ENOSYS;
+#ifdef CONFIG_KERNELD
+ static int id = KERNELD_MINSEQ;
+ struct kerneld_msg kmsp = { msgtype, NULL_KDHDR, (char *)text };
+ int msgflg = S_IRUSR | S_IWUSR | IPC_KERNELD | MSG_NOERROR;
+ unsigned long flags;
+
+ if (kerneld_msqid == -1)
+ return -ENODEV;
+
+ /* Do not wait for an answer at interrupt-time! */
+ if (intr_count)
+ ret_size &= ~KERNELD_WAIT;
+#ifdef NEW_KERNELD_PROTOCOL
+ else
+ kmsp.pid = current->pid;
+#endif
+
+ msgsz += KDHDR;
+ if (ret_size & KERNELD_WAIT) {
+ save_flags(flags);
+ cli();
+ if (++id <= 0) /* overflow */
+ id = KERNELD_MINSEQ;
+ kmsp.id = id;
+ restore_flags(flags);
+ }
+
+ status = real_msgsnd(kerneld_msqid, (struct msgbuf *)&kmsp, msgsz, msgflg);
+ if ((status >= 0) && (ret_size & KERNELD_WAIT)) {
+ ret_size &= ~KERNELD_WAIT;
+ kmsp.text = (char *)ret_val;
+ status = real_msgrcv(kerneld_msqid, (struct msgbuf *)&kmsp,
+ KDHDR + ((ret_val)?ret_size:0),
+ kmsp.id, msgflg);
+ if (status > 0) /* a valid answer contains at least a long */
+ status = kmsp.id;
+ }
+
+#endif /* CONFIG_KERNELD */
+ return status;
+}
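
The comment block above describes the kerneld message format and points at <linux/kerneld.h> for the inline convenience wrappers built on kerneld_send(). As a sketch of what such a wrapper looks like (the constant names KERNELD_REQUEST_MODULE and KERNELD_WAIT are taken from the 2.0-era header and are an assumption here, not part of this patch):

    /*
     * Sketch of a <linux/kerneld.h>-style helper: ask kerneld to load a
     * module and wait for its answer.  msgtype picks the kerneld task,
     * KERNELD_WAIT in ret_size makes kerneld_send() block for the reply,
     * msgsz is the length of the parameter string, and no return buffer
     * is requested (ret_val == NULL).  strlen() comes from <linux/string.h>.
     */
    static inline int request_module(const char *name)
    {
            return kerneld_send(KERNELD_REQUEST_MODULE,
                                0 | KERNELD_WAIT,
                                strlen(name), name, NULL);
    }
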
diff --git a/ipc/sem.c b/ipc/sem.c
index 6dbe8e4fe..fb79d6004 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -32,7 +32,6 @@
*/
#include <linux/errno.h>
-#include <asm/segment.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/sem.h>
@@ -40,6 +39,8 @@
#include <linux/stat.h>
#include <linux/malloc.h>
+#include <asm/uaccess.h>
+
extern int ipcperms (struct ipc_perm *ipcp, short semflg);
static int newary (key_t, int, int);
static int findkey (key_t key);
@@ -103,8 +104,7 @@ found:
if (!sma) {
semary[id] = (struct semid_ds *) IPC_UNUSED;
used_sems -= nsems;
- if (sem_lock)
- wake_up (&sem_lock);
+ wake_up (&sem_lock);
return -ENOMEM;
}
memset (sma, 0, size);
@@ -124,12 +124,11 @@ found:
max_semid = id;
used_semids++;
semary[id] = sma;
- if (sem_lock)
- wake_up (&sem_lock);
+ wake_up (&sem_lock);
return (unsigned int) sma->sem_perm.seq * SEMMNI + id;
}
-int sys_semget (key_t key, int nsems, int semflg)
+asmlinkage int sys_semget (key_t key, int nsems, int semflg)
{
int id;
struct semid_ds *sma;
@@ -357,7 +356,7 @@ static void freeary (int id)
kfree(sma);
}
-int sys_semctl (int semid, int semnum, int cmd, union semun arg)
+asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
struct semid_ds *buf = NULL;
struct semid_ds tbuf;
@@ -395,7 +394,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
i = verify_area(VERIFY_WRITE, tmp, sizeof(struct seminfo));
if (i)
return i;
- memcpy_tofs (tmp, &seminfo, sizeof(struct seminfo));
+ copy_to_user (tmp, &seminfo, sizeof(struct seminfo));
return max_semid;
}
@@ -416,7 +415,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
tbuf.sem_otime = sma->sem_otime;
tbuf.sem_ctime = sma->sem_ctime;
tbuf.sem_nsems = sma->sem_nsems;
- memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ copy_to_user (buf, &tbuf, sizeof(*buf));
return id;
}
@@ -476,7 +475,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
array = arg.array;
if ((i = verify_area (VERIFY_READ, array, nsems*sizeof(ushort))))
return i;
- memcpy_fromfs (sem_io, array, nsems*sizeof(ushort));
+ copy_from_user (sem_io, array, nsems*sizeof(ushort));
for (i = 0; i < nsems; i++)
if (sem_io[i] > SEMVMX)
return -ERANGE;
@@ -490,7 +489,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
buf = arg.buf;
if ((i = verify_area (VERIFY_READ, buf, sizeof (*buf))))
return i;
- memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ copy_from_user (&tbuf, buf, sizeof (*buf));
break;
}
@@ -505,7 +504,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
return -EACCES;
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- memcpy_tofs (array, sem_io, nsems*sizeof(ushort));
+ copy_to_user (array, sem_io, nsems*sizeof(ushort));
break;
case SETVAL:
if (ipcperms (ipcp, S_IWUGO))
@@ -534,7 +533,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
tbuf.sem_otime = sma->sem_otime;
tbuf.sem_ctime = sma->sem_ctime;
tbuf.sem_nsems = sma->sem_nsems;
- memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ copy_to_user (buf, &tbuf, sizeof(*buf));
break;
case SETALL:
if (ipcperms (ipcp, S_IWUGO))
@@ -554,7 +553,7 @@ int sys_semctl (int semid, int semnum, int cmd, union semun arg)
return 0;
}
-int sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
+asmlinkage int sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
int i, id, size, error;
struct semid_ds *sma;
@@ -570,7 +569,7 @@ int sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
return -EFAULT;
if ((i = verify_area (VERIFY_READ, tsops, nsops * sizeof(*tsops))))
return i;
- memcpy_fromfs (sops, tsops, nsops * sizeof(*tsops));
+ copy_from_user (sops, tsops, nsops * sizeof(*tsops));
id = (unsigned int) semid % SEMMNI;
if ((sma = semary[id]) == IPC_UNUSED || sma == IPC_NOID)
return -EINVAL;
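
The change that repeats throughout msg.c and sem.c above is the move from the <asm/segment.h> user-copy primitives (memcpy_tofs, memcpy_fromfs, get_fs_long, put_fs_long) to the <asm/uaccess.h> helpers (copy_to_user, copy_from_user, get_user, put_user). A minimal sketch of the new idiom as this patch uses it -- verify_area() first, then an unchecked copy -- with a hypothetical structure and function name purely for illustration:

    #include <linux/errno.h>
    #include <asm/uaccess.h>

    struct foo_info { long count; long bytes; };         /* hypothetical */

    static int foo_stat(struct foo_info *ubuf, struct foo_info *kbuf)
    {
            int err;

            err = verify_area(VERIFY_WRITE, ubuf, sizeof(*ubuf));
            if (err)
                    return err;
            copy_to_user(ubuf, kbuf, sizeof(*ubuf));      /* was memcpy_tofs() */
            return 0;
    }
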
diff --git a/ipc/shm.c b/ipc/shm.c
index 9dc89ec22..ec408ed62 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -12,8 +12,9 @@
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>
+#include <linux/swap.h>
-#include <asm/segment.h>
+#include <asm/uaccess.h>
#include <asm/pgtable.h>
extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
@@ -91,16 +92,14 @@ found:
shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
if (!shp) {
shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
- if (shm_lock)
- wake_up (&shm_lock);
+ wake_up (&shm_lock);
return -ENOMEM;
}
shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
if (!shp->shm_pages) {
shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
- if (shm_lock)
- wake_up (&shm_lock);
+ wake_up (&shm_lock);
kfree(shp);
return -ENOMEM;
}
@@ -124,12 +123,11 @@ found:
max_shmid = id;
shm_segs[id] = shp;
used_segs++;
- if (shm_lock)
- wake_up (&shm_lock);
+ wake_up (&shm_lock);
return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
-int sys_shmget (key_t key, int size, int shmflg)
+asmlinkage int sys_shmget (key_t key, int size, int shmflg)
{
struct shmid_ds *shp;
int id = 0;
@@ -199,7 +197,7 @@ static void killseg (int id)
return;
}
-int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
+asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
struct shmid_ds tbuf;
struct shmid_ds *shp;
@@ -214,7 +212,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
err = verify_area (VERIFY_READ, buf, sizeof (*buf));
if (err)
return err;
- memcpy_fromfs (&tbuf, buf, sizeof (*buf));
+ copy_from_user (&tbuf, buf, sizeof (*buf));
}
switch (cmd) { /* replace with proc interface ? */
@@ -231,7 +229,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
if (err)
return err;
- memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
+ copy_to_user (buf, &shminfo, sizeof(struct shminfo));
return max_shmid;
}
case SHM_INFO:
@@ -248,7 +246,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
shm_info.shm_swp = shm_swp;
shm_info.swap_attempts = swap_attempts;
shm_info.swap_successes = swap_successes;
- memcpy_tofs (buf, &shm_info, sizeof(shm_info));
+ copy_to_user (buf, &shm_info, sizeof(shm_info));
return max_shmid;
}
case SHM_STAT:
@@ -273,7 +271,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
tbuf.shm_cpid = shp->shm_cpid;
tbuf.shm_lpid = shp->shm_lpid;
tbuf.shm_nattch = shp->shm_nattch;
- memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ copy_to_user (buf, &tbuf, sizeof(*buf));
return id;
}
@@ -318,7 +316,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
tbuf.shm_cpid = shp->shm_cpid;
tbuf.shm_lpid = shp->shm_lpid;
tbuf.shm_nattch = shp->shm_nattch;
- memcpy_tofs (buf, &tbuf, sizeof(*buf));
+ copy_to_user (buf, &tbuf, sizeof(*buf));
break;
case IPC_SET:
if (suser() || current->euid == shp->shm_perm.uid ||
@@ -350,7 +348,7 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
* The per process internal structure for managing segments is
* `struct vm_area_struct'.
* A shmat will add to and shmdt will remove from the list.
- * shmd->vm_task the attacher
+ * shmd->vm_mm the attacher
* shmd->vm_start virt addr of attach, multiple of SHMLBA
* shmd->vm_end multiple of SHMLBA
* shmd->vm_next next attach for task
@@ -360,9 +358,9 @@ int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
*/
static struct vm_operations_struct shm_vm_ops = {
- shm_open, /* open */
- shm_close, /* close */
- NULL, /* unmap */
+ shm_open, /* open - callback for a new vm-area open */
+ shm_close, /* close - callback for when the vm-area is released */
+ NULL, /* no need to sync pages at unmap */
NULL, /* protect */
NULL, /* sync */
NULL, /* advise */
@@ -392,9 +390,9 @@ static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct *
if (shmd->vm_next_share == shmd) {
if (shp->attaches != shmd) {
printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
- (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK);
- printk("shm_close: %d %08lx-%08lx %c%c%c%c %08lx %08lx\n",
- shmd->vm_task->pid, shmd->vm_start, shmd->vm_end,
+ SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
+ printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
+ shmd->vm_start, shmd->vm_end,
shmd->vm_flags & VM_READ ? 'r' : '-',
shmd->vm_flags & VM_WRITE ? 'w' : '-',
shmd->vm_flags & VM_EXEC ? 'x' : '-',
@@ -420,41 +418,57 @@ static int shm_map (struct vm_area_struct *shmd)
pmd_t *page_middle;
pte_t *page_table;
unsigned long tmp, shm_sgn;
+ int error;
/* clear old mappings */
do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
/* add new mapping */
- insert_vm_struct(current, shmd);
- merge_segments(current, shmd->vm_start, shmd->vm_end);
+ tmp = shmd->vm_end - shmd->vm_start;
+ if((current->mm->total_vm << PAGE_SHIFT) + tmp
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ current->mm->total_vm += tmp >> PAGE_SHIFT;
+ insert_vm_struct(current->mm, shmd);
+ merge_segments(current->mm, shmd->vm_start, shmd->vm_end);
/* map page range */
- shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
- for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
- shm_sgn += (1 << SHM_IDX_SHIFT)) {
- page_dir = pgd_offset(shmd->vm_task,tmp);
+ error = 0;
+ shm_sgn = shmd->vm_pte +
+ SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
+ flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
+ for (tmp = shmd->vm_start;
+ tmp < shmd->vm_end;
+ tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
+ {
+ page_dir = pgd_offset(shmd->vm_mm,tmp);
page_middle = pmd_alloc(page_dir,tmp);
- if (!page_middle)
- return -ENOMEM;
+ if (!page_middle) {
+ error = -ENOMEM;
+ break;
+ }
page_table = pte_alloc(page_middle,tmp);
- if (!page_table)
- return -ENOMEM;
- pte_val(*page_table) = shm_sgn;
+ if (!page_table) {
+ error = -ENOMEM;
+ break;
+ }
+ set_pte(page_table, __pte(shm_sgn));
}
- invalidate();
- return 0;
+ flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
+ return error;
}
/*
* Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
*/
-int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
+asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
struct shmid_ds *shp;
struct vm_area_struct *shmd;
int err;
unsigned int id;
unsigned long addr;
+ unsigned long len;
if (shmid < 0) {
/* printk("shmat() -> EINVAL because shmid = %d < 0\n",shmid); */
@@ -478,12 +492,24 @@ int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
else
return -EINVAL;
}
- if ((addr > current->mm->start_stack - 16384 - PAGE_SIZE*shp->shm_npages)) {
+ /*
+ * Check if addr exceeds TASK_SIZE (from do_mmap)
+ */
+ len = PAGE_SIZE*shp->shm_npages;
+ if (addr >= TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE - len)
+ return -EINVAL;
+ /*
+ * If shm segment goes below stack, make sure there is some
+ * space left for the stack to grow (presently 4 pages).
+ */
+ if (addr < current->mm->start_stack &&
+ addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
+ {
/* printk("shmat() -> EINVAL because segment intersects stack\n"); */
return -EINVAL;
}
if (!(shmflg & SHM_REMAP))
- if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz))) {
+ if ((shmd = find_vma_intersection(current->mm, addr, addr + shp->shm_segsz))) {
/* printk("shmat() -> EINVAL because the interval [0x%lx,0x%lx) intersects an already mapped interval [0x%lx,0x%lx).\n",
addr, addr + shp->shm_segsz, shmd->vm_start, shmd->vm_end); */
return -EINVAL;
@@ -502,10 +528,10 @@ int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
return -EIDRM;
}
- shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT);
+ shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
shmd->vm_start = addr;
shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
- shmd->vm_task = current;
+ shmd->vm_mm = current->mm;
shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
| VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
@@ -538,7 +564,7 @@ static void shm_open (struct vm_area_struct *shmd)
unsigned int id;
struct shmid_ds *shp;
- id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
+ id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
shp = shm_segs[id];
if (shp == IPC_UNUSED) {
printk("shm_open: unused id=%d PANIC\n", id);
@@ -561,10 +587,8 @@ static void shm_close (struct vm_area_struct *shmd)
struct shmid_ds *shp;
int id;
- unmap_page_range (shmd->vm_start, shmd->vm_end - shmd->vm_start);
-
/* remove from the list of attaches of the shm segment */
- id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
+ id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
shp = shm_segs[id];
remove_attach(shp,shmd); /* remove from shp->attaches */
shp->shm_lpid = current->pid;
@@ -577,7 +601,7 @@ static void shm_close (struct vm_area_struct *shmd)
* detach and kill segment if marked destroyed.
* The work is done in shm_close.
*/
-int sys_shmdt (char *shmaddr)
+asmlinkage int sys_shmdt (char *shmaddr)
{
struct vm_area_struct *shmd, *shmdnext;
@@ -599,10 +623,10 @@ static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, uns
struct shmid_ds *shp;
unsigned int id, idx;
- id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
- if (id != ((shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK)) {
+ id = SWP_OFFSET(code) & SHM_ID_MASK;
+ if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
- id, (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK);
+ id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
return BAD_PAGE;
}
if (id > max_shmid) {
@@ -614,7 +638,7 @@ static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, uns
printk ("shm_swap_in: id=%d invalid. Race.\n", id);
return BAD_PAGE;
}
- idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
+ idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
if (idx != (offset >> PAGE_SHIFT)) {
printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
idx, offset >> PAGE_SHIFT);
@@ -651,11 +675,11 @@ static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, uns
pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
shp->shm_pages[idx] = pte_val(pte);
} else
- --current->mm->maj_flt; /* was incremented in do_no_page */
+ --current->maj_flt; /* was incremented in do_no_page */
done: /* pte_val(pte) == shp->shm_pages[idx] */
- current->mm->min_flt++;
- mem_map[MAP_NR(pte_page(pte))]++;
+ current->min_flt++;
+ mem_map[MAP_NR(pte_page(pte))].count++;
return pte_modify(pte, shmd->vm_page_prot);
}
@@ -665,16 +689,16 @@ done: /* pte_val(pte) == shp->shm_pages[idx] */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */
-int shm_swap (int prio)
+int shm_swap (int prio, int dma)
{
pte_t page;
struct shmid_ds *shp;
struct vm_area_struct *shmd;
unsigned long swap_nr;
unsigned long id, idx;
- int loop = 0, invalid = 0;
+ int loop = 0;
int counter;
-
+
counter = shm_rss >> prio;
if (!counter || !(swap_nr = get_swap_page()))
return 0;
@@ -702,12 +726,12 @@ int shm_swap (int prio)
pte_val(page) = shp->shm_pages[idx];
if (!pte_present(page))
goto check_table;
+ if (dma && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
+ goto check_table;
swap_attempts++;
if (--counter < 0) { /* failed */
failed:
- if (invalid)
- invalidate();
swap_free (swap_nr);
return 0;
}
@@ -719,14 +743,15 @@ int shm_swap (int prio)
pte_t *page_table, pte;
unsigned long tmp;
- if ((shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
- printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n", id, shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK);
+ if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
+ printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
+ id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
continue;
}
tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
continue;
- page_dir = pgd_offset(shmd->vm_task,tmp);
+ page_dir = pgd_offset(shmd->vm_mm,tmp);
if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
id, shmd->vm_start, idx);
@@ -745,27 +770,27 @@ int shm_swap (int prio)
if (!pte_present(pte))
continue;
if (pte_young(pte)) {
- *page_table = pte_mkold(pte);
+ set_pte(page_table, pte_mkold(pte));
continue;
}
if (pte_page(pte) != pte_page(page))
printk("shm_swap_out: page and pte mismatch\n");
- pte_val(*page_table) = shmd->vm_pte | idx << SHM_IDX_SHIFT;
- mem_map[MAP_NR(pte_page(pte))]--;
- if (shmd->vm_task->mm->rss > 0)
- shmd->vm_task->mm->rss--;
- invalid++;
+ flush_cache_page(shmd, tmp);
+ set_pte(page_table,
+ __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
+ mem_map[MAP_NR(pte_page(pte))].count--;
+ if (shmd->vm_mm->rss > 0)
+ shmd->vm_mm->rss--;
+ flush_tlb_page(shmd, tmp);
/* continue looping through circular list */
} while (0);
if ((shmd = shmd->vm_next_share) == shp->attaches)
break;
}
- if (mem_map[MAP_NR(pte_page(page))] != 1)
+ if (mem_map[MAP_NR(pte_page(page))].count != 1)
goto check_table;
shp->shm_pages[idx] = swap_nr;
- if (invalid)
- invalidate();
write_swap_page (swap_nr, (char *) pte_page(page));
free_page(pte_page(page));
swap_successes++;
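
One detail worth spelling out from the sys_shmat() changes above: the new range test is written as "addr >= TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE - len" rather than the naive "addr + len > TASK_SIZE", because the naive sum can wrap around and wrongly accept a mapping. A small user-space sketch of the point (the TASK_SIZE value here is illustrative only, not taken from this patch):

    #include <stdio.h>
    #include <stdint.h>

    #define TASK_SIZE 0xC0000000u           /* illustrative 32-bit value only */

    static int range_ok(uint32_t addr, uint32_t len)
    {
            /* same shape as the check added to sys_shmat() above */
            if (addr >= TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE - len)
                    return 0;               /* outside user space */
            return 1;
    }

    int main(void)
    {
            uint32_t addr = 0xB0000000u, len = 0x60000000u;

            /* naive test: addr + len wraps to 0x10000000, so it wrongly passes */
            printf("naive: %s\n", (addr + len > TASK_SIZE) ? "rejected" : "accepted");
            /* overflow-safe form used by the patch rejects it */
            printf("patch: %s\n", range_ok(addr, len) ? "accepted" : "rejected");
            return 0;
    }
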
diff --git a/ipc/util.c b/ipc/util.c
index 87c6c28ea..e81afc36f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -5,7 +5,6 @@
#include <linux/config.h>
#include <linux/errno.h>
-#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/sem.h>
@@ -13,25 +12,11 @@
#include <linux/shm.h>
#include <linux/stat.h>
-void ipc_init (void);
-asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth);
+#include <asm/uaccess.h>
-#ifdef CONFIG_SYSVIPC
+#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
-int ipcperms (struct ipc_perm *ipcp, short flag);
extern void sem_init (void), msg_init (void), shm_init (void);
-extern int sys_semget (key_t key, int nsems, int semflg);
-extern int sys_semop (int semid, struct sembuf *sops, unsigned nsops);
-extern int sys_semctl (int semid, int semnum, int cmd, union semun arg);
-extern int sys_msgget (key_t key, int msgflg);
-extern int sys_msgsnd (int msqid, struct msgbuf *msgp, int msgsz, int msgflg);
-extern int sys_msgrcv (int msqid, struct msgbuf *msgp, int msgsz, long msgtyp,
- int msgflg);
-extern int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf);
-extern int sys_shmget (key_t key, int size, int flag);
-extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *addr);
-extern int sys_shmdt (char *shmaddr);
-extern int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf);
void ipc_init (void)
{
@@ -63,112 +48,78 @@ int ipcperms (struct ipc_perm *ipcp, short flag)
return 0;
}
-asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
-{
- int version;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- if (call <= SEMCTL)
- switch (call) {
- case SEMOP:
- return sys_semop (first, (struct sembuf *)ptr, second);
- case SEMGET:
- return sys_semget (first, second, third);
- case SEMCTL: {
- union semun fourth;
- int err;
- if (!ptr)
- return -EINVAL;
- if ((err = verify_area (VERIFY_READ, ptr, sizeof(long))))
- return err;
- fourth.__pad = (void *) get_fs_long(ptr);
- return sys_semctl (first, second, third, fourth);
- }
- default:
- return -EINVAL;
- }
- if (call <= MSGCTL)
- switch (call) {
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
- int err;
- if (!ptr)
- return -EINVAL;
- if ((err = verify_area (VERIFY_READ, ptr, sizeof(tmp))))
- return err;
- memcpy_fromfs (&tmp,(struct ipc_kludge *) ptr,
- sizeof (tmp));
- return sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
- }
- case 1: default:
- return sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
- }
- case MSGGET:
- return sys_msgget ((key_t) first, second);
- case MSGCTL:
- return sys_msgctl (first, second, (struct msqid_ds *) ptr);
- default:
- return -EINVAL;
- }
- if (call <= SHMCTL)
- switch (call) {
- case SHMAT:
- switch (version) {
- case 0: default: {
- ulong raddr;
- int err;
- if ((err = verify_area(VERIFY_WRITE, (ulong*) third, sizeof(ulong))))
- return err;
- err = sys_shmat (first, (char *) ptr, second, &raddr);
- if (err)
- return err;
- put_fs_long (raddr, (ulong *) third);
- return 0;
- }
- case 1: /* iBCS2 emulator entry point */
- if (get_fs() != get_ds())
- return -EINVAL;
- return sys_shmat (first, (char *) ptr, second, (ulong *) third);
- }
- case SHMDT:
- return sys_shmdt ((char *)ptr);
- case SHMGET:
- return sys_shmget (first, second, third);
- case SHMCTL:
- return sys_shmctl (first, second, (struct shmid_ds *) ptr);
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-#else /* not CONFIG_SYSVIPC */
-
-asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
-{
- return -ENOSYS;
-}
+#else
+/*
+ * Dummy functions when SYSV IPC isn't configured
+ */
void sem_exit (void)
{
return;
}
-int shm_swap (int prio)
+int shm_swap (int prio, unsigned long limit)
{
return 0;
}
-void shm_no_page (unsigned long *ptent)
+asmlinkage int sys_semget (key_t key, int nsems, int semflg)
{
- return;
+ return -ENOSYS;
+}
+
+asmlinkage int sys_semop (int semid, struct sembuf *sops, unsigned nsops)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_msgget (key_t key, int msgflg)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz, long msgtyp,
+ int msgflg)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
+{
+ return -ENOSYS;
}
+asmlinkage int sys_shmget (key_t key, int size, int flag)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *addr)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_shmdt (char *shmaddr)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
+{
+ return -ENOSYS;
+}
+
+void kerneld_exit(void)
+{
+}
#endif /* CONFIG_SYSVIPC */
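
With CONFIG_SYSVIPC (and CONFIG_KERNELD) disabled, the stubs above make every SysV IPC system call return -ENOSYS, which user space sees as errno == ENOSYS. A small user-space sketch of what that looks like from the caller's side:

    #include <stdio.h>
    #include <errno.h>
    #include <sys/types.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    int main(void)
    {
            int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

            if (id < 0 && errno == ENOSYS)
                    printf("kernel built without CONFIG_SYSVIPC\n");
            else if (id < 0)
                    perror("msgget");
            else
                    printf("created message queue id %d\n", id);
            return 0;
    }
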