author | Ralf Baechle <ralf@linux-mips.org> | 1999-06-13 16:29:25 +0000
---|---|---
committer | Ralf Baechle <ralf@linux-mips.org> | 1999-06-13 16:29:25 +0000
commit | db7d4daea91e105e3859cf461d7e53b9b77454b2 (patch) |
tree | 9bb65b95440af09e8aca63abe56970dd3360cc57 /net/netlink |
parent | 9c1c01ead627bdda9211c9abd5b758d6c687d8ac (diff) |
Merge with Linux 2.2.8.
Diffstat (limited to 'net/netlink')
-rw-r--r-- | net/netlink/af_netlink.c | 43 |
1 file changed, 28 insertions, 15 deletions
```diff
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a281c966b..9247bf99c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -25,7 +25,6 @@
 #include <linux/un.h>
 #include <linux/fcntl.h>
 #include <linux/termios.h>
-#include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/net.h>
 #include <linux/fs.h>
@@ -55,6 +54,13 @@ static struct socket *netlink_kernel[MAX_LINKS];
 static int netlink_dump(struct sock *sk);
 static void netlink_destroy_callback(struct netlink_callback *cb);
 
+/* Netlink table lock. It protects against sk list changes
+   during uninterruptible sleeps in netlink_broadcast.
+
+   These lock MUST NOT be used from bh/irq on SMP kernels, because
+   It would result in race in netlink_wait_on_table.
+ */
+
 extern __inline__ void
 netlink_wait_on_table(int protocol)
 {
@@ -69,16 +75,16 @@ netlink_lock_table(int protocol)
 }
 
 extern __inline__ void
-netlink_unlock_table(int protocol, int wakeup)
+netlink_unlock_table(int protocol)
 {
 #if 0
 	/* F...g gcc does not eat it! */
-	if (atomic_dec_and_test(&nl_table_lock[protocol]) && wakeup)
+	if (atomic_dec_and_test(&nl_table_lock[protocol]))
 		wake_up(&nl_table_wait);
 #else
 	atomic_dec(&nl_table_lock[protocol]);
-	if (atomic_read(&nl_table_lock[protocol]) && wakeup)
+	if (!atomic_read(&nl_table_lock[protocol]))
 		wake_up(&nl_table_wait);
 #endif
 }
 
@@ -125,7 +131,9 @@ static void netlink_remove(struct sock *sk)
 	struct sock **skp;
 	for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
 		if (*skp == sk) {
+			start_bh_atomic();
 			*skp = sk->next;
+			end_bh_atomic();
 			return;
 		}
 	}
@@ -186,7 +194,7 @@ static int netlink_release(struct socket *sock, struct socket *peer)
 	   transport (and AF_UNIX datagram, when it will be repaired).
 
 	   Someone could wait on our sock->wait now.
-	   We cannot release socket until waiter will remove yourself
+	   We cannot release socket until waiter will remove itself
 	   from wait queue. I choose the most conservetive way of solving
 	   the problem.
 
@@ -218,8 +226,6 @@ static int netlink_autobind(struct socket *sock)
 	struct sock *sk = sock->sk;
 	struct sock *osk;
 
-	netlink_wait_on_table(sk->protocol);
-
 	sk->protinfo.af_netlink.groups = 0;
 	sk->protinfo.af_netlink.pid = current->pid;
 
@@ -264,8 +270,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
 		return 0;
 	}
 
-	netlink_wait_on_table(sk->protocol);
-
 	for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
 		if (osk->protinfo.af_netlink.pid == nladdr->nl_pid)
 			return -EADDRINUSE;
@@ -332,7 +336,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
 retry:
 	for (sk = nl_table[protocol]; sk; sk = sk->next) {
 		if (sk->protinfo.af_netlink.pid != pid)
-			continue;
+			continue;
 
 		netlink_lock(sk);
 
@@ -416,7 +420,8 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 
 	/* While we sleep in clone, do not allow to change socket list */
 
-	netlink_lock_table(protocol);
+	if (allocation == GFP_KERNEL)
+		netlink_lock_table(protocol);
 
 	for (sk = nl_table[protocol]; sk; sk = sk->next) {
 		if (ssk == sk)
@@ -454,7 +459,8 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 		netlink_unlock(sk);
 	}
 
-	netlink_unlock_table(protocol, allocation == GFP_KERNEL);
+	if (allocation == GFP_KERNEL)
+		netlink_unlock_table(protocol);
 
 	if (skb2)
 		kfree_skb(skb2);
@@ -475,7 +481,7 @@ Nprintk("seterr");
 		    !(sk->protinfo.af_netlink.groups&group))
 			continue;
 
-		sk->err = -code;
+		sk->err = code;
 		sk->state_change(sk);
 	}
 }
@@ -739,15 +745,20 @@ int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
 
 void netlink_detach(int unit)
 {
 	struct socket *sock = netlink_kernel[unit];
+	netlink_kernel[unit] = NULL;
+	synchronize_bh();
+
 	sock_release(sock);
 }
 
 int netlink_post(int unit, struct sk_buff *skb)
 {
-	if (netlink_kernel[unit]) {
+	struct socket *sock = netlink_kernel[unit];
+	barrier();
+	if (sock) {
 		memset(skb->cb, 0, sizeof(skb->cb));
-		netlink_broadcast(netlink_kernel[unit]->sk, skb, 0, ~0, GFP_ATOMIC);
+		netlink_broadcast(sock->sk, skb, 0, ~0, GFP_ATOMIC);
 		return 0;
 	}
 	return -EUNATCH;
@@ -800,6 +811,8 @@ done:
 	len-=(offset-begin);
 	if(len>length)
 		len=length;
+	if(len<0)
+		len=0;
 	return len;
 }
 #endif
```
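For orientation, the sketch below models the counter-style table lock that the patch's new comment describes: list walkers bump a per-table counter, and a writer that needs to change the socket list sleeps until the counter drops back to zero, which is exactly the condition the corrected test in netlink_unlock_table now wakes on. This is only an illustrative user-space analogue: C11 atomics and a pthread condition variable stand in for the kernel's atomic_t and wait-queue primitives, and names such as table_lock_count, table_lock, table_unlock and table_wait are invented for the example, not taken from af_netlink.c.

```c
/* Illustrative user-space analogue of the netlink table lock described in
 * the patch comment above; not kernel code.  A reader increments the
 * counter before walking the list, and the last reader out wakes anyone
 * waiting for the count to reach zero. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int table_lock_count;            /* plays the role of nl_table_lock[protocol] */
static pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_queue = PTHREAD_COND_INITIALIZER;   /* plays the role of nl_table_wait */

/* Take the table lock for a list walk: just bump the counter. */
static void table_lock(void)
{
	atomic_fetch_add(&table_lock_count, 1);
}

/* Drop the table lock; wake waiters only when the count has reached zero,
 * mirroring the "!atomic_read()" test the patch puts into
 * netlink_unlock_table(). */
static void table_unlock(void)
{
	if (atomic_fetch_sub(&table_lock_count, 1) == 1) {
		pthread_mutex_lock(&wait_mutex);
		pthread_cond_broadcast(&wait_queue);
		pthread_mutex_unlock(&wait_mutex);
	}
}

/* Sleep until nobody holds the table lock, like netlink_wait_on_table(). */
static void table_wait(void)
{
	pthread_mutex_lock(&wait_mutex);
	while (atomic_load(&table_lock_count) != 0)
		pthread_cond_wait(&wait_queue, &wait_mutex);
	pthread_mutex_unlock(&wait_mutex);
}

int main(void)
{
	table_lock();                 /* a broadcast-style list walk is in progress */
	printf("walking list, holders = %d\n", atomic_load(&table_lock_count));
	table_unlock();               /* last holder out wakes the waiters */

	table_wait();                 /* returns at once: the count is zero */
	printf("socket list may be modified now\n");
	return 0;
}
```

The same reading also fits the patch's other change: netlink_broadcast takes the table lock only for GFP_KERNEL callers, since an atomic-context caller cannot sleep and, per the new comment, the lock must not be used from bh/irq.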