Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        56
-rw-r--r--  net/core/profile.c     5
-rw-r--r--  net/core/rtnetlink.c  19
-rw-r--r--  net/core/scm.c         2
-rw-r--r--  net/core/skbuff.c     22
-rw-r--r--  net/core/sock.c       46
6 files changed, 93 insertions, 57 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 69315d948..bd414c794 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -740,21 +740,20 @@ static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
* recovering the MAC header first.
*/
- int offset=skb->data-skb->mac.raw;
- cli();
+ int offset;
+
+ skb=skb_clone(skb, GFP_ATOMIC);
+ if(skb==NULL)
+ return;
+
+ offset=skb->data-skb->mac.raw;
skb_push(skb,offset); /* Put header back on for bridge */
+
if(br_receive_frame(skb))
- {
- sti();
return;
- }
- /*
- * Pull the MAC header off for the copy going to
- * the upper layers.
- */
- skb_pull(skb,offset);
- sti();
+ kfree_skb(skb, FREE_READ);
}
+ return;
}
#endif
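Taken together, the added lines give handle_bridge() the following shape; this is a consolidated sketch (hypothetical helper name, enclosing bridge-enabled check elided), not a verbatim copy of the resulting function:

static void bridge_deliver_sketch(struct sk_buff *skb)
{
	int offset;

	/* Work on a clone so the original skb still reaches the upper
	 * layers untouched; no cli()/sti() locking is needed any more. */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;

	/* Recover the MAC header before handing the frame to the bridge. */
	offset = skb->data - skb->mac.raw;
	skb_push(skb, offset);

	if (br_receive_frame(skb))
		return;			/* bridge consumed the clone */

	kfree_skb(skb, FREE_READ);	/* bridge declined: drop our clone */
}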
@@ -809,7 +808,7 @@ void net_bh(void)
while (!skb_queue_empty(&backlog))
{
- struct sk_buff * skb = backlog.next;
+ struct sk_buff * skb;
/* Give chance to other bottom halves to run */
if (jiffies - start_time > 1)
@@ -818,9 +817,7 @@ void net_bh(void)
/*
* We have a packet. Therefore the queue has shrunk
*/
- cli();
- __skb_unlink(skb, &backlog);
- sti();
+ skb = skb_dequeue(&backlog);
#ifdef CONFIG_CPU_IS_SLOW
if (ave_busy > 128*16) {
@@ -1097,7 +1094,7 @@ static int sprintf_stats(char *buffer, struct device *dev)
int size;
if (stats)
- size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %8lu %4lu %4lu %4lu %5lu %4lu %4lu\n",
+ size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
dev->name,
stats->rx_bytes,
stats->rx_packets, stats->rx_errors,
@@ -1325,7 +1322,7 @@ int dev_change_flags(struct device *dev, unsigned flags)
dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_RUNNING|IFF_NOARP|
IFF_SLAVE|IFF_MASTER|
IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
- (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC));
+ (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
/*
* Load in the correct multicast list now the flags have changed.
@@ -1346,13 +1343,11 @@ int dev_change_flags(struct device *dev, unsigned flags)
if (ret == 0)
dev_mc_upload(dev);
- }
+ }
if (dev->flags&IFF_UP &&
- ((old_flags^dev->flags)&~(IFF_UP|IFF_RUNNING|IFF_PROMISC|IFF_VOLATILE))) {
- printk(KERN_DEBUG "SIFFL %s(%s)\n", dev->name, current->comm);
+ ((old_flags^dev->flags)&~(IFF_UP|IFF_RUNNING|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE)))
notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
- }
if ((flags^dev->gflags)&IFF_PROMISC) {
int inc = (flags&IFF_PROMISC) ? +1 : -1;
@@ -1360,6 +1355,16 @@ int dev_change_flags(struct device *dev, unsigned flags)
dev_set_promiscuity(dev, inc);
}
+	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
+	   is important. Some (broken) drivers set IFF_PROMISC when
+	   IFF_ALLMULTI is requested, without asking us and without reporting it.
+	*/
+ if ((flags^dev->gflags)&IFF_ALLMULTI) {
+ int inc = (flags&IFF_ALLMULTI) ? +1 : -1;
+ dev->gflags ^= IFF_ALLMULTI;
+ dev_set_allmulti(dev, inc);
+ }
+
return ret;
}
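The IFF_ALLMULTI handling added here mirrors the existing IFF_PROMISC path: user-space requests are tracked in dev->gflags and turned into +1/-1 calls to dev_set_allmulti(). For reference, such a helper typically keeps a per-device counter and only changes the driver-visible flag when that counter crosses zero; a minimal illustrative sketch (hypothetical name, assuming the usual dev->allmulti counter and a dev_mc_upload() refresh; the real dev_set_allmulti() may differ in detail):

void dev_set_allmulti_sketch(struct device *dev, int inc)
{
	unsigned old_flags = dev->flags;

	/* Each +1/-1 from dev_change_flags() adjusts the counter;
	 * IFF_ALLMULTI stays set while any requester is outstanding. */
	dev->allmulti += inc;
	if (dev->allmulti)
		dev->flags |= IFF_ALLMULTI;
	else
		dev->flags &= ~IFF_ALLMULTI;

	/* Ask the driver to reprogram its multicast filter if the
	 * effective mode changed. */
	if (dev->flags != old_flags)
		dev_mc_upload(dev);
}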
@@ -1378,7 +1383,8 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
switch(cmd)
{
case SIOCGIFFLAGS: /* Get interface flags */
- ifr->ifr_flags = (dev->flags&~IFF_PROMISC)|(dev->gflags&IFF_PROMISC);
+ ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI))
+ |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
return 0;
case SIOCSIFFLAGS: /* Set interface flags */
@@ -1660,6 +1666,7 @@ static int dev_boot_phase = 1;
int register_netdevice(struct device *dev)
{
struct device *d, **dp;
+printk("register_netdevice #1\n");
if (dev_boot_phase) {
printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name);
@@ -1673,27 +1680,32 @@ int register_netdevice(struct device *dev)
*dp = dev;
return 0;
}
+printk("register_netdevice #2\n");
dev->iflink = -1;
/* Init, if this function is available */
if (dev->init && dev->init(dev) != 0)
return -EIO;
+printk("register_netdevice #3\n");
/* Check for existence, and append to tail of chain */
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev || strcmp(d->name, dev->name) == 0)
return -EEXIST;
}
+printk("register_netdevice #4\n");
dev->next = NULL;
dev_init_scheduler(dev);
dev->ifindex = dev_new_index();
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
*dp = dev;
+printk("register_netdevice #5\n");
/* Notify protocols, that a new device appeared. */
notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+printk("register_netdevice #6\n");
return 0;
}
diff --git a/net/core/profile.c b/net/core/profile.c
index 54fc57662..fc7464b7a 100644
--- a/net/core/profile.c
+++ b/net/core/profile.c
@@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <net/checksum.h>
+#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -276,8 +277,8 @@ __initfunc(int net_profile_init(void))
printk("Evaluating net profiler cost ...");
#if CPU == 586 || CPU == 686
- if (!(boot_cpu_data.x86_capability & 16)) {
- panic("Sorry, you CPU does not support tsc. I am dying...\n");
+ if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC)) {
+ printk(KERN_ERR "Sorry, your CPU does not support TSC. Net profiler disabled.\n");
return -1;
}
#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4bbe84cac..cd8030c5d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -246,7 +246,7 @@ int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = 1;
for (idx=1; idx<NPROTO; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
- if (idx < s_idx || idx == AF_PACKET)
+ if (idx < s_idx || idx == PF_PACKET)
continue;
if (rtnetlink_links[idx] == NULL ||
rtnetlink_links[idx][type].dumpit == NULL)
@@ -336,7 +336,7 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
link_tab = rtnetlink_links[family];
if (link_tab == NULL)
- link_tab = rtnetlink_links[AF_UNSPEC];
+ link_tab = rtnetlink_links[PF_UNSPEC];
link = &link_tab[type];
sz_idx = type>>2;
@@ -348,8 +348,10 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
}
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+ int rlen;
+
if (link->dumpit == NULL)
- link = &(rtnetlink_links[AF_UNSPEC][type]);
+ link = &(rtnetlink_links[PF_UNSPEC][type]);
if (link->dumpit == NULL)
goto err_inval;
@@ -364,7 +366,10 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
atomic_dec(&rtnl_rlockct);
return -1;
}
- skb_pull(skb, NLMSG_ALIGN(nlh->nlmsg_len));
+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (rlen > skb->len)
+ rlen = skb->len;
+ skb_pull(skb, rlen);
return -1;
}
@@ -398,7 +403,7 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
}
if (link->doit == NULL)
- link = &(rtnetlink_links[AF_UNSPEC][type]);
+ link = &(rtnetlink_links[PF_UNSPEC][type]);
if (link->doit == NULL)
goto err_inval;
err = link->doit(skb, nlh, (void *)&rta);
@@ -538,8 +543,8 @@ __initfunc(void rtnetlink_init(void))
if (rtnl == NULL)
panic("rtnetlink_init: cannot initialize rtnetlink\n");
register_netdevice_notifier(&rtnetlink_dev_notifier);
- rtnetlink_links[AF_UNSPEC] = link_rtnetlink_table;
- rtnetlink_links[AF_PACKET] = link_rtnetlink_table;
+ rtnetlink_links[PF_UNSPEC] = link_rtnetlink_table;
+ rtnetlink_links[PF_PACKET] = link_rtnetlink_table;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index dd19cf5e0..3e4469f29 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -50,7 +50,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
creds->uid == current->suid) || capable(CAP_SETUID)) &&
((creds->gid == current->gid || creds->gid == current->egid ||
creds->gid == current->sgid) || capable(CAP_SETGID))) {
- return 0;
+ return 0;
}
return -EPERM;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index abad1e217..c218233d4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -77,11 +77,22 @@ extern atomic_t ip_frag_mem;
static kmem_cache_t *skbuff_head_cache;
/*
- * Strings we don't want inline's duplicating
+ * Keep out-of-line to prevent kernel bloat.
+ * __builtin_return_address is not used because it is not always
+ * reliable.
*/
-
-const char skb_push_errstr[]="skpush:under: %p:%d";
-const char skb_put_errstr[] ="skput:over: %p:%d";
+
+void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+{
+ panic("skput:over: %p:%d put:%d dev:%s",
+ here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+}
+
+void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+{
+ panic("skput:under: %p:%d put:%d dev:%s",
+ here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+}
void show_net_buffers(void)
{
@@ -179,6 +190,9 @@ static inline void skb_headerinit(void *p, kmem_cache_t *cache,
skb->ip_summed = 0;
skb->security = 0; /* By default packets are insecure */
skb->dst = NULL;
+#ifdef CONFIG_IP_FIREWALL_CHAINS
+ skb->fwmark = 0;
+#endif
memset(skb->cb, 0, sizeof(skb->cb));
skb->priority = 0;
}
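The two panic helpers introduced above are the out-of-line halves of the skb_put()/skb_push() bounds checks, which is why the comment notes that __builtin_return_address() is avoided. A minimal sketch of how an inline caller in include/linux/skbuff.h would use skb_over_panic() (the exact inline body is an assumption based on the usual pattern, not part of this diff):

static __inline__ unsigned char *skb_put_sketch(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;

	skb->tail += len;
	skb->len  += len;
	if (skb->tail > skb->end) {
		/* Pass the address of a local label instead of
		 * __builtin_return_address(), per the comment above. */
		__label__ here;
		skb_over_panic(skb, len, &&here);
here:		;
	}
	return tmp;
}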
diff --git a/net/core/sock.c b/net/core/sock.c
index 428b4052c..07d125462 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -105,6 +105,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
+#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -206,18 +207,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sk->broadcast=valbool;
break;
case SO_SNDBUF:
- /*
- * The spec isnt clear if ENOBUFS or EINVAL
- * is best
- */
-
- /* printk(KERN_DEBUG "setting SO_SNDBUF %d\n", val); */
+			/* Don't return an error; BSD doesn't, and if you think
+			   about it this is right. Otherwise apps have to
+			   play 'guess the biggest size' games. RCVBUF/SNDBUF
+			   are treated in BSD as hints */
+
if (val > sysctl_wmem_max)
- return -EINVAL;
+ val = sysctl_wmem_max;
- /* FIXME: the tcp code should be made to work even
- * with small sndbuf values.
- */
sk->sndbuf = max(val*2,2048);
/*
@@ -228,10 +225,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_RCVBUF:
- /* printk(KERN_DEBUG "setting SO_RCVBUF %d\n", val); */
-
+			/* Don't return an error; BSD doesn't, and if you think
+			   about it this is right. Otherwise apps have to
+			   play 'guess the biggest size' games. RCVBUF/SNDBUF
+			   are treated in BSD as hints */
+
if (val > sysctl_rmem_max)
- return -EINVAL;
+ val = sysctl_rmem_max;
/* FIXME: is this lower bound the right one? */
sk->rcvbuf = max(val*2,256);
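The user-visible effect of the SO_SNDBUF/SO_RCVBUF change is that oversized requests now succeed and are silently capped at the sysctl limit instead of failing with EINVAL. A small illustrative user-space snippet (hypothetical values, not part of the patch):

#include <stdio.h>
#include <sys/socket.h>

/* Before: a value above wmem_max made setsockopt() fail with EINVAL.
 * After:  the call succeeds and the kernel clamps the value, so the
 * size read back is bounded by the limit (and doubled for overhead,
 * as in sock_setsockopt() above). */
int request_big_sndbuf(int fd)
{
	int asked = 4 * 1024 * 1024;	/* deliberately larger than wmem_max */
	int got;
	socklen_t len = sizeof(got);

	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &asked, sizeof(asked)) < 0)
		return -1;		/* no longer expected for large values */

	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &got, &len);
	printf("asked for %d, kernel granted %d\n", asked, got);
	return got;
}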
@@ -480,8 +480,8 @@ struct sock *sk_alloc(int family, int priority, int zero_it)
{
struct sock *sk = kmem_cache_alloc(sk_cachep, priority);
- if(sk && zero_it) {
- memset(sk, 0, sizeof(struct sock));
+ if(sk) {
+ if (zero_it) memset(sk, 0, sizeof(struct sock));
sk->family = family;
}
@@ -496,10 +496,11 @@ void sk_free(struct sock *sk)
kmem_cache_free(sk_cachep, sk);
}
-void sk_init(void)
+__initfunc(void sk_init(void))
{
sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
SLAB_HWCACHE_ALIGN, 0, 0);
+
}
/*
@@ -542,8 +543,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
atomic_add(skb->truesize, &sk->wmem_alloc);
skb->destructor = sock_wfree;
skb->sk = sk;
+ return skb;
}
- return skb;
}
return NULL;
}
@@ -556,23 +557,26 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
atomic_add(skb->truesize, &sk->rmem_alloc);
skb->destructor = sock_rfree;
skb->sk = sk;
+ return skb;
}
- return skb;
}
return NULL;
}
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
- void *mem = NULL;
if (atomic_read(&sk->omem_alloc)+size < sysctl_optmem_max) {
+ void *mem;
/* First do the add, to avoid the race if kmalloc
* might sleep.
*/
atomic_add(size, &sk->omem_alloc);
mem = kmalloc(size, priority);
+ if (mem)
+ return mem;
+ atomic_sub(size, &sk->omem_alloc);
}
- return mem;
+ return NULL;
}
void sock_kfree_s(struct sock *sk, void *mem, int size)
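Pieced together, the reworked sock_kmalloc() is a reserve-then-allocate pattern: the option-memory charge is added before kmalloc(), which may sleep, so a concurrent caller cannot push the total past sysctl_optmem_max, and the charge is rolled back if the allocation fails. A consolidated sketch of the resulting function (assembled from the hunk above; whitespace approximate):

void *sock_kmalloc(struct sock *sk, int size, int priority)
{
	if (atomic_read(&sk->omem_alloc) + size < sysctl_optmem_max) {
		void *mem;

		/* Charge first, so the limit check stays honest even if
		 * kmalloc() sleeps and another caller races with us. */
		atomic_add(size, &sk->omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;

		/* Allocation failed: give the charge back. */
		atomic_sub(size, &sk->omem_alloc);
	}
	return NULL;
}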
@@ -880,7 +884,7 @@ int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
{
- return -EOPNOTSUPP;
+ return 0;
}
int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)