author    Ralf Baechle <ralf@linux-mips.org>    1999-06-17 13:25:08 +0000
committer Ralf Baechle <ralf@linux-mips.org>    1999-06-17 13:25:08 +0000
commit    59223edaa18759982db0a8aced0e77457d10c68e (patch)
tree      89354903b01fa0a447bffeefe00df3044495db2e /net
parent    db7d4daea91e105e3859cf461d7e53b9b77454b2 (diff)
Merge with Linux 2.3.6. Sorry, this isn't tested on silicon, I don't
have a MIPS box at hand.
Diffstat (limited to 'net')
-rw-r--r-- net/802/llc_macinit.c | 3
-rw-r--r-- net/Config.in | 8
-rw-r--r-- net/Makefile | 2
-rw-r--r-- net/core/datagram.c | 10
-rw-r--r-- net/core/dev.c | 301
-rw-r--r-- net/core/dev_mcast.c | 51
-rw-r--r-- net/core/dst.c | 67
-rw-r--r-- net/core/filter.c | 2
-rw-r--r-- net/core/firewall.c | 2
-rw-r--r-- net/core/neighbour.c | 334
-rw-r--r-- net/core/rtnetlink.c | 28
-rw-r--r-- net/core/skbuff.c | 7
-rw-r--r-- net/core/sock.c | 62
-rw-r--r-- net/decnet/README | 13
-rw-r--r-- net/ethernet/eth.c | 5
-rw-r--r-- net/ipv4/af_inet.c | 57
-rw-r--r-- net/ipv4/arp.c | 84
-rw-r--r-- net/ipv4/devinet.c | 34
-rw-r--r-- net/ipv4/fib_frontend.c | 4
-rw-r--r-- net/ipv4/fib_hash.c | 62
-rw-r--r-- net/ipv4/fib_rules.c | 40
-rw-r--r-- net/ipv4/icmp.c | 23
-rw-r--r-- net/ipv4/igmp.c | 102
-rw-r--r-- net/ipv4/ip_fragment.c | 27
-rw-r--r-- net/ipv4/ip_input.c | 260
-rw-r--r-- net/ipv4/ip_masq_mfw.c | 4
-rw-r--r-- net/ipv4/ip_masq_quake.c | 4
-rw-r--r-- net/ipv4/ip_masq_vdolive.c | 6
-rw-r--r-- net/ipv4/ip_options.c | 3
-rw-r--r-- net/ipv4/ipconfig.c | 8
-rw-r--r-- net/ipv4/ipmr.c | 11
-rw-r--r-- net/ipv4/proc.c | 15
-rw-r--r-- net/ipv4/raw.c | 94
-rw-r--r-- net/ipv4/route.c | 97
-rw-r--r-- net/ipv4/tcp.c | 121
-rw-r--r-- net/ipv4/tcp_input.c | 155
-rw-r--r-- net/ipv4/tcp_ipv4.c | 220
-rw-r--r-- net/ipv4/tcp_output.c | 11
-rw-r--r-- net/ipv4/tcp_timer.c | 165
-rw-r--r-- net/ipv4/timer.c | 17
-rw-r--r-- net/ipv4/udp.c | 128
-rw-r--r-- net/ipv4/utils.c | 7
-rw-r--r-- net/ipv6/addrconf.c | 22
-rw-r--r-- net/ipv6/af_inet6.c | 4
-rw-r--r-- net/ipv6/exthdrs.c | 4
-rw-r--r-- net/ipv6/icmp.c | 4
-rw-r--r-- net/ipv6/ip6_fw.c | 5
-rw-r--r-- net/ipv6/ip6_output.c | 21
-rw-r--r-- net/ipv6/mcast.c | 98
-rw-r--r-- net/ipv6/ndisc.c | 52
-rw-r--r-- net/ipv6/proc.c | 15
-rw-r--r-- net/ipv6/raw.c | 27
-rw-r--r-- net/ipv6/reassembly.c | 5
-rw-r--r-- net/ipv6/route.c | 15
-rw-r--r-- net/ipv6/tcp_ipv6.c | 128
-rw-r--r-- net/ipv6/udp.c | 34
-rw-r--r-- net/irda/Config.in | 43
-rw-r--r-- net/irda/af_irda.c | 311
-rw-r--r-- net/irda/crc.c | 8
-rw-r--r-- net/irda/discovery.c | 62
-rw-r--r-- net/irda/ircomm/ircomm_common.c | 240
-rw-r--r-- net/irda/ircomm/irvtd_driver.c | 191
-rw-r--r-- net/irda/irda_device.c | 357
-rw-r--r-- net/irda/iriap.c | 80
-rw-r--r-- net/irda/iriap_event.c | 8
-rw-r--r-- net/irda/irlan/irlan_client.c | 84
-rw-r--r-- net/irda/irlan/irlan_client_event.c | 12
-rw-r--r-- net/irda/irlan/irlan_common.c | 274
-rw-r--r-- net/irda/irlan/irlan_eth.c | 180
-rw-r--r-- net/irda/irlan/irlan_event.c | 16
-rw-r--r-- net/irda/irlan/irlan_filter.c | 24
-rw-r--r-- net/irda/irlan/irlan_provider.c | 121
-rw-r--r-- net/irda/irlan/irlan_provider_event.c | 14
-rw-r--r-- net/irda/irlap.c | 548
-rw-r--r-- net/irda/irlap_comp.c | 12
-rw-r--r-- net/irda/irlap_event.c | 189
-rw-r--r-- net/irda/irlap_frame.c | 27
-rw-r--r-- net/irda/irlmp.c | 134
-rw-r--r-- net/irda/irlmp_frame.c | 62
-rw-r--r-- net/irda/irlpt/irlpt_cli.c | 24
-rw-r--r-- net/irda/irlpt/irlpt_cli_fsm.c | 12
-rw-r--r-- net/irda/irlpt/irlpt_common.c | 12
-rw-r--r-- net/irda/irlpt/irlpt_srvr.c | 29
-rw-r--r-- net/irda/irmod.c | 32
-rw-r--r-- net/irda/irproc.c | 47
-rw-r--r-- net/irda/irsysctl.c | 8
-rw-r--r-- net/irda/irttp.c | 156
-rw-r--r-- net/irda/qos.c | 11
-rw-r--r-- net/irda/wrapper.c | 339
-rw-r--r-- net/netlink/af_netlink.c | 8
-rw-r--r-- net/netlink/netlink_dev.c | 2
-rw-r--r-- net/netrom/nr_route.c | 16
-rw-r--r-- net/netsyms.c | 21
-rw-r--r-- net/packet/af_packet.c | 29
-rw-r--r-- net/rose/rose_route.c | 16
-rw-r--r-- net/sched/cls_api.c | 41
-rw-r--r-- net/sched/cls_fw.c | 23
-rw-r--r-- net/sched/cls_route.c | 39
-rw-r--r-- net/sched/cls_rsvp.h | 20
-rw-r--r-- net/sched/cls_u32.c | 19
-rw-r--r-- net/sched/estimator.c | 26
-rw-r--r-- net/sched/police.c | 25
-rw-r--r-- net/sched/sch_api.c | 86
-rw-r--r-- net/sched/sch_cbq.c | 56
-rw-r--r-- net/sched/sch_csz.c | 8
-rw-r--r-- net/sched/sch_generic.c | 283
-rw-r--r-- net/sched/sch_prio.c | 13
-rw-r--r-- net/sched/sch_sfq.c | 4
-rw-r--r-- net/sched/sch_tbf.c | 4
-rw-r--r-- net/sched/sch_teql.c | 38
-rw-r--r-- net/socket.c | 12
-rw-r--r-- net/sunrpc/auth.c | 3
-rw-r--r-- net/sunrpc/auth_unix.c | 3
-rw-r--r-- net/sunrpc/clnt.c | 2
-rw-r--r-- net/sunrpc/sched.c | 11
-rw-r--r-- net/sunrpc/svc.c | 2
-rw-r--r-- net/sunrpc/svcsock.c | 2
-rw-r--r-- net/sunrpc/xprt.c | 77
-rw-r--r-- net/unix/af_unix.c | 18
-rw-r--r-- net/wanrouter/wanmain.c | 15
-rw-r--r-- net/x25/af_x25.c | 11
121 files changed, 4812 insertions, 2876 deletions
diff --git a/net/802/llc_macinit.c b/net/802/llc_macinit.c
index a51a868f2..da47f4883 100644
--- a/net/802/llc_macinit.c
+++ b/net/802/llc_macinit.c
@@ -17,6 +17,8 @@
* Alan Cox : Chainsawed to Linux format
* Added llc_ to names
* Started restructuring handlers
+ *
+ * Horst von Brand : Add #include <linux/string.h>
*/
#include <linux/module.h>
@@ -24,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/unistd.h>
+#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <net/p8022.h>
diff --git a/net/Config.in b/net/Config.in
index ed8510209..53cd5b0c6 100644
--- a/net/Config.in
+++ b/net/Config.in
@@ -32,10 +32,10 @@ if [ "$CONFIG_IPX" != "n" ]; then
fi
tristate 'Appletalk DDP' CONFIG_ATALK
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-# tristate 'DECnet Support (NOT YET FUNCTIONAL)' CONFIG_DECNET
-# if [ "$CONFIG_DECNET" != "n" ]; then
-# source net/decnet/Config.in
-# fi
+ tristate 'DECnet Support (EXPERIMENTAL)' CONFIG_DECNET
+ if [ "$CONFIG_DECNET" != "n" ]; then
+ source net/decnet/Config.in
+ fi
tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
bool 'Bridging (EXPERIMENTAL)' CONFIG_BRIDGE
diff --git a/net/Makefile b/net/Makefile
index d20953259..999e5b9eb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -10,7 +10,7 @@
MOD_SUB_DIRS := ipv4
ALL_SUB_DIRS := 802 ax25 bridge core ethernet ipv4 ipv6 ipx unix appletalk \
netrom rose lapb x25 wanrouter netlink sched packet sunrpc \
- econet irda #decnet
+ econet irda decnet
SUB_DIRS := core ethernet sched
MOD_LIST_NAME := NET_MISC_MODULES
diff --git a/net/core/datagram.c b/net/core/datagram.c
index da09973cd..98233a224 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -6,7 +6,7 @@
* This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and NetROM layer all have identical poll code and mostly
* identical recvmsg() code. So we share it here. The poll was shared before but buried in udp.c so I moved it.
*
- * Authors: Alan Cox <alan@cymru.net>. (datagram_poll() from old udp.c code)
+ * Authors: Alan Cox <alan@redhat.com>. (datagram_poll() from old udp.c code)
*
* Fixes:
* Alan Cox : NULL return from skb_peek_copy() understood
@@ -54,7 +54,7 @@
static inline void wait_for_packet(struct sock * sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
@@ -151,15 +151,15 @@ restart:
is reentearble (it is not) or this function
is called by interrupts.
- Protect it with global skb spinlock,
+ Protect it with skb queue spinlock,
though for now even this is overkill.
--ANK (980728)
*/
- spin_lock_irqsave(&skb_queue_lock, cpu_flags);
+ spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
skb = skb_peek(&sk->receive_queue);
if(skb!=NULL)
atomic_inc(&skb->users);
- spin_unlock_irqrestore(&skb_queue_lock, cpu_flags);
+ spin_unlock_irqrestore(&sk->receive_queue.lock, cpu_flags);
} else
skb = skb_dequeue(&sk->receive_queue);
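
The datagram.c hunk above replaces the single global skb_queue_lock with the receive queue's own lock, so peeking at one socket no longer contends with every other queue in the system. A minimal userspace analogue of the per-queue pattern, assuming pthreads and invented types (this is a sketch of the idiom, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct packet {
	struct packet *next;
	atomic_int users;		/* reference count, like skb->users */
};

struct packet_queue {
	struct packet *head;
	pthread_mutex_t lock;		/* per-queue lock, not a global one */
};

/* Peek at the head without dequeuing: take a reference under the
 * queue's own lock so a concurrent dequeue cannot free it under us. */
static struct packet *queue_peek(struct packet_queue *q)
{
	struct packet *p;

	pthread_mutex_lock(&q->lock);
	p = q->head;
	if (p != NULL)
		atomic_fetch_add(&p->users, 1);
	pthread_mutex_unlock(&q->lock);
	return p;
}
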
diff --git a/net/core/dev.c b/net/core/dev.c
index 921f05470..b9bd18343 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -129,8 +129,9 @@ const char *if_port_text[] = {
* 86DD IPv6
*/
-struct packet_type *ptype_base[16]; /* 16 way hashed list */
-struct packet_type *ptype_all = NULL; /* Taps */
+static struct packet_type *ptype_base[16]; /* 16 way hashed list */
+static struct packet_type *ptype_all = NULL; /* Taps */
+static rwlock_t ptype_lock = RW_LOCK_UNLOCKED;
/*
* Device list lock. Setting it provides that interface
@@ -199,6 +200,7 @@ void dev_add_pack(struct packet_type *pt)
dev_clear_fastroute(pt->dev);
}
#endif
+ write_lock_bh(&ptype_lock);
if(pt->type==htons(ETH_P_ALL))
{
netdev_nit++;
@@ -211,6 +213,7 @@ void dev_add_pack(struct packet_type *pt)
pt->next = ptype_base[hash];
ptype_base[hash] = pt;
}
+ write_unlock_bh(&ptype_lock);
}
@@ -228,19 +231,21 @@ void dev_remove_pack(struct packet_type *pt)
}
else
pt1=&ptype_base[ntohs(pt->type)&15];
+ write_lock_bh(&ptype_lock);
for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
{
if(pt==(*pt1))
{
*pt1=pt->next;
- synchronize_bh();
#ifdef CONFIG_NET_FASTROUTE
if (pt->data)
netdev_fastroute_obstacles--;
#endif
+ write_unlock_bh(&ptype_lock);
return;
}
}
+ write_unlock_bh(&ptype_lock);
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
}
@@ -258,37 +263,43 @@ struct device *dev_get(const char *name)
{
struct device *dev;
- for (dev = dev_base; dev != NULL; dev = dev->next)
- {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if (strcmp(dev->name, name) == 0)
- return(dev);
+ goto out;
}
- return NULL;
+out:
+ read_unlock(&dev_base_lock);
+ return dev;
}
struct device * dev_get_by_index(int ifindex)
{
struct device *dev;
- for (dev = dev_base; dev != NULL; dev = dev->next)
- {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->ifindex == ifindex)
- return(dev);
+ goto out;
}
- return NULL;
+out:
+ read_unlock(&dev_base_lock);
+ return dev;
}
struct device *dev_getbyhwaddr(unsigned short type, char *ha)
{
struct device *dev;
- for (dev = dev_base; dev != NULL; dev = dev->next)
- {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->type == type &&
memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
- return(dev);
+ goto out;
}
- return(NULL);
+out:
+ read_unlock(&dev_base_lock);
+ return dev;
}
/*
@@ -310,7 +321,7 @@ int dev_alloc_name(struct device *dev, const char *name)
}
return -ENFILE; /* Over 100 of the things .. bail out! */
}
-
+
struct device *dev_alloc(const char *name, int *err)
{
struct device *dev=kmalloc(sizeof(struct device)+16, GFP_KERNEL);
@@ -438,8 +449,10 @@ void dev_clear_fastroute(struct device *dev)
if (dev) {
dev_do_clear_fastroute(dev);
} else {
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next)
dev_do_clear_fastroute(dev);
+ read_unlock(&dev_base_lock);
}
}
#endif
@@ -512,6 +525,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct device *dev)
struct packet_type *ptype;
get_fast_time(&skb->stamp);
+ read_lock(&ptype_lock);
for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
{
/* Never send packets back to the socket
@@ -552,6 +566,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct device *dev)
ptype->func(skb2, skb->dev, ptype);
}
}
+ read_unlock(&ptype_lock);
}
/*
@@ -578,59 +593,61 @@ int dev_queue_xmit(struct sk_buff *skb)
struct device *dev = skb->dev;
struct Qdisc *q;
-#ifdef CONFIG_NET_PROFILE
- start_bh_atomic();
- NET_PROFILE_ENTER(dev_queue_xmit);
-#endif
-
- start_bh_atomic();
+ /* Grab device queue */
+ spin_lock_bh(&dev->queue_lock);
q = dev->qdisc;
if (q->enqueue) {
q->enqueue(skb, q);
- qdisc_wakeup(dev);
- end_bh_atomic();
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
+ /* If the device is not busy, kick it.
+ * Otherwise or if queue is not empty after kick,
+ * add it to run list.
+ */
+ if (dev->tbusy || qdisc_restart(dev))
+ qdisc_run(dev->qdisc);
+ spin_unlock_bh(&dev->queue_lock);
return 0;
}
+ spin_unlock_bh(&dev->queue_lock);
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
- Really, it is unlikely that bh protection is necessary here:
- virtual devices do not generate EOI events.
- However, it is possible, that they rely on bh protection
+ Really, it is unlikely that xmit_lock protection is necessary here.
+ (f.e. loopback and IP tunnels are clean ignoring statistics counters.)
+ However, it is possible, that they rely on protection
made by us here.
+
+ Check this and shot the lock. It is not prone from deadlocks.
+ Either shot noqueue qdisc, it is even simpler 8)
*/
if (dev->flags&IFF_UP) {
if (netdev_nit)
dev_queue_xmit_nit(skb,dev);
- if (dev->hard_start_xmit(skb, dev) == 0) {
- end_bh_atomic();
-
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
- return 0;
+ local_bh_disable();
+ if (dev->xmit_lock_owner != smp_processor_id()) {
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ dev->xmit_lock_owner = -1;
+ spin_unlock_bh(&dev->xmit_lock);
+ return 0;
+ }
+ dev->xmit_lock_owner = -1;
+ spin_unlock_bh(&dev->xmit_lock);
+ if (net_ratelimit())
+ printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
+ } else {
+ /* Recursion is detected! It is possible, unfortunately */
+ local_bh_enable();
+ if (net_ratelimit())
+ printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name);
}
- if (net_ratelimit())
- printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
}
- end_bh_atomic();
kfree_skb(skb);
-
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
-
return 0;
}
@@ -642,9 +659,6 @@ int dev_queue_xmit(struct sk_buff *skb)
int netdev_dropping = 0;
int netdev_max_backlog = 300;
atomic_t netdev_rx_dropped;
-#ifdef CONFIG_CPU_IS_SLOW
-int net_cpu_congestion;
-#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
int netdev_throttle_events;
@@ -732,9 +746,9 @@ static void dev_clear_backlog(struct device *dev)
curr=curr->next;
if ( curr->prev->dev == dev ) {
prev = curr->prev;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&backlog.lock, flags);
__skb_unlink(prev, &backlog);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&backlog.lock, flags);
kfree_skb(prev);
}
}
@@ -834,14 +848,6 @@ void net_bh(void)
struct packet_type *pt_prev;
unsigned short type;
unsigned long start_time = jiffies;
-#ifdef CONFIG_CPU_IS_SLOW
- static unsigned long start_busy = 0;
- static unsigned long ave_busy = 0;
-
- if (start_busy == 0)
- start_busy = start_time;
- net_cpu_congestion = ave_busy>>8;
-#endif
NET_PROFILE_ENTER(net_bh);
/*
@@ -851,9 +857,9 @@ void net_bh(void)
* latency on a transmit interrupt bh.
*/
- if (qdisc_head.forw != &qdisc_head)
+ if (qdisc_pending())
qdisc_run_queues();
-
+
/*
* Any data left to process. This may occur because a
* mark_bh() is done after we empty the queue including
@@ -881,19 +887,6 @@ void net_bh(void)
*/
skb = skb_dequeue(&backlog);
-#ifdef CONFIG_CPU_IS_SLOW
- if (ave_busy > 128*16) {
- kfree_skb(skb);
- while ((skb = skb_dequeue(&backlog)) != NULL)
- kfree_skb(skb);
- break;
- }
-#endif
-
-
-#if 0
- NET_PROFILE_SKB_PASSED(skb, net_bh_skb);
-#endif
#ifdef CONFIG_NET_FASTROUTE
if (skb->pkt_type == PACKET_FASTROUTE) {
dev_queue_xmit(skb);
@@ -939,6 +932,7 @@ void net_bh(void)
*/
pt_prev = NULL;
+ read_lock(&ptype_lock);
for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
{
if (!ptype->dev || ptype->dev == skb->dev) {
@@ -992,6 +986,7 @@ void net_bh(void)
else {
kfree_skb(skb);
}
+ read_unlock(&ptype_lock);
} /* End of queue loop */
/*
@@ -1002,16 +997,9 @@ void net_bh(void)
* One last output flush.
*/
- if (qdisc_head.forw != &qdisc_head)
+ if (qdisc_pending())
qdisc_run_queues();
-#ifdef CONFIG_CPU_IS_SLOW
- if (1) {
- unsigned long start_idle = jiffies;
- ave_busy += ((start_idle - start_busy)<<3) - (ave_busy>>4);
- start_busy = 0;
- }
-#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
if (netdev_dropping)
netdev_wakeup();
@@ -1045,14 +1033,6 @@ int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
*/
/*
- * This call is useful, but I'd remove it too.
- *
- * The reason is purely aestetical, it is the only call
- * from SIOC* family using struct ifreq in reversed manner.
- * Besides that, it is pretty silly to put "drawing" facility
- * to kernel, it is useful only to print ifindices
- * in readable form, is not it? --ANK
- *
* We need this ioctl for efficient implementation of the
* if_indextoname() function required by the IPv6 API. Without
* it, we would have to search all the interfaces to find a
@@ -1105,14 +1085,20 @@ static int dev_ifconf(char *arg)
if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
return -EFAULT;
- pos = ifc.ifc_buf;
len = ifc.ifc_len;
+ if (ifc.ifc_buf) {
+ pos = (char *) kmalloc(len, GFP_KERNEL);
+ if(pos == NULL)
+ return -ENOBUFS;
+ } else
+ pos = NULL;
/*
* Loop over the interfaces, and write an info block for each.
*/
total = 0;
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
for (i=0; i<NPROTO; i++) {
if (gifconf_list[i]) {
@@ -1122,12 +1108,19 @@ static int dev_ifconf(char *arg)
} else {
done = gifconf_list[i](dev, pos+total, len-total);
}
- if (done<0)
- return -EFAULT;
total += done;
}
}
}
+ read_unlock(&dev_base_lock);
+
+ if(pos != NULL) {
+ int err = copy_to_user(ifc.ifc_buf, pos, total);
+
+ kfree(pos);
+ if(err)
+ return -EFAULT;
+ }
/*
* All done. Write the updated control block back to the caller.
@@ -1199,20 +1192,20 @@ int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy
len+=size;
- for (dev = dev_base; dev != NULL; dev = dev->next)
- {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
size = sprintf_stats(buffer+len, dev);
len+=size;
pos=begin+len;
- if(pos<offset)
- {
+ if(pos<offset) {
len=0;
begin=pos;
}
if(pos>offset+length)
break;
}
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin); /* Start of wanted data */
len-=(offset-begin); /* Start slop */
@@ -1314,20 +1307,20 @@ int dev_get_wireless_info(char * buffer, char **start, off_t offset,
pos+=size;
len+=size;
- for(dev = dev_base; dev != NULL; dev = dev->next)
- {
+ read_lock(&dev_base_lock);
+ for(dev = dev_base; dev != NULL; dev = dev->next) {
size = sprintf_wireless_stats(buffer+len, dev);
len+=size;
pos=begin+len;
- if(pos < offset)
- {
+ if(pos < offset) {
len=0;
begin=pos;
}
if(pos > offset + length)
break;
}
+ read_unlock(&dev_base_lock);
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */
@@ -1703,11 +1696,10 @@ int dev_ioctl(unsigned int cmd, void *arg)
if (IW_IS_SET(cmd)) {
if (!suser())
return -EPERM;
- rtnl_lock();
}
+ rtnl_lock();
ret = dev_ifsioc(&ifr, cmd);
- if (IW_IS_SET(cmd))
- rtnl_unlock();
+ rtnl_unlock();
if (!ret && IW_IS_GET(cmd) &&
copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
@@ -1736,6 +1728,10 @@ int register_netdevice(struct device *dev)
{
struct device *d, **dp;
+ spin_lock_init(&dev->queue_lock);
+ spin_lock_init(&dev->xmit_lock);
+ dev->xmit_lock_owner = -1;
+
if (dev_boot_phase) {
/* This is NOT bug, but I am not sure, that all the
devices, initialized before netdev module is started
@@ -1752,11 +1748,14 @@ int register_netdevice(struct device *dev)
/* Check for existence, and append to tail of chain */
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
- if (d == dev || strcmp(d->name, dev->name) == 0)
+ if (d == dev || strcmp(d->name, dev->name) == 0) {
return -EEXIST;
+ }
}
dev->next = NULL;
+ write_lock_bh(&dev_base_lock);
*dp = dev;
+ write_unlock_bh(&dev_base_lock);
return 0;
}
@@ -1766,17 +1765,21 @@ int register_netdevice(struct device *dev)
if (dev->init && dev->init(dev) != 0)
return -EIO;
+ dev->ifindex = dev_new_index();
+ if (dev->iflink == -1)
+ dev->iflink = dev->ifindex;
+
/* Check for existence, and append to tail of chain */
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
- if (d == dev || strcmp(d->name, dev->name) == 0)
+ if (d == dev || strcmp(d->name, dev->name) == 0) {
return -EEXIST;
+ }
}
dev->next = NULL;
dev_init_scheduler(dev);
- dev->ifindex = dev_new_index();
- if (dev->iflink == -1)
- dev->iflink = dev->ifindex;
+ write_lock_bh(&dev_base_lock);
*dp = dev;
+ write_unlock_bh(&dev_base_lock);
/* Notify protocols, that a new device appeared. */
notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
@@ -1788,15 +1791,35 @@ int unregister_netdevice(struct device *dev)
{
struct device *d, **dp;
- if (dev_boot_phase == 0) {
- /* If device is running, close it.
- It is very bad idea, really we should
- complain loudly here, but random hackery
- in linux/drivers/net likes it.
- */
- if (dev->flags & IFF_UP)
- dev_close(dev);
+ /* If device is running, close it first. */
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ /* And unlink it from device chain. */
+ for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
+ if (d == dev) {
+ write_lock_bh(&dev_base_lock);
+ *dp = d->next;
+ write_unlock_bh(&dev_base_lock);
+
+ /* Sorry. It is known "feature". The race is clear.
+ Keep it after device reference counting will
+ be complete.
+ */
+ synchronize_bh();
+ break;
+ }
+ }
+ if (d == NULL)
+ return -ENODEV;
+
+ /* It is "synchronize_bh" to those of guys, who overslept
+ in skb_alloc/page fault etc. that device is off-line.
+ Again, it can be removed only if devices are refcounted.
+ */
+ dev_lock_wait();
+
+ if (dev_boot_phase == 0) {
#ifdef CONFIG_NET_FASTROUTE
dev_clear_fastroute(dev);
#endif
@@ -1813,25 +1836,11 @@ int unregister_netdevice(struct device *dev)
* Flush the multicast chain
*/
dev_mc_discard(dev);
-
- /* To avoid pointers looking to nowhere,
- we wait for end of critical section */
- dev_lock_wait();
}
- /* And unlink it from device chain. */
- for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
- if (d == dev) {
- *dp = d->next;
- synchronize_bh();
- d->next = NULL;
-
- if (dev->destructor)
- dev->destructor(dev);
- return 0;
- }
- }
- return -ENODEV;
+ if (dev->destructor)
+ dev->destructor(dev);
+ return 0;
}
@@ -1973,22 +1982,25 @@ __initfunc(int net_dev_init(void))
* If the call to dev->init fails, the dev is removed
* from the chain disconnecting the device until the
* next reboot.
+ *
+ * NB At boot phase networking is dead. No locking is required.
+ * But we still preserve dev_base_lock for sanity.
*/
dp = &dev_base;
- while ((dev = *dp) != NULL)
- {
+ while ((dev = *dp) != NULL) {
+ spin_lock_init(&dev->queue_lock);
+ spin_lock_init(&dev->xmit_lock);
+ dev->xmit_lock_owner = -1;
dev->iflink = -1;
- if (dev->init && dev->init(dev))
- {
+ if (dev->init && dev->init(dev)) {
/*
* It failed to come up. Unhook it.
*/
+ write_lock_bh(&dev_base_lock);
*dp = dev->next;
- synchronize_bh();
- }
- else
- {
+ write_unlock_bh(&dev_base_lock);
+ } else {
dp = &dev->next;
dev->ifindex = dev_new_index();
if (dev->iflink == -1)
@@ -2015,6 +2027,7 @@ __initfunc(int net_dev_init(void))
dev_boot_phase = 0;
+ dst_init();
dev_mcast_init();
#ifdef CONFIG_IP_PNP
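
Several dev.c hunks above introduce dev->xmit_lock together with dev->xmit_lock_owner, so a virtual device that loops a packet back into dev_queue_xmit() on the same CPU is detected and dropped instead of deadlocking on its own lock. A hedged userspace sketch of the owner-check idiom, with a thread id standing in for smp_processor_id() and all names invented for illustration:

#include <pthread.h>
#include <stdio.h>

struct xmit_dev {
	pthread_mutex_t xmit_lock;
	pthread_t xmit_lock_owner;	/* valid only while owned is set */
	int owned;
};

/* Returns 0 on success, -1 if the caller already holds the lock,
 * i.e. the transmit path re-entered itself ("dead loop" above). */
static int xmit_locked(struct xmit_dev *dev, void (*hard_xmit)(void))
{
	/* Unlocked read mirrors the kernel idiom: only the owner can
	 * ever observe itself here, so a stale value is harmless. */
	if (dev->owned && pthread_equal(dev->xmit_lock_owner, pthread_self())) {
		fprintf(stderr, "recursion on virtual device, dropping\n");
		return -1;
	}
	pthread_mutex_lock(&dev->xmit_lock);
	dev->xmit_lock_owner = pthread_self();
	dev->owned = 1;
	hard_xmit();			/* driver transmit under the lock */
	dev->owned = 0;
	pthread_mutex_unlock(&dev->xmit_lock);
	return 0;
}
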
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index bce3f4a4a..f7fcb1f87 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -58,7 +58,11 @@
*
* Device mc lists are changed by bh at least if IPv6 is enabled,
* so that it must be bh protected.
+ *
+ * We protect all mc lists with global rw lock
+ * and block accesses to device mc filters with dev->xmit_lock.
*/
+static rwlock_t dev_mc_lock = RW_LOCK_UNLOCKED;
/*
* Update the multicast list into the physical NIC controller.
@@ -69,7 +73,7 @@ void dev_mc_upload(struct device *dev)
/* Don't do anything till we up the interface
[dev_open will call this function so the list will
stay sane] */
-
+
if(!(dev->flags&IFF_UP))
return;
@@ -80,11 +84,15 @@ void dev_mc_upload(struct device *dev)
if(dev->set_multicast_list==NULL)
return;
- start_bh_atomic();
+ read_lock_bh(&dev_mc_lock);
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
dev->set_multicast_list(dev);
- end_bh_atomic();
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+ read_unlock_bh(&dev_mc_lock);
}
-
+
/*
* Delete a device level multicast
*/
@@ -94,7 +102,7 @@ int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
int err = 0;
struct dev_mc_list *dmi, **dmip;
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
/*
* Find the entry we want to delete. The device could
@@ -120,14 +128,15 @@ int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
* We have altered the list, so the card
* loaded filter is now wrong. Fix it
*/
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
+
dev_mc_upload(dev);
return 0;
}
}
err = -ENOENT;
done:
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
return err;
}
@@ -140,9 +149,12 @@ int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
int err = 0;
struct dev_mc_list *dmi, *dmi1;
+ /* RED-PEN: does gfp_any() work now? It requires
+ true local_bh_disable rather than global.
+ */
dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), gfp_any());
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) {
if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && dmi->dmi_addrlen==alen) {
if (glbl) {
@@ -156,8 +168,10 @@ int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
}
}
- if ((dmi=dmi1)==NULL)
+ if ((dmi=dmi1)==NULL) {
+ write_unlock_bh(&dev_mc_lock);
return -ENOMEM;
+ }
memcpy(dmi->dmi_addr, addr, alen);
dmi->dmi_addrlen=alen;
dmi->next=dev->mc_list;
@@ -165,12 +179,12 @@ int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
dmi->dmi_gusers=glbl ? 1 : 0;
dev->mc_list=dmi;
dev->mc_count++;
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
dev_mc_upload(dev);
return 0;
done:
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
if (dmi1)
kfree(dmi1);
return err;
@@ -182,7 +196,7 @@ done:
void dev_mc_discard(struct device *dev)
{
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
while (dev->mc_list!=NULL) {
struct dev_mc_list *tmp=dev->mc_list;
dev->mc_list=tmp->next;
@@ -191,7 +205,7 @@ void dev_mc_discard(struct device *dev)
kfree_s(tmp,sizeof(*tmp));
}
dev->mc_count=0;
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
}
#ifdef CONFIG_PROC_FS
@@ -203,9 +217,9 @@ static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
int len=0;
struct device *dev;
- start_bh_atomic();
-
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
+ read_lock_bh(&dev_mc_lock);
for (m = dev->mc_list; m; m = m->next) {
int i;
@@ -222,14 +236,17 @@ static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
len=0;
begin=pos;
}
- if (pos > offset+length)
+ if (pos > offset+length) {
+ read_unlock_bh(&dev_mc_lock);
goto done;
+ }
}
+ read_unlock_bh(&dev_mc_lock);
}
*eof = 1;
done:
- end_bh_atomic();
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
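
dev_mcast.c switches from start_bh_atomic() to a readers/writer lock: list walks (the upload path, the /proc dump) take dev_mc_lock for read, while add/delete/discard take it for write. A small pthread rwlock analogue of the same split, with illustrative types:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct mc_addr {
	struct mc_addr *next;
	char addr[6];
};

static struct mc_addr *mc_list;
static pthread_rwlock_t mc_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Writer: mutate the list under the exclusive lock. */
static int mc_add(const char addr[6])
{
	struct mc_addr *m = malloc(sizeof(*m));
	if (m == NULL)
		return -1;
	memcpy(m->addr, addr, 6);
	pthread_rwlock_wrlock(&mc_lock);
	m->next = mc_list;
	mc_list = m;
	pthread_rwlock_unlock(&mc_lock);
	return 0;
}

/* Reader: walks may run concurrently with each other. */
static int mc_count(void)
{
	int n = 0;
	pthread_rwlock_rdlock(&mc_lock);
	for (struct mc_addr *m = mc_list; m; m = m->next)
		n++;
	pthread_rwlock_unlock(&mc_lock);
	return n;
}
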
diff --git a/net/core/dst.c b/net/core/dst.c
index 9007dde66..f1695ca84 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -16,11 +16,22 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/init.h>
#include <net/dst.h>
-struct dst_entry * dst_garbage_list;
-atomic_t dst_total = ATOMIC_INIT(0);
+/* Locking strategy:
+ * 1) Garbage collection state of dead destination cache
+ * entries is protected by dst_lock.
+ * 2) GC is run only from BH context, and is the only remover
+ * of entries.
+ * 3) Entries are added to the garbage list from both BH
+ * and non-BH context, so local BH disabling is needed.
+ * 4) All operations modify state, so a spinlock is used.
+ */
+static struct dst_entry *dst_garbage_list;
+static atomic_t dst_total = ATOMIC_INIT(0);
+static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;
static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
@@ -29,15 +40,17 @@ static void dst_run_gc(unsigned long);
static struct timer_list dst_gc_timer =
{ NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };
-#if RT_CACHE_DEBUG >= 2
-atomic_t hh_count;
-#endif
static void dst_run_gc(unsigned long dummy)
{
int delayed = 0;
struct dst_entry * dst, **dstp;
+ if (!spin_trylock(&dst_lock)) {
+ mod_timer(&dst_gc_timer, jiffies + HZ/10);
+ return;
+ }
+
del_timer(&dst_gc_timer);
dstp = &dst_garbage_list;
while ((dst = *dstp) != NULL) {
@@ -51,7 +64,7 @@ static void dst_run_gc(unsigned long dummy)
}
if (!dst_garbage_list) {
dst_gc_timer_inc = DST_GC_MAX;
- return;
+ goto out;
}
if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
dst_gc_timer_expires = DST_GC_MAX;
@@ -62,6 +75,9 @@ static void dst_run_gc(unsigned long dummy)
atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
add_timer(&dst_gc_timer);
+
+out:
+ spin_unlock(&dst_lock);
}
static int dst_discard(struct sk_buff *skb)
@@ -100,7 +116,8 @@ void * dst_alloc(int size, struct dst_ops * ops)
void __dst_free(struct dst_entry * dst)
{
- start_bh_atomic();
+ spin_lock_bh(&dst_lock);
+
/* The first case (dev==NULL) is required, when
protocol module is unloaded.
*/
@@ -119,7 +136,8 @@ void __dst_free(struct dst_entry * dst)
dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
add_timer(&dst_gc_timer);
}
- end_bh_atomic();
+
+ spin_unlock_bh(&dst_lock);
}
void dst_destroy(struct dst_entry * dst)
@@ -143,3 +161,36 @@ void dst_destroy(struct dst_entry * dst)
atomic_dec(&dst_total);
kfree(dst);
}
+
+static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct device *dev = ptr;
+ struct dst_entry *dst;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ case NETDEV_DOWN:
+ spin_lock_bh(&dst_lock);
+ for (dst = dst_garbage_list; dst; dst = dst->next) {
+ if (dst->dev == dev) {
+ dst->input = dst_discard;
+ dst->output = dst_blackhole;
+ dst->dev = &loopback_dev;
+ }
+ }
+ spin_unlock_bh(&dst_lock);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+struct notifier_block dst_dev_notifier = {
+ dst_dev_event,
+ NULL,
+ 0
+};
+
+__initfunc(void dst_init(void))
+{
+ register_netdevice_notifier(&dst_dev_notifier);
+}
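
The reworked dst_run_gc() runs from timer context, where it must not sleep or spin on dst_lock; on contention it gives up immediately and re-arms the timer for HZ/10 later. A sketch of that back-off idiom in userspace C; rearm_timer() and collect_garbage() are hypothetical stubs standing in for mod_timer() and the garbage-list walk:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins: a real implementation would requeue the
 * callback on a timer wheel and walk the garbage list. */
static void rearm_timer(void (*fn)(void), int ms) { (void)fn; (void)ms; }
static void collect_garbage(void) { }

static void gc_tick(void)
{
	/* Timer context must not block: if someone else holds the
	 * lock, retry shortly (jiffies + HZ/10 in the hunk above). */
	if (pthread_mutex_trylock(&gc_lock) == EBUSY) {
		rearm_timer(gc_tick, 100);
		return;
	}
	collect_garbage();
	pthread_mutex_unlock(&gc_lock);
}
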
diff --git a/net/core/filter.c b/net/core/filter.c
index cc1ed83cd..8e1ffb628 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -106,7 +106,7 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
continue;
case BPF_ALU|BPF_MUL|BPF_K:
- A *= X;
+ A *= fentry->k;
continue;
case BPF_ALU|BPF_DIV|BPF_X:
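
The one-line filter.c fix deserves spelling out: for BPF_ALU|BPF_MUL|BPF_K the multiplier must be the instruction's immediate operand k, not the X register, so the old code made "mul #k" silently behave like "mul x". A minimal interpreter fragment showing the two cases side by side (simplified classic-BPF semantics, not the kernel's actual structures):

#include <stdint.h>

struct bpf_insn {
	uint16_t code;
	uint32_t k;			/* immediate operand */
};

enum { MUL_K, MUL_X };			/* stand-ins for BPF_ALU|BPF_MUL|... */

static uint32_t run_alu(const struct bpf_insn *insn, uint32_t A, uint32_t X)
{
	switch (insn->code) {
	case MUL_K:
		return A * insn->k;	/* immediate form: use k (the fix) */
	case MUL_X:
		return A * X;		/* register form: use X */
	}
	return A;
}
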
diff --git a/net/core/firewall.c b/net/core/firewall.c
index fc7b1a517..7ca90f49a 100644
--- a/net/core/firewall.c
+++ b/net/core/firewall.c
@@ -13,7 +13,7 @@
#include <linux/interrupt.h>
#include <asm/semaphore.h>
-struct semaphore firewall_sem = MUTEX;
+DECLARE_MUTEX(firewall_sem);
static int firewall_policy[NPROTO];
static struct firewall_ops *firewall_chain[NPROTO];
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b96650bcd..6124fcfc3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -28,33 +28,6 @@
#include <net/sock.h>
#include <linux/rtnetlink.h>
-/*
- NOTE. The most unpleasent question is serialization of
- accesses to resolved addresses. The problem is that addresses
- are modified by bh, but they are referenced from normal
- kernel thread. Before today no locking was made.
- My reasoning was that corrupted address token will be copied
- to packet with cosmologically small probability
- (it is even difficult to estimate such small number)
- and it is very silly to waste cycles in fast path to lock them.
-
- But now I changed my mind, but not because previous statement
- is wrong. Actually, neigh->ha MAY BE not opaque byte array,
- but reference to some private data. In this case even neglibible
- corruption probability becomes bug.
-
- - hh cache is protected by rwlock. It assumes that
- hh cache update procedure is short and fast, and that
- read_lock is cheaper than start_bh_atomic().
- - ha tokens, saved in neighbour entries, are protected
- by bh_atomic().
- - no protection is made in /proc reading. It is OK, because
- /proc is broken by design in any case, and
- corrupted output is normal behaviour there.
-
- --ANK (981025)
- */
-
#define NEIGH_DEBUG 1
#define NEIGH_PRINTK(x...) printk(x)
@@ -81,6 +54,46 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct device *dev);
static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
+#if defined(__i386__) && defined(__SMP__)
+#define ASSERT_WL(n) if ((int)((n)->lock.lock) >= 0) { printk("WL assertion failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
+#else
+#define ASSERT_WL(n) do { } while(0)
+#endif
+
+/*
+ Neighbour hash table buckets are protected with rwlock tbl->lock.
+
+ - All the scans/updates to hash buckets MUST be made under this lock.
+ - NOTHING clever should be made under this lock: no callbacks
+ to protocol backends, no attempts to send something to network.
+ It will result in deadlocks, if backend/driver wants to use neighbour
+ cache.
+ - If the entry requires some non-trivial actions, increase
+ its reference count and release table lock.
+
+ Neighbour entries are protected:
+ - with reference count.
+ - with rwlock neigh->lock
+
+ Reference count prevents destruction.
+
+ neigh->lock mainly serializes ll address data and its validity state.
+ However, the same lock is used to protect another entry fields:
+ - timer
+ - resolution queue
+
+ Again, nothing clever shall be made under neigh->lock,
+ the most complicated procedure, which we allow is dev->hard_header.
+ It is supposed, that dev->hard_header is simplistic and does
+ not make callbacks to neighbour tables.
+
+ The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
+ list of neighbour tables. This list is used only in process context,
+ so that this lock is useless with big kernel lock.
+ */
+
+static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
+
static int neigh_blackhole(struct sk_buff *skb)
{
kfree_skb(skb);
@@ -104,13 +117,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
int shrunk = 0;
int i;
- if (atomic_read(&tbl->lock))
- return 0;
-
for (i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[i];
+ write_lock_bh(&tbl->lock);
while ((n = *np) != NULL) {
/* Neighbour record may be discarded if:
- nobody refers to it.
@@ -122,6 +133,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
It is not clear, what is better table overflow
or flooding.
*/
+ write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 0 &&
!(n->nud_state&NUD_PERMANENT) &&
(n->nud_state != NUD_INCOMPLETE ||
@@ -130,11 +142,14 @@ static int neigh_forced_gc(struct neigh_table *tbl)
n->tbl = NULL;
tbl->entries--;
shrunk = 1;
+ write_unlock(&n->lock);
neigh_destroy(n);
continue;
}
+ write_unlock(&n->lock);
np = &n->next;
}
+ write_unlock_bh(&tbl->lock);
}
tbl->last_flush = jiffies;
@@ -145,12 +160,8 @@ int neigh_ifdown(struct neigh_table *tbl, struct device *dev)
{
int i;
- if (atomic_read(&tbl->lock)) {
- NEIGH_PRINTK1("neigh_ifdown: impossible event 1763\n");
- return -EBUSY;
- }
+ write_lock_bh(&tbl->lock);
- start_bh_atomic();
for (i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n, **np;
@@ -161,6 +172,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct device *dev)
continue;
}
*np = n->next;
+ write_lock(&n->lock);
n->tbl = NULL;
tbl->entries--;
if (atomic_read(&n->refcnt)) {
@@ -183,33 +195,32 @@ int neigh_ifdown(struct neigh_table *tbl, struct device *dev)
else
n->nud_state = NUD_NONE;
NEIGH_PRINTK2("neigh %p is stray.\n", n);
- } else
+ write_unlock(&n->lock);
+ } else {
+ write_unlock(&n->lock);
neigh_destroy(n);
+ }
}
}
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
pneigh_ifdown(tbl, dev);
- end_bh_atomic();
+ write_unlock_bh(&tbl->lock);
return 0;
}
-static struct neighbour *neigh_alloc(struct neigh_table *tbl, int creat)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
struct neighbour *n;
unsigned long now = jiffies;
- if (tbl->entries > tbl->gc_thresh1) {
- if (creat < 0)
+ if (tbl->entries > tbl->gc_thresh3 ||
+ (tbl->entries > tbl->gc_thresh2 &&
+ now - tbl->last_flush > 5*HZ)) {
+ if (neigh_forced_gc(tbl) == 0 &&
+ tbl->entries > tbl->gc_thresh3)
return NULL;
- if (tbl->entries > tbl->gc_thresh3 ||
- (tbl->entries > tbl->gc_thresh2 &&
- now - tbl->last_flush > 5*HZ)) {
- if (neigh_forced_gc(tbl) == 0 &&
- tbl->entries > tbl->gc_thresh3)
- return NULL;
- }
}
n = kmalloc(tbl->entry_size, GFP_ATOMIC);
@@ -219,6 +230,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, int creat)
memset(n, 0, tbl->entry_size);
skb_queue_head_init(&n->arp_queue);
+ n->lock = RW_LOCK_UNLOCKED;
n->updated = n->used = now;
n->nud_state = NUD_NONE;
n->output = neigh_blackhole;
@@ -231,9 +243,8 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, int creat)
return n;
}
-
-struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey,
- struct device *dev, int creat)
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct device *dev)
{
struct neighbour *n;
u32 hash_val;
@@ -245,17 +256,26 @@ struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey,
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+ read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
if (dev == n->dev &&
memcmp(n->primary_key, pkey, key_len) == 0) {
atomic_inc(&n->refcnt);
- return n;
+ break;
}
}
- if (!creat)
- return NULL;
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+
+struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct device *dev)
+{
+ struct neighbour *n, *n1;
+ u32 hash_val;
+ int key_len = tbl->key_len;
- n = neigh_alloc(tbl, creat);
+ n = neigh_alloc(tbl);
if (n == NULL)
return NULL;
@@ -275,11 +295,30 @@ struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey,
}
n->confirmed = jiffies - (n->parms->base_reachable_time<<1);
- atomic_set(&n->refcnt, 1);
+
+ hash_val = *(u32*)(pkey + key_len - 4);
+ hash_val ^= (hash_val>>16);
+ hash_val ^= hash_val>>8;
+ hash_val ^= hash_val>>3;
+ hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+
+ write_lock_bh(&tbl->lock);
+ for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+ if (dev == n1->dev &&
+ memcmp(n1->primary_key, pkey, key_len) == 0) {
+ atomic_inc(&n1->refcnt);
+ write_unlock_bh(&tbl->lock);
+ neigh_destroy(n);
+ return n1;
+ }
+ }
+
tbl->entries++;
+ n->tbl = tbl;
+ atomic_set(&n->refcnt, 1);
n->next = tbl->hash_buckets[hash_val];
tbl->hash_buckets[hash_val] = n;
- n->tbl = tbl;
+ write_unlock_bh(&tbl->lock);
NEIGH_PRINTK2("neigh %p is created.\n", n);
return n;
}
@@ -391,7 +430,9 @@ void neigh_destroy(struct neighbour *neigh)
while ((hh = neigh->hh) != NULL) {
neigh->hh = hh->hh_next;
hh->hh_next = NULL;
+ write_lock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole;
+ write_unlock_bh(&hh->hh_lock);
if (atomic_dec_and_test(&hh->hh_refcnt))
kfree(hh);
}
@@ -409,6 +450,8 @@ void neigh_destroy(struct neighbour *neigh)
/* Neighbour state is suspicious;
disable fast path.
+
+ Called with write_locked neigh.
*/
static void neigh_suspect(struct neighbour *neigh)
{
@@ -416,6 +459,8 @@ static void neigh_suspect(struct neighbour *neigh)
NEIGH_PRINTK2("neigh %p is suspecteded.\n", neigh);
+ ASSERT_WL(neigh);
+
neigh->output = neigh->ops->output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
@@ -424,6 +469,8 @@ static void neigh_suspect(struct neighbour *neigh)
/* Neighbour state is OK;
enable fast path.
+
+ Called with write_locked neigh.
*/
static void neigh_connect(struct neighbour *neigh)
{
@@ -431,6 +478,8 @@ static void neigh_connect(struct neighbour *neigh)
NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
+ ASSERT_WL(neigh);
+
neigh->output = neigh->ops->connected_output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
@@ -446,6 +495,8 @@ static void neigh_connect(struct neighbour *neigh)
If a routine wants to know TRUE entry state, it calls
neigh_sync before checking state.
+
+ Called with write_locked neigh.
*/
static void neigh_sync(struct neighbour *n)
@@ -453,6 +504,7 @@ static void neigh_sync(struct neighbour *n)
unsigned long now = jiffies;
u8 state = n->nud_state;
+ ASSERT_WL(n);
if (state&(NUD_NOARP|NUD_PERMANENT))
return;
if (state&NUD_REACHABLE) {
@@ -476,11 +528,8 @@ static void neigh_periodic_timer(unsigned long arg)
unsigned long now = jiffies;
int i;
- if (atomic_read(&tbl->lock)) {
- tbl->gc_timer.expires = now + 1*HZ;
- add_timer(&tbl->gc_timer);
- return;
- }
+
+ write_lock(&tbl->lock);
/*
* periodicly recompute ReachableTime from random function
@@ -498,10 +547,15 @@ static void neigh_periodic_timer(unsigned long arg)
np = &tbl->hash_buckets[i];
while ((n = *np) != NULL) {
- unsigned state = n->nud_state;
+ unsigned state;
- if (state&(NUD_PERMANENT|NUD_IN_TIMER))
+ write_lock(&n->lock);
+
+ state = n->nud_state;
+ if (state&(NUD_PERMANENT|NUD_IN_TIMER)) {
+ write_unlock(&n->lock);
goto next_elt;
+ }
if ((long)(n->used - n->confirmed) < 0)
n->used = n->confirmed;
@@ -512,6 +566,7 @@ static void neigh_periodic_timer(unsigned long arg)
n->tbl = NULL;
n->next = NULL;
tbl->entries--;
+ write_unlock(&n->lock);
neigh_destroy(n);
continue;
}
@@ -521,6 +576,7 @@ static void neigh_periodic_timer(unsigned long arg)
n->nud_state = NUD_STALE;
neigh_suspect(n);
}
+ write_unlock(&n->lock);
next_elt:
np = &n->next;
@@ -529,6 +585,7 @@ next_elt:
tbl->gc_timer.expires = now + tbl->gc_interval;
add_timer(&tbl->gc_timer);
+ write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -544,11 +601,17 @@ static void neigh_timer_handler(unsigned long arg)
{
unsigned long now = jiffies;
struct neighbour *neigh = (struct neighbour*)arg;
- unsigned state = neigh->nud_state;
+ unsigned state;
+ int notify = 0;
+
+ write_lock(&neigh->lock);
+ atomic_inc(&neigh->refcnt);
+
+ state = neigh->nud_state;
if (!(state&NUD_IN_TIMER)) {
NEIGH_PRINTK1("neigh: timer & !nud_in_timer\n");
- return;
+ goto out;
}
if ((state&NUD_VALID) &&
@@ -556,18 +619,19 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_REACHABLE;
NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
neigh_connect(neigh);
- return;
+ goto out;
}
if (state == NUD_DELAY) {
NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
neigh->nud_state = NUD_PROBE;
- neigh->probes = 0;
+ atomic_set(&neigh->probes, 0);
}
- if (neigh->probes >= neigh_max_probes(neigh)) {
+ if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
struct sk_buff *skb;
neigh->nud_state = NUD_FAILED;
+ notify = 1;
neigh->tbl->stats.res_failed++;
NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
@@ -576,44 +640,60 @@ static void neigh_timer_handler(unsigned long arg)
So that, we try to be accurate and avoid dead loop. --ANK
*/
- while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL)
+ while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
+ write_unlock(&neigh->lock);
neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
skb_queue_purge(&neigh->arp_queue);
- return;
+ goto out;
}
neigh->timer.expires = now + neigh->parms->retrans_time;
add_timer(&neigh->timer);
+ write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
- neigh->probes++;
+ atomic_inc(&neigh->probes);
+ neigh_release(neigh);
+ return;
+
+out:
+ write_unlock(&neigh->lock);
+#ifdef CONFIG_ARPD
+ if (notify && neigh->parms->app_probes)
+ neigh_app_notify(neigh);
+#endif
+ neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
- start_bh_atomic();
+ write_lock_bh(&neigh->lock);
if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
if (neigh->tbl == NULL) {
NEIGH_PRINTK2("neigh %p used after death.\n", neigh);
if (skb)
kfree_skb(skb);
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 1;
}
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
- neigh->probes = neigh->parms->ucast_probes;
+ atomic_set(&neigh->probes, neigh->parms->ucast_probes);
neigh->nud_state = NUD_INCOMPLETE;
neigh->timer.expires = jiffies + neigh->parms->retrans_time;
add_timer(&neigh->timer);
-
+ write_unlock_bh(&neigh->lock);
neigh->ops->solicit(neigh, skb);
- neigh->probes++;
+ atomic_inc(&neigh->probes);
+ write_lock_bh(&neigh->lock);
} else {
neigh->nud_state = NUD_FAILED;
+ write_unlock_bh(&neigh->lock);
+
if (skb)
kfree_skb(skb);
- end_bh_atomic();
return 1;
}
}
@@ -627,7 +707,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
}
__skb_queue_head(&neigh->arp_queue, skb);
}
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 1;
}
if (neigh->nud_state == NUD_STALE) {
@@ -637,7 +717,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
add_timer(&neigh->timer);
}
}
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 0;
}
@@ -649,9 +729,9 @@ static __inline__ void neigh_update_hhs(struct neighbour *neigh)
if (update) {
for (hh=neigh->hh; hh; hh=hh->hh_next) {
- write_lock_irq(&hh->hh_lock);
+ write_lock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
- write_unlock_irq(&hh->hh_lock);
+ write_unlock_bh(&hh->hh_lock);
}
}
}
@@ -663,15 +743,23 @@ static __inline__ void neigh_update_hhs(struct neighbour *neigh)
-- new is new state.
-- override==1 allows to override existing lladdr, if it is different.
-- arp==0 means that the change is administrative.
+
+ Caller MUST hold reference count on the entry.
*/
int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int arp)
{
- u8 old = neigh->nud_state;
+ u8 old;
+ int err;
+ int notify = 0;
struct device *dev = neigh->dev;
+ write_lock_bh(&neigh->lock);
+ old = neigh->nud_state;
+
+ err = -EPERM;
if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
- return -EPERM;
+ goto out;
if (!(new&NUD_VALID)) {
if (old&NUD_IN_TIMER)
@@ -679,7 +767,9 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
if (old&NUD_CONNECTED)
neigh_suspect(neigh);
neigh->nud_state = new;
- return 0;
+ err = 0;
+ notify = old&NUD_VALID;
+ goto out;
}
/* Compare new lladdr with cached one */
@@ -696,14 +786,15 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
lladdr = neigh->ha;
else if (!override)
- return -EPERM;
+ goto out;
}
} else {
/* No address is supplied; if we know something,
use it, otherwise discard the request.
*/
+ err = -EINVAL;
if (!(old&NUD_VALID))
- return -EINVAL;
+ goto out;
lladdr = neigh->ha;
}
@@ -716,10 +807,11 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
+ err = 0;
if (old&NUD_VALID) {
if (lladdr == neigh->ha)
if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
- return 0;
+ goto out;
}
if (old&NUD_IN_TIMER)
del_timer(&neigh->timer);
@@ -729,12 +821,11 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
neigh_update_hhs(neigh);
neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
- if (neigh->parms->app_probes)
- neigh_app_notify(neigh);
+ notify = 1;
#endif
}
if (new == old)
- return 0;
+ goto out;
if (new&NUD_CONNECTED)
neigh_connect(neigh);
else
@@ -747,14 +838,22 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
while (neigh->nud_state&NUD_VALID &&
(skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
struct neighbour *n1 = neigh;
+ write_unlock_bh(&neigh->lock);
/* On shaper/eql skb->dst->neighbour != neigh :( */
if (skb->dst && skb->dst->neighbour)
n1 = skb->dst->neighbour;
n1->output(skb);
+ write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
}
- return 0;
+out:
+ write_unlock_bh(&neigh->lock);
+#ifdef CONFIG_ARPD
+ if (notify && neigh->parms->app_probes)
+ neigh_app_notify(neigh);
+#endif
+ return err;
}
struct neighbour * neigh_event_ns(struct neigh_table *tbl,
@@ -837,15 +936,15 @@ int neigh_resolve_output(struct sk_buff *skb)
int err;
struct device *dev = neigh->dev;
if (dev->hard_header_cache && dst->hh == NULL) {
- start_bh_atomic();
+ write_lock_bh(&neigh->lock);
if (dst->hh == NULL)
neigh_hh_init(neigh, dst, dst->ops->protocol);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
} else {
- start_bh_atomic();
+ read_lock_bh(&neigh->lock);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ read_unlock_bh(&neigh->lock);
}
if (err >= 0)
return neigh->ops->queue_xmit(skb);
@@ -871,9 +970,9 @@ int neigh_connected_output(struct sk_buff *skb)
__skb_pull(skb, skb->nh.raw - skb->data);
- start_bh_atomic();
+ read_lock_bh(&neigh->lock);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ read_unlock_bh(&neigh->lock);
if (err >= 0)
return neigh->ops->queue_xmit(skb);
kfree_skb(skb);
@@ -947,8 +1046,10 @@ struct neigh_parms *neigh_parms_alloc(struct device *dev, struct neigh_table *tb
return NULL;
}
}
+ write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
+ write_unlock_bh(&tbl->lock);
}
return p;
}
@@ -959,10 +1060,11 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
if (parms == NULL || parms == &tbl->parms)
return;
+ write_lock_bh(&tbl->lock);
for (p = &tbl->parms.next; *p; p = &(*p)->next) {
if (*p == parms) {
*p = parms->next;
- synchronize_bh();
+ write_unlock_bh(&tbl->lock);
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(parms);
#endif
@@ -970,6 +1072,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
return;
}
}
+ write_unlock_bh(&tbl->lock);
NEIGH_PRINTK1("neigh_release_parms: not found\n");
}
@@ -981,6 +1084,7 @@ void neigh_table_init(struct neigh_table *tbl)
tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);
init_timer(&tbl->gc_timer);
+ tbl->lock = RW_LOCK_UNLOCKED;
tbl->gc_timer.data = (unsigned long)tbl;
tbl->gc_timer.function = neigh_periodic_timer;
tbl->gc_timer.expires = now + tbl->gc_interval + tbl->parms.reachable_time;
@@ -993,29 +1097,30 @@ void neigh_table_init(struct neigh_table *tbl)
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time*20;
+ write_lock(&neigh_tbl_lock);
tbl->next = neigh_tables;
neigh_tables = tbl;
+ write_unlock(&neigh_tbl_lock);
}
int neigh_table_clear(struct neigh_table *tbl)
{
struct neigh_table **tp;
- start_bh_atomic();
del_timer(&tbl->gc_timer);
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
- end_bh_atomic();
if (tbl->entries)
printk(KERN_CRIT "neighbour leakage\n");
+ write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
*tp = tbl->next;
- synchronize_bh();
break;
}
}
+ write_unlock(&neigh_tbl_lock);
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(&tbl->parms);
#endif
@@ -1037,12 +1142,14 @@ int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -ENODEV;
}
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables; tbl; tbl = tbl->next) {
int err = 0;
struct neighbour *n;
if (tbl->family != ndm->ndm_family)
continue;
+ read_unlock(&neigh_tbl_lock);
if (nda[NDA_DST-1] == NULL ||
nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
@@ -1054,15 +1161,14 @@ int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (dev == NULL)
return -EINVAL;
- start_bh_atomic();
- n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
+ n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
if (n) {
err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
neigh_release(n);
}
- end_bh_atomic();
return err;
}
+ read_unlock(&neigh_tbl_lock);
return -EADDRNOTAVAIL;
}
@@ -1079,12 +1185,15 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -ENODEV;
}
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables; tbl; tbl = tbl->next) {
int err = 0;
struct neighbour *n;
if (tbl->family != ndm->ndm_family)
continue;
+ read_unlock(&neigh_tbl_lock);
+
if (nda[NDA_DST-1] == NULL ||
nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
return -EINVAL;
@@ -1098,8 +1207,7 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (nda[NDA_LLADDR-1] != NULL &&
nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
return -EINVAL;
- start_bh_atomic();
- n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
+ n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
if (n) {
if (nlh->nlmsg_flags&NLM_F_EXCL)
err = -EEXIST;
@@ -1117,9 +1225,9 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
}
if (n)
neigh_release(n);
- end_bh_atomic();
return err;
}
+ read_unlock(&neigh_tbl_lock);
return -EADDRNOTAVAIL;
}
@@ -1139,15 +1247,17 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
ndm->ndm_family = n->ops->family;
ndm->ndm_flags = n->flags;
ndm->ndm_type = n->type;
- ndm->ndm_state = n->nud_state;
ndm->ndm_ifindex = n->dev->ifindex;
RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
+ read_lock_bh(&n->lock);
+ ndm->ndm_state = n->nud_state;
if (n->nud_state&NUD_VALID)
RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
ci.ndm_used = now - n->used;
ci.ndm_confirmed = now - n->confirmed;
ci.ndm_updated = now - n->updated;
ci.ndm_refcnt = atomic_read(&n->refcnt);
+ read_unlock_bh(&n->lock);
RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
@@ -1171,20 +1281,20 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct
if (h < s_h) continue;
if (h > s_h)
s_idx = 0;
- start_bh_atomic();
+ read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[h], idx = 0; n;
n = n->next, idx++) {
if (idx < s_idx)
continue;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
- end_bh_atomic();
+ read_unlock_bh(&tbl->lock);
cb->args[1] = h;
cb->args[2] = idx;
return -1;
}
}
- end_bh_atomic();
+ read_unlock_bh(&tbl->lock);
}
cb->args[1] = h;
@@ -1201,6 +1311,7 @@ int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
if (t < s_t) continue;
if (family && tbl->family != family)
@@ -1210,6 +1321,7 @@ int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (neigh_dump_table(tbl, skb, cb) < 0)
break;
}
+ read_unlock(&neigh_tbl_lock);
cb->args[0] = t;
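
The split of __neigh_lookup() into neigh_lookup() and neigh_create() above relies on a classic idiom: allocate and initialize the new entry outside the table lock, then re-scan the hash chain under the write lock and discard the new entry if a concurrent creator won the race. A hedged userspace sketch of that insert-or-reuse pattern, with hash and entry types invented for illustration:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
	int refcnt;
};

#define NBUCKETS 16
static struct entry *buckets[NBUCKETS];
static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *entry_create(int key)
{
	/* Allocate outside the lock: allocation may be slow. */
	struct entry *n = calloc(1, sizeof(*n));
	if (n == NULL)
		return NULL;
	n->key = key;

	pthread_rwlock_wrlock(&tbl_lock);
	/* Re-check: someone may have inserted the same key meanwhile. */
	for (struct entry *e = buckets[key % NBUCKETS]; e; e = e->next) {
		if (e->key == key) {
			e->refcnt++;
			pthread_rwlock_unlock(&tbl_lock);
			free(n);	/* lost the race: drop our copy */
			return e;
		}
	}
	n->refcnt = 1;
	n->next = buckets[key % NBUCKETS];
	buckets[key % NBUCKETS] = n;
	pthread_rwlock_unlock(&tbl_lock);
	return n;
}
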
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ed27c8e1d..dad9ee252 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -50,22 +50,22 @@
#include <net/sock.h>
#include <net/pkt_sched.h>
-atomic_t rtnl_rlockct;
-struct wait_queue *rtnl_wait;
+DECLARE_MUTEX(rtnl_sem);
-
-void rtnl_lock()
+void rtnl_lock(void)
{
rtnl_shlock();
rtnl_exlock();
}
-
-void rtnl_unlock()
+
+void rtnl_unlock(void)
{
rtnl_exunlock();
rtnl_shunlock();
}
+
+
int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
{
memset(tb, 0, sizeof(struct rtattr*)*maxattr);
@@ -82,8 +82,6 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
#ifdef CONFIG_RTNETLINK
struct sock *rtnl;
-unsigned long rtnl_wlockct;
-
struct rtnetlink_link * rtnetlink_links[NPROTO];
#define _S 1 /* superuser privileges required */
@@ -189,12 +187,14 @@ int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
int s_idx = cb->args[0];
struct device *dev;
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq) <= 0)
break;
}
+ read_unlock(&dev_base_lock);
cb->args[0] = idx;
return skb->len;
@@ -216,9 +216,7 @@ int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (idx > s_idx)
memset(&cb->args[0], 0, sizeof(cb->args));
- if (rtnetlink_links[idx][type].dumpit(skb, cb) == 0)
- continue;
- if (skb_tailroom(skb) < 256)
+ if (rtnetlink_links[idx][type].dumpit(skb, cb))
break;
}
cb->family = idx;
@@ -245,8 +243,6 @@ void rtmsg_ifinfo(int type, struct device *dev)
static int rtnetlink_done(struct netlink_callback *cb)
{
- if (cap_raised(NETLINK_CB(cb->skb).eff_cap, CAP_NET_ADMIN) && cb->nlh->nlmsg_flags&NLM_F_ATOMIC)
- rtnl_shunlock();
return 0;
}
@@ -314,15 +310,9 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
if (link->dumpit == NULL)
goto err_inval;
- /* Super-user locks all the tables to get atomic snapshot */
- if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)
- && nlh->nlmsg_flags&NLM_F_ATOMIC)
- atomic_inc(&rtnl_rlockct);
if ((*errp = netlink_dump_start(rtnl, skb, nlh,
link->dumpit,
rtnetlink_done)) != 0) {
- if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN) && nlh->nlmsg_flags&NLM_F_ATOMIC)
- atomic_dec(&rtnl_rlockct);
return -1;
}
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b76364371..5ea21d7b4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,7 +4,7 @@
* Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
- * Version: $Id: skbuff.c,v 1.55 1999/02/23 08:12:27 davem Exp $
+ * Version: $Id: skbuff.c,v 1.56 1999/05/29 23:20:42 davem Exp $
*
* Fixes:
* Alan Cox : Fixed the worst of the load balancer bugs.
@@ -62,11 +62,6 @@
#include <asm/system.h>
/*
- * Skb list spinlock
- */
-spinlock_t skb_queue_lock = SPIN_LOCK_UNLOCKED;
-
-/*
* Resource tracking variables
*/
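
The deleted skb_queue_lock was one spinlock shared by every skb list in the system; in 2.3.x each sk_buff_head carries its own lock, set up by skb_queue_head_init(). A minimal sketch of the resulting usage (example_q is illustrative):

static struct sk_buff_head example_q;

static void example_setup(void)
{
	/* Initializes the list head and its embedded spinlock. */
	skb_queue_head_init(&example_q);
}

static void example_enqueue(struct sk_buff *skb)
{
	/* Takes only example_q's own spinlock internally, so unrelated
	 * queues no longer serialize against each other. */
	skb_queue_tail(&example_q, skb);
}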
diff --git a/net/core/sock.c b/net/core/sock.c
index e0eb41a01..c38e92e93 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,7 +7,7 @@
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.80 1999/05/08 03:04:34 davem Exp $
+ * Version: $Id: sock.c,v 1.82 1999/05/27 00:37:03 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -487,10 +487,10 @@ struct sock *sk_alloc(int family, int priority, int zero_it)
{
struct sock *sk = kmem_cache_alloc(sk_cachep, priority);
- if(sk) {
- if (zero_it)
- memset(sk, 0, sizeof(struct sock));
+ if(sk && zero_it) {
+ memset(sk, 0, sizeof(struct sock));
sk->family = family;
+ sock_lock_init(sk);
}
return sk;
@@ -650,7 +650,7 @@ unsigned long sock_rspace(struct sock *sk)
*/
static void sock_wait_for_wmem(struct sock * sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
sk->socket->flags &= ~SO_NOSPACE;
add_wait_queue(sk->sleep, &wait);
@@ -736,24 +736,44 @@ failure:
return NULL;
}
-
-void __release_sock(struct sock *sk)
+void lock_sock(struct sock *sk)
{
-#ifdef CONFIG_INET
- if (!sk->prot || !sk->backlog_rcv)
- return;
-
- /* See if we have any packets built up. */
- start_bh_atomic();
- while (!skb_queue_empty(&sk->back_log)) {
- struct sk_buff * skb = sk->back_log.next;
- __skb_unlink(skb, &sk->back_log);
- sk->backlog_rcv(sk, skb);
+ spin_lock_bh(&sk->lock.slock);
+ if(sk->lock.users != 0) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(&sk->lock.wq, &wait);
+ for(;;) {
+ current->state = TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE;
+ spin_unlock_bh(&sk->lock.slock);
+ schedule();
+ spin_lock_bh(&sk->lock.slock);
+ if(!sk->lock.users)
+ break;
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&sk->lock.wq, &wait);
}
- end_bh_atomic();
-#endif
+ sk->lock.users = 1;
+ spin_unlock_bh(&sk->lock.slock);
}
+void release_sock(struct sock *sk)
+{
+ spin_lock_bh(&sk->lock.slock);
+ sk->lock.users = 0;
+ if(sk->backlog.tail != NULL) {
+ struct sk_buff *skb = sk->backlog.head;
+ do {
+ struct sk_buff *next = skb->next;
+ skb->next = NULL;
+ sk->backlog_rcv(sk, skb);
+ skb = next;
+ } while(skb != NULL);
+ sk->backlog.head = sk->backlog.tail = NULL;
+ }
+ wake_up(&sk->lock.wq);
+ spin_unlock_bh(&sk->lock.slock);
+}
/*
* Generic socket manager library. Most simpler socket families
@@ -1019,7 +1039,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
{
skb_queue_head_init(&sk->receive_queue);
skb_queue_head_init(&sk->write_queue);
- skb_queue_head_init(&sk->back_log);
skb_queue_head_init(&sk->error_queue);
init_timer(&sk->timer);
@@ -1036,7 +1055,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->type = sock->type;
sk->sleep = &sock->wait;
sock->sk = sk;
- }
+ } else
+ sk->sleep = NULL;
sk->state_change = sock_def_wakeup;
sk->data_ready = sock_def_readable;
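
The new lock_sock()/release_sock() pair above splits socket locking into a process-context owner flag (sk->lock.users) and a BH spinlock (sk->lock.slock): BH code that finds the socket owned queues packets on sk->backlog, and release_sock() drains them. A sketch of the two sides, assuming the fields shown in the hunk; example_backlog_add() is a hypothetical helper:

/* Process context: may sleep while waiting for the lock. */
static void example_user_path(struct sock *sk)
{
	lock_sock(sk);
	/* ... modify state that BH handlers must not see half-done ... */
	release_sock(sk);	/* also runs any backlogged packets */
}

/* BH context: never sleeps; defers to the backlog if the user owns sk. */
static void example_bh_path(struct sock *sk, struct sk_buff *skb)
{
	spin_lock(&sk->lock.slock);
	if (sk->lock.users == 0)
		sk->backlog_rcv(sk, skb);
	else
		example_backlog_add(sk, skb);	/* hypothetical: append to sk->backlog */
	spin_unlock(&sk->lock.slock);
}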
diff --git a/net/decnet/README b/net/decnet/README
index 316435d29..9384fa322 100644
--- a/net/decnet/README
+++ b/net/decnet/README
@@ -1,15 +1,8 @@
Linux DECnet Project
======================
-For information on the Linux DECnet Project and the latest progress,
-look at the project home page:
-
-http://www.sucs.swan.ac.uk/~rohan/DECnet/
-
-To contribute either mail <SteveW@ACM.org> or post on one of the Linux
-mailing lists (either linux-net or netdev). DECnet for Linux will not
-be distributed as part of the 2.2.xx kernel series. It is available as a
-patch from the above site. Expect DECnet to arrive as part of the standard
-kernel distribution early in the 2.3.xx series.
+The documentation for this kernel subsystem is available in the
+Documentation/networking subdirectory of this distribution and also
+online at http://www.sucs.swan.ac.uk/~rohan/DECnet/index.html.
Steve Whitehouse <SteveW@ACM.org>
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index bce35d484..128c2a5e9 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -63,10 +63,12 @@
__initfunc(void eth_setup(char *str, int *ints))
{
- struct device *d = dev_base;
+ struct device *d;
if (!str || !*str)
return;
+
+ d = dev_base;
while (d)
{
if (!strcmp(str,d->name))
@@ -246,6 +248,7 @@ int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh)
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
memcpy(eth->h_dest, neigh->ha, dev->addr_len);
+ hh->hh_len = ETH_HLEN;
return 0;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 70fcf4024..ca0f27d0c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,7 +5,7 @@
*
* PF_INET protocol family socket handler.
*
- * Version: $Id: af_inet.c,v 1.87 1999/04/22 10:07:33 davem Exp $
+ * Version: $Id: af_inet.c,v 1.91 1999/06/09 08:28:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -147,22 +147,17 @@ static __inline__ void kill_sk_queues(struct sock *sk)
struct sk_buff *skb;
/* First the read buffer. */
- while((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
- /* This will take care of closing sockets that were
- * listening and didn't accept everything.
- */
- if (skb->sk != NULL && skb->sk != sk)
- skb->sk->prot->close(skb->sk, 0);
+ while((skb = skb_dequeue(&sk->receive_queue)) != NULL)
kfree_skb(skb);
- }
/* Next, the error queue. */
while((skb = skb_dequeue(&sk->error_queue)) != NULL)
kfree_skb(skb);
- /* Now the backlog. */
- while((skb=skb_dequeue(&sk->back_log)) != NULL)
- kfree_skb(skb);
+ /* It is _impossible_ for the backlog to contain anything
+ * when we get here. All user references to this socket
+ * have gone away; only the net layer can touch it.
+ */
}
static __inline__ void kill_sk_now(struct sock *sk)
@@ -195,14 +190,19 @@ static __inline__ void kill_sk_later(struct sock *sk)
sk->destroy = 1;
sk->ack_backlog = 0;
- release_sock(sk);
+ bh_unlock_sock(sk);
net_reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME);
}
+/* Callers must hold the BH spinlock.
+ *
+ * At this point, there should be no process reference to this
+ * socket, and thus no user references at all. Therefore we
+ * can assume the socket waitqueue is inactive and nobody will
+ * try to jump onto it.
+ */
void destroy_sock(struct sock *sk)
{
- lock_sock(sk); /* just to be safe. */
-
/* Now we can no longer get new packets or once the
* timers are killed, send them.
*/
@@ -213,12 +213,6 @@ void destroy_sock(struct sock *sk)
kill_sk_queues(sk);
- /* Now if it has a half accepted/ closed socket. */
- if (sk->pair) {
- sk->pair->prot->close(sk->pair, 0);
- sk->pair = NULL;
- }
-
/* Now if everything is gone we can free the socket
* structure, otherwise we need to keep it around until
* everything is gone.
@@ -284,6 +278,14 @@ static int inet_autobind(struct sock *sk)
return 0;
}
+/* Listening INET sockets never sleep to wait for memory, so
+ * it is completely silly to wake them up on queue space
+ * available events. So we hook them up to this dummy callback.
+ */
+static void inet_listen_write_space(struct sock *sk)
+{
+}
+
/*
* Move a socket into listening state.
*/
@@ -310,6 +312,7 @@ int inet_listen(struct socket *sock, int backlog)
dst_release(xchg(&sk->dst_cache, NULL));
sk->prot->rehash(sk);
add_to_prot_sklist(sk);
+ sk->write_space = inet_listen_write_space;
}
sk->socket->flags |= SO_ACCEPTCON;
return(0);
@@ -368,7 +371,7 @@ static int inet_create(struct socket *sock, int protocol)
if (protocol && protocol != IPPROTO_UDP)
goto free_and_noproto;
protocol = IPPROTO_UDP;
- sk->no_check = UDP_NO_CHECK;
+ sk->no_check = UDP_CSUM_DEFAULT;
sk->ip_pmtudisc = IP_PMTUDISC_DONT;
prot=&udp_prot;
sock->ops = &inet_dgram_ops;
@@ -578,7 +581,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
static void inet_wait_for_connect(struct sock *sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
@@ -684,14 +687,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
if (sk1->prot->accept == NULL)
goto do_err;
- /* Restore the state if we have been interrupted, and then returned. */
- if (sk1->pair != NULL) {
- sk2 = sk1->pair;
- sk1->pair = NULL;
- } else {
- if((sk2 = sk1->prot->accept(sk1,flags)) == NULL)
- goto do_sk1_err;
- }
+ if((sk2 = sk1->prot->accept(sk1,flags)) == NULL)
+ goto do_sk1_err;
/*
* We've been passed an extra socket.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 2c311f233..a3ca88701 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,6 +1,6 @@
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.77 1999/03/21 05:22:30 davem Exp $
+ * Version: $Id: arp.c,v 1.78 1999/06/09 10:10:36 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
@@ -119,6 +119,11 @@
#include <asm/system.h>
#include <asm/uaccess.h>
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+static char *ax2asc2(ax25_address *a, char *buf);
+#endif
+
+
/*
* Interface to generic neighbour cache.
*/
@@ -304,7 +309,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
u8 *dst_ha = NULL;
struct device *dev = neigh->dev;
u32 target = *(u32*)neigh->primary_key;
- int probes = neigh->probes;
+ int probes = atomic_read(&neigh->probes);
if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL)
saddr = skb->nh.iph->saddr;
@@ -315,6 +320,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!(neigh->nud_state&NUD_VALID))
printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
+ read_lock_bh(&neigh->lock);
} else if ((probes -= neigh->parms->app_probes) < 0) {
#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
@@ -324,6 +330,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
dst_ha, dev->dev_addr, NULL);
+ if (dst_ha)
+ read_unlock_bh(&neigh->lock);
}
/* OBSOLETE FUNCTIONS */
@@ -372,29 +380,25 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
if (arp_set_predefined(inet_addr_type(paddr), haddr, paddr, dev))
return 0;
- start_bh_atomic();
n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
if (n) {
n->used = jiffies;
if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) {
- memcpy(haddr, n->ha, dev->addr_len);
+ read_lock_bh(&n->lock);
+ memcpy(haddr, n->ha, dev->addr_len);
+ read_unlock_bh(&n->lock);
neigh_release(n);
- end_bh_atomic();
return 0;
}
+ neigh_release(n);
} else
kfree_skb(skb);
- neigh_release(n);
- end_bh_atomic();
return 1;
}
/* END OF OBSOLETE FUNCTIONS */
-/*
- * Note: requires bh_atomic locking.
- */
int arp_bind_neighbour(struct dst_entry *dst)
{
struct device *dev = dst->dev;
@@ -672,7 +676,8 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
(addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
(IN_DEV_PROXY_ARP(in_dev) || pneigh_lookup(&arp_tbl, &tip, dev, 0)))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
- neigh_release(n);
+ if (n)
+ neigh_release(n);
if (skb->stamp.tv_sec == 0 ||
skb->pkt_type == PACKET_HOST ||
@@ -785,7 +790,6 @@ int arp_req_set(struct arpreq *r, struct device * dev)
return -EINVAL;
err = -ENOBUFS;
- start_bh_atomic();
neigh = __neigh_lookup(&arp_tbl, &ip, dev, 1);
if (neigh) {
unsigned state = NUD_STALE;
@@ -795,7 +799,6 @@ int arp_req_set(struct arpreq *r, struct device * dev)
r->arp_ha.sa_data : NULL, state, 1, 0);
neigh_release(neigh);
}
- end_bh_atomic();
return err;
}
@@ -819,17 +822,17 @@ static int arp_req_get(struct arpreq *r, struct device *dev)
struct neighbour *neigh;
int err = -ENXIO;
- start_bh_atomic();
- neigh = __neigh_lookup(&arp_tbl, &ip, dev, 0);
+ neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
+ read_lock_bh(&neigh->lock);
memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
r->arp_ha.sa_family = dev->type;
strncpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
- r->arp_flags = arp_state_to_flags(neigh);
neigh_release(neigh);
err = 0;
}
- end_bh_atomic();
return err;
}
@@ -867,14 +870,12 @@ int arp_req_delete(struct arpreq *r, struct device * dev)
return -EINVAL;
}
err = -ENXIO;
- start_bh_atomic();
- neigh = __neigh_lookup(&arp_tbl, &ip, dev, 0);
+ neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
if (neigh->nud_state&~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED, 1, 0);
neigh_release(neigh);
}
- end_bh_atomic();
return err;
}
@@ -961,16 +962,16 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
char hbuffer[HBUFFERLEN];
int i,j,k;
const char hexbuf[] = "0123456789ABCDEF";
+ char abuf[16];
size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
pos+=size;
len+=size;
- neigh_table_lock(&arp_tbl);
-
- for(i=0; i<=NEIGH_HASHMASK; i++) {
+ for(i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n;
+ read_lock_bh(&arp_tbl.lock);
for (n=arp_tbl.hash_buckets[i]; n; n=n->next) {
struct device *dev = n->dev;
int hatype = dev->type;
@@ -979,17 +980,14 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
if (!(n->nud_state&~NUD_NOARP))
continue;
- /* I'd get great pleasure deleting
- this ugly code. Let's output it in hexadecimal format.
- "arp" utility will eventually repaired --ANK
- */
-#if 1 /* UGLY CODE */
+ read_lock(&n->lock);
+
/*
* Convert hardware address to XX:XX:XX:XX ... form.
*/
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
- strcpy(hbuffer,ax2asc((ax25_address *)n->ha));
+ ax2asc2((ax25_address *)n->ha, hbuffer);
else {
#endif
for (k=0,j=0;k<HBUFFERLEN-3 && j<dev->addr_len;j++) {
@@ -998,37 +996,33 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
hbuffer[k++]=':';
}
hbuffer[--k]=0;
-
+
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
}
#endif
-#else
- if ((neigh->nud_state&NUD_VALID) && dev->addr_len) {
- int j;
- for (j=0; j < dev->addr_len; j++)
- sprintf(hbuffer+2*j, "%02x", neigh->ha[j]);
- } else
- sprintf(hbuffer, "0");
-#endif
size = sprintf(buffer+len,
"%-17s0x%-10x0x%-10x%s",
- in_ntoa(*(u32*)n->primary_key),
+ in_ntoa2(*(u32*)n->primary_key, abuf),
hatype,
arp_state_to_flags(n),
hbuffer);
size += sprintf(buffer+len+size,
" %-17s %s\n",
"*", dev->name);
+ read_unlock(&n->lock);
len += size;
pos += size;
if (pos <= offset)
len=0;
- if (pos >= offset+length)
- goto done;
+ if (pos >= offset+length) {
+ read_unlock_bh(&arp_tbl.lock);
+ goto done;
+ }
}
+ read_unlock_bh(&arp_tbl.lock);
}
for (i=0; i<=PNEIGH_HASHMASK; i++) {
@@ -1039,7 +1033,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
size = sprintf(buffer+len,
"%-17s0x%-10x0x%-10x%s",
- in_ntoa(*(u32*)n->key),
+ in_ntoa2(*(u32*)n->key, abuf),
hatype,
ATF_PUBL|ATF_PERM,
"00:00:00:00:00:00");
@@ -1058,7 +1052,6 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
}
done:
- neigh_table_unlock(&arp_tbl);
*start = buffer+len-(pos-offset); /* Start of wanted data */
len = pos-offset; /* Start slop */
@@ -1117,14 +1110,13 @@ __initfunc(void arp_init (void))
}
-#ifdef CONFIG_AX25_MODULE
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
/*
* ax25 -> ASCII conversion
*/
-char *ax2asc(ax25_address *a)
+char *ax2asc2(ax25_address *a, char *buf)
{
- static char buf[11];
char c, *s;
int n;
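
ax2asc() returned a pointer into a single static buffer, which is unsafe once readers on different CPUs can format addresses concurrently; ax2asc2() writes into a caller-supplied buffer instead. A sketch of the new calling convention, reusing HBUFFERLEN from the hunk above:

static void example_show_ha(struct neighbour *n)
{
	char hbuffer[HBUFFERLEN];	/* per-caller storage, no shared state */

	ax2asc2((ax25_address *)n->ha, hbuffer);
	printk(KERN_DEBUG "neighbour hw addr: %s\n", hbuffer);
}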
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c8b0fbbc8..ff2c930d1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1,7 +1,7 @@
/*
* NET3 IP device support routines.
*
- * Version: $Id: devinet.c,v 1.28 1999/05/08 20:00:16 davem Exp $
+ * Version: $Id: devinet.c,v 1.32 1999/06/09 11:15:33 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -607,41 +607,39 @@ inet_gifconf(struct device *dev, char *buf, int len)
{
struct in_device *in_dev = dev->ip_ptr;
struct in_ifaddr *ifa;
- struct ifreq ifr;
+ struct ifreq *ifr = (struct ifreq *) buf;
int done=0;
if (in_dev==NULL || (ifa=in_dev->ifa_list)==NULL)
return 0;
for ( ; ifa; ifa = ifa->ifa_next) {
- if (!buf) {
+ if (!ifr) {
- done += sizeof(ifr);
+ done += sizeof(*ifr);
continue;
}
- if (len < (int) sizeof(ifr))
+ if (len < (int) sizeof(*ifr))
return done;
- memset(&ifr, 0, sizeof(struct ifreq));
+ memset(ifr, 0, sizeof(struct ifreq));
if (ifa->ifa_label)
- strcpy(ifr.ifr_name, ifa->ifa_label);
+ strcpy(ifr->ifr_name, ifa->ifa_label);
else
- strcpy(ifr.ifr_name, dev->name);
+ strcpy(ifr->ifr_name, dev->name);
- (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = AF_INET;
- (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = ifa->ifa_local;
+ (*(struct sockaddr_in *) &ifr->ifr_addr).sin_family = AF_INET;
+ (*(struct sockaddr_in *) &ifr->ifr_addr).sin_addr.s_addr = ifa->ifa_local;
- if (copy_to_user(buf, &ifr, sizeof(struct ifreq)))
- return -EFAULT;
- buf += sizeof(struct ifreq);
+ ifr++;
len -= sizeof(struct ifreq);
done += sizeof(struct ifreq);
}
return done;
}
-u32 inet_select_addr(struct device *dev, u32 dst, int scope)
+u32 inet_select_addr(const struct device *dev, u32 dst, int scope)
{
u32 addr = 0;
- struct in_device *in_dev = dev->ip_ptr;
+ const struct in_device *in_dev = dev->ip_ptr;
if (in_dev == NULL)
return 0;
@@ -661,15 +659,19 @@ u32 inet_select_addr(struct device *dev, u32 dst, int scope)
in this case. It is important that lo is the first interface
in dev_base list.
*/
+ read_lock(&dev_base_lock);
for (dev=dev_base; dev; dev=dev->next) {
if ((in_dev=dev->ip_ptr) == NULL)
continue;
for_primary_ifa(in_dev) {
- if (ifa->ifa_scope <= scope)
+ if (ifa->ifa_scope <= scope) {
+ read_unlock(&dev_base_lock);
return ifa->ifa_local;
+ }
} endfor_ifa(in_dev);
}
+ read_unlock(&dev_base_lock);
return 0;
}
@@ -790,6 +792,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = cb->args[0];
s_ip_idx = ip_idx = cb->args[1];
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
@@ -807,6 +810,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
}
}
done:
+ read_unlock(&dev_base_lock);
cb->args[0] = idx;
cb->args[1] = ip_idx;
@@ -881,11 +885,13 @@ void inet_forward_change()
ipv4_devconf.accept_redirects = !on;
ipv4_devconf_dflt.forwarding = on;
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
struct in_device *in_dev = dev->ip_ptr;
if (in_dev)
in_dev->cnf.forwarding = on;
}
+ read_unlock(&dev_base_lock);
rt_cache_flush(0);
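
Every walk of the device list in these hunks is now bracketed by dev_base_lock, the reader/writer lock guarding dev_base. The read side is cheap and shareable; a minimal sketch:

static int example_count_devs(void)
{
	struct device *dev;
	int n = 0;

	read_lock(&dev_base_lock);	/* excludes list writers only */
	for (dev = dev_base; dev; dev = dev->next)
		n++;
	read_unlock(&dev_base_lock);
	return n;
}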
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a17470483..d57d4daa9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: FIB frontend.
*
- * Version: $Id: fib_frontend.c,v 1.15 1999/03/21 05:22:31 davem Exp $
+ * Version: $Id: fib_frontend.c,v 1.16 1999/06/09 10:10:42 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -123,13 +123,11 @@ fib_get_procinfo(char *buffer, char **start, off_t offset, int length, int dummy
first = 0;
}
- /* rtnl_shlock(); -- it is pointless at the moment --ANK */
if (main_table && count > 0) {
int n = main_table->tb_get_info(main_table, ptr, first, count);
count -= n;
ptr += n*128;
}
- /* rtnl_shunlock(); */
len = ptr - *start;
if (len >= length)
return length;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index d9e029cef..0472f6118 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -5,7 +5,7 @@
*
* IPv4 FIB: lookup engine and maintenance routines.
*
- * Version: $Id: fib_hash.c,v 1.8 1999/03/25 10:04:17 davem Exp $
+ * Version: $Id: fib_hash.c,v 1.10 1999/06/09 10:10:45 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -145,13 +145,16 @@ extern __inline__ int fn_key_leq(fn_key_t a, fn_key_t b)
return a.datum <= b.datum;
}
+static rwlock_t fib_hash_lock = RW_LOCK_UNLOCKED;
+
#define FZ_MAX_DIVISOR 1024
#ifdef CONFIG_IP_ROUTE_LARGE_TABLES
+/* The fib hash lock must be held when this is called. */
static __inline__ void fn_rebuild_zone(struct fn_zone *fz,
- struct fib_node **old_ht,
- int old_divisor)
+ struct fib_node **old_ht,
+ int old_divisor)
{
int i;
struct fib_node *f, **fp, *next;
@@ -198,13 +201,13 @@ static void fn_rehash_zone(struct fn_zone *fz)
if (ht) {
memset(ht, 0, new_divisor*sizeof(struct fib_node*));
- start_bh_atomic();
+ write_lock_bh(&fib_hash_lock);
old_ht = fz->fz_hash;
fz->fz_hash = ht;
fz->fz_hashmask = new_hashmask;
fz->fz_divisor = new_divisor;
fn_rebuild_zone(fz, old_ht, old_divisor);
- end_bh_atomic();
+ write_unlock_bh(&fib_hash_lock);
kfree(old_ht);
}
}
@@ -246,6 +249,7 @@ fn_new_zone(struct fn_hash *table, int z)
for (i=z+1; i<=32; i++)
if (table->fn_zones[i])
break;
+ write_lock_bh(&fib_hash_lock);
if (i>32) {
/* No more specific masks, we are the first. */
fz->fz_next = table->fn_zone_list;
@@ -255,6 +259,7 @@ fn_new_zone(struct fn_hash *table, int z)
table->fn_zones[i]->fz_next = fz;
}
table->fn_zones[z] = fz;
+ write_unlock_bh(&fib_hash_lock);
return fz;
}
@@ -265,6 +270,7 @@ fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result
struct fn_zone *fz;
struct fn_hash *t = (struct fn_hash*)tb->tb_data;
+ read_lock(&fib_hash_lock);
for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
struct fib_node *f;
fn_key_t k = fz_key(key->dst, fz);
@@ -293,13 +299,16 @@ fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result
res->scope = f->fn_scope;
res->prefixlen = fz->fz_order;
res->prefix = &fz_prefix(f->fn_key, fz);
- return 0;
+ goto out;
}
if (err < 0)
- return err;
+ goto out;
}
}
- return 1;
+ err = 1;
+out:
+ read_unlock(&fib_hash_lock);
+ return err;
}
static int fn_hash_last_dflt=-1;
@@ -344,6 +353,7 @@ fn_hash_select_default(struct fib_table *tb, const struct rt_key *key, struct fi
last_resort = NULL;
order = -1;
+ read_lock(&fib_hash_lock);
for (f = fz->fz_hash[0]; f; f = f->fn_next) {
struct fib_info *next_fi = FIB_INFO(f);
@@ -364,7 +374,7 @@ fn_hash_select_default(struct fib_table *tb, const struct rt_key *key, struct fi
} else if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
res->fi = fi;
fn_hash_last_dflt = order;
- return;
+ goto out;
}
fi = next_fi;
order++;
@@ -372,18 +382,20 @@ fn_hash_select_default(struct fib_table *tb, const struct rt_key *key, struct fi
if (order<=0 || fi==NULL) {
fn_hash_last_dflt = -1;
- return;
+ goto out;
}
if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
res->fi = fi;
fn_hash_last_dflt = order;
- return;
+ goto out;
}
if (last_idx >= 0)
res->fi = last_resort;
fn_hash_last_dflt = last_idx;
+out:
+ read_unlock(&fib_hash_lock);
}
#define FIB_SCAN(f, fp) \
@@ -457,6 +469,7 @@ rta->rta_prefsrc ? *(u32*)rta->rta_prefsrc : 0);
fp = fz_chain_p(key, fz);
+
/*
* Scan list to find the first route with the same destination
*/
@@ -560,14 +573,17 @@ replace:
*/
new_f->fn_next = f;
+ write_lock_bh(&fib_hash_lock);
*fp = new_f;
+ write_unlock_bh(&fib_hash_lock);
fz->fz_nent++;
if (del_fp) {
f = *del_fp;
/* Unlink replaced node */
+ write_lock_bh(&fib_hash_lock);
*del_fp = f->fn_next;
- synchronize_bh();
+ write_unlock_bh(&fib_hash_lock);
if (!(f->fn_state&FN_S_ZOMBIE))
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
@@ -619,11 +635,13 @@ FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
fp = fz_chain_p(key, fz);
+
FIB_SCAN(f, fp) {
if (fn_key_eq(f->fn_key, key))
break;
- if (fn_key_leq(key, f->fn_key))
+ if (fn_key_leq(key, f->fn_key)) {
return -ESRCH;
+ }
}
#ifdef CONFIG_IP_ROUTE_TOS
FIB_SCAN_KEY(f, fp, key) {
@@ -637,9 +655,9 @@ FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
FIB_SCAN_TOS(f, fp, key, tos) {
struct fib_info * fi = FIB_INFO(f);
- if (f->fn_state&FN_S_ZOMBIE)
+ if (f->fn_state&FN_S_ZOMBIE) {
return -ESRCH;
-
+ }
matched++;
if (del_fp == NULL &&
@@ -655,8 +673,9 @@ FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
if (matched != 1) {
+ write_lock_bh(&fib_hash_lock);
*del_fp = f->fn_next;
- synchronize_bh();
+ write_unlock_bh(&fib_hash_lock);
if (f->fn_state&FN_S_ACCESSED)
rt_cache_flush(-1);
@@ -687,8 +706,9 @@ fn_flush_list(struct fib_node ** fp, int z, struct fn_hash *table)
struct fib_info *fi = FIB_INFO(f);
if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
+ write_lock_bh(&fib_hash_lock);
*fp = f->fn_next;
- synchronize_bh();
+ write_unlock_bh(&fib_hash_lock);
fn_free_node(f);
found++;
@@ -727,6 +747,7 @@ static int fn_hash_get_info(struct fib_table *tb, char *buffer, int first, int c
int pos = 0;
int n = 0;
+ read_lock(&fib_hash_lock);
for (fz=table->fn_zone_list; fz; fz = fz->fz_next) {
int i;
struct fib_node *f;
@@ -752,10 +773,12 @@ static int fn_hash_get_info(struct fib_table *tb, char *buffer, int first, int c
FZ_MASK(fz), buffer);
buffer += 128;
if (++n >= count)
- return n;
+ goto out;
}
}
}
+out:
+ read_unlock(&fib_hash_lock);
return n;
}
#endif
@@ -818,15 +841,18 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin
struct fn_hash *table = (struct fn_hash*)tb->tb_data;
s_m = cb->args[1];
+ read_lock(&fib_hash_lock);
for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
if (m < s_m) continue;
if (m > s_m)
memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0]));
if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
cb->args[1] = m;
+ read_unlock(&fib_hash_lock);
return -1;
}
}
+ read_unlock(&fib_hash_lock);
cb->args[1] = m;
return skb->len;
}
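
Where the old code unlinked a node and called synchronize_bh() to wait out BH readers, the new code takes fib_hash_lock for writing: once write_unlock_bh() returns, no reader can still hold a pointer obtained from the chain. A sketch of that unlink-then-free discipline, with identifiers from the hunks above:

static void example_unlink_node(struct fib_node **fp)
{
	struct fib_node *f = *fp;

	write_lock_bh(&fib_hash_lock);
	*fp = f->fn_next;		/* readers are excluded right now */
	write_unlock_bh(&fib_hash_lock);

	fn_free_node(f);		/* safe: no reader can reach f anymore */
}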
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 868c44c31..97074198e 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: policy rules.
*
- * Version: $Id: fib_rules.c,v 1.9 1999/03/25 10:04:23 davem Exp $
+ * Version: $Id: fib_rules.c,v 1.11 1999/06/09 10:10:47 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -79,12 +79,14 @@ static struct fib_rule main_rule = { &default_rule, 0x7FFE, RT_TABLE_MAIN, RTN_U
static struct fib_rule local_rule = { &main_rule, 0, RT_TABLE_LOCAL, RTN_UNICAST, };
static struct fib_rule *fib_rules = &local_rule;
+static rwlock_t fib_rules_lock = RW_LOCK_UNLOCKED;
int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
struct rtattr **rta = arg;
struct rtmsg *rtm = NLMSG_DATA(nlh);
struct fib_rule *r, **rp;
+ int err = -ESRCH;
for (rp=&fib_rules; (r=*rp) != NULL; rp=&r->r_next) {
if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
@@ -99,18 +101,20 @@ int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
(!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
(!rta[RTA_IIF-1] || strcmp(RTA_DATA(rta[RTA_IIF-1]), r->r_ifname) == 0) &&
(!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
+ err = -EPERM;
if (r == &local_rule)
- return -EPERM;
+ break;
+ write_lock_bh(&fib_rules_lock);
*rp = r->r_next;
- synchronize_bh();
-
+ write_unlock_bh(&fib_rules_lock);
if (r != &default_rule && r != &main_rule)
kfree(r);
- return 0;
+ err = 0;
+ break;
}
}
- return -ESRCH;
+ return err;
}
/* Allocate new unique table id */
@@ -205,7 +209,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
}
new_r->r_next = r;
+ write_lock_bh(&fib_rules_lock);
*rp = new_r;
+ write_unlock_bh(&fib_rules_lock);
return 0;
}
@@ -250,8 +256,11 @@ static void fib_rules_detach(struct device *dev)
struct fib_rule *r;
for (r=fib_rules; r; r=r->r_next) {
- if (r->r_ifindex == dev->ifindex)
+ if (r->r_ifindex == dev->ifindex) {
+ write_lock_bh(&fib_rules_lock);
r->r_ifindex = -1;
+ write_unlock_bh(&fib_rules_lock);
+ }
}
}
@@ -260,8 +269,11 @@ static void fib_rules_attach(struct device *dev)
struct fib_rule *r;
for (r=fib_rules; r; r=r->r_next) {
- if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
+ if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0) {
+ write_lock_bh(&fib_rules_lock);
r->r_ifindex = dev->ifindex;
+ write_unlock_bh(&fib_rules_lock);
+ }
}
}
@@ -275,6 +287,7 @@ int fib_lookup(const struct rt_key *key, struct fib_result *res)
u32 saddr = key->src;
FRprintk("Lookup: %08x <- %08x ", key->dst, key->src);
+ read_lock(&fib_rules_lock);
for (r = fib_rules; r; r=r->r_next) {
if (((saddr^r->r_src) & r->r_srcmask) ||
((daddr^r->r_dst) & r->r_dstmask) ||
@@ -294,11 +307,14 @@ FRprintk("tb %d r %d ", r->r_table, r->r_action);
policy = r;
break;
case RTN_UNREACHABLE:
+ read_unlock(&fib_rules_lock);
return -ENETUNREACH;
default:
case RTN_BLACKHOLE:
+ read_unlock(&fib_rules_lock);
return -EINVAL;
case RTN_PROHIBIT:
+ read_unlock(&fib_rules_lock);
return -EACCES;
}
@@ -308,12 +324,16 @@ FRprintk("tb %d r %d ", r->r_table, r->r_action);
if (err == 0) {
FRprintk("ok\n");
res->r = policy;
+ read_unlock(&fib_rules_lock);
return 0;
}
- if (err < 0 && err != -EAGAIN)
+ if (err < 0 && err != -EAGAIN) {
+ read_unlock(&fib_rules_lock);
return err;
+ }
}
FRprintk("FAILURE\n");
+ read_unlock(&fib_rules_lock);
return -ENETUNREACH;
}
@@ -400,12 +420,14 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
int s_idx = cb->args[0];
struct fib_rule *r;
+ read_lock(&fib_rules_lock);
for (r=fib_rules, idx=0; r; r = r->r_next, idx++) {
if (idx < s_idx)
continue;
if (inet_fill_rule(skb, r, cb) < 0)
break;
}
+ read_unlock(&fib_rules_lock);
cb->args[0] = idx;
return skb->len;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 199550ffb..9456c7f29 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1,9 +1,9 @@
/*
* NET3: Implementation of the ICMP protocol layer.
*
- * Alan Cox, <alan@cymru.net>
+ * Alan Cox, <alan@redhat.com>
*
- * Version: $Id: icmp.c,v 1.52 1999/03/21 12:04:11 davem Exp $
+ * Version: $Id: icmp.c,v 1.57 1999/06/09 10:10:50 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -699,8 +699,8 @@ static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, int len)
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
if (net_ratelimit())
- printk(KERN_INFO "ICMP: %s: fragmentation needed and DF set.\n",
- in_ntoa(iph->daddr));
+ printk(KERN_INFO "ICMP: %d.%d.%d.%d: fragmentation needed and DF set.\n",
+ NIPQUAD(iph->daddr));
} else {
unsigned short new_mtu;
new_mtu = ip_rt_frag_needed(iph, ntohs(icmph->un.frag.mtu));
@@ -711,7 +711,7 @@ static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, int len)
break;
case ICMP_SR_FAILED:
if (net_ratelimit())
- printk(KERN_INFO "ICMP: %s: Source Route Failed.\n", in_ntoa(iph->daddr));
+ printk(KERN_INFO "ICMP: %d.%d.%d.%d: Source Route Failed.\n", NIPQUAD(iph->daddr));
break;
default:
break;
@@ -741,8 +741,8 @@ static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, int len)
if (inet_addr_type(iph->daddr) == RTN_BROADCAST)
{
if (net_ratelimit())
- printk(KERN_WARNING "%s sent an invalid ICMP error to a broadcast.\n",
- in_ntoa(skb->nh.iph->saddr));
+ printk(KERN_WARNING "%d.%d.%d.%d sent an invalid ICMP error to a broadcast.\n",
+ NIPQUAD(skb->nh.iph->saddr));
return;
}
}
@@ -1142,6 +1142,8 @@ __initfunc(void icmp_init(struct net_proto_family *ops))
icmp_inode.i_sock = 1;
icmp_inode.i_uid = 0;
icmp_inode.i_gid = 0;
+ init_waitqueue_head(&icmp_inode.i_wait);
+ init_waitqueue_head(&icmp_inode.u.socket_i.wait);
icmp_socket->inode = &icmp_inode;
icmp_socket->state = SS_UNCONNECTED;
@@ -1150,6 +1152,11 @@ __initfunc(void icmp_init(struct net_proto_family *ops))
if ((err=ops->create(icmp_socket, IPPROTO_ICMP))<0)
panic("Failed to create the ICMP control socket.\n");
icmp_socket->sk->allocation=GFP_ATOMIC;
- icmp_socket->sk->num = 256; /* Don't receive any data */
icmp_socket->sk->ip_ttl = MAXTTL;
+
+ /* Unhash it so that IP input processing does not even
+ * see it; we do not wish this socket to see incoming
+ * packets.
+ */
+ icmp_socket->sk->prot->unhash(icmp_socket->sk);
}
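
in_ntoa() formatted into a static buffer, racy for the same reason as ax2asc(); the NIPQUAD macro instead expands a network-order address into four byte values directly in the printk() argument list. Usage sketch:

static void example_log_daddr(struct iphdr *iph)
{
	if (net_ratelimit())	/* keep hostile traffic from flooding the log */
		printk(KERN_INFO "example: %d.%d.%d.%d unreachable.\n",
		       NIPQUAD(iph->daddr));
}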
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 68e52633e..61c530418 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,7 +8,7 @@
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
- * Version: $Id: igmp.c,v 1.30 1999/03/25 10:04:10 davem Exp $
+ * Version: $Id: igmp.c,v 1.32 1999/06/09 10:10:53 davem Exp $
*
* Authors:
* Alan Cox <Alan.Cox@linux.org>
@@ -97,6 +97,15 @@
#include <linux/mroute.h>
#endif
+/* Big mc list lock for all the devices */
+static rwlock_t ip_mc_lock = RW_LOCK_UNLOCKED;
+/* Big mc list semaphore for all the sockets.
+ We do not refer to this list in IP data paths or from BH,
+ so a semaphore is OK.
+ */
+DECLARE_MUTEX(ip_sk_mc_sem);
+
+
#define IP_MAX_MEMBERSHIPS 20
#ifdef CONFIG_IP_MULTICAST
@@ -216,6 +225,8 @@ static void igmp_timer_expire(unsigned long data)
struct in_device *in_dev = im->interface;
int err;
+ read_lock(&ip_mc_lock);
+
im->tm_running=0;
if (IGMP_V1_SEEN(in_dev))
@@ -234,6 +245,7 @@ static void igmp_timer_expire(unsigned long data)
igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
}
im->reporter = 1;
+ read_unlock(&ip_mc_lock);
}
static void igmp_heard_report(struct in_device *in_dev, u32 group)
@@ -245,14 +257,16 @@ static void igmp_heard_report(struct in_device *in_dev, u32 group)
if (LOCAL_MCAST(group))
return;
+ read_lock(&ip_mc_lock);
for (im=in_dev->mc_list; im!=NULL; im=im->next) {
if (im->multiaddr == group) {
igmp_stop_timer(im);
im->reporter = 0;
im->unsolicit_count = 0;
- return;
+ break;
}
}
+ read_unlock(&ip_mc_lock);
}
static void igmp_heard_query(struct in_device *in_dev, unsigned char max_resp_time,
@@ -281,6 +295,7 @@ static void igmp_heard_query(struct in_device *in_dev, unsigned char max_resp_ti
* - Use the igmp->igmp_code field as the maximum
* delay possible
*/
+ read_lock(&ip_mc_lock);
for (im=in_dev->mc_list; im!=NULL; im=im->next) {
if (group && group != im->multiaddr)
continue;
@@ -291,6 +306,7 @@ static void igmp_heard_query(struct in_device *in_dev, unsigned char max_resp_ti
igmp_stop_timer(im);
igmp_start_timer(im, max_delay);
}
+ read_unlock(&ip_mc_lock);
}
int igmp_rcv(struct sk_buff *skb, unsigned short len)
@@ -380,9 +396,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
if (LOCAL_MCAST(im->multiaddr))
return;
- start_bh_atomic();
igmp_stop_timer(im);
- end_bh_atomic();
if (im->reporter && !IGMP_V1_SEEN(im->interface))
igmp_send_report(im->interface->dev, im->multiaddr, IGMP_HOST_LEAVE_MESSAGE);
@@ -400,9 +414,7 @@ static void igmp_group_added(struct ip_mc_list *im)
if (LOCAL_MCAST(im->multiaddr))
return;
- start_bh_atomic();
igmp_start_timer(im, IGMP_Initial_Report_Delay);
- end_bh_atomic();
#endif
}
@@ -422,16 +434,17 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
im = (struct ip_mc_list *)kmalloc(sizeof(*im), GFP_KERNEL);
+ write_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next) {
if (i->multiaddr == addr) {
i->users++;
if (im)
kfree(im);
- return;
+ goto out;
}
}
if (!im)
- return;
+ goto out;
im->users=1;
im->interface=in_dev;
im->multiaddr=addr;
@@ -447,9 +460,13 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
im->next=in_dev->mc_list;
in_dev->mc_list=im;
igmp_group_added(im);
+ write_unlock_bh(&ip_mc_lock);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
return;
+out:
+ write_unlock_bh(&ip_mc_lock);
+ return;
}
/*
@@ -458,22 +475,27 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
{
+ int err = -ESRCH;
struct ip_mc_list *i, **ip;
+ write_lock_bh(&ip_mc_lock);
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
*ip = i->next;
- synchronize_bh();
-
igmp_group_dropped(i);
+
+ write_unlock_bh(&ip_mc_lock);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
kfree_s(i, sizeof(*i));
+ return 0;
}
- return 0;
+ err = 0;
+ break;
}
}
+ write_unlock_bh(&ip_mc_lock);
- return -ESRCH;
+ return err;
}
@@ -483,8 +505,10 @@ void ip_mc_down(struct in_device *in_dev)
{
struct ip_mc_list *i;
+ read_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_dropped(i);
+ read_unlock_bh(&ip_mc_lock);
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
@@ -497,8 +521,10 @@ void ip_mc_up(struct in_device *in_dev)
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
+ read_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_added(i);
+ read_unlock_bh(&ip_mc_lock);
}
/*
@@ -509,11 +535,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
{
struct ip_mc_list *i;
+ write_lock_bh(&ip_mc_lock);
while ((i = in_dev->mc_list) != NULL) {
in_dev->mc_list = i->next;
igmp_group_dropped(i);
kfree_s(i, sizeof(*i));
}
+ write_unlock_bh(&ip_mc_lock);
}
static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
@@ -570,6 +598,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
err = -EADDRINUSE;
+ down(&ip_sk_mc_sem);
for (i=sk->ip_mc_list; i; i=i->next) {
if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
/* New style additions are reference counted */
@@ -577,13 +606,13 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
i->count++;
err = 0;
}
- goto done;
+ goto done_unlock;
}
count++;
}
err = -ENOBUFS;
if (iml == NULL || count >= sysctl_igmp_max_memberships)
- goto done;
+ goto done_unlock;
memcpy(&iml->multi, imr, sizeof(*imr));
iml->next = sk->ip_mc_list;
iml->count = 1;
@@ -591,6 +620,9 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
ip_mc_inc_group(in_dev, addr);
iml = NULL;
err = 0;
+
+done_unlock:
+ up(&ip_sk_mc_sem);
done:
rtnl_shunlock();
if (iml)
@@ -606,6 +638,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct ip_mc_socklist *iml, **imlp;
+ down(&ip_sk_mc_sem);
for (imlp=&sk->ip_mc_list; (iml=*imlp)!=NULL; imlp=&iml->next) {
if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
@@ -615,7 +648,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
return 0;
*imlp = iml->next;
- synchronize_bh();
+ up(&ip_sk_mc_sem);
in_dev = inetdev_by_index(iml->multi.imr_ifindex);
if (in_dev)
@@ -624,6 +657,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
return 0;
}
}
+ up(&ip_sk_mc_sem);
return -EADDRNOTAVAIL;
}
@@ -635,13 +669,37 @@ void ip_mc_drop_socket(struct sock *sk)
{
struct ip_mc_socklist *iml;
+ down(&ip_sk_mc_sem);
while ((iml=sk->ip_mc_list) != NULL) {
struct in_device *in_dev;
sk->ip_mc_list = iml->next;
+ up(&ip_sk_mc_sem);
+
if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
sock_kfree_s(sk, iml, sizeof(*iml));
+
+ down(&ip_sk_mc_sem);
}
+ up(&ip_sk_mc_sem);
+}
+
+int ip_check_mc(struct device *dev, u32 mc_addr)
+{
+ struct in_device *in_dev = dev->ip_ptr;
+ struct ip_mc_list *im;
+
+ if (in_dev) {
+ read_lock(&ip_mc_lock);
+ for (im=in_dev->mc_list; im; im=im->next) {
+ if (im->multiaddr == mc_addr) {
+ read_unlock(&ip_mc_lock);
+ return 1;
+ }
+ }
+ read_unlock(&ip_mc_lock);
+ }
+ return 0;
}
@@ -653,11 +711,11 @@ int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length, int dum
struct ip_mc_list *im;
int len=0;
struct device *dev;
-
+
len=sprintf(buffer,"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
-
- for(dev = dev_base; dev; dev = dev->next)
- {
+
+ read_lock(&dev_base_lock);
+ for(dev = dev_base; dev; dev = dev->next) {
struct in_device *in_dev = dev->ip_ptr;
char *querier = "NONE";
@@ -669,6 +727,7 @@ int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length, int dum
len+=sprintf(buffer+len,"%d\t%-10s: %5d %7s\n",
dev->ifindex, dev->name, dev->mc_count, querier);
+ read_lock(&ip_mc_lock);
for (im = in_dev->mc_list; im; im = im->next) {
len+=sprintf(buffer+len,
"\t\t\t\t%08lX %5d %d:%08lX\t\t%d\n",
@@ -681,11 +740,16 @@ int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length, int dum
len=0;
begin=pos;
}
- if(pos>offset+length)
+ if(pos>offset+length) {
+ read_unlock(&ip_mc_lock);
goto done;
+ }
}
+ read_unlock(&ip_mc_lock);
}
done:
+ read_unlock(&dev_base_lock);
+
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
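
The igmp.c hunks split the protection in two: ip_mc_lock, a BH-safe rwlock for the per-device mc_list that packet processing reads, and ip_sk_mc_sem for the per-socket list, which is only touched from process context and so may use a sleeping semaphore. A sketch of the socket side under that rule:

static void example_add_membership(struct sock *sk, struct ip_mc_socklist *iml)
{
	down(&ip_sk_mc_sem);		/* may sleep: process context only */
	iml->next = sk->ip_mc_list;
	sk->ip_mc_list = iml;
	up(&ip_sk_mc_sem);
}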
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f066e6073..29747fee6 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,7 +5,7 @@
*
* The IP fragmentation functionality.
*
- * Version: $Id: ip_fragment.c,v 1.40 1999/03/20 23:58:34 davem Exp $
+ * Version: $Id: ip_fragment.c,v 1.41 1999/05/27 00:38:07 davem Exp $
*
* Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox <Alan.Cox@linux.org>
@@ -71,7 +71,8 @@ struct ipq {
#define IPQ_HASHSZ 64
-struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct ipq *ipq_hash[IPQ_HASHSZ];
+static spinlock_t ipfrag_lock = SPIN_LOCK_UNLOCKED;
#define ipqhashfn(id, saddr, daddr, prot) \
((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1))
@@ -141,7 +142,9 @@ static inline struct ipq *ip_find(struct iphdr *iph, struct dst_entry *dst)
unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
struct ipq *qp;
- /* Always, we are in a BH context, so no locking. -DaveM */
+ /* We are always in BH context, and protected by the
+ * ipfrag lock.
+ */
for(qp = ipq_hash[hash]; qp; qp = qp->next) {
if(qp->iph->id == id &&
qp->iph->saddr == saddr &&
@@ -158,8 +161,9 @@ static inline struct ipq *ip_find(struct iphdr *iph, struct dst_entry *dst)
* because we completed, reassembled and processed it, or because
* it timed out.
*
- * This is called _only_ from BH contexts, on packet reception
- * processing and from frag queue expiration timers. -DaveM
+ * This is called _only_ from BH contexts with the ipfrag lock held,
+ * on packet reception processing and from frag queue expiration
+ * timers. -DaveM
*/
static void ip_free(struct ipq *qp)
{
@@ -197,6 +201,7 @@ static void ip_expire(unsigned long arg)
{
struct ipq *qp = (struct ipq *) arg;
+ spin_lock(&ipfrag_lock);
if(!qp->fragments)
{
#ifdef IP_EXPIRE_DEBUG
@@ -213,10 +218,13 @@ static void ip_expire(unsigned long arg)
out:
/* Nuke the fragment queue. */
ip_free(qp);
+ spin_unlock(&ipfrag_lock);
}
/* Memory limiting on fragments. Evictor trashes the oldest
* fragment queue until we are back under the low threshold.
+ *
+ * We are always called in BH with the ipfrag lock held.
*/
static void ip_evictor(void)
{
@@ -229,9 +237,6 @@ restart:
struct ipq *qp;
if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh)
return;
- /* We are in a BH context, so these queue
- * accesses are safe. -DaveM
- */
qp = ipq_hash[i];
if (qp) {
/* find the oldest queue for this hash bucket */
@@ -283,7 +288,7 @@ static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph)
/* Add this entry to the queue. */
hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
- /* We are in a BH context, no locking necessary. -DaveM */
+ /* In a BH context and ipfrag lock is held. -DaveM */
if((qp->next = ipq_hash[hash]) != NULL)
qp->next->pprev = &qp->next;
ipq_hash[hash] = qp;
@@ -421,6 +426,8 @@ struct sk_buff *ip_defrag(struct sk_buff *skb)
ip_statistics.IpReasmReqds++;
+ spin_lock(&ipfrag_lock);
+
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
ip_evictor();
@@ -565,6 +572,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb)
out_freequeue:
ip_free(qp);
out_skb:
+ spin_unlock(&ipfrag_lock);
return skb;
}
@@ -574,6 +582,7 @@ out_skb:
out_timer:
mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time); /* ~ 30 seconds */
out:
+ spin_unlock(&ipfrag_lock);
return NULL;
/*
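
All of the fragment-queue paths above run in BH context, so ipfrag_lock is taken with plain spin_lock(); the lock also covers the expiry timer, which is why ip_expire() brackets ip_free() with it. The shape of a timer handler under that rule:

static void example_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&ipfrag_lock);	/* timers run in BH; no _bh variant needed */
	ip_free(qp);			/* hash and list manipulation under the lock */
	spin_unlock(&ipfrag_lock);
}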
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 7a3e2618b..107ccaa16 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -5,7 +5,7 @@
*
* The Internet Protocol (IP) module.
*
- * Version: $Id: ip_input.c,v 1.37 1999/04/22 10:38:36 davem Exp $
+ * Version: $Id: ip_input.c,v 1.40 1999/06/09 10:10:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -154,44 +154,11 @@
struct ip_mib ip_statistics={2,IPDEFTTL,}; /* Forwarding=No, Default TTL=64 */
-
-/*
- * Handle the issuing of an ioctl() request
- * for the ip device. This is scheduled to
- * disappear
- */
-
-int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
-{
- switch(cmd)
- {
- default:
- return(-EINVAL);
- }
-}
-
-
#if defined(CONFIG_IP_TRANSPARENT_PROXY) && !defined(CONFIG_IP_ALWAYS_DEFRAG)
#define CONFIG_IP_ALWAYS_DEFRAG 1
#endif
/*
- * 0 - deliver
- * 1 - block
- */
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
-{
- int type;
-
- type = skb->h.icmph->type;
- if (type < 32)
- return test_bit(type, &sk->tp_pinfo.tp_raw4.filter);
-
- /* Do not block unknown ICMP types */
- return 0;
-}
-
-/*
* Process Router Attention IP option
*/
int ip_call_ra_chain(struct sk_buff *skb)
@@ -224,16 +191,37 @@ int ip_call_ra_chain(struct sk_buff *skb)
return 0;
}
+/* Handle this out of line, it is rare. */
+static int ip_run_ipprot(struct sk_buff *skb, struct iphdr *iph,
+ struct inet_protocol *ipprot, int force_copy)
+{
+ int ret = 0;
+
+ do {
+ if (ipprot->protocol == iph->protocol) {
+ struct sk_buff *skb2 = skb;
+ if (ipprot->copy || force_copy)
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if(skb2 != NULL) {
+ ret = 1;
+ ipprot->handler(skb2,
+ ntohs(iph->tot_len) - (iph->ihl * 4));
+ }
+ }
+ ipprot = (struct inet_protocol *) ipprot->next;
+ } while(ipprot != NULL);
+
+ return ret;
+}
+
+extern struct sock *raw_v4_input(struct sk_buff *, struct iphdr *, int);
+
/*
* Deliver IP Packets to the higher protocol layers.
*/
int ip_local_deliver(struct sk_buff *skb)
{
struct iphdr *iph = skb->nh.iph;
- struct inet_protocol *ipprot;
- struct sock *raw_sk=NULL;
- unsigned char hash;
- int flag = 0;
#ifndef CONFIG_IP_ALWAYS_DEFRAG
/*
@@ -249,34 +237,29 @@ int ip_local_deliver(struct sk_buff *skb)
#endif
#ifdef CONFIG_IP_MASQUERADE
- /*
- * Do we need to de-masquerade this packet?
- */
- {
- int ret;
- /*
- * Some masq modules can re-inject packets if
- * bad configured.
+ /* Do we need to de-masquerade this packet? */
+ if((IPCB(skb)->flags&IPSKB_MASQUERADED)) {
+ /* Some masq modules can re-inject packets if
+ * badly configured.
*/
+ printk(KERN_DEBUG "ip_input(): demasq recursion detected. "
+ "Check masq modules configuration\n");
+ kfree_skb(skb);
+ return 0;
+ } else {
+ int ret = ip_fw_demasquerade(&skb);
- if((IPCB(skb)->flags&IPSKB_MASQUERADED)) {
- printk(KERN_DEBUG "ip_input(): demasq recursion detected. Check masq modules configuration\n");
- kfree_skb(skb);
- return 0;
- }
-
- ret = ip_fw_demasquerade(&skb);
if (ret < 0) {
kfree_skb(skb);
return 0;
}
-
if (ret) {
- iph=skb->nh.iph;
+ iph = skb->nh.iph;
IPCB(skb)->flags |= IPSKB_MASQUERADED;
dst_release(skb->dst);
skb->dst = NULL;
- if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, skb->dev)) {
+ if (ip_route_input(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev)) {
kfree_skb(skb);
return 0;
}
@@ -285,112 +268,50 @@ int ip_local_deliver(struct sk_buff *skb)
}
#endif
- /*
- * Point into the IP datagram, just past the header.
- */
-
+ /* Point into the IP datagram, just past the header. */
skb->h.raw = skb->nh.raw + iph->ihl*4;
- /*
- * Deliver to raw sockets. This is fun as to avoid copies we want to make no
- * surplus copies.
- *
- * RFC 1122: SHOULD pass TOS value up to the transport layer.
- * -> It does. And not only TOS, but all IP header.
- */
-
- /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
- hash = iph->protocol & (MAX_INET_PROTOS - 1);
-
- /*
- * If there maybe a raw socket we must check - if not we don't care less
- */
-
- if((raw_sk = raw_v4_htable[hash]) != NULL) {
- struct sock *sknext = NULL;
- struct sk_buff *skb1;
- raw_sk = raw_v4_lookup(raw_sk, iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex);
- if(raw_sk) { /* Any raw sockets */
- do {
- /* Find the next */
- sknext = raw_v4_lookup(raw_sk->next, iph->protocol,
- iph->saddr, iph->daddr, skb->dev->ifindex);
- if (iph->protocol != IPPROTO_ICMP || !icmp_filter(raw_sk, skb)) {
- if (sknext == NULL)
- break;
- skb1 = skb_clone(skb, GFP_ATOMIC);
- if(skb1)
- {
- raw_rcv(raw_sk, skb1);
- }
- }
- raw_sk = sknext;
- } while(raw_sk!=NULL);
-
- /* Here either raw_sk is the last raw socket, or NULL if
- * none. We deliver to the last raw socket AFTER the
- * protocol checks as it avoids a surplus copy.
- */
- }
- }
-
- /*
- * skb->h.raw now points at the protocol beyond the IP header.
- */
-
- for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
{
- struct sk_buff *skb2;
-
- if (ipprot->protocol != iph->protocol)
- continue;
- /*
- * See if we need to make a copy of it. This will
- * only be set if more than one protocol wants it.
- * and then not for the last one. If there is a pending
- * raw delivery wait for that
+ /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
+ int hash = iph->protocol & (MAX_INET_PROTOS - 1);
+ struct sock *raw_sk = raw_v4_htable[hash];
+ struct inet_protocol *ipprot;
+ int flag;
+
+ /* If there may be a raw socket we must check; if not, we
+ * don't care.
*/
-
- if (ipprot->copy || raw_sk)
- {
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if(skb2==NULL)
- continue;
- }
- else
- {
- skb2 = skb;
- }
- flag = 1;
+ if(raw_sk != NULL)
+ raw_sk = raw_v4_input(skb, iph, hash);
+
+ ipprot = (struct inet_protocol *) inet_protos[hash];
+ flag = 0;
+ if(ipprot != NULL) {
+ if(raw_sk == NULL &&
+ ipprot->next == NULL &&
+ ipprot->protocol == iph->protocol) {
+ /* Fast path... */
+ return ipprot->handler(skb, (ntohs(iph->tot_len) -
+ (iph->ihl * 4)));
+ } else {
+ flag = ip_run_ipprot(skb, iph, ipprot, (raw_sk != NULL));
+ }
+ }
- /*
- * Pass on the datagram to each protocol that wants it,
- * based on the datagram protocol. We should really
- * check the protocol handler's return values here...
+ /* All protocols checked.
+ * If this packet was a broadcast, we may *not* reply to it, since that
+ * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
+ * ICMP reply messages get queued up for transmission...)
*/
-
- ipprot->handler(skb2, ntohs(iph->tot_len) - (iph->ihl * 4));
- }
-
- /*
- * All protocols checked.
- * If this packet was a broadcast, we may *not* reply to it, since that
- * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
- * ICMP reply messages get queued up for transmission...)
- */
-
- if(raw_sk!=NULL) /* Shift to last raw user */
- {
- raw_rcv(raw_sk, skb);
-
- }
- else if (!flag) /* Free and report errors */
- {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
- kfree_skb(skb);
+ if(raw_sk != NULL) { /* Shift to last raw user */
+ raw_rcv(raw_sk, skb);
+ } else if (!flag) { /* Free and report errors */
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
+ kfree_skb(skb);
+ }
}
- return(0);
+ return 0;
}
/*
@@ -404,9 +325,8 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
u16 rport;
#endif /* CONFIG_FIREWALL */
- /*
- * When the interface is in promisc. mode, drop all the crap
- * that it receives, do not try to analyse it.
+ /* When the interface is in promisc. mode, drop all the crap
+ * that it receives, do not try to analyse it.
*/
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
@@ -430,17 +350,15 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
goto inhdr_error;
{
- __u32 len = ntohs(iph->tot_len);
- if (skb->len < len)
- goto inhdr_error;
+ __u32 len = ntohs(iph->tot_len);
+ if (skb->len < len)
+ goto inhdr_error;
- /*
- * Our transport medium may have padded the buffer out. Now we know it
- * is IP we can trim to the true length of the frame.
- * Note this now means skb->len holds ntohs(iph->tot_len).
- */
-
- __skb_trim(skb, len);
+ /* Our transport medium may have padded the buffer out. Now we know it
+ * is IP we can trim to the true length of the frame.
+ * Note this now means skb->len holds ntohs(iph->tot_len).
+ */
+ __skb_trim(skb, len);
}
#ifdef CONFIG_IP_ALWAYS_DEFRAG
@@ -474,21 +392,17 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
if (skb->dst == NULL) {
if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
goto drop;
-#ifdef CONFIG_CPU_IS_SLOW
- if (net_cpu_congestion > 10 && !(iph->tos&IPTOS_RELIABILITY) &&
- IPTOS_PREC(iph->tos) < IPTOS_PREC_INTERNETCONTROL) {
- goto drop;
- }
-#endif
}
#ifdef CONFIG_NET_CLS_ROUTE
if (skb->dst->tclassid) {
u32 idx = skb->dst->tclassid;
+ write_lock(&ip_rt_acct_lock);
ip_rt_acct[idx&0xFF].o_packets++;
ip_rt_acct[idx&0xFF].o_bytes+=skb->len;
ip_rt_acct[(idx>>16)&0xFF].i_packets++;
ip_rt_acct[(idx>>16)&0xFF].i_bytes+=skb->len;
+ write_unlock(&ip_rt_acct_lock);
}
#endif
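
ip_run_ipprot() follows the usual skb ownership rule: every handler consumes the buffer it is given, so N interested consumers need N-1 clones and the last one gets the original. A minimal sketch of that fan-out rule; example_consume() is a hypothetical consumer:

static void example_fanout(struct sk_buff *skb, int consumers)
{
	while (--consumers > 0) {
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

		if (clone != NULL)
			example_consume(clone);	/* each consumer frees its copy */
	}
	example_consume(skb);	/* last consumer takes the original: no copy */
}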
diff --git a/net/ipv4/ip_masq_mfw.c b/net/ipv4/ip_masq_mfw.c
index dc38b1712..ff07231fc 100644
--- a/net/ipv4/ip_masq_mfw.c
+++ b/net/ipv4/ip_masq_mfw.c
@@ -3,7 +3,7 @@
*
* Does (reverse-masq) forwarding based on skb->fwmark value
*
- * $Id: ip_masq_mfw.c,v 1.3 1999/01/26 05:33:47 davem Exp $
+ * $Id: ip_masq_mfw.c,v 1.4 1999/05/13 23:25:07 davem Exp $
*
* Author: Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
* based on Steven Clarke's portfw
@@ -79,7 +79,7 @@ struct ip_masq_mfw {
};
-static struct semaphore mfw_sema = MUTEX;
+static DECLARE_MUTEX(mfw_sema);
#ifdef __SMP__
static rwlock_t mfw_lock = RW_LOCK_UNLOCKED;
#endif
diff --git a/net/ipv4/ip_masq_quake.c b/net/ipv4/ip_masq_quake.c
index 165dd6bd5..17b11a799 100644
--- a/net/ipv4/ip_masq_quake.c
+++ b/net/ipv4/ip_masq_quake.c
@@ -12,6 +12,7 @@
* http://www.gamers.org/dEngine/quake/spec/
* Harald Hoyer : Check for QUAKE-STRING
* Juan Jose Ciarlante : litl bits for 2.1
+ * Horst von Brand : Add #include <linux/string.h>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -24,6 +25,7 @@
#include <linux/module.h>
#include <asm/system.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
@@ -44,7 +46,7 @@ typedef struct
struct quake_priv_data {
/* Have we seen a client connect message */
- char cl_connect;
+ signed char cl_connect;
};
static int
diff --git a/net/ipv4/ip_masq_vdolive.c b/net/ipv4/ip_masq_vdolive.c
index 4724e3b93..2d8d672cc 100644
--- a/net/ipv4/ip_masq_vdolive.c
+++ b/net/ipv4/ip_masq_vdolive.c
@@ -2,7 +2,7 @@
* IP_MASQ_VDOLIVE - VDO Live masquerading module
*
*
- * Version: @(#)$Id: ip_masq_vdolive.c,v 1.4 1998/10/06 04:49:07 davem Exp $
+ * Version: @(#)$Id: ip_masq_vdolive.c,v 1.6 1999/06/09 08:29:03 davem Exp $
*
* Author: Nigel Metheringham <Nigel.Metheringham@ThePLAnet.net>
* PLAnet Online Ltd
@@ -10,6 +10,9 @@
* Fixes: Minor changes for 2.1 by
* Steven Clarke <Steven.Clarke@ThePlanet.Net>, Planet Online Ltd
*
+ * Add missing #include <linux/string.h>
+ * Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -25,6 +28,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <linux/skbuff.h>
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index fae22cbe7..359926a4c 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -5,7 +5,7 @@
*
* The options processing module for ip.c
*
- * Version: $Id: ip_options.c,v 1.16 1999/03/21 05:22:40 davem Exp $
+ * Version: $Id: ip_options.c,v 1.18 1999/06/09 08:29:06 davem Exp $
*
* Authors: A.N.Kuznetsov
*
@@ -452,7 +452,6 @@ eol:
error:
if (skb) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
- kfree_skb(skb);
}
return -EINVAL;
}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index abe93ec27..51e27ad67 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,5 +1,5 @@
/*
- * $Id: ipconfig.c,v 1.20 1999/03/28 10:18:28 davem Exp $
+ * $Id: ipconfig.c,v 1.22 1999/06/09 10:10:57 davem Exp $
*
* Automatic Configuration of IP -- use BOOTP or RARP or user-supplied
* information to configure own IP address and routes.
@@ -112,7 +112,8 @@ static int __init ic_open_devs(void)
unsigned short oflags;
last = &ic_first_dev;
- for (dev = dev_base; dev; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev; dev = dev->next) {
if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
(!(dev->flags & IFF_LOOPBACK) &&
(dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
@@ -142,6 +143,9 @@ static int __init ic_open_devs(void)
ic_proto_have_if |= able;
DBG(("IP-Config: Opened %s (able=%d)\n", dev->name, able));
}
+ }
+ read_unlock(&dev_base_lock);
+
*last = NULL;
if (!ic_first_dev) {
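
The ipconfig.c change brings the boot-time interface scan under dev_base_lock, which now guards the global device list. The pattern in isolation (loop body elided; on 2.3 net devices are still struct device):

	static void scan_devices(void)
	{
		struct device *dev;

		read_lock(&dev_base_lock);
		for (dev = dev_base; dev; dev = dev->next) {
			/* inspect dev->name and dev->flags; the read
			 * lock only pins the list, so do not sleep here
			 */
		}
		read_unlock(&dev_base_lock);
	}
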
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index d7db0c007..1034e0e7a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1,7 +1,7 @@
/*
* IP multicast routing support for mrouted 3.6/3.8
*
- * (c) 1995 Alan Cox, <alan@cymru.net>
+ * (c) 1995 Alan Cox, <alan@redhat.com>
* Linux Consultancy and Custom Driver Development
*
* This program is free software; you can redistribute it and/or
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.40 1999/03/25 10:04:25 davem Exp $
+ * Version: $Id: ipmr.c,v 1.43 1999/06/09 10:10:59 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
@@ -23,6 +23,8 @@
* Brad Parker : Better behaviour on mrouted upcall
* overflow.
* Carlos Picoto : PIMv1 Support
+ * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
*
*/
@@ -431,7 +433,7 @@ static void ipmr_cache_resolve(struct mfc_cache *cache)
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
}
- err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
} else
#endif
ip_mr_forward(skb, cache, 0);
@@ -1343,7 +1345,8 @@ int pim_rcv(struct sk_buff * skb, unsigned short len)
pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
(pim->flags&PIM_NULL_REGISTER) ||
reg_dev == NULL ||
- ip_compute_csum((void *)pim, len)) {
+ (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+ ip_compute_csum((void *)pim, len))) {
kfree_skb(skb);
return -EINVAL;
}
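
The pim_rcv() change implements the fix credited above: PIMv2 Register messages checksum only the PIM header (the encapsulated packet is excluded), while older implementations checksum the whole message, so the receiver now accepts either form. Factored out as a sketch, assuming ip_compute_csum() returns zero over a valid span:

	static int pim_csum_ok(void *pim, int hdr_len, int tot_len)
	{
		return ip_compute_csum(pim, hdr_len) == 0 ||	/* PIMv2: header only */
		       ip_compute_csum(pim, tot_len) == 0;	/* old peers: whole message */
	}
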
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 1640a0560..52c5ee5a4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,7 +7,7 @@
* PROC file system. It is mainly used for debugging and
* statistics.
*
- * Version: $Id: proc.c,v 1.34 1999/02/08 11:20:34 davem Exp $
+ * Version: $Id: proc.c,v 1.35 1999/05/27 00:37:38 davem Exp $
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
@@ -114,10 +114,8 @@ static inline void get__sock(struct sock *sp, char *tmpbuf, int i, int format)
slot_dist = tcp_tw_death_row_slot - slot_dist;
timer_expires = jiffies + (slot_dist * TCP_TWKILL_PERIOD);
} else {
- timer_active1 = del_timer(&tp->retransmit_timer);
- timer_active2 = del_timer(&sp->timer);
- if (!timer_active1) tp->retransmit_timer.expires=0;
- if (!timer_active2) sp->timer.expires=0;
+ timer_active1 = tp->retransmit_timer.prev != NULL;
+ timer_active2 = sp->timer.prev != NULL;
timer_active = 0;
timer_expires = (unsigned) -1;
}
@@ -147,9 +145,6 @@ static inline void get__sock(struct sock *sp, char *tmpbuf, int i, int format)
(!tw_bucket && sp->socket) ? sp->socket->inode->i_uid : 0,
(!tw_bucket && timer_active) ? sp->timeout : 0,
(!tw_bucket && sp->socket) ? sp->socket->inode->i_ino : 0);
-
- if (timer_active1) add_timer(&tp->retransmit_timer);
- if (timer_active2) add_timer(&sp->timer);
}
/*
@@ -176,7 +171,7 @@ get__netinfo(struct proto *pro, char *buffer, int format, char **start, off_t of
" sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout inode");
pos = 128;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
sp = pro->sklist_next;
while(sp != (struct sock *)pro) {
if (format == 0 && sp->state == TCP_LISTEN) {
@@ -211,7 +206,7 @@ get__netinfo(struct proto *pro, char *buffer, int format, char **start, off_t of
i++;
}
out:
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
begin = len - (pos - offset);
*start = buffer + begin;
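
The get__sock() change replaces a destructive probe (del_timer() followed by a conditional add_timer()) with a plain liveness test: a detached timer_list has prev == NULL on this kernel, so pending-ness can be read without removing the timer and racing against its re-addition. As a one-liner, assuming the 2.3 timer_list layout:

	static __inline__ int timer_is_pending(struct timer_list *t)
	{
		return t->prev != NULL;		/* still linked into the timer list */
	}
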
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index fc6b1f2ee..dd2e7555e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -5,7 +5,7 @@
*
* RAW - implementation of IP "raw" sockets.
*
- * Version: $Id: raw.c,v 1.39 1998/11/08 11:17:04 davem Exp $
+ * Version: $Id: raw.c,v 1.41 1999/05/30 01:16:19 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -75,11 +75,11 @@ static void raw_v4_hash(struct sock *sk)
num &= (RAWV4_HTABLE_SIZE - 1);
skp = &raw_v4_htable[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
sk->next = *skp;
*skp = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void raw_v4_unhash(struct sock *sk)
@@ -90,7 +90,7 @@ static void raw_v4_unhash(struct sock *sk)
num &= (RAWV4_HTABLE_SIZE - 1);
skp = &raw_v4_htable[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -98,7 +98,7 @@ static void raw_v4_unhash(struct sock *sk)
}
skp = &((*skp)->next);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void raw_v4_rehash(struct sock *sk)
@@ -110,7 +110,7 @@ static void raw_v4_rehash(struct sock *sk)
num &= (RAWV4_HTABLE_SIZE - 1);
skp = &raw_v4_htable[oldnum];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -121,16 +121,15 @@ static void raw_v4_rehash(struct sock *sk)
sk->next = raw_v4_htable[num];
raw_v4_htable[num] = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
-/* Grumble... icmp and ip_input want to get at this... */
-struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
- unsigned long raddr, unsigned long laddr, int dif)
+static __inline__ struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
+ unsigned long raddr, unsigned long laddr,
+ int dif)
{
struct sock *s = sk;
- SOCKHASH_LOCK();
for(s = sk; s; s = s->next) {
if((s->num == num) &&
!(s->dead && (s->state == TCP_CLOSE)) &&
@@ -139,10 +138,79 @@ struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
!(s->bound_dev_if && s->bound_dev_if != dif))
break; /* gotcha */
}
- SOCKHASH_UNLOCK();
return s;
}
+struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
+ unsigned long raddr, unsigned long laddr,
+ int dif)
+{
+ SOCKHASH_LOCK_READ();
+ sk = __raw_v4_lookup(sk, num, raddr, laddr, dif);
+ SOCKHASH_UNLOCK_READ();
+
+ return sk;
+}
+
+/*
+ * 0 - deliver
+ * 1 - block
+ */
+static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+{
+ int type;
+
+ type = skb->h.icmph->type;
+ if (type < 32)
+ return test_bit(type, &sk->tp_pinfo.tp_raw4.filter);
+
+ /* Do not block unknown ICMP types */
+ return 0;
+}
+
+/* IP input processing comes here for RAW socket delivery.
+ * This is fun: to avoid copies we want to make no surplus
+ * clones of the skb while still delivering to every matching
+ * socket.
+ *
+ * RFC 1122: SHOULD pass TOS value up to the transport layer.
+ *   -> It does, and not only the TOS but the whole IP header.
+ */
+struct sock *raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
+{
+ struct sock *sk;
+
+ SOCKHASH_LOCK_READ_BH();
+ if ((sk = raw_v4_htable[hash]) == NULL)
+ goto out;
+ sk = __raw_v4_lookup(sk, iph->protocol,
+ iph->saddr, iph->daddr,
+ skb->dev->ifindex);
+ while(sk != NULL) {
+ struct sock *sknext = __raw_v4_lookup(sk->next, iph->protocol,
+ iph->saddr, iph->daddr,
+ skb->dev->ifindex);
+
+ if (iph->protocol != IPPROTO_ICMP ||
+ ! icmp_filter(sk, skb)) {
+ struct sk_buff *clone;
+
+ if(sknext == NULL)
+ break;
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if(clone) {
+ SOCKHASH_UNLOCK_READ_BH();
+ raw_rcv(sk, clone);
+ SOCKHASH_LOCK_READ_BH();
+ }
+ }
+ sk = sknext;
+ }
+out:
+ SOCKHASH_UNLOCK_READ_BH();
+
+ return sk;
+}
+
void raw_err (struct sock *sk, struct sk_buff *skb)
{
int type = skb->h.icmph->type;
@@ -402,6 +470,8 @@ done:
static void raw_close(struct sock *sk, long timeout)
{
+ bh_lock_sock(sk);
+
 	/* Observation: when raw_close is called, processes have
 	   no access to the socket anymore, but the network still does.
 	   Step one, detach it from networking:
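
The new raw_v4_input()/icmp_filter() path consults a 32-bit per-socket bitmask, one bit per ICMP type, with types >= 32 always delivered. Userspace installs the mask through the raw-socket filter option; a sketch, assuming the ICMP_FILTER sockopt and struct icmp_filter from <linux/icmp.h>:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/icmp.h>

	static void block_echo_replies(int raw_sock)
	{
		struct icmp_filter filt;

		filt.data = 1U << ICMP_ECHOREPLY;	/* a set bit blocks that type */
		setsockopt(raw_sock, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt));
	}
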
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index dbde97b70..3d9e87de3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,7 +5,7 @@
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.67 1999/05/08 20:00:20 davem Exp $
+ * Version: $Id: route.c,v 1.69 1999/06/09 10:11:02 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -174,7 +174,18 @@ __u8 ip_tos2prio[16] = {
* Route cache.
*/
-struct rtable *rt_hash_table[RT_HASH_DIVISOR];
+/* The locking scheme is rather straightforward:
+ *
+ * 1) A BH protected rwlock protects the central route hash.
+ * 2) Only writers remove entries, and they hold the lock
+ * as they look at rtable reference counts.
+ * 3) Only readers acquire references to rtable entries,
+ * they do so with atomic increments and with the
+ * lock held.
+ */
+
+static struct rtable *rt_hash_table[RT_HASH_DIVISOR];
+static rwlock_t rt_hash_lock = RW_LOCK_UNLOCKED;
static int rt_intern_hash(unsigned hash, struct rtable * rth, struct rtable ** res);
@@ -204,7 +215,7 @@ static int rt_cache_get_info(char *buffer, char **start, off_t offset, int lengt
}
- start_bh_atomic();
+ read_lock_bh(&rt_hash_lock);
for (i = 0; i<RT_HASH_DIVISOR; i++) {
for (r = rt_hash_table[i]; r; r = r->u.rt_next) {
@@ -239,7 +250,7 @@ static int rt_cache_get_info(char *buffer, char **start, off_t offset, int lengt
}
done:
- end_bh_atomic();
+ read_unlock_bh(&rt_hash_lock);
*start = buffer+len-(pos-offset);
len = pos-offset;
@@ -292,6 +303,7 @@ static __inline__ int rt_may_expire(struct rtable *rth, int tmo1, int tmo2)
return 1;
}
+/* This runs via a timer and thus is always in BH context. */
static void rt_check_expire(unsigned long dummy)
{
int i;
@@ -305,6 +317,7 @@ static void rt_check_expire(unsigned long dummy)
rover = (rover + 1) & (RT_HASH_DIVISOR-1);
rthp = &rt_hash_table[rover];
+ write_lock(&rt_hash_lock);
while ((rth = *rthp) != NULL) {
if (rth->u.dst.expires) {
 			/* Entry is expired even if it is in use */
@@ -325,6 +338,7 @@ static void rt_check_expire(unsigned long dummy)
*rthp = rth->u.rt_next;
rt_free(rth);
}
+ write_unlock(&rt_hash_lock);
/* Fallback loop breaker. */
if ((jiffies - now) > 0)
@@ -334,6 +348,9 @@ static void rt_check_expire(unsigned long dummy)
add_timer(&rt_periodic_timer);
}
+/* This can run from both BH and non-BH contexts, the latter
+ * in the case of a forced flush event.
+ */
static void rt_run_flush(unsigned long dummy)
{
int i;
@@ -341,23 +358,23 @@ static void rt_run_flush(unsigned long dummy)
rt_deadline = 0;
- start_bh_atomic();
for (i=0; i<RT_HASH_DIVISOR; i++) {
- if ((rth = xchg(&rt_hash_table[i], NULL)) == NULL)
- continue;
- end_bh_atomic();
+ write_lock_bh(&rt_hash_lock);
+ rth = rt_hash_table[i];
+ if(rth != NULL)
+ rt_hash_table[i] = NULL;
+ write_unlock_bh(&rt_hash_lock);
for (; rth; rth=next) {
next = rth->u.rt_next;
rth->u.rt_next = NULL;
rt_free(rth);
}
-
- start_bh_atomic();
}
- end_bh_atomic();
}
+static spinlock_t rt_flush_lock = SPIN_LOCK_UNLOCKED;
+
void rt_cache_flush(int delay)
{
unsigned long now = jiffies;
@@ -366,7 +383,7 @@ void rt_cache_flush(int delay)
if (delay < 0)
delay = ip_rt_min_delay;
- start_bh_atomic();
+ spin_lock_bh(&rt_flush_lock);
if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
long tmo = (long)(rt_deadline - now);
@@ -386,7 +403,7 @@ void rt_cache_flush(int delay)
}
if (delay <= 0) {
- end_bh_atomic();
+ spin_unlock_bh(&rt_flush_lock);
rt_run_flush(0);
return;
}
@@ -396,7 +413,7 @@ void rt_cache_flush(int delay)
rt_flush_timer.expires = now + delay;
add_timer(&rt_flush_timer);
- end_bh_atomic();
+ spin_unlock_bh(&rt_flush_lock);
}
/*
@@ -459,7 +476,10 @@ static int rt_garbage_collect(void)
do {
int i, k;
- start_bh_atomic();
+ /* The write lock is held during the entire hash
+ * traversal to ensure consistent state of the rover.
+ */
+ write_lock_bh(&rt_hash_lock);
for (i=0, k=rover; i<RT_HASH_DIVISOR; i++) {
unsigned tmo = expire;
@@ -480,7 +500,7 @@ static int rt_garbage_collect(void)
break;
}
rover = k;
- end_bh_atomic();
+ write_unlock_bh(&rt_hash_lock);
if (goal <= 0)
goto work_done;
@@ -530,10 +550,9 @@ static int rt_intern_hash(unsigned hash, struct rtable * rt, struct rtable ** rp
int attempts = !in_interrupt();
restart:
- start_bh_atomic();
-
rthp = &rt_hash_table[hash];
+ write_lock_bh(&rt_hash_lock);
while ((rth = *rthp) != NULL) {
if (memcmp(&rth->key, &rt->key, sizeof(rt->key)) == 0) {
/* Put it first */
@@ -544,7 +563,7 @@ restart:
atomic_inc(&rth->u.dst.refcnt);
atomic_inc(&rth->u.dst.use);
rth->u.dst.lastuse = now;
- end_bh_atomic();
+ write_unlock_bh(&rt_hash_lock);
rt_drop(rt);
*rp = rth;
@@ -559,7 +578,7 @@ restart:
*/
if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
if (!arp_bind_neighbour(&rt->u.dst)) {
- end_bh_atomic();
+ write_unlock_bh(&rt_hash_lock);
/* Neighbour tables are full and nothing
can be released. Try to shrink route cache,
@@ -594,7 +613,7 @@ restart:
}
#endif
rt_hash_table[hash] = rt;
- end_bh_atomic();
+ write_unlock_bh(&rt_hash_lock);
*rp = rt;
return 0;
}
@@ -633,6 +652,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
rthp=&rt_hash_table[hash];
+ write_lock_bh(&rt_hash_lock);
while ( (rth = *rthp) != NULL) {
struct rtable *rt;
@@ -657,6 +677,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
rt = dst_alloc(sizeof(struct rtable), &ipv4_dst_ops);
if (rt == NULL) {
ip_rt_put(rth);
+ write_unlock_bh(&rt_hash_lock);
return;
}
@@ -688,11 +709,15 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
}
*rthp = rth->u.rt_next;
+ write_unlock_bh(&rt_hash_lock);
if (!rt_intern_hash(hash, rt, &rt))
ip_rt_put(rt);
rt_drop(rth);
- break;
+ goto do_next;
}
+ write_unlock_bh(&rt_hash_lock);
+ do_next:
+ ;
}
}
return;
@@ -722,8 +747,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
#if RT_CACHE_DEBUG >= 1
printk(KERN_DEBUG "ip_rt_advice: redirect to %d.%d.%d.%d/%02x dropped\n", NIPQUAD(rt->rt_dst), rt->key.tos);
#endif
- start_bh_atomic();
ip_rt_put(rt);
+ write_lock_bh(&rt_hash_lock);
for (rthp = &rt_hash_table[hash]; *rthp; rthp = &(*rthp)->u.rt_next) {
if (*rthp == rt) {
*rthp = rt->u.rt_next;
@@ -731,7 +756,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
break;
}
}
- end_bh_atomic();
+ write_unlock_bh(&rt_hash_lock);
return NULL;
}
}
@@ -861,6 +886,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
for (i=0; i<2; i++) {
unsigned hash = rt_hash_code(daddr, skeys[i], tos);
+ read_lock_bh(&rt_hash_lock);
for (rth = rt_hash_table[hash]; rth; rth = rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == skeys[i] &&
@@ -890,6 +916,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
}
}
}
+ read_unlock_bh(&rt_hash_lock);
}
return est_mtu ? : new_mtu;
}
@@ -1362,6 +1389,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
tos &= IPTOS_TOS_MASK;
hash = rt_hash_code(daddr, saddr^(iif<<5), tos);
+ read_lock_bh(&rt_hash_lock);
for (rth=rt_hash_table[hash]; rth; rth=rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == saddr &&
@@ -1374,10 +1402,12 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->u.dst.lastuse = jiffies;
atomic_inc(&rth->u.dst.use);
atomic_inc(&rth->u.dst.refcnt);
+ read_unlock_bh(&rt_hash_lock);
skb->dst = (struct dst_entry*)rth;
return 0;
}
}
+ read_unlock_bh(&rt_hash_lock);
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
@@ -1657,7 +1687,7 @@ int ip_route_output(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int oif)
hash = rt_hash_code(daddr, saddr^(oif<<5), tos);
- start_bh_atomic();
+ read_lock_bh(&rt_hash_lock);
for (rth=rt_hash_table[hash]; rth; rth=rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == saddr &&
@@ -1673,12 +1703,12 @@ int ip_route_output(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int oif)
rth->u.dst.lastuse = jiffies;
atomic_inc(&rth->u.dst.use);
atomic_inc(&rth->u.dst.refcnt);
- end_bh_atomic();
+ read_unlock_bh(&rt_hash_lock);
*rp = rth;
return 0;
}
}
- end_bh_atomic();
+ read_unlock_bh(&rt_hash_lock);
return ip_route_output_slow(rp, daddr, saddr, tos, oif);
}
@@ -1821,9 +1851,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
return -ENODEV;
skb->protocol = __constant_htons(ETH_P_IP);
skb->dev = dev;
- start_bh_atomic();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
- end_bh_atomic();
rt = (struct rtable*)skb->dst;
if (!err && rt->u.dst.error)
err = -rt->u.dst.error;
@@ -1869,7 +1897,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (h < s_h) continue;
if (h > s_h)
s_idx = 0;
- start_bh_atomic();
+ read_lock_bh(&rt_hash_lock);
for (rt = rt_hash_table[h], idx = 0; rt; rt = rt->u.rt_next, idx++) {
if (idx < s_idx)
continue;
@@ -1877,12 +1905,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
dst_release(xchg(&skb->dst, NULL));
- end_bh_atomic();
+ read_unlock_bh(&rt_hash_lock);
goto done;
}
dst_release(xchg(&skb->dst, NULL));
}
- end_bh_atomic();
+ read_unlock_bh(&rt_hash_lock);
}
done:
@@ -1968,6 +1996,7 @@ ctl_table ipv4_route_table[] = {
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct ip_rt_acct[256];
+rwlock_t ip_rt_acct_lock = RW_LOCK_UNLOCKED;
#ifdef CONFIG_PROC_FS
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
@@ -1980,9 +2009,9 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
*eof = 1;
}
if (length > 0) {
- start_bh_atomic();
+ read_lock_bh(&ip_rt_acct_lock);
memcpy(buffer, ((u8*)&ip_rt_acct)+offset, length);
- end_bh_atomic();
+ read_unlock_bh(&ip_rt_acct_lock);
return length;
}
return 0;
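
All of the route.c conversions follow the scheme spelled out in the comment over rt_hash_table: readers take rt_hash_lock, grab their references with atomic increments while still holding it, and only then drop the lock. Distilled into a hypothetical lookup helper, with the key fields as in ip_route_output():

	static struct rtable *rt_cache_get(unsigned hash, u32 daddr, u32 saddr, u8 tos)
	{
		struct rtable *rth;

		read_lock_bh(&rt_hash_lock);
		for (rth = rt_hash_table[hash]; rth; rth = rth->u.rt_next) {
			if (rth->key.dst == daddr &&
			    rth->key.src == saddr &&
			    rth->key.tos == tos) {
				/* references taken under the lock, per rule 3 */
				atomic_inc(&rth->u.dst.use);
				atomic_inc(&rth->u.dst.refcnt);
				rth->u.dst.lastuse = jiffies;
				break;
			}
		}
		read_unlock_bh(&rt_hash_lock);
		return rth;
	}
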
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8c1c9f9be..779c31cef 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.140 1999/04/22 10:34:31 davem Exp $
+ * Version: $Id: tcp.c,v 1.144 1999/05/27 01:03:37 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -416,6 +416,7 @@
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -432,7 +433,7 @@ kmem_cache_t *tcp_timewait_cachep;
/*
* Find someone to 'accept'. Must be called with
- * the socket locked or with interrupts disabled
+ * the listening socket locked.
*/
static struct open_request *tcp_find_established(struct tcp_opt *tp,
@@ -441,10 +442,11 @@ static struct open_request *tcp_find_established(struct tcp_opt *tp,
struct open_request *req = tp->syn_wait_queue;
struct open_request *prev = (struct open_request *)&tp->syn_wait_queue;
while(req) {
- if (req->sk &&
- ((1 << req->sk->state) &
- ~(TCPF_SYN_SENT|TCPF_SYN_RECV)))
- break;
+ if (req->sk) {
+ if((1 << req->sk->state) &
+ ~(TCPF_SYN_SENT|TCPF_SYN_RECV))
+ break;
+ }
prev = req;
req = req->dl_next;
}
@@ -655,12 +657,13 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
/*
* Wait for a socket to get into the connected state
*
- * Note: must be called with the socket locked.
+ * Note: Must be called with the socket locked, and it
+ * runs with the kernel fully unlocked.
*/
static int wait_for_tcp_connect(struct sock * sk, int flags)
{
struct task_struct *tsk = current;
- struct wait_queue wait = { tsk, NULL };
+ DECLARE_WAITQUEUE(wait, tsk);
while((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
if(sk->err)
@@ -698,12 +701,14 @@ static inline int tcp_memory_free(struct sock *sk)
/*
* Wait for more memory for a socket
+ *
+ * NOTE: This runs with the kernel fully unlocked.
*/
static void wait_for_tcp_memory(struct sock * sk)
{
release_sock(sk);
if (!tcp_memory_free(sk)) {
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
sk->socket->flags &= ~SO_NOSPACE;
add_wait_queue(sk->sleep, &wait);
@@ -744,6 +749,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
int mss_now;
int err, copied;
+ unlock_kernel();
lock_sock(sk);
err = 0;
@@ -896,6 +902,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
err = -ERESTARTSYS;
goto do_interrupted;
}
+ tcp_push_pending_frames(sk, tp);
wait_for_tcp_memory(sk);
/* If SACK's were formed or PMTU events happened,
@@ -969,6 +976,7 @@ do_fault2:
out:
tcp_push_pending_frames(sk, tp);
release_sock(sk);
+ lock_kernel();
return err;
}
@@ -1117,7 +1125,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
int len, int nonblock, int flags, int *addr_len)
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
int copied = 0;
u32 peek_seq;
volatile u32 *seq; /* So gcc doesn't overoptimise */
@@ -1148,6 +1156,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
if (flags & MSG_WAITALL)
target=len;
+ unlock_kernel();
add_wait_queue(sk->sleep, &wait);
lock_sock(sk);
@@ -1300,6 +1309,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
/* We now will not sleep again until we are finished
* with skb. Sorry if you are doing the SMP port
* but you'll just have to fix it neatly ;)
+ *
+ * Very funny Alan... -DaveM
*/
atomic_dec(&skb->users);
@@ -1344,6 +1355,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
/* Clean up data we have read: This will do ACK frames. */
cleanup_rbuf(sk, copied);
release_sock(sk);
+ lock_kernel();
return copied;
}
@@ -1415,16 +1427,15 @@ void tcp_shutdown(struct sock *sk, int how)
return;
/* If we've already sent a FIN, or it's a closed state, skip this. */
+ lock_sock(sk);
if ((1 << sk->state) &
(TCPF_ESTABLISHED|TCPF_SYN_SENT|TCPF_SYN_RECV|TCPF_CLOSE_WAIT)) {
- lock_sock(sk);
/* Clear out any half completed packets. FIN if needed. */
if (tcp_close_state(sk,0))
tcp_send_fin(sk);
-
- release_sock(sk);
}
+ release_sock(sk);
}
@@ -1471,13 +1482,6 @@ void tcp_close(struct sock *sk, long timeout)
struct sk_buff *skb;
int data_was_unread = 0;
- /*
- * Check whether the socket is locked ... supposedly
- * it's impossible to tcp_close() a locked socket.
- */
- if (atomic_read(&sk->sock_readers))
- printk("tcp_close: socket already locked!\n");
-
/* We need to grab some memory, and put together a FIN,
* and then put it into the queue to be sent.
*/
@@ -1491,6 +1495,8 @@ void tcp_close(struct sock *sk, long timeout)
return;
}
+ unlock_kernel();
+
 	/* It is questionable what the role of this is now.
 	 * In any event it should either be removed, or the
 	 * increment of SLT_KEEPALIVE be done; this is causing
@@ -1534,24 +1540,23 @@ void tcp_close(struct sock *sk, long timeout)
if (timeout) {
struct task_struct *tsk = current;
- struct wait_queue wait = { tsk, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
- release_sock(sk);
while (1) {
tsk->state = TASK_INTERRUPTIBLE;
if (!closing(sk))
break;
+ release_sock(sk);
timeout = schedule_timeout(timeout);
+ lock_sock(sk);
if (signal_pending(tsk) || !timeout)
break;
}
tsk->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &wait);
-
- lock_sock(sk);
}
/* Now that the socket is dead, if we are in the FIN_WAIT2 state
@@ -1559,23 +1564,40 @@ void tcp_close(struct sock *sk, long timeout)
*/
tcp_check_fin_timer(sk);
- release_sock(sk);
sk->dead = 1;
+
+ release_sock(sk);
+ lock_kernel();
}
/*
* Wait for an incoming connection, avoid race
- * conditions. This must be called with the socket locked.
+ * conditions. This must be called with the socket locked,
+ * and without the kernel lock held.
*/
static struct open_request * wait_for_connect(struct sock * sk,
struct open_request **pprev)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
struct open_request *req;
- add_wait_queue(sk->sleep, &wait);
+ /*
+ * True wake-one mechanism for incoming connections: only
+ * one process gets woken up, not the 'whole herd'.
+ * Since we do not 'race & poll' for established sockets
+ * anymore, the common case will execute the loop only once.
+ *
+ * Subtle issue: "add_wait_queue_exclusive()" will be added
+ * after any current non-exclusive waiters, and we know that
+ * it will always _stay_ after any new non-exclusive waiters
+ * because all non-exclusive waiters are added at the
+ * beginning of the wait-queue. As such, it's ok to "drop"
+ * our exclusiveness temporarily when we get woken up without
+ * having to remove and re-insert us on the wait queue.
+ */
+ add_wait_queue_exclusive(sk->sleep, &wait);
for (;;) {
- current->state = TASK_INTERRUPTIBLE;
+ current->state = TASK_EXCLUSIVE | TASK_INTERRUPTIBLE;
release_sock(sk);
schedule();
lock_sock(sk);
@@ -1603,6 +1625,7 @@ struct sock *tcp_accept(struct sock *sk, int flags)
struct sock *newsk = NULL;
int error;
+ unlock_kernel();
lock_sock(sk);
/* We need to make sure that this socket is listening,
@@ -1633,16 +1656,17 @@ struct sock *tcp_accept(struct sock *sk, int flags)
sk->ack_backlog--;
if(sk->keepopen)
tcp_inc_slow_timer(TCP_SLT_KEEPALIVE);
-
release_sock(sk);
+ lock_kernel();
return newsk;
out:
/* sk should be in LISTEN state, thus accept can use sk->err for
- * internal purposes without stomping one anyone's feed.
+	 * internal purposes without stomping on anyone's feet.
*/
sk->err = error;
release_sock(sk);
+ lock_kernel();
return newsk;
}
@@ -1765,6 +1789,8 @@ extern void __skb_cb_too_small_for_tcp(int, int);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
+ unsigned long goal;
+ int order;
if(sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
@@ -1790,4 +1816,37 @@ void __init tcp_init(void)
NULL, NULL);
if(!tcp_timewait_cachep)
panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
+
+ /* Size and allocate the main established and bind bucket
+ * hash tables.
+ *
+ * The methodology is similar to that of the buffer cache.
+ */
+ goal = num_physpages >> (20 - PAGE_SHIFT);
+ for(order = 5; (1UL << order) < goal; order++)
+ ;
+ do {
+ tcp_ehash_size = (1UL << order) * PAGE_SIZE /
+ sizeof(struct sock *);
+ tcp_ehash = (struct sock **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (tcp_ehash == NULL && --order > 4);
+
+ if (!tcp_ehash)
+ panic("Failed to allocate TCP established hash table\n");
+ memset(tcp_ehash, 0, tcp_ehash_size * sizeof(struct sock *));
+
+ do {
+ tcp_bhash_size = (1UL << order) * PAGE_SIZE /
+ sizeof(struct tcp_bind_bucket *);
+ tcp_bhash = (struct tcp_bind_bucket **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (tcp_bhash == NULL && --order > 4);
+
+ if (!tcp_bhash)
+ panic("Failed to allocate TCP bind hash table\n");
+ memset(tcp_bhash, 0, tcp_bhash_size * sizeof(struct tcp_bind_bucket *));
+
+ printk("TCP: Hash tables configured (established %d bind %d)\n",
+ tcp_ehash_size, tcp_bhash_size);
}
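
The new sizing code in tcp_init() scales both hash tables with physical memory, much as the buffer cache does: goal = num_physpages >> (20 - PAGE_SHIFT) works out to roughly one page of hash table per megabyte of RAM. Worked through for a 128 MB box with 4 KB pages: num_physpages = 32768, goal = 32768 >> 8 = 128 pages, the loop stops at order 7 since 1 << 7 = 128, and the result is a 512 KB table of 131072 chain heads (with 32-bit pointers); on allocation failure the order steps down, but never below 5. The order computation alone, as a sketch:

	static int tcp_hash_order(unsigned long num_physpages, int page_shift)
	{
		unsigned long goal = num_physpages >> (20 - page_shift);
		int order;

		/* smallest order >= 5 whose page count covers the goal */
		for (order = 5; (1UL << order) < goal; order++)
			;
		return order;	/* 128 MB with 4 KB pages -> order 7 */
	}
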
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a607a749..af4165fce 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.164 1999/05/08 21:09:52 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.169 1999/06/09 08:29:13 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -748,7 +748,6 @@ static void tcp_ack_saw_tstamp(struct sock *sk, struct tcp_opt *tp,
static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
{
struct sk_buff *skb = skb_peek(&sk->write_queue);
- __u32 when = tp->rto - (tcp_time_stamp - TCP_SKB_CB(skb)->when);
/* Some data was ACK'd, if still retransmitting (due to a
* timeout), resend more of the retransmit queue. The
@@ -758,6 +757,9 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
tcp_xmit_retransmit_queue(sk);
tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
} else {
+ __u32 when = tp->rto - (tcp_time_stamp - TCP_SKB_CB(skb)->when);
+ if ((__s32)when < 0)
+ when = 1;
tcp_reset_xmit_timer(sk, TIME_RETRANS, when);
}
}
@@ -785,8 +787,6 @@ static int tcp_ack(struct sock *sk, struct tcphdr *th,
if (after(ack, tp->snd_nxt) || before(ack, tp->snd_una))
goto uninteresting_ack;
- dst_confirm(sk->dst_cache);
-
/* If there is data set flag 1 */
if (len != th->doff*4) {
flag |= FLAG_DATA;
@@ -882,6 +882,24 @@ static int tcp_ack(struct sock *sk, struct tcphdr *th,
/* Clear any aborted fast retransmit starts. */
tp->dup_acks = 0;
}
+	/* This is not a brain fart; I did think about it a bit. 8)
+	 *
+	 * Forward progress is indicated if:
+	 * 1. the ack acknowledges new data; or
+	 * 2. the ack is a duplicate, but was caused by a new segment
+	 *    arrival. This case is filtered by:
+	 *    - it contains no data, syn or fin;
+	 *    - it does not update the window; or
+	 * 3. a new SACK arrived. That is difficult to check, so we ignore it.
+	 *
+	 * Forward progress is also indicated by the arrival of new data
+	 * caused by a window opened from our side. That case is more
+	 * difficult and is handled (alas, incorrectly) in tcp_data_queue().
+	 * --ANK (990513)
+ */
+ if (ack != tp->snd_una || (flag == 0 && !th->fin))
+ dst_confirm(sk->dst_cache);
+
/* Remember the highest ack received. */
tp->snd_una = ack;
return 1;
@@ -896,8 +914,11 @@ extern void tcp_tw_schedule(struct tcp_tw_bucket *tw);
extern void tcp_tw_reschedule(struct tcp_tw_bucket *tw);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
+/* Must be called only from BH context. */
void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
+ SOCKHASH_LOCK_WRITE_BH();
+
/* Unlink from various places. */
if(tw->bind_next)
tw->bind_next->bind_pprev = tw->bind_pprev;
@@ -915,6 +936,8 @@ void tcp_timewait_kill(struct tcp_tw_bucket *tw)
tw->sklist_next->sklist_prev = tw->sklist_prev;
tw->sklist_prev->sklist_next = tw->sklist_next;
+ SOCKHASH_UNLOCK_WRITE_BH();
+
/* Ok, now free it up. */
kmem_cache_free(tcp_timewait_cachep, tw);
}
@@ -945,6 +968,7 @@ int tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
struct sock *sk;
struct tcp_func *af_specific = tw->af_specific;
__u32 isn;
+ int ret;
isn = tw->rcv_nxt + 128000;
if(isn == 0)
@@ -953,14 +977,25 @@ int tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
tcp_timewait_kill(tw);
sk = af_specific->get_sock(skb, th);
if(sk == NULL ||
- !ipsec_sk_policy(sk,skb) ||
- atomic_read(&sk->sock_readers) != 0)
+ !ipsec_sk_policy(sk,skb))
return 0;
+
+ bh_lock_sock(sk);
+
+ /* Default is to discard the frame. */
+ ret = 0;
+
+ if(sk->lock.users)
+ goto out_unlock;
+
skb_set_owner_r(skb, sk);
af_specific = sk->tp_pinfo.af_tcp.af_specific;
+
if(af_specific->conn_request(sk, skb, isn) < 0)
- return 1; /* Toss a reset back. */
- return 0; /* Discard the frame. */
+ ret = 1; /* Toss a reset back. */
+ out_unlock:
+ bh_unlock_sock(sk);
+ return ret;
}
/* Check RST or SYN */
@@ -1013,7 +1048,7 @@ static __inline__ void tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *t
sk->prot->inuse--;
/* Step 4: Hash TW into TIMEWAIT half of established hash table. */
- head = &tcp_established_hash[sk->hashent + (TCP_HTABLE_SIZE/2)];
+ head = &tcp_ehash[sk->hashent + (tcp_ehash_size >> 1)];
sktw = (struct sock *)tw;
if((sktw->next = *head) != NULL)
(*head)->pprev = &sktw->next;
@@ -1051,7 +1086,9 @@ void tcp_time_wait(struct sock *sk)
}
#endif
/* Linkage updates. */
+ SOCKHASH_LOCK_WRITE();
tcp_tw_hashdance(sk, tw);
+ SOCKHASH_UNLOCK_WRITE();
/* Get the TIME_WAIT timeout firing. */
tcp_tw_schedule(tw);
@@ -1801,7 +1838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
}
- flg = *(((u32 *)th) + 3) & ~htonl(0x8 << 16);
+ flg = *(((u32 *)th) + 3) & ~htonl(0xFC8 << 16);
/* pred_flags is 0xS?10 << 16 + snd_wnd
 	 * if header_prediction is to be made
@@ -2031,8 +2068,26 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* These use the socket TOS..
* might want to be the received TOS
*/
- if(th->ack)
- return 1;
+ if(th->ack) {
+ struct sock *realsk;
+ int ret;
+
+ realsk = tp->af_specific->get_sock(skb, th);
+ if(realsk == sk)
+ return 1;
+
+ bh_lock_sock(realsk);
+ ret = 0;
+ if(realsk->lock.users != 0) {
+ skb_orphan(skb);
+ sk_add_backlog(realsk, skb);
+ } else {
+ ret = tcp_rcv_state_process(realsk, skb,
+ skb->h.th, skb->len);
+ }
+ bh_unlock_sock(realsk);
+ return ret;
+ }
if(th->syn) {
if(tp->af_specific->conn_request(sk, skb, 0) < 0)
@@ -2067,21 +2122,81 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* not be in line code. [AC]
*/
if(th->ack) {
- tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
-
- /* We got an ack, but it's not a good ack. */
- if(!tcp_ack(sk,th, TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(skb)->ack_seq, len))
+ /* rfc793:
+ * "If the state is SYN-SENT then
+ * first check the ACK bit
+ * If the ACK bit is set
+ * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
+ * a reset (unless the RST bit is set, if so drop
+ * the segment and return)"
+ *
+		 * I cite this passage to emphasize one essential
+		 * detail: this check differs from the one in the
+		 * established state, SND.UNA <= SEG.ACK <= SND.NXT.
+ * SEG_ACK == SND.UNA == ISS is invalid in SYN-SENT,
+ * because we have no previous data sent before SYN.
+ * --ANK(990513)
+ *
+ * We do not send data with SYN, so that RFC-correct
+ * test reduces to:
+ */
+ if (sk->zapped ||
+ TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
return 1;
- if(th->rst) {
+ /* Now ACK is acceptable.
+ *
+ * "If the RST bit is set
+ * If the ACK was acceptable then signal the user "error:
+ * connection reset", drop the segment, enter CLOSED state,
+ * delete TCB, and return."
+ */
+
+ if (th->rst) {
tcp_reset(sk);
goto discard;
}
- if(!th->syn)
+ /* rfc793:
+ * "fifth, if neither of the SYN or RST bits is set then
+ * drop the segment and return."
+ *
+ * See note below!
+ * --ANK(990513)
+ */
+
+ if (!th->syn)
goto discard;
+ /* rfc793:
+ * "If the SYN bit is on ...
+ * are acceptable then ...
+ * (our SYN has been ACKed), change the connection
+ * state to ESTABLISHED..."
+ *
+ * Do you see? SYN-less ACKs in SYN-SENT state are
+ * completely ignored.
+ *
+		 * The bug causing stalled SYN-SENT sockets
+		 * was here: tcp_ack advanced snd_una and cancelled the
+		 * retransmit timer, so that a bare ACK received
+		 * in SYN-SENT state (even with an invalid ack==ISS,
+		 * because the tcp_ack check is too weak for SYN-SENT)
+		 * moved the socket to an invalid semi-SYN-SENT,
+		 * semi-ESTABLISHED state and the connection hung.
+		 *
+		 * There exist buggy stacks which really do send
+		 * such ACKs: e.g. 202.226.91.94 (okigate.oki.co.jp).
+		 * Actually, if this host had not tried to get something
+		 * from ftp.inr.ac.ru I'd never have found this bug 8)
+ *
+ * --ANK (990514)
+ */
+
+ tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
+ tcp_ack(sk,th, TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(skb)->ack_seq, len);
+
/* Ok.. it's good. Set up sequence numbers and
* move to established.
*/
@@ -2206,8 +2321,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
!(th->fin && TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)) {
if (!th->rst) {
tcp_send_ack(sk);
- goto discard;
}
+ goto discard;
}
/* step 2: check RST bit */
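
The long SYN-SENT rework above reduces to a single predicate: since Linux sends no data with its SYN, the RFC 793 acceptability window "ISS < SEG.ACK <= SND.NXT" collapses to an equality test, and anything else earns a reset. Restated as a hypothetical helper:

	static int syn_sent_ack_acceptable(struct sock *sk, struct sk_buff *skb)
	{
		struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

		/* the ack must cover exactly our SYN */
		return !sk->zapped &&
		       TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt;
	}
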
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b5070c3a7..564e859f2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.175 1999/05/08 21:09:54 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.180 1999/06/09 08:29:19 davem Exp $
*
* IPv4 specific functions
*
@@ -90,12 +90,14 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
*/
-struct sock *tcp_established_hash[TCP_HTABLE_SIZE];
+struct sock **tcp_ehash;
+int tcp_ehash_size;
/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
-struct tcp_bind_bucket *tcp_bound_hash[TCP_BHTABLE_SIZE];
+struct tcp_bind_bucket **tcp_bhash;
+int tcp_bhash_size;
/* All sockets in TCP_LISTEN state will be in here. This is the only table
* where wildcard'd TCP sockets can exist. Hash function here is just local
@@ -117,7 +119,7 @@ int tcp_port_rover = (1024 - 1);
static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
__u32 faddr, __u16 fport)
{
- return ((laddr ^ lport) ^ (faddr ^ fport)) & ((TCP_HTABLE_SIZE/2) - 1);
+ return ((laddr ^ lport) ^ (faddr ^ fport)) & ((tcp_ehash_size >> 1) - 1);
}
static __inline__ int tcp_sk_hashfn(struct sock *sk)
@@ -136,8 +138,8 @@ void tcp_bucket_unlock(struct sock *sk)
struct tcp_bind_bucket *tb;
unsigned short snum = sk->num;
- SOCKHASH_LOCK();
- for(tb = tcp_bound_hash[tcp_bhashfn(snum)]; tb; tb = tb->next) {
+ SOCKHASH_LOCK_WRITE();
+ for(tb = tcp_bhash[tcp_bhashfn(snum)]; tb; tb = tb->next) {
if(tb->port == snum) {
if(tb->owners == NULL &&
(tb->flags & TCPB_FLAG_LOCKED)) {
@@ -148,9 +150,10 @@ void tcp_bucket_unlock(struct sock *sk)
break;
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
+/* The sockhash lock must be held as a writer here. */
struct tcp_bind_bucket *tcp_bucket_create(unsigned short snum)
{
struct tcp_bind_bucket *tb;
@@ -158,7 +161,7 @@ struct tcp_bind_bucket *tcp_bucket_create(unsigned short snum)
tb = kmem_cache_alloc(tcp_bucket_cachep, SLAB_ATOMIC);
if(tb != NULL) {
struct tcp_bind_bucket **head =
- &tcp_bound_hash[tcp_bhashfn(snum)];
+ &tcp_bhash[tcp_bhashfn(snum)];
tb->port = snum;
tb->flags = TCPB_FLAG_LOCKED;
tb->owners = NULL;
@@ -176,13 +179,18 @@ struct tcp_bind_bucket *tcp_bucket_create(unsigned short snum)
*/
static __inline__ int tcp_bucket_check(unsigned short snum)
{
- struct tcp_bind_bucket *tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ struct tcp_bind_bucket *tb;
+ int ret = 0;
+
+ SOCKHASH_LOCK_WRITE();
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for( ; (tb && (tb->port != snum)); tb = tb->next)
;
if(tb == NULL && tcp_bucket_create(snum) == NULL)
- return 1;
- else
- return 0;
+ ret = 1;
+ SOCKHASH_UNLOCK_WRITE();
+
+ return ret;
}
#endif
@@ -191,8 +199,8 @@ static int tcp_v4_verify_bind(struct sock *sk, unsigned short snum)
struct tcp_bind_bucket *tb;
int result = 0;
- SOCKHASH_LOCK();
- for(tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ SOCKHASH_LOCK_WRITE();
+ for(tb = tcp_bhash[tcp_bhashfn(snum)];
(tb && (tb->port != snum));
tb = tb->next)
;
@@ -256,7 +264,7 @@ static int tcp_v4_verify_bind(struct sock *sk, unsigned short snum)
}
}
go_like_smoke:
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
return result;
}
@@ -268,13 +276,13 @@ unsigned short tcp_good_socknum(void)
int remaining = (high - low) + 1;
int rover;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
rover = tcp_port_rover;
do {
rover += 1;
if((rover < low) || (rover > high))
rover = low;
- tb = tcp_bound_hash[tcp_bhashfn(rover)];
+ tb = tcp_bhash[tcp_bhashfn(rover)];
for( ; tb; tb = tb->next) {
if(tb->port == rover)
goto next;
@@ -288,7 +296,7 @@ unsigned short tcp_good_socknum(void)
rover = 0;
if (tb != NULL)
tb->flags |= TCPB_FLAG_GOODSOCKNUM;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
return rover;
}
@@ -298,20 +306,20 @@ static void tcp_v4_hash(struct sock *sk)
if (sk->state != TCP_CLOSE) {
struct sock **skp;
- SOCKHASH_LOCK();
- skp = &tcp_established_hash[(sk->hashent = tcp_sk_hashfn(sk))];
+ SOCKHASH_LOCK_WRITE();
+ skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))];
if((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next;
*skp = sk;
sk->pprev = skp;
tcp_sk_bindify(sk);
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
}
static void tcp_v4_unhash(struct sock *sk)
{
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
if(sk->pprev) {
if(sk->next)
sk->next->pprev = sk->pprev;
@@ -320,14 +328,14 @@ static void tcp_v4_unhash(struct sock *sk)
tcp_reg_zap(sk);
tcp_sk_unbindify(sk);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void tcp_v4_rehash(struct sock *sk)
{
unsigned char state;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
state = sk->state;
if(sk->pprev != NULL) {
if(sk->next)
@@ -342,7 +350,7 @@ static void tcp_v4_rehash(struct sock *sk)
if(state == TCP_LISTEN)
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
else
- skp = &tcp_established_hash[(sk->hashent = tcp_sk_hashfn(sk))];
+ skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))];
if((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next;
@@ -351,7 +359,7 @@ static void tcp_v4_rehash(struct sock *sk)
if(state == TCP_LISTEN)
tcp_sk_bindify(sk);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
/* Don't inline this cruft. Here are some nice properties to
@@ -395,10 +403,10 @@ static struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int d
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- * It is assumed that this code only gets called from within NET_BH.
+ *
+ * The sockhash lock must be held as a reader here.
*/
-static inline struct sock *__tcp_v4_lookup(struct tcphdr *th,
- u32 saddr, u16 sport,
+static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
u32 daddr, u16 dport, int dif)
{
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
@@ -416,7 +424,7 @@ static inline struct sock *__tcp_v4_lookup(struct tcphdr *th,
* have wildcards anyways.
*/
hash = tcp_hashfn(daddr, hnum, saddr, sport);
- for(sk = tcp_established_hash[hash]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash]; sk; sk = sk->next) {
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) {
if (sk->state == TCP_ESTABLISHED)
TCP_RHASH(sport) = sk;
@@ -424,7 +432,7 @@ static inline struct sock *__tcp_v4_lookup(struct tcphdr *th,
}
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = tcp_established_hash[hash+(TCP_HTABLE_SIZE/2)]; sk; sk = sk->next)
+ for(sk = tcp_ehash[hash+(tcp_ehash_size >> 1)]; sk; sk = sk->next)
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
sk = tcp_v4_lookup_listener(daddr, hnum, dif);
@@ -434,7 +442,13 @@ hit:
__inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
{
- return __tcp_v4_lookup(0, saddr, sport, daddr, dport, dif);
+ struct sock *sk;
+
+ SOCKHASH_LOCK_READ();
+ sk = __tcp_v4_lookup(saddr, sport, daddr, dport, dif);
+ SOCKHASH_UNLOCK_READ();
+
+ return sk;
}
#ifdef CONFIG_IP_TRANSPARENT_PROXY
@@ -462,9 +476,12 @@ static struct sock *tcp_v4_proxy_lookup(unsigned short num, unsigned long raddr,
paddr = idev->ifa_list->ifa_local;
}
- /* This code must run only from NET_BH. */
+	/* We must obtain the sockhash lock here; we are always
+	 * in BH context.
+ */
+ SOCKHASH_LOCK_READ_BH();
{
- struct tcp_bind_bucket *tb = tcp_bound_hash[tcp_bhashfn(hnum)];
+ struct tcp_bind_bucket *tb = tcp_bhash[tcp_bhashfn(hnum)];
for( ; (tb && tb->port != hnum); tb = tb->next)
;
if(tb == NULL)
@@ -505,7 +522,7 @@ pass2:
}
next:
if(firstpass--) {
- struct tcp_bind_bucket *tb = tcp_bound_hash[tcp_bhashfn(hpnum)];
+ struct tcp_bind_bucket *tb = tcp_bhash[tcp_bhashfn(hpnum)];
for( ; (tb && tb->port != hpnum); tb = tb->next)
;
if(tb) {
@@ -514,6 +531,7 @@ next:
}
}
gotit:
+ SOCKHASH_UNLOCK_READ_BH();
return result;
}
#endif /* CONFIG_IP_TRANSPARENT_PROXY */
@@ -540,21 +558,23 @@ static int tcp_v4_unique_address(struct sock *sk)
int retval = 1;
/* Freeze the hash while we snoop around. */
- SOCKHASH_LOCK();
- tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ SOCKHASH_LOCK_READ();
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for(; tb; tb = tb->next) {
if(tb->port == snum && tb->owners != NULL) {
/* Almost certainly the re-use port case, search the real hashes
* so it actually scales.
*/
- sk = __tcp_v4_lookup(NULL, sk->daddr, sk->dport,
+ sk = __tcp_v4_lookup(sk->daddr, sk->dport,
sk->rcv_saddr, snum, sk->bound_dev_if);
+ SOCKHASH_UNLOCK_READ();
+
if((sk != NULL) && (sk->state != TCP_LISTEN))
retval = 0;
- break;
+ return retval;
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return retval;
}
@@ -727,16 +747,17 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip, unsigned
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- if (atomic_read(&sk->sock_readers))
- return;
-
- /* Don't interested in TCP_LISTEN and open_requests (SYN-ACKs
+ /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 	 * sent out by Linux are always < 576 bytes, so they should go through
* unfragmented).
*/
if (sk->state == TCP_LISTEN)
return;
+ bh_lock_sock(sk);
+ if(sk->lock.users != 0)
+ goto out;
+
/* We don't check in the destentry if pmtu discovery is forbidden
* on this route. We just assume that no packet_to_big packets
* are send back when pmtu discovery is not active.
@@ -744,7 +765,8 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip, unsigned
* route, but I think that's acceptable.
*/
if (sk->dst_cache == NULL)
- return;
+ goto out;
+
ip_rt_update_pmtu(sk->dst_cache, mtu);
if (sk->ip_pmtudisc != IP_PMTUDISC_DONT &&
tp->pmtu_cookie > sk->dst_cache->pmtu) {
@@ -757,6 +779,8 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip, unsigned
*/
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
+out:
+ bh_unlock_sock(sk);
}
/*
@@ -849,17 +873,6 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
switch (sk->state) {
struct open_request *req, *prev;
case TCP_LISTEN:
- /* Prevent race conditions with accept() -
- * ICMP is unreliable.
- */
- if (atomic_read(&sk->sock_readers)) {
- net_statistics.LockDroppedIcmps++;
- /* If too many ICMPs get dropped on busy
- * servers this needs to be solved differently.
- */
- return;
- }
-
/* The final ACK of the handshake should be already
* handled in the new socket context, not here.
* Strictly speaking - an ICMP error for the final
@@ -869,12 +882,24 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
if (!no_flags && !th->syn && !th->ack)
return;
+ /* Prevent race conditions with accept() -
+ * ICMP is unreliable.
+ */
+ bh_lock_sock(sk);
+ if (sk->lock.users != 0) {
+ net_statistics.LockDroppedIcmps++;
+ /* If too many ICMPs get dropped on busy
+ * servers this needs to be solved differently.
+ */
+ goto out_unlock;
+ }
+
req = tcp_v4_search_req(tp, iph, th, &prev);
if (!req)
- return;
+ goto out_unlock;
if (seq != req->snt_isn) {
net_statistics.OutOfWindowIcmps++;
- return;
+ goto out_unlock;
}
if (req->sk) {
/*
@@ -884,6 +909,7 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
* but only with the next operation on the socket after
* accept.
*/
+ bh_unlock_sock(sk);
sk = req->sk;
} else {
/*
@@ -896,6 +922,8 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
tcp_synq_unlink(tp, req, prev);
req->class->destructor(req);
tcp_openreq_free(req);
+ out_unlock:
+ bh_unlock_sock(sk);
return;
}
break;
@@ -1025,9 +1053,10 @@ static struct sock *tcp_v4_search_proxy_openreq(struct sk_buff *skb)
{
struct iphdr *iph = skb->nh.iph;
struct tcphdr *th = (struct tcphdr *)(skb->nh.raw + iph->ihl*4);
- struct sock *sk;
+ struct sock *sk = NULL;
int i;
+ SOCKHASH_LOCK_READ();
for (i=0; i<TCP_LHTABLE_SIZE; i++) {
for(sk = tcp_listening_hash[i]; sk; sk = sk->next) {
struct open_request *dummy;
@@ -1035,10 +1064,12 @@ static struct sock *tcp_v4_search_proxy_openreq(struct sk_buff *skb)
th, &dummy) &&
(!sk->bound_dev_if ||
sk->bound_dev_if == skb->dev->ifindex))
- return sk;
+ goto out;
}
}
- return NULL;
+out:
+ SOCKHASH_UNLOCK_READ();
+ return sk;
}
/*
@@ -1319,7 +1350,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
/* Clone the TCP header template */
newsk->dport = req->rmt_port;
- atomic_set(&newsk->sock_readers, 0);
+ sock_lock_init(newsk);
+
atomic_set(&newsk->rmem_alloc, 0);
skb_queue_head_init(&newsk->receive_queue);
atomic_set(&newsk->wmem_alloc, 0);
@@ -1328,9 +1360,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newsk->done = 0;
newsk->proc = 0;
- newsk->pair = NULL;
- skb_queue_head_init(&newsk->back_log);
+ newsk->backlog.head = newsk->backlog.tail = NULL;
skb_queue_head_init(&newsk->error_queue);
+ newsk->write_space = tcp_write_space;
#ifdef CONFIG_FILTER
if ((filter = newsk->filter) != NULL)
sk_filter_charge(newsk, filter);
@@ -1552,7 +1584,8 @@ static inline struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb)
}
/* Check for SYN|ACK */
- if (flg & __constant_htonl(0x00120000)) {
+ flg &= __constant_htonl(0x00120000);
+ if (flg) {
struct open_request *req, *dummy;
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
@@ -1570,8 +1603,17 @@ static inline struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb)
return sk;
}
+/* The socket must have its spinlock held when we get
+ * here.
+ *
+ * We have a potential double-lock case here, so even when
+ * doing backlog processing we use the BH locking scheme.
+ * This is because we cannot sleep with the original spinlock
+ * held.
+ */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
+ int need_unlock = 0;
#ifdef CONFIG_FILTER
struct sk_filter *filter = sk->filter;
if (filter && sk_filter(skb, filter))
@@ -1591,7 +1633,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-
if (sk->state == TCP_LISTEN) {
struct sock *nsk;
@@ -1604,17 +1645,22 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		 * otherwise we just short-circuit this and continue with
 		 * the new socket.
*/
- if (atomic_read(&nsk->sock_readers)) {
- skb_orphan(skb);
- __skb_queue_tail(&nsk->back_log, skb);
- return 0;
+ if (nsk != sk) {
+ bh_lock_sock(nsk);
+ if (nsk->lock.users != 0) {
+ skb_orphan(skb);
+ sk_add_backlog(nsk, skb);
+ bh_unlock_sock(nsk);
+ return 0;
+ }
+ need_unlock = 1;
+ sk = nsk;
}
- sk = nsk;
}
if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
goto reset;
- return 0;
+ goto out_maybe_unlock;
reset:
tcp_v4_send_reset(skb);
@@ -1625,6 +1671,9 @@ discard:
* might be destroyed here. This current version compiles correctly,
* but you have been warned.
*/
+out_maybe_unlock:
+ if(need_unlock)
+ bh_unlock_sock(sk);
return 0;
}
@@ -1636,6 +1685,7 @@ int tcp_v4_rcv(struct sk_buff *skb, unsigned short len)
{
struct tcphdr *th;
struct sock *sk;
+ int ret;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
@@ -1681,8 +1731,10 @@ int tcp_v4_rcv(struct sk_buff *skb, unsigned short len)
IPCB(skb)->redirport, skb->dev->ifindex);
else {
#endif
- sk = __tcp_v4_lookup(th, skb->nh.iph->saddr, th->source,
+ SOCKHASH_LOCK_READ_BH();
+ sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
skb->nh.iph->daddr, th->dest, skb->dev->ifindex);
+ SOCKHASH_UNLOCK_READ_BH();
#ifdef CONFIG_IP_TRANSPARENT_PROXY
if (!sk)
sk = tcp_v4_search_proxy_openreq(skb);
@@ -1702,11 +1754,16 @@ int tcp_v4_rcv(struct sk_buff *skb, unsigned short len)
if (sk->state == TCP_TIME_WAIT)
goto do_time_wait;
- if (!atomic_read(&sk->sock_readers))
- return tcp_v4_do_rcv(sk, skb);
- __skb_queue_tail(&sk->back_log, skb);
- return 0;
+ bh_lock_sock(sk);
+ ret = 0;
+ if (!sk->lock.users)
+ ret = tcp_v4_do_rcv(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
+
+ return ret;
no_tcp_socket:
tcp_v4_send_reset(skb);
@@ -1944,6 +2001,8 @@ __initfunc(void tcp_v4_init(struct net_proto_family *ops))
tcp_inode.i_sock = 1;
tcp_inode.i_uid = 0;
tcp_inode.i_gid = 0;
+ init_waitqueue_head(&tcp_inode.i_wait);
+ init_waitqueue_head(&tcp_inode.u.socket_i.wait);
tcp_socket->inode = &tcp_inode;
tcp_socket->state = SS_UNCONNECTED;
@@ -1952,6 +2011,11 @@ __initfunc(void tcp_v4_init(struct net_proto_family *ops))
if ((err=ops->create(tcp_socket, IPPROTO_TCP))<0)
panic("Failed to create the TCP control socket.\n");
tcp_socket->sk->allocation=GFP_ATOMIC;
- tcp_socket->sk->num = 256; /* Don't receive any data */
tcp_socket->sk->ip_ttl = MAXTTL;
+
+ /* Unhash it so that IP input processing does not even
+	 * see it; we do not wish this socket to see incoming
+ * packets.
+ */
+ tcp_socket->sk->prot->unhash(tcp_socket->sk);
}
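
The tail of tcp_v4_rcv() shows the pattern this patch installs throughout the stack: bottom-half code takes the per-socket spinlock, processes the segment if no user context owns the socket, and otherwise parks it on the backlog that release_sock() drains later. Restated as a hypothetical helper:

	static int tcp_v4_deliver(struct sock *sk, struct sk_buff *skb)
	{
		int ret = 0;

		bh_lock_sock(sk);
		if (!sk->lock.users)
			ret = tcp_v4_do_rcv(sk, skb);	/* process right away */
		else
			sk_add_backlog(sk, skb);	/* the owner will drain it */
		bh_unlock_sock(sk);

		return ret;
	}
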
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9a096f0f3..18b5ebf80 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_output.c,v 1.108 1999/05/08 21:48:59 davem Exp $
+ * Version: $Id: tcp_output.c,v 1.110 1999/05/27 00:37:45 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -36,6 +36,8 @@
#include <net/tcp.h>
+#include <linux/smp_lock.h>
+
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
@@ -240,6 +242,11 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
/* Rechecksum original buffer. */
skb->csum = csum_partial(skb->data, skb->len, 0);
+	/* Looks stupid, but our code really uses the 'when' field of
+	 * skbs which it has never sent before. --ANK
+ */
+ TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
+
/* Link BUFF into the send queue. */
__skb_append(skb, buff);
@@ -961,6 +968,7 @@ void tcp_connect(struct sock *sk, struct sk_buff *buff, int mtu)
/* Ok, now lock the socket before we make it visible to
* the incoming packet engine.
*/
+ unlock_kernel();
lock_sock(sk);
/* Socket identity change complete, no longer
@@ -988,6 +996,7 @@ void tcp_connect(struct sock *sk, struct sk_buff *buff, int mtu)
/* Now, it is safe to release the socket. */
release_sock(sk);
+ lock_kernel();
}
/* Send out a delayed ack, the caller does the policy checking
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ad6ccace9..d23eef143 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_timer.c,v 1.62 1999/05/08 21:09:55 davem Exp $
+ * Version: $Id: tcp_timer.c,v 1.64 1999/05/27 00:37:31 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -168,15 +168,16 @@ void tcp_delack_timer(unsigned long data)
{
struct sock *sk = (struct sock*)data;
+ bh_lock_sock(sk);
if(!sk->zapped &&
sk->tp_pinfo.af_tcp.delayed_acks &&
sk->state != TCP_CLOSE) {
- /* If socket is currently locked, defer the ACK. */
- if (!atomic_read(&sk->sock_readers))
+ if (!sk->lock.users)
tcp_send_ack(sk);
else
tcp_send_delayed_ack(&(sk->tp_pinfo.af_tcp), HZ/10);
}
+ bh_unlock_sock(sk);
}
void tcp_probe_timer(unsigned long data)
@@ -187,9 +188,11 @@ void tcp_probe_timer(unsigned long data)
if(sk->zapped)
return;
- if (atomic_read(&sk->sock_readers)) {
+ bh_lock_sock(sk);
+ if (sk->lock.users) {
/* Try again later. */
tcp_reset_xmit_timer(sk, TIME_PROBE0, HZ/5);
+ bh_unlock_sock(sk);
return;
}
@@ -216,6 +219,7 @@ void tcp_probe_timer(unsigned long data)
/* Only send another probe if we didn't close things up. */
tcp_send_probe0(sk);
}
+ bh_unlock_sock(sk);
}
static __inline__ int tcp_keepopen_proc(struct sock *sk)
@@ -253,8 +257,9 @@ static void tcp_bucketgc(unsigned long data)
{
 	int i, reaped = 0;
- for(i = 0; i < TCP_BHTABLE_SIZE; i++) {
- struct tcp_bind_bucket *tb = tcp_bound_hash[i];
+ SOCKHASH_LOCK_WRITE_BH();
+ for(i = 0; i < tcp_bhash_size; i++) {
+ struct tcp_bind_bucket *tb = tcp_bhash[i];
while(tb) {
struct tcp_bind_bucket *next = tb->next;
@@ -274,6 +279,8 @@ static void tcp_bucketgc(unsigned long data)
tb = next;
}
}
+ SOCKHASH_UNLOCK_WRITE_BH();
+
if(reaped != 0) {
struct tcp_sl_timer *slt = (struct tcp_sl_timer *)data;
@@ -294,8 +301,14 @@ static void tcp_twkill(unsigned long data)
struct tcp_tw_bucket *tw;
int killed = 0;
+ /* The death-row tw chains are only ever touched
+ * in BH context so no locking is needed.
+ */
tw = tcp_tw_death_row[tcp_tw_death_row_slot];
tcp_tw_death_row[tcp_tw_death_row_slot] = NULL;
+ tcp_tw_death_row_slot =
+ ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
+
while(tw != NULL) {
struct tcp_tw_bucket *next = tw->next_death;
@@ -307,8 +320,6 @@ static void tcp_twkill(unsigned long data)
struct tcp_sl_timer *slt = (struct tcp_sl_timer *)data;
atomic_sub(killed, &slt->count);
}
- tcp_tw_death_row_slot =
- ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
}
/* These are always called from BH context. See callers in
@@ -319,12 +330,14 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw)
int slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
struct tcp_tw_bucket **tpp = &tcp_tw_death_row[slot];
+ SOCKHASH_LOCK_WRITE_BH();
if((tw->next_death = *tpp) != NULL)
(*tpp)->pprev_death = &tw->next_death;
*tpp = tw;
tw->pprev_death = tpp;
tw->death_slot = slot;
+ SOCKHASH_UNLOCK_WRITE_BH();
tcp_inc_slow_timer(TCP_SLT_TWKILL);
}
@@ -335,6 +348,7 @@ void tcp_tw_reschedule(struct tcp_tw_bucket *tw)
struct tcp_tw_bucket **tpp;
int slot;
+ SOCKHASH_LOCK_WRITE_BH();
if(tw->next_death)
tw->next_death->pprev_death = tw->pprev_death;
*tw->pprev_death = tw->next_death;
@@ -348,16 +362,21 @@ void tcp_tw_reschedule(struct tcp_tw_bucket *tw)
tw->pprev_death = tpp;
tw->death_slot = slot;
+ SOCKHASH_UNLOCK_WRITE_BH();
+
/* Timer was incremented when we first entered the table. */
}
/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
+ SOCKHASH_LOCK_WRITE_BH();
if(tw->next_death)
tw->next_death->pprev_death = tw->pprev_death;
*tw->pprev_death = tw->next_death;
tw->pprev_death = NULL;
+ SOCKHASH_UNLOCK_WRITE_BH();
+
tcp_dec_slow_timer(TCP_SLT_TWKILL);
}
@@ -399,20 +418,30 @@ static void tcp_keepalive(unsigned long data)
int count = 0;
int i;
- for(i = chain_start; i < (chain_start + ((TCP_HTABLE_SIZE/2) >> 2)); i++) {
- struct sock *sk = tcp_established_hash[i];
+ SOCKHASH_LOCK_READ_BH();
+ for(i = chain_start; i < (chain_start + ((tcp_ehash_size >> 1) >> 2)); i++) {
+ struct sock *sk;
+
+ sk = tcp_ehash[i];
while(sk) {
- if(!atomic_read(&sk->sock_readers) && sk->keepopen) {
+ struct sock *next = sk->next;
+
+ bh_lock_sock(sk);
+ if (sk->keepopen && !sk->lock.users) {
+ SOCKHASH_UNLOCK_READ_BH();
count += tcp_keepopen_proc(sk);
- if(count == sysctl_tcp_max_ka_probes)
- goto out;
+ SOCKHASH_LOCK_READ_BH();
}
- sk = sk->next;
+ bh_unlock_sock(sk);
+ if(count == sysctl_tcp_max_ka_probes)
+ goto out;
+ sk = next;
}
}
out:
- chain_start = ((chain_start + ((TCP_HTABLE_SIZE/2)>>2)) &
- ((TCP_HTABLE_SIZE/2) - 1));
+ SOCKHASH_UNLOCK_READ_BH();
+ chain_start = ((chain_start + ((tcp_ehash_size >> 1)>>2)) &
+ ((tcp_ehash_size >> 1) - 1));
}
/*
@@ -439,9 +468,11 @@ void tcp_retransmit_timer(unsigned long data)
return;
}
- if (atomic_read(&sk->sock_readers)) {
+ bh_lock_sock(sk);
+ if (sk->lock.users) {
/* Try again later */
tcp_reset_xmit_timer(sk, TIME_RETRANS, HZ/20);
+ bh_unlock_sock(sk);
return;
}
@@ -508,12 +539,51 @@ void tcp_retransmit_timer(unsigned long data)
tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
tcp_write_timeout(sk);
+
+ bh_unlock_sock(sk);
}
/*
* Slow timer for SYN-RECV sockets
*/
+static void tcp_do_syn_queue(struct sock *sk, struct tcp_opt *tp, unsigned long now)
+{
+ struct open_request *prev, *req;
+
+ prev = (struct open_request *) &tp->syn_wait_queue;
+ for(req = tp->syn_wait_queue; req; ) {
+ struct open_request *next = req->dl_next;
+
+ if (! req->sk) {
+ tcp_synq_unlink(tp, req, prev);
+ if(req->retrans >= sysctl_tcp_retries1) {
+ (*req->class->destructor)(req);
+ tcp_dec_slow_timer(TCP_SLT_SYNACK);
+ tp->syn_backlog--;
+ tcp_openreq_free(req);
+ if (! tp->syn_wait_queue)
+ break;
+ } else {
+ unsigned long timeo;
+ struct open_request *rp;
+
+ (*req->class->rtx_syn_ack)(sk, req);
+ req->retrans++;
+ timeo = min((TCP_TIMEOUT_INIT << req->retrans),
+ (120 * HZ));
+ req->expires = now + timeo;
+ rp = prev->dl_next;
+ tcp_synq_queue(tp, req);
+ if(rp != prev->dl_next)
+ prev = prev->dl_next;
+ }
+ } else
+ prev = req;
+ req = next;
+ }
+}
+
/* This now scales very nicely. -DaveM */
static void tcp_syn_recv_timer(unsigned long data)
{
@@ -521,70 +591,21 @@ static void tcp_syn_recv_timer(unsigned long data)
unsigned long now = jiffies;
int i;
+ SOCKHASH_LOCK_READ_BH();
for(i = 0; i < TCP_LHTABLE_SIZE; i++) {
sk = tcp_listening_hash[i];
-
while(sk) {
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
/* TCP_LISTEN is implied. */
- if (!atomic_read(&sk->sock_readers) && tp->syn_wait_queue) {
- struct open_request *prev = (struct open_request *)(&tp->syn_wait_queue);
- struct open_request *req = tp->syn_wait_queue;
- do {
- struct open_request *conn;
-
- conn = req;
- req = req->dl_next;
-
- if (conn->sk) {
- prev = conn;
- continue;
- }
-
- if ((long)(now - conn->expires) <= 0)
- break;
-
-
- tcp_synq_unlink(tp, conn, prev);
- if (conn->retrans >= sysctl_tcp_retries1) {
-#ifdef TCP_DEBUG
- printk(KERN_DEBUG "syn_recv: "
- "too many retransmits\n");
-#endif
- (*conn->class->destructor)(conn);
- tcp_dec_slow_timer(TCP_SLT_SYNACK);
- tp->syn_backlog--;
- tcp_openreq_free(conn);
-
- if (!tp->syn_wait_queue)
- break;
- } else {
- unsigned long timeo;
- struct open_request *op;
-
- (*conn->class->rtx_syn_ack)(sk, conn);
-
- conn->retrans++;
-#ifdef TCP_DEBUG
- printk(KERN_DEBUG "syn_ack rtx %d\n",
- conn->retrans);
-#endif
- timeo = min((TCP_TIMEOUT_INIT
- << conn->retrans),
- 120*HZ);
- conn->expires = now + timeo;
- op = prev->dl_next;
- tcp_synq_queue(tp, conn);
- if (op != prev->dl_next)
- prev = prev->dl_next;
- }
- /* old prev still valid here */
- } while (req);
- }
+ bh_lock_sock(sk);
+ if (!sk->lock.users && tp->syn_wait_queue)
+ tcp_do_syn_queue(sk, tp, now);
+ bh_unlock_sock(sk);
sk = sk->next;
}
}
+ SOCKHASH_UNLOCK_READ_BH();
}
void tcp_sltimer_handler(unsigned long data)
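Every handler in this file is converted the same way: the old atomic sock_readers test becomes a bh_lock_sock()/sk->lock.users pair, and work is deferred by re-arming the timer while a process context owns the socket. A minimal sketch of the pattern, assuming the 2.3-era primitives used above; the handler name and the HZ/20 retry interval are illustrative:

	static void example_sock_timer(unsigned long data)
	{
		struct sock *sk = (struct sock *) data;

		bh_lock_sock(sk);	/* serialize against other BHs */
		if (sk->lock.users) {
			/* A process owns the socket: retry shortly
			 * rather than touch state it may be mutating.
			 */
			mod_timer(&sk->timer, jiffies + HZ/20);
			bh_unlock_sock(sk);
			return;
		}

		/* ... real timer work runs here, socket quiescent ... */

		bh_unlock_sock(sk);
	}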
diff --git a/net/ipv4/timer.c b/net/ipv4/timer.c
index 3821a7c4c..0487f5bfa 100644
--- a/net/ipv4/timer.c
+++ b/net/ipv4/timer.c
@@ -5,7 +5,7 @@
*
* TIMER - implementation of software timers for IP.
*
- * Version: $Id: timer.c,v 1.15 1999/02/22 13:54:29 davem Exp $
+ * Version: $Id: timer.c,v 1.16 1999/05/27 00:37:39 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -69,13 +69,15 @@ void net_reset_timer (struct sock *t, int timeout, unsigned long len)
*/
void net_timer (unsigned long data)
{
- struct sock *sk = (struct sock*)data;
+ struct sock *sk = (struct sock *) data;
int why = sk->timeout;
/* Only process if socket is not in use. */
- if (atomic_read(&sk->sock_readers)) {
+ bh_lock_sock(sk);
+ if (sk->lock.users) {
/* Try again later. */
mod_timer(&sk->timer, jiffies+HZ/20);
+ bh_unlock_sock(sk);
return;
}
@@ -99,15 +101,15 @@ void net_timer (unsigned long data)
printk (KERN_DEBUG "non CLOSE socket in time_done\n");
break;
}
- destroy_sock (sk);
- break;
+ destroy_sock(sk);
+ return;
case TIME_DESTROY:
/* We've waited for a while for all the memory associated with
* the socket to be freed.
*/
destroy_sock(sk);
- break;
+ return;
case TIME_CLOSE:
/* We've waited long enough, close the socket. */
@@ -123,5 +125,8 @@ void net_timer (unsigned long data)
printk ("net_timer: timer expired - reason %d is unknown\n", why);
break;
}
+
+ /* We only need to unlock if the socket was not destroyed. */
+ bh_unlock_sock(sk);
}
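The switch arms that call destroy_sock() now return instead of breaking: destroy_sock() frees the sock, so the trailing bh_unlock_sock() may only run when the socket still exists. Sketched under the same assumptions as the previous example, with a hypothetical helper name:

	static void example_expire(struct sock *sk, int why)
	{
		bh_lock_sock(sk);
		switch (why) {
		case TIME_DONE:
		case TIME_DESTROY:
			destroy_sock(sk);	/* frees sk, lock included */
			return;			/* unlocking now would be a
						 * use-after-free */
		default:
			/* ... TIME_CLOSE and friends ... */
			break;
		}
		bh_unlock_sock(sk);		/* sk was not destroyed */
	}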
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5fcec9cf3..320e5151e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,7 +5,7 @@
*
* The User Datagram Protocol (UDP).
*
- * Version: $Id: udp.c,v 1.66 1999/05/08 20:00:25 davem Exp $
+ * Version: $Id: udp.c,v 1.69 1999/06/09 11:15:31 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -128,7 +128,7 @@ static int udp_v4_verify_bind(struct sock *sk, unsigned short snum)
struct sock *sk2;
int retval = 0, sk_reuse = sk->reuse;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
for(sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; sk2 != NULL; sk2 = sk2->next) {
if((sk2->num == snum) && (sk2 != sk)) {
unsigned char state = sk2->state;
@@ -158,7 +158,7 @@ static int udp_v4_verify_bind(struct sock *sk, unsigned short snum)
}
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return retval;
}
@@ -173,14 +173,14 @@ static inline int udp_lport_inuse(u16 num)
return 0;
}
-/* Shared by v4/v6 tcp. */
+/* Shared by v4/v6 udp. */
unsigned short udp_good_socknum(void)
{
int result;
static int start = 0;
int i, best, best_size_so_far;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
if (start > sysctl_local_port_range[1] || start < sysctl_local_port_range[0])
start = sysctl_local_port_range[0];
@@ -223,15 +223,10 @@ unsigned short udp_good_socknum(void)
}
out:
start = result;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return result;
}
-/* Last hit UDP socket cache, this is ipv4 specific so make it static. */
-static u32 uh_cache_saddr, uh_cache_daddr;
-static u16 uh_cache_dport, uh_cache_sport;
-static struct sock *uh_cache_sk = NULL;
-
static void udp_v4_hash(struct sock *sk)
{
struct sock **skp;
@@ -240,11 +235,11 @@ static void udp_v4_hash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
sk->next = *skp;
*skp = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void udp_v4_unhash(struct sock *sk)
@@ -255,7 +250,7 @@ static void udp_v4_unhash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -263,9 +258,7 @@ static void udp_v4_unhash(struct sock *sk)
}
skp = &((*skp)->next);
}
- if(uh_cache_sk == sk)
- uh_cache_sk = NULL;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void udp_v4_rehash(struct sock *sk)
@@ -277,7 +270,7 @@ static void udp_v4_rehash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[oldnum];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -288,13 +281,11 @@ static void udp_v4_rehash(struct sock *sk)
sk->next = udp_hash[num];
udp_hash[num] = sk;
sk->hashent = num;
- if(uh_cache_sk == sk)
- uh_cache_sk = NULL;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
/* UDP nearly always wildcards out the wazoo, so it makes no sense to try
- * harder than this here plus the last hit cache. -DaveM
+ * harder than this. -DaveM
*/
struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
{
@@ -341,21 +332,9 @@ __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport
{
struct sock *sk;
- if(!dif && uh_cache_sk &&
- uh_cache_saddr == saddr &&
- uh_cache_sport == sport &&
- uh_cache_dport == dport &&
- uh_cache_daddr == daddr)
- return uh_cache_sk;
-
+ SOCKHASH_LOCK_READ();
sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif);
- if(!dif) {
- uh_cache_sk = sk;
- uh_cache_saddr = saddr;
- uh_cache_daddr = daddr;
- uh_cache_sport = sport;
- uh_cache_dport = dport;
- }
+ SOCKHASH_UNLOCK_READ();
return sk;
}
@@ -393,7 +372,7 @@ static struct sock *udp_v4_proxy_lookup(unsigned short num, unsigned long raddr,
paddr = idev->ifa_list->ifa_local;
}
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
for(s = udp_v4_proxy_loop_init(hnum, hpnum, s, firstpass);
s != NULL;
s = udp_v4_proxy_loop_next(hnum, hpnum, s, firstpass)) {
@@ -431,7 +410,7 @@ static struct sock *udp_v4_proxy_lookup(unsigned short num, unsigned long raddr,
}
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return result;
}
@@ -784,7 +763,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
/* 4.1.3.4. It's configurable by the application via setsockopt() */
/* (MAY) and it defaults to on (MUST). */
- err = ip_build_xmit(sk,sk->no_check ? udp_getfrag_nosum : udp_getfrag,
+ err = ip_build_xmit(sk,
+ (sk->no_check == UDP_CSUM_NOXMIT ?
+ udp_getfrag_nosum :
+ udp_getfrag),
&ufh, ulen, &ipc, rt, msg->msg_flags);
out:
@@ -979,8 +961,6 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk->rcv_saddr=INADDR_ANY;
sk->daddr=INADDR_ANY;
sk->state = TCP_CLOSE;
- if(uh_cache_sk == sk)
- uh_cache_sk = NULL;
return 0;
}
@@ -1005,9 +985,6 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk->dport = usin->sin_port;
sk->state = TCP_ESTABLISHED;
- if(uh_cache_sk == sk)
- uh_cache_sk = NULL;
-
sk->dst_cache = &rt->u.dst;
return(0);
}
@@ -1015,6 +992,8 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static void udp_close(struct sock *sk, long timeout)
{
+ bh_lock_sock(sk);
+
/* For an explanation, see raw_close in ipv4/raw.c */
sk->state = TCP_CLOSE;
udp_v4_unhash(sk);
@@ -1117,6 +1096,33 @@ int udp_chkaddr(struct sk_buff *skb)
}
#endif
+static int udp_checksum_verify(struct sk_buff *skb, struct udphdr *uh,
+ unsigned short ulen, u32 saddr, u32 daddr,
+ int full_csum_deferred)
+{
+ if (!full_csum_deferred) {
+ if (uh->check) {
+ if (skb->ip_summed == CHECKSUM_HW &&
+ udp_check(uh, ulen, saddr, daddr, skb->csum))
+ return -1;
+ if (skb->ip_summed == CHECKSUM_NONE &&
+ udp_check(uh, ulen, saddr, daddr,
+ csum_partial((char *)uh, ulen, 0)))
+ return -1;
+ }
+ } else {
+ if (uh->check == 0)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else if (skb->ip_summed == CHECKSUM_HW) {
+ if (udp_check(uh, ulen, saddr, daddr, skb->csum))
+ return -1;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+ }
+ return 0;
+}
+
/*
* All we need to do is get the socket, and then do a checksum.
*/
@@ -1158,25 +1164,18 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
}
skb_trim(skb, ulen);
-#ifndef CONFIG_UDP_DELAY_CSUM
- if (uh->check &&
- (((skb->ip_summed==CHECKSUM_HW)&&udp_check(uh,ulen,saddr,daddr,skb->csum)) ||
- ((skb->ip_summed==CHECKSUM_NONE) &&
- (udp_check(uh,ulen,saddr,daddr, csum_partial((char*)uh, ulen, 0))))))
- goto csum_error;
+ if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) {
+ int defer;
+
+#ifdef CONFIG_UDP_DELAY_CSUM
+ defer = 1;
#else
- if (uh->check==0)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else if (skb->ip_summed==CHECKSUM_HW) {
- if (udp_check(uh,ulen,saddr,daddr,skb->csum))
- goto csum_error;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+ defer = 0;
#endif
-
- if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+ if (udp_checksum_verify(skb, uh, ulen, saddr, daddr, defer))
+ goto csum_error;
return udp_v4_mcast_deliver(skb, uh, saddr, daddr);
+ }
#ifdef CONFIG_IP_TRANSPARENT_PROXY
if (IPCB(skb)->redirport)
@@ -1203,6 +1202,15 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
kfree_skb(skb);
return(0);
}
+ if (udp_checksum_verify(skb, uh, ulen, saddr, daddr,
+#ifdef CONFIG_UDP_DELAY_CSUM
+ 1
+#else
+ (sk->no_check & UDP_CSUM_NORCV) != 0
+#endif
+ ))
+ goto csum_error;
+
udp_deliver(sk, skb);
return 0;
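The new udp_checksum_verify() takes an explicit full_csum_deferred flag so one helper serves both checksum policies. The mode selection at the two call sites, restated as a compact sketch (the values mirror the hunks above):

	/* Sketch: multicast delivery defers purely on the kernel
	 * config; unicast additionally honours the per-socket
	 * UDP_CSUM_NORCV bit when delayed checksums are compiled out.
	 */
	int defer;

	#ifdef CONFIG_UDP_DELAY_CSUM
		defer = 1;
	#else
		defer = (sk->no_check & UDP_CSUM_NORCV) != 0;
	#endif
	if (udp_checksum_verify(skb, uh, ulen, saddr, daddr, defer))
		goto csum_error;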
diff --git a/net/ipv4/utils.c b/net/ipv4/utils.c
index ce74ade2a..5992cbc55 100644
--- a/net/ipv4/utils.c
+++ b/net/ipv4/utils.c
@@ -6,7 +6,7 @@
* Various kernel-resident INET utility functions; mainly
* for format conversion and debugging output.
*
- * Version: $Id: utils.c,v 1.6 1997/12/13 21:53:03 kuznet Exp $
+ * Version: $Id: utils.c,v 1.7 1999/06/09 10:11:05 davem Exp $
*
* Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
@@ -57,6 +57,11 @@ char *in_ntoa(__u32 in)
return(buff);
}
+char *in_ntoa2(__u32 in, char *buff)
+{
+ sprintf(buff, "%d.%d.%d.%d", NIPQUAD(in));
+ return buff;
+}
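Unlike in_ntoa(), which formats into a single static buffer and so cannot safely appear twice in one expression, in_ntoa2() writes into caller-provided storage. A hypothetical usage sketch; the rt_src/rt_dst fields are illustrative:

	char a[16], b[16];	/* "255.255.255.255" plus NUL fits in 16 */

	printk(KERN_DEBUG "route %s -> %s\n",
	       in_ntoa2(rt->rt_src, a),	/* illustrative fields */
	       in_ntoa2(rt->rt_dst, b));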
/*
* Convert an ASCII string to binary IP.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f34975076..9f71f7cda 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: addrconf.c,v 1.48 1999/03/25 10:04:43 davem Exp $
+ * $Id: addrconf.c,v 1.50 1999/06/09 10:11:09 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -100,9 +100,7 @@ static struct timer_list addr_chk_timer = {
1. The result of inet6_add_addr() is used only inside lock
or from bh_atomic context.
- 2. inet6_get_lladdr() is used only from bh protected context.
-
- 3. The result of ipv6_chk_addr() is not used outside of bh protected context.
+ 2. The result of ipv6_chk_addr() is not used outside of bh protected context.
*/
static __inline__ void addrconf_lock(void)
@@ -463,7 +461,7 @@ out:
return err;
}
-struct inet6_ifaddr * ipv6_get_lladdr(struct device *dev)
+int ipv6_get_lladdr(struct device *dev, struct in6_addr *addr)
{
struct inet6_ifaddr *ifp = NULL;
struct inet6_dev *idev;
@@ -471,12 +469,15 @@ struct inet6_ifaddr * ipv6_get_lladdr(struct device *dev)
if ((idev = ipv6_get_idev(dev)) != NULL) {
addrconf_lock();
for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
- if (ifp->scope == IFA_LINK)
- break;
+ if (ifp->scope == IFA_LINK) {
+ ipv6_addr_copy(addr, &ifp->addr);
+ addrconf_unlock();
+ return 0;
+ }
}
addrconf_unlock();
}
- return ifp;
+ return -EADDRNOTAVAIL;
}
/*
@@ -982,6 +983,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
return;
}
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->ip_ptr && (dev->flags & IFF_UP)) {
struct in_device * in_dev = dev->ip_ptr;
@@ -1014,6 +1016,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
}
}
}
+ read_unlock(&dev_base_lock);
}
static void init_loopback(struct device *dev)
@@ -1842,7 +1845,7 @@ __initfunc(void addrconf_init(void))
struct device *dev;
/* This makes sense only during module load. */
-
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
if (!(dev->flags&IFF_UP))
continue;
@@ -1858,6 +1861,7 @@ __initfunc(void addrconf_init(void))
/* Ignore all other */
}
}
+ read_unlock(&dev_base_lock);
#endif
#ifdef CONFIG_PROC_FS
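The ipv6_get_lladdr() change above also changes the calling convention: rather than returning a pointer into the interface's address list, it copies the link-local address out under addrconf_lock() and returns 0 or -EADDRNOTAVAIL. The resulting caller pattern, as adopted by the ndisc and igmp6 hunks later in this patch:

	struct in6_addr addr_buf;

	if (ipv6_get_lladdr(dev, &addr_buf)) {
		/* -EADDRNOTAVAIL: no link-local address configured */
		return;
	}
	/* addr_buf is a private copy, still valid once the lock drops */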
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 36ab229ed..f7f50df86 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/af_inet.c
*
- * $Id: af_inet6.c,v 1.43 1999/04/22 10:07:39 davem Exp $
+ * $Id: af_inet6.c,v 1.44 1999/06/09 08:29:29 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -103,7 +103,7 @@ static int inet6_create(struct socket *sock, int protocol)
if (protocol && protocol != IPPROTO_UDP)
goto free_and_noproto;
protocol = IPPROTO_UDP;
- sk->no_check = UDP_NO_CHECK;
+ sk->no_check = UDP_CSUM_DEFAULT;
prot=&udpv6_prot;
sock->ops = &inet6_dgram_ops;
} else if(sock->type == SOCK_RAW) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8a4f85b6c..5fb915390 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -7,7 +7,7 @@
* Andi Kleen <ak@muc.de>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: exthdrs.c,v 1.8 1998/10/03 09:38:27 davem Exp $
+ * $Id: exthdrs.c,v 1.9 1999/05/17 23:47:35 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -369,7 +369,7 @@ ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
Certainly, it is possible only for udp and raw sockets, but not for tcp.
AUTH header has 4byte granular length, which kills all the idea
- behind AUTOMATIC 64bit alignment of IPv6. Now we will loose
+ behind AUTOMATIC 64bit alignment of IPv6. Now we will lose
cpu ticks, checking that the sender did not do something stupid
and opt->hdrlen is even. Shit! --ANK (980730)
*/
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3760be8eb..1abc87541 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: icmp.c,v 1.21 1999/03/21 05:22:51 davem Exp $
+ * $Id: icmp.c,v 1.22 1999/05/19 22:06:39 davem Exp $
*
* Based on net/ipv4/icmp.c
*
@@ -315,6 +315,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
fl.nl_u.ip6_u.daddr = &hdr->saddr;
fl.nl_u.ip6_u.saddr = saddr;
fl.oif = iif;
+ fl.fl6_flowlabel = 0;
fl.uli_u.icmpt.type = type;
fl.uli_u.icmpt.code = code;
@@ -388,6 +389,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl.nl_u.ip6_u.daddr = &hdr->saddr;
fl.nl_u.ip6_u.saddr = saddr;
fl.oif = skb->dev->ifindex;
+ fl.fl6_flowlabel = 0;
fl.uli_u.icmpt.type = ICMPV6_ECHO_REPLY;
fl.uli_u.icmpt.code = 0;
diff --git a/net/ipv6/ip6_fw.c b/net/ipv6/ip6_fw.c
index c19a561e9..a6263d41c 100644
--- a/net/ipv6/ip6_fw.c
+++ b/net/ipv6/ip6_fw.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_fw.c,v 1.10 1998/08/26 12:04:57 davem Exp $
+ * $Id: ip6_fw.c,v 1.12 1999/06/09 08:29:32 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -16,6 +16,7 @@
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
@@ -382,7 +383,7 @@ __initfunc(void ip6_fw_init(void))
}
#ifdef MODULE
-void module_cleanup(void)
+void cleanup_module(void)
{
#ifdef CONFIG_NETLINK
netlink_detach(NETLINK_IP6_FW);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 26ec51c4d..9a635f882 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_output.c,v 1.17 1999/04/22 10:07:42 davem Exp $
+ * $Id: ip6_output.c,v 1.20 1999/06/09 10:11:12 davem Exp $
*
* Based on linux/net/ipv4/ip_output.c
*
@@ -20,11 +20,13 @@
* route changes now work.
* ip6_forward does not confuse sniffers.
* etc.
- *
+ *
+ * H. von Brand : Added missing #include <linux/string.h>
*/
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
@@ -73,19 +75,10 @@ int ip6_output(struct sk_buff *skb)
}
if (hh) {
-#ifdef __alpha__
- /* Alpha has disguisting memcpy. Help it. */
- u64 *aligned_hdr = (u64*)(skb->data - 16);
- u64 *aligned_hdr0 = hh->hh_data;
- read_lock_irq(&hh->hh_lock);
- aligned_hdr[0] = aligned_hdr0[0];
- aligned_hdr[1] = aligned_hdr0[1];
-#else
- read_lock_irq(&hh->hh_lock);
+ read_lock_bh(&hh->hh_lock);
memcpy(skb->data - 16, hh->hh_data, 16);
-#endif
- read_unlock_irq(&hh->hh_lock);
- skb_push(skb, dev->hard_header_len);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
return hh->hh_output(skb);
} else if (dst->neighbour)
return dst->neighbour->output(skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 939d268da..cedf9e691 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: mcast.c,v 1.19 1999/03/25 10:04:50 davem Exp $
+ * $Id: mcast.c,v 1.23 1999/06/09 10:11:14 davem Exp $
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
@@ -52,6 +53,11 @@
#define MDBG(x)
#endif
+/* Big mc list lock for all the devices */
+static rwlock_t ipv6_mc_lock = RW_LOCK_UNLOCKED;
+/* Big mc list lock for all the sockets */
+static rwlock_t ipv6_sk_mc_lock = RW_LOCK_UNLOCKED;
+
static struct socket *igmp6_socket;
static void igmp6_join_group(struct ifmcaddr6 *ma);
@@ -114,8 +120,10 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
return err;
}
+ write_lock_bh(&ipv6_sk_mc_lock);
mc_lst->next = np->ipv6_mc_list;
np->ipv6_mc_list = mc_lst;
+ write_unlock_bh(&ipv6_sk_mc_lock);
return 0;
}
@@ -128,13 +136,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6_mc_socklist *mc_lst, **lnk;
+ write_lock_bh(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
if (mc_lst->ifindex == ifindex &&
ipv6_addr_cmp(&mc_lst->addr, addr) == 0) {
struct device *dev;
*lnk = mc_lst->next;
- synchronize_bh();
+ write_unlock_bh(&ipv6_sk_mc_lock);
if ((dev = dev_get_by_index(ifindex)) != NULL)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
@@ -142,6 +151,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
return 0;
}
}
+ write_unlock_bh(&ipv6_sk_mc_lock);
return -ENOENT;
}
@@ -151,15 +161,38 @@ void ipv6_sock_mc_close(struct sock *sk)
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6_mc_socklist *mc_lst;
+ write_lock_bh(&ipv6_sk_mc_lock);
while ((mc_lst = np->ipv6_mc_list) != NULL) {
- struct device *dev = dev_get_by_index(mc_lst->ifindex);
+ struct device *dev;
+
+ np->ipv6_mc_list = mc_lst->next;
+ write_unlock_bh(&ipv6_sk_mc_lock);
+ dev = dev_get_by_index(mc_lst->ifindex);
if (dev)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
- np->ipv6_mc_list = mc_lst->next;
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+
+ write_lock_bh(&ipv6_sk_mc_lock);
+ }
+ write_unlock_bh(&ipv6_sk_mc_lock);
+}
+
+int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
+{
+ struct ipv6_mc_socklist *mc;
+
+ read_lock(&ipv6_sk_mc_lock);
+ for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
+ if (ipv6_addr_cmp(&mc->addr, addr) == 0) {
+ read_unlock(&ipv6_sk_mc_lock);
+ return 1;
+ }
}
+ read_unlock(&ipv6_sk_mc_lock);
+
+ return 0;
}
static int igmp6_group_added(struct ifmcaddr6 *mc)
@@ -209,9 +242,11 @@ int ipv6_dev_mc_inc(struct device *dev, struct in6_addr *addr)
hash = ipv6_addr_hash(addr);
+ write_lock_bh(&ipv6_mc_lock);
for (mc = inet6_mcast_lst[hash]; mc; mc = mc->next) {
if (ipv6_addr_cmp(&mc->mca_addr, addr) == 0 && mc->dev == dev) {
atomic_inc(&mc->mca_users);
+ write_unlock_bh(&ipv6_mc_lock);
return 0;
}
}
@@ -222,8 +257,10 @@ int ipv6_dev_mc_inc(struct device *dev, struct in6_addr *addr)
mc = kmalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
- if (mc == NULL)
+ if (mc == NULL) {
+ write_unlock_bh(&ipv6_mc_lock);
return -ENOMEM;
+ }
memset(mc, 0, sizeof(struct ifmcaddr6));
mc->mca_timer.function = igmp6_timer_handler;
@@ -241,6 +278,8 @@ int ipv6_dev_mc_inc(struct device *dev, struct in6_addr *addr)
igmp6_group_added(mc);
+ write_unlock_bh(&ipv6_mc_lock);
+
return 0;
}
@@ -256,7 +295,6 @@ static void ipv6_mca_remove(struct device *dev, struct ifmcaddr6 *ma)
for (lnk = &idev->mc_list; (iter = *lnk) != NULL; lnk = &iter->if_next) {
if (iter == ma) {
*lnk = iter->if_next;
- synchronize_bh();
return;
}
}
@@ -273,20 +311,22 @@ int ipv6_dev_mc_dec(struct device *dev, struct in6_addr *addr)
hash = ipv6_addr_hash(addr);
+ write_lock_bh(&ipv6_mc_lock);
for (lnk = &inet6_mcast_lst[hash]; (ma=*lnk) != NULL; lnk = &ma->next) {
if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0 && ma->dev == dev) {
if (atomic_dec_and_test(&ma->mca_users)) {
igmp6_group_dropped(ma);
*lnk = ma->next;
- synchronize_bh();
ipv6_mca_remove(dev, ma);
kfree(ma);
}
+ write_unlock_bh(&ipv6_mc_lock);
return 0;
}
}
+ write_unlock_bh(&ipv6_mc_lock);
return -ENOENT;
}
@@ -301,10 +341,14 @@ int ipv6_chk_mcast_addr(struct device *dev, struct in6_addr *addr)
hash = ipv6_addr_hash(addr);
+ read_lock_bh(&ipv6_mc_lock);
for (mc = inet6_mcast_lst[hash]; mc; mc=mc->next) {
- if (mc->dev == dev && ipv6_addr_cmp(&mc->mca_addr, addr) == 0)
+ if (mc->dev == dev && ipv6_addr_cmp(&mc->mca_addr, addr) == 0) {
+ read_unlock_bh(&ipv6_mc_lock);
return 1;
+ }
}
+ read_unlock_bh(&ipv6_mc_lock);
return 0;
}
@@ -363,11 +407,14 @@ int igmp6_event_query(struct sk_buff *skb, struct icmp6hdr *hdr, int len)
if (idev == NULL)
return 0;
+ read_lock(&ipv6_mc_lock);
for (ma = idev->mc_list; ma; ma=ma->if_next)
igmp6_group_queried(ma, resptime);
+ read_unlock(&ipv6_mc_lock);
} else {
int hash = ipv6_addr_hash(addrp);
+ read_lock(&ipv6_mc_lock);
for (ma = inet6_mcast_lst[hash]; ma; ma=ma->next) {
if (ma->dev == skb->dev &&
ipv6_addr_cmp(addrp, &ma->mca_addr) == 0) {
@@ -375,6 +422,7 @@ int igmp6_event_query(struct sk_buff *skb, struct icmp6hdr *hdr, int len)
break;
}
}
+ read_unlock(&ipv6_mc_lock);
}
return 0;
@@ -409,6 +457,7 @@ int igmp6_event_report(struct sk_buff *skb, struct icmp6hdr *hdr, int len)
hash = ipv6_addr_hash(addrp);
+ read_lock(&ipv6_mc_lock);
for (ma = inet6_mcast_lst[hash]; ma; ma=ma->next) {
if ((ma->dev == dev) && ipv6_addr_cmp(&ma->mca_addr, addrp) == 0) {
if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -420,6 +469,7 @@ int igmp6_event_report(struct sk_buff *skb, struct icmp6hdr *hdr, int len)
break;
}
}
+ read_unlock(&ipv6_mc_lock);
return 0;
}
@@ -429,9 +479,9 @@ void igmp6_send(struct in6_addr *addr, struct device *dev, int type)
struct sock *sk = igmp6_socket->sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
- struct inet6_ifaddr *ifp;
struct in6_addr *snd_addr;
struct in6_addr *addrp;
+ struct in6_addr addr_buf;
struct in6_addr all_routers;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
@@ -460,9 +510,7 @@ void igmp6_send(struct in6_addr *addr, struct device *dev, int type)
dev->hard_header(skb, dev, ETH_P_IPV6, ha, NULL, full_len);
}
- ifp = ipv6_get_lladdr(dev);
-
- if (ifp == NULL) {
+ if (ipv6_get_lladdr(dev, &addr_buf)) {
#if MCAST_DEBUG >= 1
printk(KERN_DEBUG "igmp6: %s no linklocal address\n",
dev->name);
@@ -470,7 +518,7 @@ void igmp6_send(struct in6_addr *addr, struct device *dev, int type)
return;
}
- ip6_nd_hdr(sk, skb, dev, &ifp->addr, snd_addr, NEXTHDR_HOP, payload_len);
+ ip6_nd_hdr(sk, skb, dev, &addr_buf, snd_addr, NEXTHDR_HOP, payload_len);
memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
@@ -481,7 +529,7 @@ void igmp6_send(struct in6_addr *addr, struct device *dev, int type)
addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr));
ipv6_addr_copy(addrp, addr);
- hdr->icmp6_cksum = csum_ipv6_magic(&ifp->addr, snd_addr, len,
+ hdr->icmp6_cksum = csum_ipv6_magic(&addr_buf, snd_addr, len,
IPPROTO_ICMPV6,
csum_partial((__u8 *) hdr, len, 0));
@@ -503,7 +551,6 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
return;
- start_bh_atomic();
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);
delay = net_random() % IGMP6_UNSOLICITED_IVAL;
@@ -514,7 +561,6 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
add_timer(&ma->mca_timer);
ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
- end_bh_atomic();
}
static void igmp6_leave_group(struct ifmcaddr6 *ma)
@@ -526,22 +572,22 @@ static void igmp6_leave_group(struct ifmcaddr6 *ma)
if ((addr_type & IPV6_ADDR_LINKLOCAL))
return;
- start_bh_atomic();
if (ma->mca_flags & MAF_LAST_REPORTER)
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REDUCTION);
if (ma->mca_flags & MAF_TIMER_RUNNING)
del_timer(&ma->mca_timer);
- end_bh_atomic();
}
void igmp6_timer_handler(unsigned long data)
{
struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+ read_lock(&ipv6_mc_lock);
ma->mca_flags |= MAF_LAST_REPORTER;
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);
ma->mca_flags &= ~MAF_TIMER_RUNNING;
+ read_unlock(&ipv6_mc_lock);
}
/* Device going down */
@@ -553,8 +599,10 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
+ read_lock_bh(&ipv6_mc_lock);
for (i = idev->mc_list; i; i=i->if_next)
igmp6_group_dropped(i);
+ read_unlock_bh(&ipv6_mc_lock);
/* Delete all-nodes address. */
@@ -576,8 +624,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
/* Install multicast list, except for all-nodes (already installed) */
+ read_lock(&ipv6_mc_lock);
for (i = idev->mc_list; i; i=i->if_next)
igmp6_group_added(i);
+ read_unlock(&ipv6_mc_lock);
}
/*
@@ -589,6 +639,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
int hash;
struct ifmcaddr6 *i, **lnk;
+ write_lock_bh(&ipv6_mc_lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->if_next;
@@ -597,13 +648,13 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
for (lnk = &inet6_mcast_lst[hash]; *lnk; lnk = &(*lnk)->next) {
if (*lnk == i) {
*lnk = i->next;
- synchronize_bh();
break;
}
}
igmp6_group_dropped(i);
kfree(i);
}
+ write_unlock_bh(&ipv6_mc_lock);
}
#ifdef CONFIG_PROC_FS
@@ -615,12 +666,14 @@ static int igmp6_read_proc(char *buffer, char **start, off_t offset,
int len=0;
struct device *dev;
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
struct inet6_dev *idev;
if ((idev = ipv6_get_idev(dev)) == NULL)
continue;
+ read_lock_bh(&ipv6_mc_lock);
for (im = idev->mc_list; im; im = im->if_next) {
int i;
@@ -640,13 +693,18 @@ static int igmp6_read_proc(char *buffer, char **start, off_t offset,
len=0;
begin=pos;
}
- if (pos > offset+length)
+ if (pos > offset+length) {
+ read_unlock_bh(&ipv6_mc_lock);
goto done;
+ }
}
+ read_unlock_bh(&ipv6_mc_lock);
}
*eof = 1;
done:
+ read_unlock(&dev_base_lock);
+
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
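The per-socket multicast list is now guarded by ipv6_sk_mc_lock, and the two static-inline inet6_mc_check() copies in raw.c and udp.c give way to the single locked helper added above. A hypothetical receive-path caller:

	/* Sketch: deliver only if this socket has joined the
	 * destination group; inet6_mc_check() takes ipv6_sk_mc_lock
	 * internally, so the caller holds no list lock.
	 */
	if (inet6_mc_check(sk, &skb->nh.ipv6h->daddr))
		udpv6_queue_rcv_skb(sk, skb);	/* illustrative consumer */
	else
		kfree_skb(skb);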
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index bb5e08373..d0613056a 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -268,14 +268,21 @@ ndisc_build_ll_hdr(struct sk_buff *skb, struct device *dev,
ndisc_mc_map(daddr, ha, dev, 1);
h_dest = ha;
} else if (neigh) {
- h_dest = neigh->ha;
+ read_lock_bh(&neigh->lock);
+ if (neigh->nud_state&NUD_VALID) {
+ memcpy(ha, neigh->ha, dev->addr_len);
+ h_dest = ha;
+ }
+ read_unlock_bh(&neigh->lock);
} else {
neigh = neigh_lookup(&nd_tbl, daddr, dev);
if (neigh) {
+ read_lock_bh(&neigh->lock);
if (neigh->nud_state&NUD_VALID) {
memcpy(ha, neigh->ha, dev->addr_len);
h_dest = ha;
}
+ read_unlock_bh(&neigh->lock);
neigh_release(neigh);
}
}
@@ -362,6 +369,7 @@ void ndisc_send_ns(struct device *dev, struct neighbour *neigh,
struct sock *sk = ndisc_socket->sk;
struct sk_buff *skb;
struct nd_msg *msg;
+ struct in6_addr addr_buf;
int len;
int err;
@@ -377,13 +385,8 @@ void ndisc_send_ns(struct device *dev, struct neighbour *neigh,
}
if (saddr == NULL) {
- struct inet6_ifaddr *ifa;
-
- /* use link local address */
- ifa = ipv6_get_lladdr(dev);
-
- if (ifa)
- saddr = &ifa->addr;
+ if (!ipv6_get_lladdr(dev, &addr_buf))
+ saddr = &addr_buf;
}
if (ndisc_build_ll_hdr(skb, dev, daddr, neigh, len) == 0) {
@@ -501,13 +504,15 @@ static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
}
+/* Called with locked neigh: either read or both */
+
static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
struct in6_addr *saddr = NULL;
struct in6_addr mcaddr;
struct device *dev = neigh->dev;
struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
- int probes = neigh->probes;
+ int probes = atomic_read(&neigh->probes);
if (skb && ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev, 0))
saddr = &skb->nh.ipv6h->saddr;
@@ -774,8 +779,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
struct sock *sk = ndisc_socket->sk;
int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
struct sk_buff *buff;
- struct inet6_ifaddr *ifp;
struct icmp6hdr *icmph;
+ struct in6_addr saddr_buf;
struct in6_addr *addrp;
struct device *dev;
struct rt6_info *rt;
@@ -817,12 +822,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
rd_len &= ~0x7;
len += rd_len;
- ifp = ipv6_get_lladdr(dev);
-
- if (ifp == NULL) {
- ND_PRINTK1("redirect: no link_local addr for dev\n");
- return;
- }
+ if (ipv6_get_lladdr(dev, &saddr_buf)) {
+ ND_PRINTK1("redirect: no link_local addr for dev\n");
+ return;
+ }
buff = sock_alloc_send_skb(sk, MAX_HEADER + len + dev->hard_header_len + 15,
0, 0, &err);
@@ -838,7 +841,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
return;
}
- ip6_nd_hdr(sk, buff, dev, &ifp->addr, &skb->nh.ipv6h->saddr,
+ ip6_nd_hdr(sk, buff, dev, &saddr_buf, &skb->nh.ipv6h->saddr,
IPPROTO_ICMPV6, len);
icmph = (struct icmp6hdr *) skb_put(buff, len);
@@ -875,7 +878,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
memcpy(opt, skb->nh.ipv6h, rd_len - 8);
- icmph->icmp6_cksum = csum_ipv6_magic(&ifp->addr, &skb->nh.ipv6h->saddr,
+ icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &skb->nh.ipv6h->saddr,
len, IPPROTO_ICMPV6,
csum_partial((u8 *) icmph, len, 0));
@@ -1034,7 +1037,7 @@ int ndisc_rcv(struct sk_buff *skb, unsigned long len)
ifp->idev->dev->name);
return 0;
}
- neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 0);
+ neigh = neigh_lookup(&nd_tbl, &msg->target, skb->dev);
if (neigh) {
if (neigh->flags & NTF_ROUTER) {
@@ -1083,11 +1086,10 @@ int ndisc_get_info(char *buffer, char **start, off_t offset, int length, int dum
unsigned long now = jiffies;
int i;
- neigh_table_lock(&nd_tbl);
-
for (i = 0; i <= NEIGH_HASHMASK; i++) {
struct neighbour *neigh;
+ read_lock_bh(&nd_tbl.lock);
for (neigh = nd_tbl.hash_buckets[i]; neigh; neigh = neigh->next) {
int j;
@@ -1097,6 +1099,7 @@ int ndisc_get_info(char *buffer, char **start, off_t offset, int length, int dum
size += 2;
}
+ read_lock(&neigh->lock);
size += sprintf(buffer+len+size,
" %02x %02x %02x %02x %08lx %08lx %08x %04x %04x %04x %8s ", i,
128,
@@ -1118,19 +1121,22 @@ int ndisc_get_info(char *buffer, char **start, off_t offset, int length, int dum
} else {
size += sprintf(buffer+len+size, "000000000000");
}
+ read_unlock(&neigh->lock);
size += sprintf(buffer+len+size, "\n");
len += size;
pos += size;
if (pos <= offset)
len=0;
- if (pos >= offset+length)
+ if (pos >= offset+length) {
+ read_unlock_bh(&nd_tbl.lock);
goto done;
+ }
}
+ read_unlock_bh(&nd_tbl.lock);
}
done:
- neigh_table_unlock(&nd_tbl);
*start = buffer+len-(pos-offset); /* Start of wanted data */
len = pos-offset; /* Start slop */
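The ndisc changes above stop reading neigh->ha through a live pointer: the hardware address is snapshotted while the per-neighbour lock is held and the entry is in a NUD_VALID state, so the copy stays usable after the unlock. The idiom, sketched (MAX_ADDR_LEN as the buffer size is an assumption):

	unsigned char ha[MAX_ADDR_LEN];		/* size is assumed */
	unsigned char *h_dest = NULL;

	read_lock_bh(&neigh->lock);
	if (neigh->nud_state & NUD_VALID) {
		memcpy(ha, neigh->ha, dev->addr_len);
		h_dest = ha;	/* private copy outlives the unlock */
	}
	read_unlock_bh(&neigh->lock);
	/* h_dest == NULL means the entry was not valid */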
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 31f6a2f55..b83bdc34b 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -7,7 +7,7 @@
* PROC file system. This is very similar to the IPv4 version,
* except it reports the sockets in the INET6 address family.
*
- * Version: $Id: proc.c,v 1.9 1998/08/26 12:05:11 davem Exp $
+ * Version: $Id: proc.c,v 1.10 1999/05/27 00:38:14 davem Exp $
*
* Authors: David S. Miller (davem@caip.rutgers.edu)
*
@@ -52,7 +52,7 @@ static int get__netinfo6(struct proto *pro, char *buffer, int format, char **sta
/*144 */
pos = 149;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
sp = pro->sklist_next;
while(sp != (struct sock *)pro) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sp;
@@ -72,6 +72,7 @@ static int get__netinfo6(struct proto *pro, char *buffer, int format, char **sta
}
destp = ntohs(sp->dport);
srcp = ntohs(sp->sport);
+
if((format == 0) && (sp->state == TCP_TIME_WAIT)) {
extern int tcp_tw_death_row_slot;
int slot_dist;
@@ -85,10 +86,8 @@ static int get__netinfo6(struct proto *pro, char *buffer, int format, char **sta
slot_dist = tcp_tw_death_row_slot - slot_dist;
timer_expires = jiffies + (slot_dist * TCP_TWKILL_PERIOD);
} else {
- timer_active1 = del_timer(&tp->retransmit_timer);
- timer_active2 = del_timer(&sp->timer);
- if(!timer_active1) tp->retransmit_timer.expires = 0;
- if(!timer_active2) sp->timer.expires = 0;
+ timer_active1 = tp->retransmit_timer.prev != NULL;
+ timer_active2 = sp->timer.prev != NULL;
timer_active = 0;
timer_expires = (unsigned) -1;
}
@@ -128,8 +127,6 @@ static int get__netinfo6(struct proto *pro, char *buffer, int format, char **sta
((!tw_bucket && sp->socket) ?
sp->socket->inode->i_ino : 0));
- if(timer_active1) add_timer(&tp->retransmit_timer);
- if(timer_active2) add_timer(&sp->timer);
len += sprintf(buffer+len, "%-148s\n", tmpbuf);
if(len >= length)
break;
@@ -137,7 +134,7 @@ static int get__netinfo6(struct proto *pro, char *buffer, int format, char **sta
sp = sp->sklist_next;
i++;
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
begin = len - (pos - offset);
*start = buffer + begin;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index f82ac33db..70394dc03 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.24 1999/04/22 10:07:45 davem Exp $
+ * $Id: raw.c,v 1.26 1999/06/09 10:11:18 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -50,11 +50,11 @@ static void raw_v6_hash(struct sock *sk)
num &= (RAWV6_HTABLE_SIZE - 1);
skp = &raw_v6_htable[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
sk->next = *skp;
*skp = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void raw_v6_unhash(struct sock *sk)
@@ -65,7 +65,7 @@ static void raw_v6_unhash(struct sock *sk)
num &= (RAWV6_HTABLE_SIZE - 1);
skp = &raw_v6_htable[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -73,7 +73,7 @@ static void raw_v6_unhash(struct sock *sk)
}
skp = &((*skp)->next);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void raw_v6_rehash(struct sock *sk)
@@ -85,7 +85,7 @@ static void raw_v6_rehash(struct sock *sk)
num &= (RAWV6_HTABLE_SIZE - 1);
skp = &raw_v6_htable[oldnum];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -96,20 +96,9 @@ static void raw_v6_rehash(struct sock *sk)
sk->next = raw_v6_htable[num];
raw_v6_htable[num] = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
-static __inline__ int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
-{
- struct ipv6_mc_socklist *mc;
-
- for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
- if (ipv6_addr_cmp(&mc->addr, addr) == 0)
- return 1;
- }
-
- return 0;
-}
/* Grumble... icmp and ip_input want to get at this... */
struct sock *raw_v6_lookup(struct sock *sk, unsigned short num,
@@ -631,6 +620,8 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
static void rawv6_close(struct sock *sk, long timeout)
{
+ bh_lock_sock(sk);
+
/* For an explanation, see raw_close in ipv4/raw.c */
sk->state = TCP_CLOSE;
raw_v6_unhash(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e455b0533..74cf4571b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: reassembly.c,v 1.11 1998/08/26 12:05:16 davem Exp $
+ * $Id: reassembly.c,v 1.13 1999/06/09 08:29:40 davem Exp $
*
* Based on: net/ipv4/ip_fragment.c
*
@@ -19,9 +19,12 @@
* Fixes:
* Andi Kleen Make it work with multiple hosts.
* More RFC compliance.
+ *
+ * Horst von Brand Add missing #include <linux/string.h>
*/
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 04b49d843..5f8ff914b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: route.c,v 1.35 1999/03/21 05:22:57 davem Exp $
+ * $Id: route.c,v 1.36 1999/06/09 10:11:21 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -1607,7 +1607,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
return 0;
}
-static int fib6_dump_done(struct netlink_callback *cb)
+static void fib6_dump_end(struct netlink_callback *cb)
{
struct fib6_walker_t *w = (void*)cb->args[0];
@@ -1622,6 +1622,11 @@ static int fib6_dump_done(struct netlink_callback *cb)
cb->done = (void*)cb->args[1];
cb->args[1] = 0;
}
+}
+
+static int fib6_dump_done(struct netlink_callback *cb)
+{
+ fib6_dump_end(cb);
return cb->done(cb);
}
@@ -1668,11 +1673,15 @@ int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (res <= 0 && skb->len == 0)
RT6_TRACE("%p>dump end\n", w);
#endif
+ res = res < 0 ? res : skb->len;
/* res < 0 is an error. (really, impossible)
res == 0 means that dump is complete, but skb still can contain data.
res > 0 dump is not complete, but frame is full.
*/
- return res < 0 ? res : skb->len;
+ /* Destroy walker, if dump of this table is complete. */
+ if (res <= 0)
+ fib6_dump_end(cb);
+ return res;
}
int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
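Splitting fib6_dump_end() out of fib6_dump_done() lets inet6_dump_fib() tear the walker down as soon as it sees the dump complete, instead of waiting for the netlink ->done callback. The termination logic, restated as a sketch of the return path above:

	res = res < 0 ? res : skb->len;
	/* res < 0: error (should not happen); res == 0: dump done but
	 * skb may still carry data; res > 0: frame full, more to come.
	 */
	if (res <= 0)
		fib6_dump_end(cb);	/* free the walker right away */
	return res;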
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f1ef74de8..2164e245e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.104 1999/04/24 00:27:25 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.108 1999/06/09 08:29:43 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
@@ -67,7 +67,7 @@ static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
int hashent = (lport ^ fport);
hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
- return (hashent & ((TCP_HTABLE_SIZE/2) - 1));
+ return (hashent & ((tcp_ehash_size >> 1) - 1));
}
static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
@@ -89,8 +89,8 @@ static int tcp_v6_verify_bind(struct sock *sk, unsigned short snum)
struct tcp_bind_bucket *tb;
int result = 0;
- SOCKHASH_LOCK();
- for(tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ SOCKHASH_LOCK_WRITE();
+ for(tb = tcp_bhash[tcp_bhashfn(snum)];
(tb && (tb->port != snum));
tb = tb->next)
;
@@ -156,7 +156,7 @@ static int tcp_v6_verify_bind(struct sock *sk, unsigned short snum)
}
}
go_like_smoke:
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
return result;
}
@@ -172,20 +172,20 @@ static void tcp_v6_hash(struct sock *sk)
if(sk->state != TCP_CLOSE) {
struct sock **skp;
- SOCKHASH_LOCK();
- skp = &tcp_established_hash[(sk->hashent = tcp_v6_sk_hashfn(sk))];
+ SOCKHASH_LOCK_WRITE();
+ skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))];
if((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next;
*skp = sk;
sk->pprev = skp;
tcp_sk_bindify(sk);
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
}
static void tcp_v6_unhash(struct sock *sk)
{
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
if(sk->pprev) {
if(sk->next)
sk->next->pprev = sk->pprev;
@@ -194,14 +194,14 @@ static void tcp_v6_unhash(struct sock *sk)
tcp_sk_unbindify(sk);
tcp_reg_zap(sk);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void tcp_v6_rehash(struct sock *sk)
{
unsigned char state;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
state = sk->state;
if(sk->pprev != NULL) {
if(sk->next)
@@ -216,7 +216,7 @@ static void tcp_v6_rehash(struct sock *sk)
if(state == TCP_LISTEN)
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
else
- skp = &tcp_established_hash[(sk->hashent = tcp_v6_sk_hashfn(sk))];
+ skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))];
if((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next;
@@ -225,7 +225,7 @@ static void tcp_v6_rehash(struct sock *sk)
if(state == TCP_LISTEN)
tcp_sk_bindify(sk);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
@@ -264,10 +264,10 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- * It is assumed that this code only gets called from within NET_BH.
+ *
+ * The sockhash lock must be held as a reader here.
*/
-static inline struct sock *__tcp_v6_lookup(struct tcphdr *th,
- struct in6_addr *saddr, u16 sport,
+static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
struct in6_addr *daddr, u16 dport,
int dif)
{
@@ -285,7 +285,7 @@ static inline struct sock *__tcp_v6_lookup(struct tcphdr *th,
* have wildcards anyways.
*/
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
- for(sk = tcp_established_hash[hash]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash]; sk; sk = sk->next) {
/* For IPV6 do the cheaper port and family tests first. */
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif)) {
if (sk->state == TCP_ESTABLISHED)
@@ -294,7 +294,7 @@ static inline struct sock *__tcp_v6_lookup(struct tcphdr *th,
}
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = tcp_established_hash[hash+(TCP_HTABLE_SIZE/2)]; sk; sk = sk->next) {
+ for(sk = tcp_ehash[hash+(tcp_ehash_size >> 1)]; sk; sk = sk->next) {
if(*((__u32 *)&(sk->dport)) == ports &&
sk->family == PF_INET6) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
@@ -309,7 +309,13 @@ hit:
return sk;
}
-#define tcp_v6_lookup(sa, sp, da, dp, dif) __tcp_v6_lookup((0),(sa),(sp),(da),(dp),(dif))
+#define tcp_v6_lookup(sa, sp, da, dp, dif) \
+({ struct sock *___sk; \
+ SOCKHASH_LOCK_READ(); \
+ ___sk = __tcp_v6_lookup((sa),(sp),(da),(dp),(dif)); \
+ SOCKHASH_UNLOCK_READ(); \
+ ___sk; \
+})
static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
struct in6_addr *saddr,
@@ -344,24 +350,26 @@ static int tcp_v6_unique_address(struct sock *sk)
int retval = 1;
/* Freeze the hash while we snoop around. */
- SOCKHASH_LOCK();
- tb = tcp_bound_hash[tcp_bhashfn(snum)];
+ SOCKHASH_LOCK_READ();
+ tb = tcp_bhash[tcp_bhashfn(snum)];
for(; tb; tb = tb->next) {
if(tb->port == snum && tb->owners != NULL) {
/* Almost certainly the re-use port case, search the real hashes
* so it actually scales. (we hope that all ipv6 ftp servers will
* use passive ftp, I just cover this case for completeness)
*/
- sk = __tcp_v6_lookup(NULL, &sk->net_pinfo.af_inet6.daddr,
+ sk = __tcp_v6_lookup(&sk->net_pinfo.af_inet6.daddr,
sk->dport,
&sk->net_pinfo.af_inet6.rcv_saddr, snum,
sk->bound_dev_if);
+ SOCKHASH_UNLOCK_READ();
+
if((sk != NULL) && (sk->state != TCP_LISTEN))
retval = 0;
- break;
+ return retval;
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return retval;
}
@@ -551,7 +559,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
failure:
dst_release(xchg(&sk->dst_cache, NULL));
- memcpy(&np->daddr, 0, sizeof(struct in6_addr));
+ memset(&np->daddr, 0, sizeof(struct in6_addr));
sk->daddr = 0;
return err;
}
@@ -628,11 +636,14 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
- if (atomic_read(&sk->sock_readers))
+ if (sk->state == TCP_LISTEN)
return;
- if (sk->state == TCP_LISTEN)
+ bh_lock_sock(sk);
+ if(sk->lock.users) {
+ bh_unlock_sock(sk);
return;
+ }
/* icmp should have updated the destination cache entry */
if (sk->dst_cache)
@@ -664,7 +675,7 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
dst_release(dst);
- return;
+ bh_unlock_sock(sk);
}
icmpv6_err_convert(type, code, &err);
@@ -674,11 +685,13 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
struct open_request *req, *prev;
struct ipv6hdr hd;
case TCP_LISTEN:
- if (atomic_read(&sk->sock_readers)) {
+ bh_lock_sock(sk);
+ if (sk->lock.users) {
net_statistics.LockDroppedIcmps++;
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
+ bh_unlock_sock(sk);
return;
}
@@ -686,20 +699,22 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
ipv6_addr_copy(&hd.saddr, saddr);
ipv6_addr_copy(&hd.daddr, daddr);
req = tcp_v6_search_req(tp, &hd, th, tcp_v6_iif(skb), &prev);
- if (!req)
- return;
- if (seq != req->snt_isn) {
+ if (!req || (seq != req->snt_isn)) {
net_statistics.OutOfWindowIcmps++;
+ bh_unlock_sock(sk);
return;
}
if (req->sk) {
+ bh_unlock_sock(sk);
sk = req->sk; /* report error in accept */
} else {
tp->syn_backlog--;
tcp_synq_unlink(tp, req, prev);
req->class->destructor(req);
tcp_openreq_free(req);
+ bh_unlock_sock(sk);
}
+
/* FALL THROUGH */
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen */
@@ -1210,12 +1225,20 @@ static inline struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
return sk;
}
+/* The socket must have its spinlock held when we get
+ * here.
+ *
+ * We have a potential double-lock case here, so even when
+ * doing backlog processing we use the BH locking scheme.
+ * This is because we cannot sleep with the original spinlock
+ * held.
+ */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_FILTER
struct sk_filter *filter;
#endif
- int users = 0;
+ int users = 0, need_unlock = 0;
/* Imagine: socket is IPv6. IPv4 packet arrives,
goes to IPv4 receive handler and backlogged.
@@ -1286,19 +1309,24 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* otherwise we just shortcircuit this and continue with
* the new socket..
*/
- if (atomic_read(&nsk->sock_readers)) {
- skb_orphan(skb);
- __skb_queue_tail(&nsk->back_log, skb);
- return 0;
+ if(nsk != sk) {
+ bh_lock_sock(nsk);
+ if (nsk->lock.users) {
+ skb_orphan(skb);
+ sk_add_backlog(nsk, skb);
+ bh_unlock_sock(nsk);
+ return 0;
+ }
+ need_unlock = 1;
+ sk = nsk;
}
- sk = nsk;
}
if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
goto reset;
if (users)
goto ipv6_pktoptions;
- return 0;
+ goto out_maybe_unlock;
reset:
tcp_v6_send_reset(skb);
@@ -1306,7 +1334,7 @@ discard:
if (users)
kfree_skb(skb);
kfree_skb(skb);
- return 0;
+ goto out_maybe_unlock;
ipv6_pktoptions:
/* Do you ask, what is it?
@@ -1335,6 +1363,9 @@ ipv6_pktoptions:
if (skb)
kfree_skb(skb);
+out_maybe_unlock:
+ if (need_unlock)
+ bh_unlock_sock(sk);
return 0;
}
@@ -1344,6 +1375,7 @@ int tcp_v6_rcv(struct sk_buff *skb, unsigned long len)
struct sock *sk;
struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
+ int ret;
th = skb->h.th;
@@ -1383,7 +1415,9 @@ int tcp_v6_rcv(struct sk_buff *skb, unsigned long len)
/* CHECKSUM_UNNECESSARY */
};
- sk = __tcp_v6_lookup(th, saddr, th->source, daddr, th->dest, tcp_v6_iif(skb));
+ SOCKHASH_LOCK_READ_BH();
+ sk = __tcp_v6_lookup(saddr, th->source, daddr, th->dest, tcp_v6_iif(skb));
+ SOCKHASH_UNLOCK_READ_BH();
if (!sk)
goto no_tcp_socket;
@@ -1396,11 +1430,15 @@ int tcp_v6_rcv(struct sk_buff *skb, unsigned long len)
if(sk->state == TCP_TIME_WAIT)
goto do_time_wait;
- if (!atomic_read(&sk->sock_readers))
- return tcp_v6_do_rcv(sk, skb);
+ bh_lock_sock(sk);
+ ret = 0;
+ if (!sk->lock.users)
+ ret = tcp_v6_do_rcv(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
- __skb_queue_tail(&sk->back_log, skb);
- return(0);
+ return ret;
no_tcp_socket:
tcp_v6_send_reset(skb);
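The tcp_v6_lookup() wrapper above is a GCC statement expression bracketing the bare __tcp_v6_lookup() with the sockhash read lock, so process-context callers cannot race a concurrent rehash; BH-context callers such as tcp_v6_rcv() take the _BH lock variant themselves and call the bare function. A hypothetical process-context caller (hdr, th and dif assumed in scope):

	struct sock *sk;

	/* Lock acquisition and release are hidden in the macro. */
	sk = tcp_v6_lookup(&hdr->saddr, th->source,
			   &hdr->daddr, th->dest, dif);
	if (sk == NULL)
		tcp_v6_send_reset(skb);	/* illustrative failure path */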
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 5b4d55f9e..da020d8fb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,7 +7,7 @@
*
* Based on linux/ipv4/udp.c
*
- * $Id: udp.c,v 1.40 1999/05/08 20:00:32 davem Exp $
+ * $Id: udp.c,v 1.42 1999/06/09 10:11:24 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -55,7 +55,7 @@ static int udp_v6_verify_bind(struct sock *sk, unsigned short snum)
int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);
int retval = 0, sk_reuse = sk->reuse;
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_READ();
for(sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; sk2 != NULL; sk2 = sk2->next) {
if((sk2->num == snum) && (sk2 != sk)) {
unsigned char state = sk2->state;
@@ -86,7 +86,7 @@ static int udp_v6_verify_bind(struct sock *sk, unsigned short snum)
}
}
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_READ();
return retval;
}
@@ -98,11 +98,11 @@ static void udp_v6_hash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
sk->next = *skp;
*skp = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void udp_v6_unhash(struct sock *sk)
@@ -113,7 +113,7 @@ static void udp_v6_unhash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[num];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -121,7 +121,7 @@ static void udp_v6_unhash(struct sock *sk)
}
skp = &((*skp)->next);
}
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static void udp_v6_rehash(struct sock *sk)
@@ -133,7 +133,7 @@ static void udp_v6_rehash(struct sock *sk)
num &= (UDP_HTABLE_SIZE - 1);
skp = &udp_hash[oldnum];
- SOCKHASH_LOCK();
+ SOCKHASH_LOCK_WRITE();
while(*skp != NULL) {
if(*skp == sk) {
*skp = sk->next;
@@ -144,7 +144,7 @@ static void udp_v6_rehash(struct sock *sk)
sk->next = udp_hash[num];
udp_hash[num] = sk;
sk->hashent = num;
- SOCKHASH_UNLOCK();
+ SOCKHASH_UNLOCK_WRITE();
}
static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
@@ -154,6 +154,7 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
unsigned short hnum = ntohs(dport);
int badness = -1;
+ SOCKHASH_LOCK_READ();
for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
if((sk->num == hnum) &&
(sk->family == PF_INET6) &&
@@ -189,6 +190,7 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
}
}
}
+ SOCKHASH_UNLOCK_READ();
return result;
}
@@ -331,6 +333,8 @@ ipv4_connected:
static void udpv6_close(struct sock *sk, long timeout)
{
+ bh_lock_sock(sk);
+
/* For an explanation, see raw_close in ipv4/raw.c */
sk->state = TCP_CLOSE;
udp_v6_unhash(sk);
@@ -498,18 +502,6 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
return 0;
}
-static __inline__ int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
-{
- struct ipv6_mc_socklist *mc;
-
- for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
- if (ipv6_addr_cmp(&mc->addr, addr) == 0)
- return 1;
- }
-
- return 0;
-}
-
static struct sock *udp_v6_mcast_next(struct sock *sk,
u16 loc_port, struct in6_addr *loc_addr,
u16 rmt_port, struct in6_addr *rmt_addr,
diff --git a/net/irda/Config.in b/net/irda/Config.in
index 8912d6cb9..92300cb30 100644
--- a/net/irda/Config.in
+++ b/net/irda/Config.in
@@ -2,34 +2,31 @@
# IrDA protocol configuration
#
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- if [ "$CONFIG_NET" != "n" ] ; then
+if [ "$CONFIG_NET" != "n" ] ; then
- mainmenu_option next_comment
- comment 'IrDA subsystem support'
- dep_tristate 'IrDA subsystem support' CONFIG_IRDA $CONFIG_EXPERIMENTAL $CONFIG_NET
+ mainmenu_option next_comment
+ comment 'IrDA subsystem support'
+ dep_tristate 'IrDA subsystem support' CONFIG_IRDA $CONFIG_NET
- if [ "$CONFIG_IRDA" != "n" ] ; then
- comment 'IrDA protocols'
- source net/irda/irlan/Config.in
- source net/irda/ircomm/Config.in
- source net/irda/irlpt/Config.in
+ if [ "$CONFIG_IRDA" != "n" ] ; then
+ comment 'IrDA protocols'
+ source net/irda/irlan/Config.in
+ source net/irda/ircomm/Config.in
+ source net/irda/irlpt/Config.in
- bool 'IrDA protocol options' CONFIG_IRDA_OPTIONS
- if [ "$CONFIG_IRDA_OPTIONS" != "n" ] ; then
- comment ' IrDA options'
- bool ' Cache last LSAP' CONFIG_IRDA_CACHE_LAST_LSAP
- bool ' Fast RRs' CONFIG_IRDA_FAST_RR
- bool ' Debug information' CONFIG_IRDA_DEBUG
- fi
+ bool 'IrDA protocol options' CONFIG_IRDA_OPTIONS
+ if [ "$CONFIG_IRDA_OPTIONS" != "n" ] ; then
+ comment ' IrDA options'
+ bool ' Cache last LSAP' CONFIG_IRDA_CACHE_LAST_LSAP
+ bool ' Fast RRs' CONFIG_IRDA_FAST_RR
+ bool ' Debug information' CONFIG_IRDA_DEBUG
fi
+ fi
- if [ "$CONFIG_IRDA" != "n" ] ; then
- source net/irda/compressors/Config.in
- source drivers/net/irda/Config.in
- fi
- endmenu
-
+ if [ "$CONFIG_IRDA" != "n" ] ; then
+ source net/irda/compressors/Config.in
+ source drivers/net/irda/Config.in
fi
+ endmenu
fi
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 6dd118024..3e656e565 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 31 10:12:43 1998
- * Modified at: Thu Apr 22 12:08:04 1999
+ * Modified at: Wed May 19 16:12:06 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc.
*
@@ -30,6 +30,7 @@
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/irda.h>
+#include <linux/poll.h>
#include <asm/uaccess.h>
@@ -46,11 +47,12 @@ extern void irda_cleanup(void);
extern int irlap_driver_rcv(struct sk_buff *, struct device *,
struct packet_type *);
-static struct proto_ops irda_proto_ops;
+static struct proto_ops irda_stream_ops;
+static struct proto_ops irda_dgram_ops;
static hashbin_t *cachelog = NULL;
-static struct wait_queue *discovery_wait; /* Wait for discovery */
+static DECLARE_WAIT_QUEUE_HEAD(discovery_wait); /* Wait for discovery */
-#define IRDA_MAX_HEADER (TTP_HEADER+LMP_HEADER+LAP_HEADER)
+#define IRDA_MAX_HEADER (TTP_MAX_HEADER)
/*
* Function irda_data_indication (instance, sap, skb)
@@ -121,7 +123,8 @@ static void irda_disconnect_indication(void *instance, void *sap,
*/
static void irda_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
struct irda_sock *self;
struct sock *sk;
@@ -130,13 +133,28 @@ static void irda_connect_confirm(void *instance, void *sap,
self = (struct irda_sock *) instance;
+ /* How much header space do we need to reserve */
+ self->max_header_size = max_header_size;
+
+ /* IrTTP max SDU size in transmit direction */
self->max_sdu_size_tx = max_sdu_size;
+
+ /* Find out the largest chunk of data that we can transmit */
+ if (max_sdu_size == SAR_DISABLE)
+ self->max_data_size = qos->data_size.value - max_header_size;
+ else
+ self->max_data_size = max_sdu_size;
+
+ DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
sk = self->sk;
if (sk == NULL)
return;
+ skb_queue_tail(&sk->receive_queue, skb);
+
/* We are now connected! */
sk->state = TCP_ESTABLISHED;
sk->state_change(sk);
@@ -150,7 +168,7 @@ static void irda_connect_confirm(void *instance, void *sap,
*/
static void irda_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
- struct sk_buff *skb)
+ __u8 max_header_size, struct sk_buff *skb)
{
struct irda_sock *self;
struct sock *sk;
@@ -158,8 +176,21 @@ static void irda_connect_indication(void *instance, void *sap,
DEBUG(1, __FUNCTION__ "()\n");
self = (struct irda_sock *) instance;
-
- self->max_sdu_size_tx = max_sdu_size;
+
+ /* How much header space do we need to reserve */
+ self->max_header_size = max_header_size;
+
+ /* IrTTP max SDU size in transmit direction */
+ self->max_sdu_size_tx = max_sdu_size;
+
+ /* Find out the largest chunk of data that we can transmit */
+ if (max_sdu_size == SAR_DISABLE)
+ self->max_data_size = qos->data_size.value - max_header_size;
+ else
+ self->max_data_size = max_sdu_size;
+
+ DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
sk = self->sk;
@@ -187,12 +218,12 @@ void irda_connect_response(struct irda_sock *self)
skb = dev_alloc_skb(64);
if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "() Could not allocate sk_buff!\n");
+ DEBUG(0, __FUNCTION__ "() Unable to allocate sk_buff!\n");
return;
}
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, IRDA_MAX_HEADER);
irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
}
@@ -219,12 +250,12 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
switch (flow) {
case FLOW_STOP:
- DEBUG( 0, __FUNCTION__ "(), IrTTP wants us to slow down\n");
+ DEBUG(1, __FUNCTION__ "(), IrTTP wants us to slow down\n");
self->tx_flow = flow;
break;
case FLOW_START:
self->tx_flow = flow;
- DEBUG(0, __FUNCTION__ "(), IrTTP wants us to start again\n");
+ DEBUG(1, __FUNCTION__ "(), IrTTP wants us to start again\n");
wake_up_interruptible(sk->sleep);
break;
default:
@@ -514,10 +545,13 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
new->stsap_sel = new->tsap->stsap_sel;
new->dtsap_sel = new->tsap->dtsap_sel;
new->saddr = irttp_get_saddr(new->tsap);
- new->saddr = irttp_get_saddr(new->tsap);
+ new->daddr = irttp_get_daddr(new->tsap);
new->max_sdu_size_tx = self->max_sdu_size_tx;
new->max_sdu_size_rx = self->max_sdu_size_rx;
+ new->max_data_size = self->max_data_size;
+ new->max_header_size = self->max_header_size;
+
memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));
/* Clean up the original one to keep it in listen state */
@@ -669,7 +703,11 @@ static int irda_create(struct socket *sock, int protocol)
sock_init_data(sock, sk);
- sock->ops = &irda_proto_ops;
+ if (sock->type == SOCK_STREAM)
+ sock->ops = &irda_stream_ops;
+ else
+ sock->ops = &irda_dgram_ops;
+
sk->protocol = protocol;
/* Register as a client with IrLMP */
@@ -786,12 +824,20 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
return -ENOTCONN;
}
- skb = sock_alloc_send_skb(sk, len + IRDA_MAX_HEADER, 0,
+ /* Check that we don't send out too big frames */
+ if (len > self->max_data_size) {
+ DEBUG(0, __FUNCTION__ "(), Warning: too much data! "
+ "Chopping frame from %d to %d bytes!\n", len,
+ self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size, 0,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return -ENOBUFS;
- skb_reserve(skb, IRDA_MAX_HEADER);
+ skb_reserve(skb, self->max_header_size);
DEBUG(4, __FUNCTION__ "(), appending user data\n");
asmptr = skb->h.raw = skb_put(skb, len);
@@ -815,8 +861,8 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
* Try to receive message and copy it to user
*
*/
-static int irda_recvmsg(struct socket *sock, struct msghdr *msg, int size,
- int flags, struct scm_cookie *scm)
+static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
+ int size, int flags, struct scm_cookie *scm)
{
struct irda_sock *self;
struct sock *sk = sock->sk;
@@ -862,6 +908,161 @@ static int irda_recvmsg(struct socket *sock, struct msghdr *msg, int size,
}
/*
+ * Function irda_data_wait (sk)
+ *
+ * Sleep until data has arrived, but check for races first
+ *
+ */
+static void irda_data_wait(struct sock *sk)
+{
+ if (!skb_peek(&sk->receive_queue)) {
+ sk->socket->flags |= SO_WAITDATA;
+ interruptible_sleep_on(sk->sleep);
+ sk->socket->flags &= ~SO_WAITDATA;
+ }
+}
+
+/*
+ * Function irda_recvmsg_stream (sock, msg, size, flags, scm)
+ *
+ * Receive stream data and copy it to user space
+ *
+ */
+static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
+ int size, int flags, struct scm_cookie *scm)
+{
+ struct irda_sock *self;
+ struct sock *sk = sock->sk;
+ int noblock = flags & MSG_DONTWAIT;
+ int copied = 0;
+ int target = 1;
+
+ DEBUG(3, __FUNCTION__ "()\n");
+
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
+
+ if (sock->flags & SO_ACCEPTCON)
+ return(-EINVAL);
+
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ if (flags & MSG_WAITALL)
+ target = size;
+
+
+ msg->msg_namelen = 0;
+
+ /* Lock the socket to prevent queue disordering
+ * while we sleep in memcpy_tomsg
+ */
+/* down(&self->readsem); */
+
+ do {
+ int chunk;
+ struct sk_buff *skb;
+
+ skb=skb_dequeue(&sk->receive_queue);
+ if (skb==NULL) {
+ if (copied >= target)
+ break;
+
+ /*
+ * POSIX 1003.1g mandates this order.
+ */
+
+ if (sk->err) {
+ /* up(&self->readsem); */
+ return sock_error(sk);
+ }
+
+ if (sk->shutdown & RCV_SHUTDOWN)
+ break;
+
+ /* up(&self->readsem); */
+
+ if (noblock)
+ return -EAGAIN;
+ irda_data_wait(sk);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ /* down(&self->readsem); */
+ continue;
+ }
+
+ /* Never glue messages from different writers */
+/* if (check_creds && */
+/* memcmp(UNIXCREDS(skb), &scm->creds, sizeof(scm->creds)) != 0) */
+/* { */
+/* skb_queue_head(&sk->receive_queue, skb); */
+/* break; */
+/* } */
+
+ chunk = min(skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ skb_queue_head(&sk->receive_queue, skb);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+ }
+ copied += chunk;
+ size -= chunk;
+
+ /* Copy credentials */
+/* scm->creds = *UNIXCREDS(skb); */
+/* check_creds = 1; */
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, chunk);
+
+/* if (UNIXCB(skb).fp) */
+/* unix_detach_fds(scm, skb); */
+
+ /* put the skb back if we didn't use it up.. */
+ if (skb->len) {
+ DEBUG(1, __FUNCTION__ "(), back on q!\n");
+ skb_queue_head(&sk->receive_queue, skb);
+ break;
+ }
+
+ kfree_skb(skb);
+
+/* if (scm->fp) */
+/* break; */
+ } else {
+ DEBUG(0, __FUNCTION__ "() questionable!?\n");
+ /* It is questionable, see note in unix_dgram_recvmsg. */
+/* if (UNIXCB(skb).fp) */
+/* scm->fp = scm_fp_dup(UNIXCB(skb).fp); */
+
+ /* put message back and return */
+ skb_queue_head(&sk->receive_queue, skb);
+ break;
+ }
+ } while (size);
+
+ /*
+ * Check if we have previously stopped IrTTP and we now
+ * have more free space in our rx_queue. If so, tell IrTTP
+ * to start delivering frames again before our rx_queue gets
+ * empty
+ */
+ if (self->rx_flow == FLOW_STOP) {
+ if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ DEBUG(2, __FUNCTION__ "(), Starting IrTTP\n");
+ self->rx_flow = FLOW_START;
+ irttp_flow_request(self->tsap, FLOW_START);
+ }
+ }
+
+ /* up(&self->readsem); */
+
+ return copied;
+}
+
+/*
* Function irda_shutdown (sk, how)
*
*
@@ -875,19 +1076,45 @@ static int irda_shutdown( struct socket *sk, int how)
return -EOPNOTSUPP;
}
-
/*
* Function irda_poll (file, sock, wait)
*
*
*
*/
-unsigned int irda_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait)
+static unsigned int irda_poll(struct file * file, struct socket *sock,
+ poll_table *wait)
{
- DEBUG(0, __FUNCTION__ "()\n");
+ struct sock *sk = sock->sk;
+ unsigned int mask;
- return 0;
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ poll_wait(file, sk->sleep, wait);
+ mask = 0;
+
+ /* exceptional events? */
+ if (sk->err)
+ mask |= POLLERR;
+ if (sk->shutdown & RCV_SHUTDOWN)
+ mask |= POLLHUP;
+
+ /* readable? */
+ if (!skb_queue_empty(&sk->receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+ if (sk->type == SOCK_STREAM && sk->state==TCP_CLOSE)
+ mask |= POLLHUP;
+
+ /*
+ * We also flag the socket as writable when the other side has shut
+ * down the connection. This prevents stuck sockets.
+ */
+ if (sk->sndbuf - (int)atomic_read(&sk->wmem_alloc) >= MIN_WRITE_SPACE)
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
}
/*
@@ -900,7 +1127,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
- DEBUG(0, __FUNCTION__ "(), cmd=%#x\n", cmd);
+ DEBUG(4, __FUNCTION__ "(), cmd=%#x\n", cmd);
switch (cmd) {
case TIOCOUTQ: {
@@ -947,6 +1174,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return -EINVAL;
default:
+ DEBUG(1, __FUNCTION__ "(), doing device ioctl!\n");
return dev_ioctl(cmd, (void *) arg);
}
@@ -1082,13 +1310,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
break;
case IRTTP_MAX_SDU_SIZE:
- if (self->max_sdu_size_tx != SAR_DISABLE)
- val = self->max_sdu_size_tx;
- else
- /* SAR is disabled, so use the IrLAP data size
- * instead */
- val = self->qos_tx.data_size.value - IRDA_MAX_HEADER;
-
+ val = self->max_data_size;
DEBUG(0, __FUNCTION__ "(), getting max_sdu_size = %d\n", val);
len = sizeof(int);
if (put_user(len, optlen))
@@ -1110,7 +1332,7 @@ static struct net_proto_family irda_family_ops =
irda_create
};
-static struct proto_ops irda_proto_ops = {
+static struct proto_ops irda_stream_ops = {
PF_IRDA,
sock_no_dup,
@@ -1128,7 +1350,28 @@ static struct proto_ops irda_proto_ops = {
irda_getsockopt,
sock_no_fcntl,
irda_sendmsg,
- irda_recvmsg
+ irda_recvmsg_stream
+};
+
+static struct proto_ops irda_dgram_ops = {
+ PF_IRDA,
+
+ sock_no_dup,
+ irda_release,
+ irda_bind,
+ irda_connect,
+ sock_no_socketpair,
+ irda_accept,
+ irda_getname,
+ datagram_poll,
+ irda_ioctl,
+ irda_listen,
+ irda_shutdown,
+ irda_setsockopt,
+ irda_getsockopt,
+ sock_no_fcntl,
+ irda_sendmsg,
+ irda_recvmsg_dgram
};
/*
@@ -1215,7 +1458,7 @@ void irda_proto_cleanup(void)
irda_packet_type.type = htons(ETH_P_IRDA);
dev_remove_pack(&irda_packet_type);
- unregister_netdevice_notifier( &irda_dev_notifier);
+ unregister_netdevice_notifier(&irda_dev_notifier);
sock_unregister(PF_IRDA);
irda_cleanup();
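
The recurring computation in irda_connect_confirm() and irda_connect_indication() above decides the largest chunk irda_sendmsg() may hand to IrTTP. A hedged sketch of that rule — not part of the patch, assuming SAR_DISABLE is 0 and that max_header_size covers the TTP+LMP+LAP headers:

#define SAR_DISABLE 0	/* assumed value, as used by IrTTP */

/* Largest payload per frame: with SAR off we are bounded by the
 * negotiated IrLAP data size minus all protocol headers; with SAR on
 * the peer's max SDU size is the limit. */
static unsigned int irda_max_data_size(unsigned int max_sdu_size,
				       unsigned int qos_data_size,
				       unsigned char max_header_size)
{
	if (max_sdu_size == SAR_DISABLE)
		return qos_data_size - max_header_size;
	return max_sdu_size;
}

/* Example: 2048-byte frames and an 18-byte header budget with SAR
 * disabled give 2030 bytes per frame, which is why irda_sendmsg()
 * now chops anything longer. */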
diff --git a/net/irda/crc.c b/net/irda/crc.c
index 9a6f3021f..b3019d5c2 100644
--- a/net/irda/crc.c
+++ b/net/irda/crc.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sat Dec 12 09:56:35 1998
+ * Modified at: Sun May 2 20:28:08 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: ppp.c by Michael Callahan <callahan@maths.ox.ac.uk>
* Al Longyear <longyear@netcom.com>
@@ -59,7 +59,7 @@ __u16 const irda_crc16_table[256] =
unsigned short crc_calc( __u16 fcs, __u8 const *buf, size_t len)
{
- while ( len--)
- fcs = IR_FCS(fcs, *buf++);
- return fcs;
+ while (len--)
+ fcs = irda_fcs(fcs, *buf++);
+ return fcs;
}
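
crc_calc() above just folds each byte through irda_fcs(). A self-contained sketch of the underlying table-driven FCS — not from the patch: IrLAP's 16-bit FCS is the reflected CCITT CRC (polynomial 0x8408), the same one PPP and X.25 use. The names here (crc16_init and friends) are invented:

#include <stdint.h>
#include <stddef.h>

static uint16_t crc16_table[256];

/* Build the per-byte table the kernel ships precomputed as
 * irda_crc16_table[] */
static void crc16_init(void)
{
	unsigned int i;
	int bit;

	for (i = 0; i < 256; i++) {
		uint16_t fcs = (uint16_t) i;
		for (bit = 0; bit < 8; bit++)
			fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
		crc16_table[i] = fcs;
	}
}

/* One step, equivalent to irda_fcs(fcs, c) */
static uint16_t crc16_byte(uint16_t fcs, uint8_t c)
{
	return (fcs >> 8) ^ crc16_table[(fcs ^ c) & 0xff];
}

static uint16_t crc16_calc(uint16_t fcs, const uint8_t *buf, size_t len)
{
	while (len--)
		fcs = crc16_byte(fcs, *buf++);
	return fcs;
}

The sender seeds the FCS with 0xffff and appends its one's complement; a receiver running the same loop over data plus FCS ends on the fixed residue 0xf0b8.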
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 22def3a1e..380f1f6a8 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -6,8 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Apr 6 15:33:50 1999
- * Modified at: Sun Apr 11 00:41:58 1999
+ * Modified at: Fri May 28 20:46:38 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
*
@@ -28,6 +30,7 @@
*
********************************************************************/
+#include <linux/string.h>
#include <linux/socket.h>
#include <linux/irda.h>
@@ -39,28 +42,51 @@
/*
* Function irlmp_add_discovery (cachelog, discovery)
*
- *
- *
+ * Add a new discovery to the cachelog, and remove any old discoveries
+ * from the same device
*/
-void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *discovery)
+void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
{
- discovery_t *old;
+ discovery_t *discovery, *node;
+ unsigned long flags;
- DEBUG(4, __FUNCTION__ "()\n");
+ spin_lock_irqsave(&irlmp->lock, flags);
+
+ /*
+ * Remove all discoveries of devices that have previously been
+ * discovered on the same link with the same name (info), or the
+ * same daddr. We do this since some devices (mostly PDAs) change
+ * their device address between discoveries.
+ */
+ discovery = (discovery_t *) hashbin_get_first(cachelog);
+ while (discovery != NULL ) {
+ node = discovery;
+
+ /* Be sure to stay one item ahead */
+ discovery = (discovery_t *) hashbin_get_next(cachelog);
+
+ if ((node->daddr == new->daddr) ||
+ (strcmp(node->info, new->info) == 0))
+ {
+ /* This discovery is a previous discovery
+ * from the same device, so just remove it
+ */
+ hashbin_remove(cachelog, node->daddr, NULL);
+ kfree(node);
+ }
+ }
- /* Check if we have discovered this device before */
- old = hashbin_remove(cachelog, discovery->daddr, NULL);
- if (old)
- kfree(old);
/* Insert the new and updated version */
- hashbin_insert(cachelog, (QUEUE *) discovery, discovery->daddr, NULL);
+ hashbin_insert(cachelog, (QUEUE *) new, new->daddr, NULL);
+
+ spin_unlock_irqrestore(&irlmp->lock, flags);
}
/*
* Function irlmp_add_discovery_log (cachelog, log)
*
- *
+ * Merge a discovery log into the cachelog.
*
*/
void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
@@ -201,10 +227,12 @@ int discovery_proc_read(char *buf, char **start, off_t offset, int len,
discovery = (discovery_t *) hashbin_get_first(cachelog);
while ( discovery != NULL) {
- len += sprintf( buf+len, " name: %s,",
- discovery->info);
+ len += sprintf(buf+len, "name: %s,", discovery->info);
- len += sprintf( buf+len, " hint: ");
+ len += sprintf(buf+len, " hint: 0x%02x%02x",
+ discovery->hints.byte[0],
+ discovery->hints.byte[1]);
+#if 0
if ( discovery->hints.byte[0] & HINT_PNP)
len += sprintf( buf+len, "PnP Compatible ");
if ( discovery->hints.byte[0] & HINT_PDA)
@@ -228,14 +256,14 @@ int discovery_proc_read(char *buf, char **start, off_t offset, int len,
len += sprintf( buf+len, "IrCOMM ");
if ( discovery->hints.byte[1] & HINT_OBEX)
len += sprintf( buf+len, "IrOBEX ");
-
+#endif
len += sprintf(buf+len, ", saddr: 0x%08x",
discovery->saddr);
len += sprintf(buf+len, ", daddr: 0x%08x\n",
discovery->daddr);
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
discovery = (discovery_t *) hashbin_get_next(cachelog);
}
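
The rewritten irlmp_add_discovery() above has to delete matching entries while it walks the cachelog, which is why it fetches the next node before testing the current one. The same purge — not from the patch — written against a plain singly linked list with a pointer-to-pointer cursor (all names invented):

#include <stdlib.h>
#include <string.h>

struct disc {
	struct disc *next;
	unsigned int daddr;
	char info[22];		/* size picked arbitrarily */
};

/* Drop every old discovery that matches the new one by address or by
 * name; the cursor is re-aimed before a node is freed, so removal
 * never invalidates the traversal. */
static void purge_matches(struct disc **head, const struct disc *new)
{
	struct disc **pp = head;

	while (*pp != NULL) {
		struct disc *node = *pp;

		if (node->daddr == new->daddr ||
		    strcmp(node->info, new->info) == 0) {
			*pp = node->next;	/* unlink */
			free(node);
		} else {
			pp = &node->next;	/* keep it, advance */
		}
	}
}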
diff --git a/net/irda/ircomm/ircomm_common.c b/net/irda/ircomm/ircomm_common.c
index bc8758e1e..5300f5f3c 100644
--- a/net/irda/ircomm/ircomm_common.c
+++ b/net/irda/ircomm/ircomm_common.c
@@ -8,7 +8,7 @@
* Author: Takahide Higuchi <thiguchi@pluto.dti.ne.jp>
* Source: irlpt_event.c
*
- * Copyright (c) 1998, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
+ * Copyright (c) 1998-1999, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -41,21 +41,20 @@
#include <net/irda/ircomm_common.h>
-static char *revision_date = "Sun Apr 18 00:40:19 1999";
+static char *revision_date = "Tue May 18 03:11:39 1999";
-static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb );
-
-static void ircomm_state_discoverywait( struct ircomm_cb *self, IRCOMM_EVENT event,
+static void ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb );
+static void ircomm_state_discoverywait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
+ struct sk_buff *skb );
+static void ircomm_state_queryparamwait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
struct sk_buff *skb );
-
-static void ircomm_state_queryparamwait( struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb );
-
-static void ircomm_state_querylsapwait( struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb );
-
+static void ircomm_state_querylsapwait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
+ struct sk_buff *skb );
static void ircomm_state_waiti( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb );
static void ircomm_state_waitr( struct ircomm_cb *self, IRCOMM_EVENT event,
@@ -206,15 +205,17 @@ __initfunc(int ircomm_init(void))
ircomm[i]->enq_char = 0x05;
ircomm[i]->ack_char = 0x06;
- ircomm[i]->max_txbuff_size = COMM_DEFAULT_DATA_SIZE; /* 64 */
- ircomm[i]->maxsdusize = SAR_DISABLE;
- ircomm[i]->ctrl_skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ ircomm[i]->max_header_size = COMM_MAX_HEADER_SIZE;
+ ircomm[i]->tx_max_sdu_size = COMM_DEFAULT_SDU_SIZE;
+ ircomm[i]->rx_max_sdu_size = SAR_DISABLE;
+ ircomm[i]->ctrl_skb = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE
+ + COMM_MAX_HEADER_SIZE);
if (ircomm[i]->ctrl_skb == NULL){
DEBUG(0,"ircomm:init_module:alloc_skb failed!\n");
return -ENOMEM;
}
- skb_reserve(ircomm[i]->ctrl_skb,COMM_HEADER_SIZE);
+ skb_reserve(ircomm[i]->ctrl_skb,COMM_MAX_HEADER_SIZE);
}
@@ -226,7 +227,6 @@ __initfunc(int ircomm_init(void))
create_proc_entry("ircomm", 0, proc_irda)->get_info = ircomm_proc_read;
#endif /* CONFIG_PROC_FS */
-
discovering_instance = NULL;
return 0;
}
@@ -275,51 +275,55 @@ void ircomm_cleanup(void)
static int ircomm_accept_data_indication(void *instance, void *sap,
struct sk_buff *skb)
{
-
- struct ircomm_cb *self = (struct ircomm_cb *)instance;
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRCOMM_MAGIC, return -1;);
- ASSERT( skb != NULL, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
DEBUG(4,__FUNCTION__"():\n");
- ircomm_do_event( self, TTP_DATA_INDICATION, skb);
+ ircomm_do_event(self, TTP_DATA_INDICATION, skb);
self->rx_packets++;
return 0;
}
static void ircomm_accept_connect_confirm(void *instance, void *sap,
- struct qos_info *qos,
- __u32 maxsdusize, struct sk_buff *skb)
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
{
+ struct ircomm_cb *self = (struct ircomm_cb *) instance;
- struct ircomm_cb *self = (struct ircomm_cb *)instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRCOMM_MAGIC, return;);
- ASSERT( skb != NULL, return;);
- ASSERT( qos != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(qos != NULL, return;);
DEBUG(0,__FUNCTION__"(): got connected!\n");
- if(maxsdusize == SAR_DISABLE)
- self->max_txbuff_size = qos->data_size.value;
+ if (max_sdu_size == SAR_DISABLE)
+ self->tx_max_sdu_size =(qos->data_size.value - max_header_size
+ - COMM_HEADER_SIZE);
else {
- ASSERT(maxsdusize >= COMM_DEFAULT_DATA_SIZE, return;);
- self->max_txbuff_size = maxsdusize; /* use fragmentation */
+ ASSERT(max_sdu_size >= COMM_DEFAULT_SDU_SIZE, return;);
+ /* use fragmentation */
+ self->tx_max_sdu_size = max_sdu_size - COMM_HEADER_SIZE;
}
self->qos = qos;
- self->null_modem_mode = 0; /* disable null modem emulation */
+ self->max_header_size = max_header_size + COMM_HEADER_SIZE;
+ self->null_modem_mode = 0; /* disable null modem emulation */
- ircomm_do_event( self, TTP_CONNECT_CONFIRM, skb);
+ ircomm_do_event(self, TTP_CONNECT_CONFIRM, skb);
}
static void ircomm_accept_connect_indication(void *instance, void *sap,
- struct qos_info *qos,
- __u32 maxsdusize,
- struct sk_buff *skb )
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
{
struct ircomm_cb *self = (struct ircomm_cb *)instance;
@@ -330,12 +334,15 @@ static void ircomm_accept_connect_indication(void *instance, void *sap,
DEBUG(0,__FUNCTION__"()\n");
- if(maxsdusize == SAR_DISABLE)
- self->max_txbuff_size = qos->data_size.value;
+ if (max_sdu_size == SAR_DISABLE)
+ self->tx_max_sdu_size =(qos->data_size.value - max_header_size
+ - COMM_HEADER_SIZE);
else
- self->max_txbuff_size = maxsdusize;
+ self->tx_max_sdu_size = max_sdu_size - COMM_HEADER_SIZE;
self->qos = qos;
+ self->max_header_size = max_header_size + COMM_HEADER_SIZE;
+
ircomm_do_event( self, TTP_CONNECT_INDICATION, skb);
/* stop connecting */
@@ -556,7 +563,7 @@ static void issue_connect_request(struct ircomm_cb *self,
irttp_connect_request(self->tsap, self->dlsap,
self->saddr, self->daddr,
- NULL, self->maxsdusize, userdata);
+ NULL, self->rx_max_sdu_size, userdata);
break;
default:
@@ -588,9 +595,10 @@ static void connect_indication(struct ircomm_cb *self, struct qos_info *qos,
/* if( !ircomm_parse_controlchannel( self, data)) */
/* self->servicetype = DEFAULT; TODOD:fix this! TH */
- if(self->notify.connect_indication)
+ if (self->notify.connect_indication)
self->notify.connect_indication(self->notify.instance, self,
- qos, 0, skb);
+ qos, self->tx_max_sdu_size,
+ self->max_header_size, skb);
}
#if 0
@@ -602,28 +610,27 @@ static void connect_indication_three_wire_raw(void)
#endif
-static void connect_confirmation(struct ircomm_cb *self, struct sk_buff *skb)
+static void connect_confirm(struct ircomm_cb *self, struct sk_buff *skb)
{
DEBUG(4 ,__FUNCTION__"()\n");
/* give a connect_confirm to the client */
if( self->notify.connect_confirm )
self->notify.connect_confirm(self->notify.instance,
- self, NULL, SAR_DISABLE, skb);
+ self, NULL, self->tx_max_sdu_size,
+ self->max_header_size, skb);
}
static void issue_connect_response(struct ircomm_cb *self,
struct sk_buff *skb)
{
-
DEBUG(0,__FUNCTION__"()\n");
if( self->servicetype == THREE_WIRE_RAW){
DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW is not implemented yet\n");
/* irlmp_connect_rsp(); */
- } else {
- irttp_connect_response(self->tsap, self->maxsdusize, skb);
- }
+ } else
+ irttp_connect_response(self->tsap, self->rx_max_sdu_size, skb);
}
static void issue_disconnect_request(struct ircomm_cb *self,
@@ -642,30 +649,29 @@ static void issue_data_request(struct ircomm_cb *self,
{
int err;
- if(self->servicetype == THREE_WIRE_RAW){
+ if (self->servicetype == THREE_WIRE_RAW){
/* irlmp_data_request(self->lmhandle,userdata); */
DEBUG(0,__FUNCTION__"():not implemented!");
return;
}
DEBUG(4,__FUNCTION__"():sending frame\n");
- err = irttp_data_request(self->tsap , userdata );
- if(err){
+ err = irttp_data_request(self->tsap, userdata);
+ if (err){
printk(KERN_ERR __FUNCTION__":ttp_data_request failed\n");
- if(userdata)
+ if (userdata)
dev_kfree_skb( userdata);
}
self->tx_packets++;
}
static void issue_control_request(struct ircomm_cb *self,
- struct sk_buff *userdata )
+ struct sk_buff *userdata)
{
int err;
DEBUG(4,__FUNCTION__"()\n");
- if(self->servicetype == THREE_WIRE_RAW)
- {
+ if (self->servicetype == THREE_WIRE_RAW) {
DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW is not implemented\n");
}
@@ -676,7 +682,7 @@ static void issue_control_request(struct ircomm_cb *self,
{
printk( __FUNCTION__"():ttp_data_request failed\n");
if(userdata)
- dev_kfree_skb( userdata);
+ dev_kfree_skb(userdata);
}
else
self->tx_controls++;
@@ -701,7 +707,7 @@ static void process_data(struct ircomm_cb *self, struct sk_buff *skb )
/* ircomm_parse_control(self, skb, CONTROL_CHANNEL); */
- if(self->notify.data_indication && skb->len)
+ if (self->notify.data_indication && skb->len)
self->notify.data_indication(self->notify.instance, self,
skb);
}
@@ -728,7 +734,7 @@ static void ircomm_do_event( struct ircomm_cb *self, IRCOMM_EVENT event,
DEBUG( 4, __FUNCTION__": STATE = %s, EVENT = %s\n",
ircommstate[self->state], ircommevent[event]);
- (*state[ self->state ]) ( self, event, skb);
+ (*state[self->state])(self, event, skb);
}
static void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state)
@@ -747,7 +753,7 @@ static void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state)
static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb )
{
- switch(event){
+ switch (event){
case IRCOMM_CONNECT_REQUEST:
/* ircomm_next_state(self, COMM_WAITI); */
@@ -779,7 +785,8 @@ static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
/*
* ircomm_state_discoverywait
*/
-static void ircomm_state_discoverywait(struct ircomm_cb *self, IRCOMM_EVENT event,
+static void ircomm_state_discoverywait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
struct sk_buff *skb )
{
switch(event){
@@ -817,11 +824,11 @@ static void ircomm_state_discoverywait(struct ircomm_cb *self, IRCOMM_EVENT even
* ircomm_state_queryparamwait
*/
-static void ircomm_state_queryparamwait(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb )
+static void ircomm_state_queryparamwait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
+ struct sk_buff *skb)
{
- switch(event){
-
+ switch (event) {
case TTP_CONNECT_INDICATION:
ircomm_next_state(self, COMM_WAITR);
@@ -855,10 +862,11 @@ static void ircomm_state_queryparamwait(struct ircomm_cb *self, IRCOMM_EVENT eve
* ircomm_state_querylsapwait
*/
-static void ircomm_state_querylsapwait(struct ircomm_cb *self, IRCOMM_EVENT event,
+static void ircomm_state_querylsapwait(struct ircomm_cb *self,
+ IRCOMM_EVENT event,
struct sk_buff *skb )
{
- switch(event){
+ switch (event) {
case TTP_CONNECT_INDICATION:
@@ -898,10 +906,10 @@ static void ircomm_state_querylsapwait(struct ircomm_cb *self, IRCOMM_EVENT even
static void ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb )
{
- switch(event){
+ switch (event) {
case TTP_CONNECT_CONFIRM:
ircomm_next_state(self, COMM_CONN);
- connect_confirmation( self, skb );
+ connect_confirm(self, skb );
break;
case TTP_DISCONNECT_INDICATION:
ircomm_next_state(self, COMM_IDLE);
@@ -921,21 +929,18 @@ static void ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
}
}
-
-
/*
* ircomm_state_waitr
*/
static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb )
+ struct sk_buff *skb )
{
-
- switch(event){
+ switch (event) {
case IRCOMM_CONNECT_RESPONSE:
/* issue_connect_response */
- if(self->servicetype==THREE_WIRE_RAW){
+ if (self->servicetype==THREE_WIRE_RAW) {
DEBUG(0,__FUNCTION__"():3WIRE_RAW is not implemented\n");
/* irlmp_connect_response(Vpeersap,
* ACCEPT,null);
@@ -987,7 +992,7 @@ static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb )
{
- switch(event){
+ switch (event) {
case TTP_DATA_INDICATION:
process_data(self, skb);
break;
@@ -1033,8 +1038,6 @@ static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
}
}
-
-
/*
* ----------------------------------------------------------------------
* IrCOMM service interfaces and supporting functions
@@ -1042,12 +1045,12 @@ static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
* ----------------------------------------------------------------------
*/
-/*
- * start_discovering()
+/*
+ * Function start_discovering (self)
+ *
+ * Start discovering and enter DISCOVERY_WAIT state
*
- * start discovering and enter DISCOVERY_WAIT state
*/
-
static void start_discovering(struct ircomm_cb *self)
{
__u16 hints;
@@ -1058,7 +1061,7 @@ static void start_discovering(struct ircomm_cb *self)
hints = irlmp_service_to_hint(S_COMM);
- DEBUG(0,__FUNCTION__"():start discovering..\n");
+ DEBUG(1,__FUNCTION__"():start discovering..\n");
switch (ircomm_cs) {
case 0:
MOD_INC_USE_COUNT;
@@ -1092,19 +1095,26 @@ static void start_discovering(struct ircomm_cb *self)
/*
* queryias_done(self)
*
- * called when discovery process got wrong results, completed, or terminated.
+ *
*/
+/*
+ * Function queryias_done (self)
+ *
+ * Called when the discovery process got wrong results, completed,
+ * or was terminated.
+ *
+ */
static void queryias_done(struct ircomm_cb *self)
{
DEBUG(0, __FUNCTION__"():\n");
- if(self->queryias_lock){
+ if (self->queryias_lock){
self->queryias_lock = 0;
discovering_instance = NULL;
MOD_DEC_USE_COUNT;
irlmp_unregister_client(self->ckey);
}
- if(ircomm_cs != 1)
+ if (ircomm_cs != 1)
irlmp_unregister_service(self->skey);
return;
}
@@ -1120,7 +1130,6 @@ static void query_parameters(struct ircomm_cb *self)
ircomm_getvalue_confirm, self );
}
-
static void query_lsapsel(struct ircomm_cb * self)
{
DEBUG(0, __FUNCTION__"():querying IAS: Lsapsel...\n");
@@ -1135,13 +1144,13 @@ static void query_lsapsel(struct ircomm_cb * self)
}
}
-/*
- * ircomm_connect_request()
- * Impl. of this function is differ from one of the reference.
- * This functin does discovery as well as sending connect request
+/*
+ * Function ircomm_connect_request (self, servicetype)
+ *
+ * The implementation of this function differs from the reference.
+ * This function does discovery as well as sending the connect request
+ *
*/
-
-
void ircomm_connect_request(struct ircomm_cb *self, __u8 servicetype)
{
/*
@@ -1153,17 +1162,17 @@ void ircomm_connect_request(struct ircomm_cb *self, __u8 servicetype)
ASSERT( self->magic == IRCOMM_MAGIC, return;);
- DEBUG(0, __FUNCTION__"():sending connect_request...\n");
+ DEBUG(1, __FUNCTION__"():sending connect_request...\n");
self->servicetype= servicetype;
/* ircomm_control_request(self, SERVICETYPE); */ /*servictype*/
- self->maxsdusize = SAR_DISABLE;
- ircomm_do_event( self, IRCOMM_CONNECT_REQUEST, NULL);
+ self->rx_max_sdu_size = SAR_DISABLE;
+ ircomm_do_event(self, IRCOMM_CONNECT_REQUEST, NULL);
}
void ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata,
- __u32 maxsdusize)
+ __u32 max_sdu_size)
{
ASSERT( self != NULL, return;);
@@ -1177,20 +1186,20 @@ void ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata,
* and send it with connect_response
*/
- if(!userdata){
+ if (!userdata){
/* FIXME: check for errors and initialize? DB */
- userdata = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
- ASSERT(userdata != NULL, return;);
+ userdata = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
+ if (userdata == NULL)
+ return;
- skb_reserve(userdata,COMM_HEADER_SIZE);
+ skb_reserve(userdata,COMM_MAX_HEADER_SIZE);
}
/* enable null-modem emulation (i.e. server mode )*/
self->null_modem_mode = 1;
- self->maxsdusize = maxsdusize;
- if(maxsdusize != SAR_DISABLE)
- self->max_txbuff_size = maxsdusize;
+ self->rx_max_sdu_size = max_sdu_size;
+
ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata);
}
@@ -1303,10 +1312,10 @@ static void ircomm_tx_controlchannel(struct ircomm_cb *self )
ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb);
self->control_ch_pending = 0;
- skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ skb = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
ASSERT(skb != NULL, return ;);
- skb_reserve(skb,COMM_HEADER_SIZE);
+ skb_reserve(skb,COMM_MAX_HEADER_SIZE);
self->ctrl_skb = skb;
}
@@ -1341,14 +1350,13 @@ static void append_tuple(struct ircomm_cb *self, __u8 instruction, __u8 pl ,
self->control_ch_pending = 1;
}
-
-
/*
- * ircomm_control_request();
- * this function is exported as a request to send some control-channel tuples
- * to peer device
+ * Function ircomm_control_request (self, instruction)
+ *
+ * This function is exported as a request to send some control-channel
+ * tuples to the peer device
+ *
*/
-
void ircomm_control_request(struct ircomm_cb *self, __u8 instruction)
{
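
ircomm_do_event() above dispatches through (*state[self->state])(self, event, skb), a classic table-driven state machine. A trimmed, self-contained sketch of that idiom — not from the patch, with the states and events cut down to two apiece and all names invented:

typedef enum { COMM_IDLE, COMM_CONN } comm_state_t;
typedef enum { EV_CONNECT_CONFIRM, EV_DISCONNECT } comm_event_t;

struct comm_cb { comm_state_t state; };

static void state_idle(struct comm_cb *self, comm_event_t event);
static void state_conn(struct comm_cb *self, comm_event_t event);

/* One handler per state, indexed by the current state */
static void (*const state_tbl[])(struct comm_cb *, comm_event_t) = {
	state_idle,	/* COMM_IDLE */
	state_conn	/* COMM_CONN */
};

static void do_event(struct comm_cb *self, comm_event_t event)
{
	(*state_tbl[self->state])(self, event);
}

static void state_idle(struct comm_cb *self, comm_event_t event)
{
	if (event == EV_CONNECT_CONFIRM)
		self->state = COMM_CONN;	/* ircomm_next_state() */
}

static void state_conn(struct comm_cb *self, comm_event_t event)
{
	if (event == EV_DISCONNECT)
		self->state = COMM_IDLE;
}

Adding a state or an event then only touches the table and one handler, which keeps the dispatch path in ircomm_do_event() unchanged.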
diff --git a/net/irda/ircomm/irvtd_driver.c b/net/irda/ircomm/irvtd_driver.c
index 2df2fdd60..7b1ddf3cb 100644
--- a/net/irda/ircomm/irvtd_driver.c
+++ b/net/irda/ircomm/irvtd_driver.c
@@ -8,7 +8,7 @@
* Source: serial.c by Linus Torvalds
* isdn_tty.c by Fritz Elfert
*
- * Copyright (c) 1998, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
+ * Copyright (c) 1998-1999, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -51,7 +51,7 @@ struct termios *irvtd_termios_locked[COMM_MAX_TTY];
static int irvtd_refcount;
struct irvtd_cb **irvtd = NULL;
-static char *revision_date = "Sun Apr 18 17:31:53 1999";
+static char *revision_date = "Wed May 26 00:49:11 1999";
/*
@@ -83,8 +83,10 @@ static void irvtd_break(struct tty_struct *tty, int break_state);
static void irvtd_send_xchar(struct tty_struct *tty, char ch);
static void irvtd_wait_until_sent(struct tty_struct *tty, int timeout);
-static void irvtd_start_timer( struct irvtd_cb *driver);
-static void irvtd_timer_expired(unsigned long data);
+static void irvtd_start_tx_timer( struct irvtd_cb *driver, int timeout);
+static void irvtd_tx_timer_expired(unsigned long data);
+static void irvtd_start_rx_timer( struct irvtd_cb *driver, int timeout);
+static void irvtd_rx_timer_expired(unsigned long data);
static int line_info(char *buf, struct irvtd_cb *driver);
static int irvtd_read_proc(char *buf, char **start, off_t offset, int len,
@@ -118,7 +120,7 @@ static void irvtd_write_to_tty( struct irvtd_cb *driver)
if(driver->rx_disable)
return;
- skb = skb_dequeue(&driver->rxbuff);
+ skb = skb_dequeue(&driver->rxbuff);
if(skb == NULL)
return; /* there's nothing */
@@ -211,8 +213,13 @@ static void irvtd_write_to_tty( struct irvtd_cb *driver)
if(skb_queue_len(&driver->rxbuff)< IRVTD_RX_QUEUE_LOW &&
driver->ttp_stoprx){
- irttp_flow_request(driver->comm->tsap, FLOW_START);
+ DEBUG(1, __FUNCTION__"():FLOW_START\n");
+ /*
+ * next 2 lines must follow this order since irttp_flow_request()
+ * will run its rx queue
+ */
driver->ttp_stoprx = 0;
+ irttp_flow_request(driver->comm->tsap, FLOW_START);
}
if(skb_queue_empty(&driver->rxbuff) && driver->disconnect_pend){
@@ -236,10 +243,14 @@ static int irvtd_receive_data(void *instance, void *sap, struct sk_buff *skb)
skb_queue_tail( &driver->rxbuff, skb );
if(skb_queue_len(&driver->rxbuff) > IRVTD_RX_QUEUE_HIGH){
+ DEBUG(1, __FUNCTION__"():FLOW_STOP\n");
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
driver->ttp_stoprx = 1;
}
irvtd_write_to_tty(driver);
+
+ if(!skb_queue_empty(&driver->rxbuff))
+ irvtd_start_rx_timer(driver,0);
return 0;
}
@@ -255,22 +266,36 @@ static int irvtd_receive_data(void *instance, void *sap, struct sk_buff *skb)
*/
-static void irvtd_start_timer( struct irvtd_cb *driver)
+static void irvtd_start_tx_timer( struct irvtd_cb *driver, int timeout)
+{
+ ASSERT( driver != NULL, return;);
+ ASSERT( driver->magic == IRVTD_MAGIC, return;);
+
+ del_timer( &driver->tx_timer);
+
+ driver->tx_timer.data = (unsigned long) driver;
+ driver->tx_timer.function = &irvtd_tx_timer_expired;
+ driver->tx_timer.expires = jiffies + timeout;
+
+ add_timer( &driver->tx_timer);
+}
+
+static void irvtd_start_rx_timer( struct irvtd_cb *driver, int timeout)
{
ASSERT( driver != NULL, return;);
ASSERT( driver->magic == IRVTD_MAGIC, return;);
- del_timer( &driver->timer);
+ del_timer( &driver->rx_timer);
- driver->timer.data = (unsigned long) driver;
- driver->timer.function = &irvtd_timer_expired;
- driver->timer.expires = jiffies + (HZ / 5); /* 200msec */
+ driver->rx_timer.data = (unsigned long) driver;
+ driver->rx_timer.function = &irvtd_rx_timer_expired;
+ driver->rx_timer.expires = jiffies + timeout;
- add_timer( &driver->timer);
+ add_timer( &driver->rx_timer);
}
-static void irvtd_timer_expired(unsigned long data)
+static void irvtd_tx_timer_expired(unsigned long data)
{
struct irvtd_cb *driver = (struct irvtd_cb *)data;
@@ -279,11 +304,26 @@ static void irvtd_timer_expired(unsigned long data)
DEBUG(4, __FUNCTION__"()\n");
irvtd_send_data_request(driver);
+}
- irvtd_write_to_tty(driver);
+static void irvtd_rx_timer_expired(unsigned long data)
+{
+ struct irvtd_cb *driver = (struct irvtd_cb *)data;
+
+ ASSERT(driver != NULL,return;);
+ ASSERT(driver->magic == IRVTD_MAGIC,return;);
+ DEBUG(4, __FUNCTION__"()\n");
- /* start our timer again and again */
- irvtd_start_timer(driver);
+ while(TTY_FLIPBUF_SIZE - driver->tty->flip.count
+ && !skb_queue_empty(&driver->rxbuff))
+ irvtd_write_to_tty(driver);
+
+ DEBUG(1, __FUNCTION__"(): room in flip_buffer = %d\n",
+ TTY_FLIPBUF_SIZE - driver->tty->flip.count);
+
+ if(!skb_queue_empty(&driver->rxbuff))
+ /* handle it later */
+ irvtd_start_rx_timer(driver, 1);
}
@@ -310,21 +350,23 @@ static void irvtd_send_data_request(struct irvtd_cb *driver)
}
#endif
- DEBUG(1, __FUNCTION__"():sending %d octets\n",(int)skb->len );
+ DEBUG(1, __FUNCTION__"():len = %d, room = %d\n",(int)skb->len,
+ skb_tailroom(skb));
driver->icount.tx += skb->len;
err = ircomm_data_request(driver->comm, driver->txbuff);
if (err){
ASSERT(err == 0,;);
- DEBUG(0,"%d chars are lost\n",(int)skb->len);
+ DEBUG(1,"%d chars are lost\n",(int)skb->len);
skb_trim(skb, 0);
}
/* allocate a new frame */
- skb = driver->txbuff = dev_alloc_skb(driver->comm->max_txbuff_size);
+ skb = driver->txbuff
+ = dev_alloc_skb(driver->tx_max_sdu_size + driver->max_header_size);
if (skb == NULL){
printk(__FUNCTION__"():alloc_skb failed!\n");
} else {
- skb_reserve(skb, COMM_HEADER_SIZE);
+ skb_reserve(skb, driver->max_header_size);
}
wake_up_interruptible(&driver->tty->write_wait);
@@ -341,20 +383,23 @@ static void irvtd_send_data_request(struct irvtd_cb *driver)
***********************************************************************
*/
-
/*
* Function irvtd_connect_confirm (instance, sap, qos, max_sdu_size, skb)
*
- * ircomm_connect_request which we have send have succeed!
+ * The ircomm_connect_request which we have sent has succeeded!
*
*/
void irvtd_connect_confirm(void *instance, void *sap, struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
+
+ driver->tx_max_sdu_size = max_sdu_size;
+ driver->max_header_size = max_header_size;
/*
* set default value
*/
@@ -364,7 +409,7 @@ void irvtd_connect_confirm(void *instance, void *sap, struct qos_info *qos,
/*
* sending initial control parameters here
*/
- if(driver->comm->servicetype == THREE_WIRE_RAW)
+ if (driver->comm->servicetype == THREE_WIRE_RAW)
return; /* do nothing */
driver->comm->dte |= (MCR_DTR | MCR_RTS | DELTA_DTR | DELTA_RTS);
@@ -376,7 +421,7 @@ void irvtd_connect_confirm(void *instance, void *sap, struct qos_info *qos,
ircomm_control_request(driver->comm, XON_XOFF_CHAR);
/* ircomm_control_request(driver->comm, ENQ_ACK_CHAR); */
- switch(driver->comm->servicetype){
+ switch (driver->comm->servicetype) {
case CENTRONICS:
break;
@@ -397,17 +442,20 @@ void irvtd_connect_confirm(void *instance, void *sap, struct qos_info *qos,
*
*/
void irvtd_connect_indication(void *instance, void *sap, struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
-
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
struct ircomm_cb *comm = (struct ircomm_cb *)sap;
+
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
ASSERT(comm != NULL, return;);
ASSERT(comm->magic == IRCOMM_MAGIC, return;);
- DEBUG(4,"irvtd_connect_indication:sending connect_response...\n");
+ driver->tx_max_sdu_size = max_sdu_size;
+ driver->max_header_size = max_header_size;
+ DEBUG(4, __FUNCTION__ "():sending connect_response...\n");
ircomm_connect_response(comm, NULL, SAR_DISABLE );
@@ -416,7 +464,7 @@ void irvtd_connect_indication(void *instance, void *sap, struct qos_info *qos,
/*
* send initial control parameters
*/
- if(driver->comm->servicetype == THREE_WIRE_RAW)
+ if (driver->comm->servicetype == THREE_WIRE_RAW)
return; /* do nothing */
driver->comm->dte |= (MCR_DTR | MCR_RTS | DELTA_DTR | DELTA_RTS);
@@ -426,6 +474,7 @@ void irvtd_connect_indication(void *instance, void *sap, struct qos_info *qos,
ircomm_control_request(driver->comm, DTELINE_STATE);
break;
default:
+ DEBUG(0, __FUNCTION__ "(), not implemented!\n");
}
@@ -479,11 +528,12 @@ void irvtd_control_indication(void *instance, void *sap, IRCOMM_CMD cmd)
if(cmd == TX_READY){
driver->ttp_stoptx = 0;
driver->tty->hw_stopped = driver->cts_stoptx;
- irvtd_start_timer( driver);
if(driver->cts_stoptx)
return;
+ /* push tx queue so that client can send at least 1 octet */
+ irvtd_send_data_request(driver);
/*
* driver->tty->write_wait will keep asleep if
* our txbuff is full.
@@ -498,7 +548,7 @@ void irvtd_control_indication(void *instance, void *sap, IRCOMM_CMD cmd)
if(cmd == TX_BUSY){
driver->ttp_stoptx = driver->tty->hw_stopped = 1;
- del_timer( &driver->timer);
+ del_timer( &driver->tx_timer);
return;
}
@@ -576,6 +626,7 @@ void irvtd_control_indication(void *instance, void *sap, IRCOMM_CMD cmd)
case DATA_RATE:
case XON_XOFF_CHAR:
case DTELINE_STATE:
+ case ENQ_ACK_CHAR: /* got this from win95 */
/* (maybe) nothing to do */
break;
default:
@@ -607,7 +658,7 @@ static int irvtd_block_til_ready(struct tty_struct *tty, struct file * filp,
struct irvtd_cb *driver)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait,current);
int retval = 0;
int do_clocal = 0;
@@ -678,7 +729,7 @@ static int irvtd_block_til_ready(struct tty_struct *tty, struct file * filp,
driver->blocked_open--;
- DEBUG(0, __FUNCTION__"():after blocking\n");
+ DEBUG(1, __FUNCTION__"():after blocking\n");
if (retval)
return retval;
@@ -765,7 +816,7 @@ static int irvtd_startup(struct irvtd_cb *driver)
struct notify_t irvtd_notify;
/* FIXME: it should not be hard coded */
- __u8 oct_seq[6] = { 0,1,4,1,1,1 };
+ __u8 oct_seq[6] = { 0,1,6,1,1,1 };
DEBUG(4,__FUNCTION__"()\n" );
if(driver->flags & ASYNC_INITIALIZED)
@@ -776,12 +827,12 @@ static int irvtd_startup(struct irvtd_cb *driver)
*/
skb_queue_head_init(&driver->rxbuff);
- driver->txbuff = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ driver->txbuff = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
if (!driver->txbuff){
- DEBUG(0,__FUNCTION__"():alloc_skb failed!\n");
+ DEBUG(0,__FUNCTION__"(), alloc_skb failed!\n");
return -ENOMEM;
}
- skb_reserve(driver->txbuff, COMM_HEADER_SIZE);
+ skb_reserve(driver->txbuff, COMM_MAX_HEADER_SIZE);
irda_notify_init(&irvtd_notify);
irvtd_notify.data_indication = irvtd_receive_data;
@@ -793,9 +844,8 @@ static int irvtd_startup(struct irvtd_cb *driver)
irvtd_notify.instance = driver;
driver->comm = ircomm_open_instance(irvtd_notify);
- if(!driver->comm){
+ if (!driver->comm)
return -ENODEV;
- }
/*
@@ -811,22 +861,20 @@ static int irvtd_startup(struct irvtd_cb *driver)
driver->flags |= ASYNC_INITIALIZED;
- /*
- * discover a peer device
- * TODO: other servicetype(i.e. 3wire,3wireraw) support
- */
- ircomm_connect_request(driver->comm, NINE_WIRE);
-
- /*
- * TODO:we have to initialize control-channel here!
- * i.e.set something into RTS,CTS and so on....
- */
-
if (driver->tty)
clear_bit(TTY_IO_ERROR, &driver->tty->flags);
change_speed(driver);
- irvtd_start_timer( driver);
+
+ /*
+ * discover a peer device
+ */
+ if(driver->tty->termios->c_cflag & CRTSCTS)
+ ircomm_connect_request(driver->comm, NINE_WIRE);
+ else
+ ircomm_connect_request(driver->comm, THREE_WIRE);
+
+ /* irvtd_start_timer( driver); */
driver->rx_disable = 0;
driver->tx_disable = 1;
@@ -989,7 +1037,8 @@ static void irvtd_shutdown(struct irvtd_cb * driver)
if (driver->tty)
set_bit(TTY_IO_ERROR, &driver->tty->flags);
- del_timer( &driver->timer);
+ del_timer( &driver->tx_timer);
+ del_timer( &driver->rx_timer);
irias_delete_object("IrDA:IrCOMM");
@@ -1144,13 +1193,21 @@ int irvtd_write(struct tty_struct * tty, int from_user,
DEBUG(4, __FUNCTION__"()\n");
save_flags(flags);
- while(1){
+ while(count > 0){
cli();
skb = driver->txbuff;
ASSERT(skb != NULL, break;);
c = MIN(count, (skb_tailroom(skb)));
if (c <= 0)
- break;
+ {
+ if(!driver->ttp_stoptx)
+ {
+ irvtd_send_data_request(driver);
+ continue;
+ }
+ else
+ break;
+ }
/* write to the frame */
@@ -1164,9 +1221,9 @@ int irvtd_write(struct tty_struct * tty, int from_user,
wrote += c;
count -= c;
buf += c;
- irvtd_send_data_request(driver);
}
restore_flags(flags);
+ irvtd_send_data_request(driver);
return (wrote);
}
@@ -1199,19 +1256,27 @@ void irvtd_put_char(struct tty_struct *tty, unsigned char ch)
DEBUG(4, __FUNCTION__"()\n");
+ again:
save_flags(flags);cli();
skb = driver->txbuff;
ASSERT(skb != NULL,return;);
+ if(!skb_tailroom(skb))
+ {
+ restore_flags(flags);
+ irvtd_send_data_request(driver);
+ goto again;
+ }
ASSERT(skb_tailroom(skb) > 0, return;);
- DEBUG(4, "irvtd_put_char(0x%02x) skb_len(%d) MAX(%d):\n",
+ DEBUG(4, "irvtd_put_char(0x%02x) skb_len(%d) room(%d):\n",
(int)ch ,(int)skb->len,
- driver->comm->max_txbuff_size - COMM_HEADER_SIZE);
+ skb_tailroom(skb));
/* append a character */
frame = skb_put(skb,1);
frame[0] = ch;
restore_flags(flags);
+ irvtd_start_tx_timer(driver,20);
return;
}
@@ -1635,6 +1700,7 @@ void irvtd_throttle(struct tty_struct *tty){
driver->comm->dte = driver->mcr;
ircomm_control_request(driver->comm, DTELINE_STATE );
+ DEBUG(1, __FUNCTION__"():FLOW_STOP\n");
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
}
@@ -1649,6 +1715,7 @@ void irvtd_unthrottle(struct tty_struct *tty){
driver->comm->dte = driver->mcr;
ircomm_control_request(driver->comm, DTELINE_STATE );
+ DEBUG(1, __FUNCTION__"():FLOW_START\n");
irttp_flow_request(driver->comm->tsap, FLOW_START);
}
@@ -1859,6 +1926,12 @@ static int line_info(char *buf, struct irvtd_cb *driver)
if (driver->msr & MSR_RI)
ret += sprintf(buf+ret, "|RI");
+ ret += sprintf(buf+ret, "\n");
+ ret += sprintf(buf+ret, "rx queue:%d",
+ skb_queue_len( &driver->rxbuff));
+ ret += sprintf(buf+ret, "ttp_stoprx:%s",
+ driver->ttp_stoprx?"TRUE":"FALSE");
+
exit:
ret += sprintf(buf+ret, "\n");
return ret;
@@ -1930,6 +2003,10 @@ __initfunc(int irvtd_init(void))
irvtd[i]->line = i;
irvtd[i]->closing_wait = 10*HZ ;
irvtd[i]->close_delay = 5*HZ/10 ;
+ init_waitqueue_head(&irvtd[i]->open_wait);
+ init_waitqueue_head(&irvtd[i]->close_wait);
+ init_waitqueue_head(&irvtd[i]->tx_wait);
+ init_waitqueue_head(&irvtd[i]->delta_msr_wait);
}
/*
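
The rx-timer changes above stop irvtd from polling every 200 ms; instead the receive path drains into the tty flip buffer and re-arms a one-jiffy timer only when data is left over. A toy, self-contained model of that retry loop — not from the patch, with the flip buffer and rx queue reduced to byte counters:

#include <stdio.h>

struct toy_drv {
	int flip_room;		/* bytes the tty flip buffer can still take */
	int rx_pending;		/* bytes queued in the driver's rxbuff */
	int timer_armed;	/* stands in for irvtd_start_rx_timer() */
};

static void rx_timer_expired(struct toy_drv *drv)
{
	/* Push as much as the flip buffer will take right now ... */
	while (drv->flip_room > 0 && drv->rx_pending > 0) {
		drv->flip_room--;
		drv->rx_pending--;
	}
	/* ... and handle any leftover on the next tick, never drop it */
	drv->timer_armed = (drv->rx_pending > 0);
}

int main(void)
{
	struct toy_drv drv = { 4, 10, 0 };

	do {
		rx_timer_expired(&drv);
		printf("pending=%d rearmed=%d\n", drv.rx_pending,
		       drv.timer_armed);
		drv.flip_room = 4;	/* line discipline emptied the flip buffer */
	} while (drv.timer_armed);
	return 0;
}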
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index cf9e6ea34..ec7b7233a 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -6,10 +6,12 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Sep 2 20:22:08 1998
- * Modified at: Wed Apr 21 09:48:19 1999
+ * Modified at: Tue Jun 1 09:05:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -23,6 +25,7 @@
********************************************************************/
#include <linux/config.h>
+#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/if.h>
@@ -31,6 +34,8 @@
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/tty.h>
+#include <linux/kmod.h>
+#include <linux/wireless.h>
#include <asm/ioctls.h>
#include <asm/segment.h>
@@ -53,7 +58,8 @@ extern int tekram_init(void);
extern int actisys_init(void);
extern int girbil_init(void);
-hashbin_t *irda_device = NULL;
+static hashbin_t *irda_device = NULL;
+static hashbin_t *dongles = NULL;
/* Netdevice functions */
static int irda_device_net_rebuild_header(struct sk_buff *skb);
@@ -61,9 +67,9 @@ static int irda_device_net_hard_header(struct sk_buff *skb,
struct device *dev,
unsigned short type, void *daddr,
void *saddr, unsigned len);
-static int irda_device_net_set_config( struct device *dev, struct ifmap *map);
-static int irda_device_net_change_mtu( struct device *dev, int new_mtu);
-
+static int irda_device_net_set_config(struct device *dev, struct ifmap *map);
+static int irda_device_net_change_mtu(struct device *dev, int new_mtu);
+static int irda_device_net_ioctl(struct device *dev, struct ifreq *rq,int cmd);
#ifdef CONFIG_PROC_FS
int irda_device_proc_read( char *buf, char **start, off_t offset, int len,
int unused);
@@ -74,8 +80,15 @@ __initfunc(int irda_device_init( void))
{
/* Allocate master array */
irda_device = hashbin_new( HB_LOCAL);
- if ( irda_device == NULL) {
- printk( KERN_WARNING "IrDA: Can't allocate irda_device hashbin!\n");
+ if (irda_device == NULL) {
+ WARNING("IrDA: Can't allocate irda_device hashbin!\n");
+ return -ENOMEM;
+ }
+
+ dongles = hashbin_new(HB_LOCAL);
+ if (dongles == NULL) {
+ printk(KERN_WARNING
+ "IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
@@ -92,6 +105,12 @@ __initfunc(int irda_device_init( void))
#ifdef CONFIG_NSC_FIR
pc87108_init();
#endif
+#ifdef CONFIG_TOSHIBA_FIR
+ toshoboe_init();
+#endif
+#ifdef CONFIG_SMC_IRCC_FIR
+ ircc_init();
+#endif
#ifdef CONFIG_ESI_DONGLE
esi_init();
#endif
@@ -104,6 +123,10 @@ __initfunc(int irda_device_init( void))
#ifdef CONFIG_GIRBIL_DONGLE
girbil_init();
#endif
+#ifdef CONFIG_LITELINK_DONGLE
+ litelink_init();
+#endif
+
return 0;
}
@@ -113,6 +136,7 @@ void irda_device_cleanup(void)
ASSERT(irda_device != NULL, return;);
+ hashbin_delete(dongles, NULL);
hashbin_delete(irda_device, (FREE_FUNC) irda_device_close);
}
@@ -155,6 +179,8 @@ int irda_device_open(struct irda_device *self, char *name, void *priv)
/* Initialize timers */
init_timer(&self->media_busy_timer);
+ self->lock = SPIN_LOCK_UNLOCKED;
+
/* A pointer to the low level implementation */
self->priv = priv;
@@ -186,7 +212,7 @@ int irda_device_open(struct irda_device *self, char *name, void *priv)
/* Open network device */
dev_open(&self->netdev);
- MESSAGE("IrDA: Registred device %s\n", self->name);
+ MESSAGE("IrDA: Registered device %s\n", self->name);
irda_device_set_media_busy(self, FALSE);
@@ -238,7 +264,7 @@ void __irda_device_close(struct irda_device *self)
/*
* Function irda_device_close (self)
*
- *
+ * Close the device
*
*/
void irda_device_close(struct irda_device *self)
@@ -248,6 +274,10 @@ void irda_device_close(struct irda_device *self)
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
+ /* We are not using any dongle anymore! */
+ if (self->dongle)
+ self->dongle->close(self);
+
/* Stop and remove instance of IrLAP */
if (self->irlap)
irlap_close(self->irlap);
@@ -289,6 +319,8 @@ void irda_device_set_media_busy(struct irda_device *self, int status)
*/
static void __irda_device_change_speed(struct irda_device *self, int speed)
{
+ int n = 0;
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
@@ -296,18 +328,37 @@ static void __irda_device_change_speed(struct irda_device *self, int speed)
* Is it possible to change speed yet? Wait until the last byte
* has been transmitted.
*/
- if (self->wait_until_sent) {
- self->wait_until_sent(self);
- if (self->change_speed) {
- self->change_speed(self, speed);
+ if (!self->wait_until_sent) {
+ ERROR("IrDA: wait_until_sent() "
+ "has not implemented by the IrDA device driver!\n");
+ return;
+ }
+
+ /* Make sure all transmitted data has actually been sent */
+ self->wait_until_sent(self);
- /* Update the QoS value only */
- self->qos.baud_rate.value = speed;
+ /* Make sure nobody tries to transmit during the speed change */
+ while (irda_lock((void *) &self->netdev.tbusy) == FALSE) {
+ WARNING(__FUNCTION__ "(), device locked!\n");
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(MSECS_TO_JIFFIES(10));
+
+ if (n++ > 10) {
+ WARNING(__FUNCTION__ "(), breaking loop!\n");
+ break;
}
- } else {
- printk(KERN_WARNING "wait_until_sent() "
- "has not implemented by the IrDA device driver!\n");
}
+
+ if (self->dongle)
+ self->dongle->change_speed(self, speed);
+
+ if (self->change_speed) {
+ self->change_speed(self, speed);
+
+ /* Update the QoS value only */
+ self->qos.baud_rate.value = speed;
+ }
+ self->netdev.tbusy = FALSE;
}
/*
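
The speed-change hunk above adds a bounded retry loop: grab the transmit lock, and if the device is busy, sleep 10 ms and try again at most ten times rather than spin or hang. A hedged userspace analogue — not from the patch, with a C11 atomic_flag standing in for irda_lock() on netdev.tbusy:

#include <stdatomic.h>
#include <unistd.h>

static atomic_flag tx_busy = ATOMIC_FLAG_INIT;

/* Nonzero when we acquired the lock, like irda_lock() */
static int try_lock_tx(void)
{
	return !atomic_flag_test_and_set(&tx_busy);
}

static void change_speed_safely(void)
{
	int n = 0;

	/* Make sure nobody transmits during the speed change */
	while (!try_lock_tx()) {
		usleep(10 * 1000);	/* schedule_timeout(10 ms) analogue */
		if (n++ > 10)		/* bound the wait, don't hang */
			break;
	}

	/* ... reprogram the UART/dongle baud rate here ... */

	atomic_flag_clear(&tx_busy);	/* netdev.tbusy = FALSE */
}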
@@ -318,8 +369,6 @@ static void __irda_device_change_speed(struct irda_device *self, int speed)
*/
inline void irda_device_change_speed(struct irda_device *self, int speed)
{
- DEBUG(4, __FUNCTION__ "()\n");
-
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
@@ -330,27 +379,27 @@ inline void irda_device_change_speed(struct irda_device *self, int speed)
inline int irda_device_is_media_busy( struct irda_device *self)
{
- ASSERT( self != NULL, return FALSE;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
+ ASSERT(self != NULL, return FALSE;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
return self->media_busy;
}
inline int irda_device_is_receiving( struct irda_device *self)
{
- ASSERT( self != NULL, return FALSE;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
+ ASSERT(self != NULL, return FALSE;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
- if ( self->is_receiving)
- return self->is_receiving( self);
+ if (self->is_receiving)
+ return self->is_receiving(self);
else
return FALSE;
}
-inline struct qos_info *irda_device_get_qos( struct irda_device *self)
+inline struct qos_info *irda_device_get_qos(struct irda_device *self)
{
- ASSERT( self != NULL, return NULL;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return NULL;);
+ ASSERT(self != NULL, return NULL;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return NULL;);
return &self->qos;
}
@@ -372,8 +421,6 @@ int irda_device_setup(struct device *dev)
{
struct irda_device *self;
- DEBUG(4, __FUNCTION__ "()\n");
-
ASSERT(dev != NULL, return -1;);
self = (struct irda_device *) dev->priv;
@@ -386,6 +433,7 @@ int irda_device_setup(struct device *dev)
dev->set_config = irda_device_net_set_config;
dev->change_mtu = irda_device_net_change_mtu;
/* dev->hard_header = irda_device_net_hard_header; */
+ dev->do_ioctl = irda_device_net_ioctl;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -444,6 +492,131 @@ static int irda_device_net_change_mtu( struct device *dev, int new_mtu)
return 0;
}
+
+#define SIOCSDONGLE SIOCDEVPRIVATE
+static int irda_device_net_ioctl(struct device *dev, /* ioctl device */
+ struct ifreq *rq, /* Data passed */
+ int cmd) /* Ioctl number */
+{
+ unsigned long flags;
+ int ret = 0;
+#ifdef WIRELESS_EXT
+ struct iwreq *wrq = (struct iwreq *) rq;
+#endif
+ struct irda_device *self;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(dev != NULL, return -1;);
+
+ self = (struct irda_device *) dev->priv;
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return -1;);
+
+ DEBUG(0, "%s: ->irda_device_net_ioctl(cmd=0x%X)\n", dev->name, cmd);
+
+ /* Disable interrupts & save flags */
+ save_flags(flags);
+ cli();
+
+ /* Check what the request is */
+ switch (cmd) {
+#ifdef WIRELESS_EXT
+ case SIOCGIWNAME:
+ /* Get name */
+ strcpy(wrq->u.name, self->name);
+ break;
+ case SIOCSIWNWID:
+ /* Set domain */
+ if (wrq->u.nwid.on) {
+
+ }
+ break;
+ case SIOCGIWNWID:
+ /* Read domain */
+/* wrq->u.nwid.nwid = domain; */
+/* wrq->u.nwid.on = 1; */
+ break;
+ case SIOCGIWENCODE:
+ /* Get scramble key */
+ /* wrq->u.encoding.code = scramble_key; */
+/* wrq->u.encoding.method = 1; */
+ break;
+ case SIOCSIWENCODE:
+ /* Set scramble key */
+ /* scramble_key = wrq->u.encoding.code; */
+ break;
+ case SIOCGIWRANGE:
+ /* Basic checking... */
+ if (wrq->u.data.pointer != (caddr_t) 0) {
+ struct iw_range range;
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(struct iw_range));
+ if (ret)
+ break;
+
+ /* Set the length (useless: it's constant...) */
+ wrq->u.data.length = sizeof(struct iw_range);
+
+ /* Set information in the range struct */
+ range.throughput = 1.6 * 1024 * 1024; /* don't argue on this ! */
+ range.min_nwid = 0x0000;
+ range.max_nwid = 0x01FF;
+
+ range.num_channels = range.num_frequency = 0;
+
+ range.sensitivity = 0x3F;
+ range.max_qual.qual = 255;
+ range.max_qual.level = 255;
+ range.max_qual.noise = 0;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, &range,
+ sizeof(struct iw_range));
+ }
+ break;
+ case SIOCGIWPRIV:
+ /* Basic checking... */
+#if 0
+ if (wrq->u.data.pointer != (caddr_t) 0) {
+ struct iw_priv_args priv[] =
+ { /* cmd, set_args, get_args, name */
+ { SIOCGIPSNAP, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 0,
+ sizeof(struct site_survey),
+ "getsitesurvey" },
+ };
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(priv));
+ if (ret)
+ break;
+
+ /* Set the number of ioctls available */
+ wrq->u.data.length = 1;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, (u_char *) priv,
+ sizeof(priv));
+ }
+#endif
+ break;
+#endif
+ case SIOCSDONGLE: /* Set dongle */
+ /* Initialize dongle */
+ irda_device_init_dongle(self, (int) rq->ifr_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ restore_flags(flags);
+
+ return ret;
+}
+
/*
* Function irda_device_transmit_finished (void)
*
@@ -451,7 +624,7 @@ static int irda_device_net_change_mtu( struct device *dev, int new_mtu)
* device. Maybe we should use: q->q.qlen == 0.
*
*/
-int irda_device_txqueue_empty( struct irda_device *self)
+int irda_device_txqueue_empty(struct irda_device *self)
{
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return -1;);
@@ -463,6 +636,122 @@ int irda_device_txqueue_empty( struct irda_device *self)
}
/*
+ * Function irda_device_init_dongle (self, type)
+ *
+ * Initialize the attached dongle. Warning: must be called from process
+ * context!
+ */
+void irda_device_init_dongle(struct irda_device *self, int type)
+{
+ struct dongle_q *node;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
+
+#ifdef CONFIG_KMOD
+ /* Try to load the module needed */
+ switch (type) {
+ case ESI_DONGLE:
+ MESSAGE("IrDA: Initializing ESI dongle!\n");
+ request_module("esi");
+ break;
+ case TEKRAM_DONGLE:
+ MESSAGE("IrDA: Initializing Tekram dongle!\n");
+ request_module("tekram");
+ break;
+ case ACTISYS_DONGLE: /* FALLTHROUGH */
+ case ACTISYS_PLUS_DONGLE:
+ MESSAGE("IrDA: Initializing ACTiSYS dongle!\n");
+ request_module("actisys");
+ break;
+ case GIRBIL_DONGLE:
+ MESSAGE("IrDA: Initializing GIrBIL dongle!\n");
+ request_module("girbil");
+ break;
+ case LITELINK_DONGLE:
+ MESSAGE("IrDA: Initializing Litelink dongle!\n");
+ request_module("litelink");
+ break;
+ default:
+ ERROR("Unknown dongle type!\n");
+ return;
+ }
+#endif /* CONFIG_KMOD */
+
+ node = hashbin_find(dongles, type, NULL);
+ if (!node) {
+ ERROR("IrDA: Unable to find requested dongle\n");
+ return;
+ }
+
+ /* Check if we're already using a dongle */
+ if (self->dongle) {
+ self->dongle->close(self);
+ }
+
+ /* Set the dongle to be used by this driver */
+ self->dongle = node->dongle;
+
+ /* Now initialize the dongle! */
+ node->dongle->open(self, type);
+ node->dongle->qos_init(self, &self->qos);
+
+ /* Reset dongle */
+ node->dongle->reset(self);
+
+ /* Set to default baudrate */
+ irda_device_change_speed(self, 9600);
+}
+
+/*
+ * Function irda_device_register_dongle (dongle)
+ *
+ *
+ * Register dongle driver so that device drivers can locate it by type
+ *
+int irda_device_register_dongle(struct dongle *dongle)
+{
+ struct dongle_q *new;
+
+ /* Check if this dongle has been registered before */
+ if (hashbin_find(dongles, dongle->type, NULL)) {
+ MESSAGE(__FUNCTION__ "(), Dongle already registered\n");
+ return 0;
+ }
+
+ /* Make new IrDA dongle */
+ new = (struct dongle_q *) kmalloc(sizeof(struct dongle_q), GFP_KERNEL);
+ if (new == NULL)
+ return -1;
+
+ memset(new, 0, sizeof(struct dongle_q));
+ new->dongle = dongle;
+
+ /* Insert IrDA dongle into hashbin */
+ hashbin_insert(dongles, (QUEUE *) new, dongle->type, NULL);
+
+ return 0;
+}
+
+/*
+ * Function irda_device_unregister_dongle (dongle)
+ *
+ * Remove a previously registered dongle driver
+ *
+ */
+void irda_device_unregister_dongle(struct dongle *dongle)
+{
+ struct dongle_q *node;
+
+ node = hashbin_remove(dongles, dongle->type, NULL);
+ if (!node) {
+ ERROR(__FUNCTION__ "(), dongle not found!\n");
+ return;
+ }
+ kfree(node);
+}
+
+/*
* Function setup_dma (idev, buffer, count, mode)
*
* Setup the DMA channel
@@ -536,7 +825,7 @@ int irda_device_proc_read(char *buf, char **start, off_t offset, int len,
self = (struct irda_device *) hashbin_get_first(irda_device);
while ( self != NULL) {
- len += sprintf(buf+len, "%s,", self->name);
+ len += sprintf(buf+len, "\n%s,", self->name);
len += sprintf(buf+len, "\tbinding: %s\n",
self->description);
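
For reference, here is a minimal sketch (not part of this patch) of how a
dongle driver module might plug into the registration API added above. The
struct dongle member names are inferred from the call sites in
irda_device_init_dongle(); the example_* handlers are hypothetical stand-ins.

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

static void example_open(struct irda_device *idev, int type) { /* power up */ }
static void example_close(struct irda_device *idev) { /* power down */ }
static void example_reset(struct irda_device *idev) { /* pulse reset line */ }
static void example_change_speed(struct irda_device *idev, int speed) { /* set rate */ }
static void example_qos_init(struct irda_device *idev, struct qos_info *qos) { /* clamp rates */ }

static struct dongle example_dongle;

int init_module(void)
{
	/* Member names assumed from the calls in irda_device_init_dongle() */
	example_dongle.type = ESI_DONGLE;	/* reuse a known type for the sketch */
	example_dongle.open = example_open;
	example_dongle.close = example_close;
	example_dongle.reset = example_reset;
	example_dongle.change_speed = example_change_speed;
	example_dongle.qos_init = example_qos_init;

	return irda_device_register_dongle(&example_dongle);
}

void cleanup_module(void)
{
	irda_device_unregister_dongle(&example_dongle);
}

User space then selects the dongle through the new SIOCSDONGLE ioctl, which
ends up in irda_device_init_dongle() as shown above.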
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index b87ccbd02..f3b752eb0 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Fri Apr 23 09:57:12 1999
+ * Modified at: Sun May 9 15:59:05 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -62,12 +62,17 @@ static __u32 service_handle;
extern char *lmp_reasons[];
static struct iriap_cb *iriap_open( __u8 slsap, int mode);
-static void __iriap_close( struct iriap_cb *self);
+static void __iriap_close(struct iriap_cb *self);
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *skb);
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
+ __u8 max_header_size,
struct sk_buff *skb);
+static void iriap_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb);
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb);
@@ -181,7 +186,7 @@ struct iriap_cb *iriap_open( __u8 slsap_sel, int mode)
self->slsap_sel = slsap_sel;
self->mode = mode;
- init_timer( &self->watchdog_timer);
+ init_timer(&self->watchdog_timer);
hashbin_insert( iriap, (QUEUE*) self, slsap_sel, NULL);
@@ -206,7 +211,7 @@ static void __iriap_close( struct iriap_cb *self)
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
- del_timer( &self->watchdog_timer);
+ del_timer(&self->watchdog_timer);
self->magic = 0;
@@ -260,7 +265,7 @@ static void iriap_disconnect_indication( void *instance, void *sap,
ASSERT( iriap != NULL, return;);
- del_timer( &self->watchdog_timer);
+ del_timer(&self->watchdog_timer);
if ( self->mode == IAS_CLIENT) {
DEBUG( 4, __FUNCTION__ "(), disconnect as client\n");
@@ -284,9 +289,8 @@ static void iriap_disconnect_indication( void *instance, void *sap,
NULL);
}
- if ( userdata) {
+ if (userdata)
dev_kfree_skb( userdata);
- }
}
/*
@@ -295,28 +299,28 @@ static void iriap_disconnect_indication( void *instance, void *sap,
*
*
*/
-void iriap_disconnect_request( struct iriap_cb *self)
+void iriap_disconnect_request(struct iriap_cb *self)
{
struct sk_buff *skb;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
- skb = dev_alloc_skb( 64);
+ skb = dev_alloc_skb(64);
if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an sk_buff of length %d\n", 64);
+ DEBUG(0, __FUNCTION__
+ "(), Could not allocate an sk_buff of length %d\n", 64);
return;
}
/*
- * Reserve space for MUX and LAP header
+ * Reserve space for MUX control and LAP header
*/
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, LMP_MAX_HEADER);
- irlmp_disconnect_request( self->lsap, skb);
+ irlmp_disconnect_request(self->lsap, skb);
}
void iriap_getinfobasedetails_request(void)
@@ -381,7 +385,7 @@ void iriap_getvaluebyclass_request(char *name, char *attr,
/* Give ourselves 10 secs to finish this operation */
iriap_start_watchdog_timer(self, 10*HZ);
- skb = dev_alloc_skb( 64);
+ skb = dev_alloc_skb(64);
if (!skb)
return;
@@ -389,7 +393,7 @@ void iriap_getvaluebyclass_request(char *name, char *attr,
attr_len = strlen(attr);
/* Reserve space for MUX and LAP header */
- skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->max_header_size);
skb_put(skb, 3+name_len+attr_len);
frame = skb->data;
@@ -535,13 +539,13 @@ void iriap_getvaluebyclass_response(struct iriap_cb *self, __u16 obj_id,
* value. We add 9 bytes because of the 6 bytes for the frame and
* max 3 bytes for the value coding.
*/
- skb = dev_alloc_skb(value->len + LMP_HEADER + LAP_HEADER + 9);
+ skb = dev_alloc_skb(value->len + self->max_header_size + 9);
if (!skb)
return;
/* Reserve space for MUX and LAP header */
- skb_reserve( skb, LMP_HEADER+LAP_HEADER);
- skb_put( skb, 6);
+ skb_reserve(skb, self->max_header_size);
+ skb_put(skb, 6);
fp = skb->data;
@@ -666,7 +670,7 @@ void iriap_getvaluebyclass_indication(struct iriap_cb *self,
/*
* Function iriap_send_ack (void)
*
- *
+ * Currently not used
*
*/
void iriap_send_ack( struct iriap_cb *self)
@@ -679,13 +683,13 @@ void iriap_send_ack( struct iriap_cb *self)
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
- skb = dev_alloc_skb( 64);
+ skb = dev_alloc_skb(64);
if (!skb)
return;
/* Reserve space for MUX and LAP header */
- skb_reserve( skb, 4);
- skb_put( skb, 3);
+ skb_reserve(skb, self->max_header_size);
+ skb_put(skb, 1);
frame = skb->data;
/* Build frame */
@@ -698,8 +702,10 @@ void iriap_send_ack( struct iriap_cb *self)
* LSAP connection confirmed!
*
*/
-void iriap_connect_confirm(void *instance, void *sap, struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *userdata)
+static void iriap_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, __u8 header_size,
+ struct sk_buff *userdata)
{
struct iriap_cb *self;
@@ -711,7 +717,7 @@ void iriap_connect_confirm(void *instance, void *sap, struct qos_info *qos,
DEBUG(4, __FUNCTION__ "()\n");
- /* del_timer( &self->watchdog_timer); */
+ del_timer(&self->watchdog_timer);
iriap_do_client_event(self, IAP_LM_CONNECT_CONFIRM, userdata);
}
@@ -724,19 +730,17 @@ void iriap_connect_confirm(void *instance, void *sap, struct qos_info *qos,
*/
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
+ __u8 header_size,
struct sk_buff *userdata)
{
struct iriap_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct iriap_cb *) instance;
+ self = (struct iriap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
- ASSERT( self->mode == IAS_SERVER, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
- iriap_do_server_event( self, IAP_LM_CONNECT_INDICATION, userdata);
+ iriap_do_server_event(self, IAP_LM_CONNECT_INDICATION, userdata);
}
/*
@@ -856,7 +860,7 @@ void iriap_call_indication( struct iriap_cb *self, struct sk_buff *skb)
}
opcode &= 0x7f; /* Mask away LST bit */
- switch( opcode) {
+ switch (opcode) {
case GET_INFO_BASE:
DEBUG( 0, "IrLMP GetInfoBaseDetails not implemented!\n");
break;
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index ccba78ece..18a70fec3 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Tue Jan 26 12:29:36 1999
+ * Modified at: Sun May 9 11:01:47 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -387,9 +387,9 @@ static void state_r_disconnect( struct iriap_cb *self, IRIAP_EVENT event,
}
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve( tx_skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(tx_skb, LMP_MAX_HEADER);
- irlmp_connect_response( self->lsap, tx_skb);
+ irlmp_connect_response(self->lsap, tx_skb);
/*LM_Idle_request(idle); */
iriap_next_server_state( self, R_CALL);
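
The common thread in the iriap changes is that hard-coded
LMP_CONTROL_HEADER+LAP_HEADER reservations give way to LMP_MAX_HEADER, or to
the per-connection max_header_size now delivered through the connect
callbacks. A minimal sketch of the resulting frame-building pattern follows;
the buffer size and opcode value are illustrative, and irlmp_data_request()
is assumed to be the same call iriap.c uses elsewhere to send such frames.

static void example_send_frame(struct iriap_cb *self)
{
	struct sk_buff *skb;
	__u8 *frame;

	skb = dev_alloc_skb(64);
	if (!skb)
		return;

	/* Leave room for whatever headers the lower layers will push */
	skb_reserve(skb, self->max_header_size);

	/* Claim space for our own payload and fill it in */
	skb_put(skb, 1);
	frame = skb->data;
	frame[0] = 0x00;	/* opcode would go here */

	irlmp_data_request(self->lsap, skb);
}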
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index f2f8271cf..e231a08d4 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -6,13 +6,14 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 23:03:55 1999
+ * Modified at: Mon May 31 14:19:34 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
* Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -61,6 +62,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size,
+ __u8 max_header_size,
struct sk_buff *);
static void irlan_check_response_param(struct irlan_cb *self, char *param,
char *value, int val_len);
@@ -79,7 +81,7 @@ static void irlan_client_kick_timer_expired(unsigned long data)
* indication it needs to make progress. If the client is still in
* IDLE state, we must kick it to, but only if the provider is not IDLE
*/
- if ((self->access_type == ACCESS_PEER) &&
+ if ((self->provider.access_type == ACCESS_PEER) &&
(self->client.state == IRLAN_IDLE) &&
(self->provider.state != IRLAN_IDLE)) {
irlan_client_wakeup(self, self->saddr, self->daddr);
@@ -105,23 +107,29 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
{
struct irmanager_event mgr_event;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
- /* Check if we are already awake */
- if (self->client.state != IRLAN_IDLE)
+ /*
+ * Check if we are already awake, or if we are a provider in direct
+ * mode (in that case we must leave the client idle)
+ */
+ if ((self->client.state != IRLAN_IDLE) ||
+ (self->provider.access_type == ACCESS_DIRECT))
return;
/* saddr may have changed! */
self->saddr = saddr;
- /* Check if network device is up */
+ /* Before we try to connect, check if the network device is up. If it
+ * is, that means the "user" really wants to connect. If not, we just
+ * notify the user about the possibility of an IrLAN connection
+ */
if (self->dev.start) {
/* Open TSAPs */
irlan_client_open_ctrl_tsap(self);
- irlan_provider_open_ctrl_tsap(self);
irlan_open_data_tsap(self);
irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL);
@@ -161,7 +169,7 @@ void irlan_client_discovery_indication(discovery_t *discovery)
struct irlan_cb *self, *entry;
__u32 saddr, daddr;
- DEBUG(0, __FUNCTION__"()\n");
+ DEBUG(1, __FUNCTION__"()\n");
ASSERT(irlan != NULL, return;);
ASSERT(discovery != NULL, return;);
@@ -176,7 +184,8 @@ void irlan_client_discovery_indication(discovery_t *discovery)
if (self) {
ASSERT(self->magic == IRLAN_MAGIC, return;);
- DEBUG(2, __FUNCTION__ "(), Found instance!\n");
+ DEBUG(1, __FUNCTION__ "(), Found instance (%08x)!\n",
+ daddr);
irlan_client_wakeup(self, saddr, daddr);
@@ -184,30 +193,13 @@ void irlan_client_discovery_indication(discovery_t *discovery)
}
/*
- * We have no instance for daddr, so try and find an unused one
+ * We have no instance for daddr, so start a new one
*/
- self = hashbin_find(irlan, DEV_ADDR_ANY, NULL);
- if (self) {
- DEBUG(0, __FUNCTION__ "(), Found instance with DEV_ADDR_ANY!\n");
- /*
- * Rehash instance, now we have a client (daddr) to serve.
- */
- entry = hashbin_remove(irlan, self->daddr, NULL);
- ASSERT(entry == self, return;);
+ DEBUG(1, __FUNCTION__ "(), starting new instance!\n");
+ self = irlan_open(saddr, daddr, TRUE);
- self->daddr = daddr;
- self->saddr = saddr;
-
- DEBUG(0, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
- hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
-
- /* Check if network device has been registered */
- if (!self->netdev_registered)
- irlan_register_netdev(self);
-
- /* Restart watchdog timer */
- irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
- }
+ /* Restart watchdog timer */
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
}
/*
@@ -221,7 +213,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
{
struct irlan_cb *self;
- DEBUG(4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
self = (struct irlan_cb *) instance;
@@ -231,6 +223,12 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
+ /* Ready for a new command */
+ self->client.tx_busy = FALSE;
+
+ /* Check if we have some queued commands waiting to be sent */
+ irlan_run_ctrl_tx_queue(self);
+
return 0;
}
@@ -302,6 +300,7 @@ void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size,
+ __u8 max_header_size,
struct sk_buff *skb)
{
struct irlan_cb *self;
@@ -313,6 +312,9 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
+ self->client.max_sdu_size = max_sdu_size;
+ self->client.max_header_size = max_header_size;
+
/* TODO: we could set the MTU depending on the max_sdu_size */
irlan_do_client_event(self, IRLAN_CONNECT_COMPLETE, NULL);
@@ -339,7 +341,7 @@ void irlan_client_reconnect_data_channel(struct irlan_cb *self)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -410,11 +412,11 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
/* For all parameters */
for (i=0; i<count;i++) {
ret = irlan_extract_param(ptr, name, value, &val_len);
- if (ret == -1) {
+ if (ret < 0) {
DEBUG(2, __FUNCTION__ "(), IrLAN, Error!\n");
break;
}
- ptr+=ret;
+ ptr += ret;
irlan_check_response_param(self, name, value, val_len);
}
/* Cleanup */
@@ -423,9 +425,9 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
}
/*
- * Function check_param (param, value)
+ * Function irlan_check_response_param (self, param, value, val_len)
*
- * Check which parameter is received and update local variables
+ * Check which parameter is received and update local variables
*
*/
static void irlan_check_response_param(struct irlan_cb *self, char *param,
@@ -469,11 +471,11 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
}
if (strcmp(param, "ACCESS_TYPE") == 0) {
if (strcmp(value, "DIRECT") == 0)
- self->access_type = ACCESS_DIRECT;
+ self->client.access_type = ACCESS_DIRECT;
else if (strcmp(value, "PEER") == 0)
- self->access_type = ACCESS_PEER;
+ self->client.access_type = ACCESS_PEER;
else if (strcmp(value, "HOSTED") == 0)
- self->access_type = ACCESS_HOSTED;
+ self->client.access_type = ACCESS_HOSTED;
else {
DEBUG(2, __FUNCTION__ "(), unknown access type!\n");
}
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
index 1544c093e..2b92e7a74 100644
--- a/net/irda/irlan/irlan_client_event.c
+++ b/net/irda/irlan/irlan_client_event.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 12:23:22 1999
+ * Modified at: Fri May 14 23:08:15 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -97,7 +97,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRLAN_MAGIC, return -1;);
- switch(event) {
+ switch (event) {
case IRLAN_DISCOVERY_INDICATION:
/* Get some values from peer IAS */
iriap_getvaluebyclass_request(
@@ -152,7 +152,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
/* Give the client a kick! */
- if ((self->access_type == ACCESS_PEER) &&
+ if ((self->provider.access_type == ACCESS_PEER) &&
(self->provider.state != IRLAN_IDLE))
irlan_client_wakeup(self, self->saddr, self->daddr);
break;
@@ -222,7 +222,7 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
ASSERT(self != NULL, return -1;);
- switch(event) {
+ switch (event) {
case IRLAN_DATA_INDICATION:
ASSERT(skb != NULL, return -1;);
@@ -314,7 +314,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
ASSERT(self->dtsap_sel_data != 0, return -1;);
/* Check which access type we are dealing with */
- switch(self->access_type) {
+ switch (self->client.access_type) {
case ACCESS_PEER:
if (self->provider.state == IRLAN_OPEN) {
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 6a30574ca..d0a77557b 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -6,10 +6,11 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 23:13:47 1999
+ * Modified at: Mon May 31 14:25:19 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -93,19 +94,25 @@ static void __irlan_close(struct irlan_cb *self);
static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
__u8 value_byte, __u16 value_short,
__u8 *value_array, __u16 value_len);
-static void irlan_close_tsaps(struct irlan_cb *self);
+void irlan_close_tsaps(struct irlan_cb *self);
#ifdef CONFIG_PROC_FS
static int irlan_proc_read(char *buf, char **start, off_t offset, int len,
int unused);
extern struct proc_dir_entry *proc_irda;
-#endif
+#endif /* CONFIG_PROC_FS */
+/*
+ * Function irlan_watchdog_timer_expired (data)
+ *
+ * The watchdog timer has expired, so ask irmanager to stop the device
+ * if it is still configured, otherwise just close this instance
+ *
+ */
void irlan_watchdog_timer_expired(unsigned long data)
{
struct irmanager_event mgr_event;
- struct irlan_cb *self, *entry;
+ struct irlan_cb *self;
DEBUG(0, __FUNCTION__ "()\n");
@@ -116,6 +123,7 @@ void irlan_watchdog_timer_expired(unsigned long data)
/* Check if device still configured */
if (self->dev.start) {
+ DEBUG(0, __FUNCTION__ "(), notifying irmanager to stop irlan!\n");
mgr_event.event = EVENT_IRLAN_STOP;
sprintf(mgr_event.devname, "%s", self->ifname);
irmanager_notify(&mgr_event);
@@ -128,22 +136,13 @@ void irlan_watchdog_timer_expired(unsigned long data)
*/
self->notify_irmanager = FALSE;
} else {
- DEBUG(0, __FUNCTION__ "(), recycling instance!\n");
+ DEBUG(0, __FUNCTION__ "(), closing instance!\n");
if (self->netdev_registered) {
DEBUG(0, __FUNCTION__ "(), removing netdev!\n");
unregister_netdev(&self->dev);
self->netdev_registered = FALSE;
}
-
- /* Unbind from daddr */
- entry = hashbin_remove(irlan, self->daddr, NULL);
- ASSERT(entry == self, return;);
-
- self->daddr = DEV_ADDR_ANY;
- self->saddr = DEV_ADDR_ANY;
-
- DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
- hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
+ irlan_close(self);
}
}
@@ -195,12 +194,12 @@ __initfunc(int irlan_init(void))
/* Register with IrLMP as a service */
skey = irlmp_register_service(hints);
- /* Start the first IrLAN instance */
+ /* Start the master IrLAN instance */
new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY, FALSE);
- irlan_open_data_tsap(new);
- irlan_client_open_ctrl_tsap(new);
+ /* The master will only open its (listen) control TSAP */
irlan_provider_open_ctrl_tsap(new);
+ new->master = TRUE;
/* Do some fast discovery! */
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
@@ -293,7 +292,7 @@ struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr, int netdev)
self->daddr = daddr;
/* Provider access can only be PEER, DIRECT, or HOSTED */
- self->access_type = access;
+ self->provider.access_type = access;
self->media = MEDIA_802_3;
self->notify_irmanager = TRUE;
@@ -302,7 +301,9 @@ struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr, int netdev)
init_timer(&self->client.kick_timer);
hashbin_insert(irlan, (QUEUE *) self, daddr, NULL);
-
+
+ skb_queue_head_init(&self->client.txq);
+
irlan_next_client_state(self, IRLAN_IDLE);
irlan_next_provider_state(self, IRLAN_IDLE);
@@ -322,7 +323,7 @@ struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr, int netdev)
*/
static void __irlan_close(struct irlan_cb *self)
{
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -359,8 +360,11 @@ void irlan_close(struct irlan_cb *self)
/* Check if device is still configured */
if (self->dev.start) {
- DEBUG(2, __FUNCTION__
+ DEBUG(0, __FUNCTION__
"(), Device still configured, closing later!\n");
+
+ /* Give it a chance to reconnect */
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
return;
}
DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
@@ -371,8 +375,15 @@ void irlan_close(struct irlan_cb *self)
__irlan_close(self);
}
+/*
+ * Function irlan_connect_indication (instance, sap, qos, max_sdu_size, max_header_size, skb)
+ *
+ * Here we receive the connect indication for the data channel
+ *
+ */
void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
struct irlan_cb *self;
struct tsap_cb *tsap;
@@ -386,13 +397,17 @@ void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
ASSERT(self->magic == IRLAN_MAGIC, return;);
ASSERT(tsap == self->tsap_data,return;);
- DEBUG(2, "IrLAN, We are now connected!\n");
+ self->max_sdu_size = max_sdu_size;
+ self->max_header_size = max_header_size;
+
+ DEBUG(0, "IrLAN, We are now connected!\n");
+
del_timer(&self->watchdog_timer);
irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
- if (self->access_type == ACCESS_PEER) {
+ if (self->provider.access_type == ACCESS_PEER) {
/*
* Data channel is open, so we are now allowed to
* configure the remote filter
@@ -400,22 +415,24 @@ void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
irlan_get_unicast_addr(self);
irlan_open_unicast_addr(self);
}
- /* Ready to transfer Ethernet frames */
+ /* Ready to transfer Ethernet frames (at last) */
self->dev.tbusy = 0;
}
void irlan_connect_confirm(void *instance, void *sap, struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
struct irlan_cb *self;
- DEBUG(2, __FUNCTION__ "()\n");
-
self = (struct irlan_cb *) instance;
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
+ self->max_sdu_size = max_sdu_size;
+ self->max_header_size = max_header_size;
+
/* TODO: we could set the MTU depending on the max_sdu_size */
DEBUG(2, "IrLAN, We are now connected!\n");
@@ -427,9 +444,15 @@ void irlan_connect_confirm(void *instance, void *sap, struct qos_info *qos,
*/
irlan_get_unicast_addr(self);
irlan_open_unicast_addr(self);
+
+ /* Open broadcast and multicast filter by default */
+ irlan_set_broadcast_filter(self, TRUE);
+ irlan_set_multicast_filter(self, TRUE);
/* Ready to transfer Ethernet frames */
self->dev.tbusy = 0;
+
+ irlan_eth_send_gratuitous_arp(&self->dev);
}
/*
@@ -444,7 +467,7 @@ void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
struct irlan_cb *self;
struct tsap_cb *tsap;
- DEBUG(2, __FUNCTION__ "(), reason=%d\n", reason);
+ DEBUG(0, __FUNCTION__ "(), reason=%d\n", reason);
self = (struct irlan_cb *) instance;
tsap = (struct tsap_cb *) sap;
@@ -460,7 +483,7 @@ void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
switch(reason) {
case LM_USER_REQUEST: /* User request */
- //irlan_close(self);
+ irlan_close(self);
break;
case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */
irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
@@ -478,9 +501,6 @@ void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
break;
}
- /* Stop IP from transmitting more packets */
- /* irlan_client_flow_indication(handle, FLOW_STOP, priv); */
-
irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
}
@@ -490,7 +510,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
struct notify_t notify;
struct tsap_cb *tsap;
- DEBUG(4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -500,12 +520,12 @@ void irlan_open_data_tsap(struct irlan_cb *self)
return;
irda_notify_init(&notify);
-
+
notify.data_indication = irlan_eth_receive;
notify.udata_indication = irlan_eth_receive;
notify.connect_indication = irlan_connect_indication;
notify.connect_confirm = irlan_connect_confirm;
- notify.flow_indication = irlan_eth_flow_indication;
+ /* notify.flow_indication = irlan_eth_flow_indication; */
notify.disconnect_indication = irlan_disconnect_indication;
notify.instance = self;
strncpy(notify.name, "IrLAN data", NOTIFY_MAX_NAME);
@@ -538,7 +558,6 @@ void irlan_close_tsaps(struct irlan_cb *self)
irttp_disconnect_request(self->tsap_data, NULL, P_NORMAL);
irttp_close_tsap(self->tsap_data);
self->tsap_data = NULL;
-
}
if (self->client.tsap_ctrl) {
irttp_disconnect_request(self->client.tsap_ctrl, NULL,
@@ -591,15 +610,60 @@ void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel)
irias_add_string_attrib(obj, "Name", "Linux");
#endif
irias_add_string_attrib(obj, "DeviceID", "HWP19F0");
- irias_add_integer_attrib(obj, "CompCnt", 2);
- irias_add_string_attrib(obj, "Comp#01", "PNP8294");
- irias_add_string_attrib(obj, "Comp#02", "PNP8389");
+ irias_add_integer_attrib(obj, "CompCnt", 1);
+ if (self->provider.access_type == ACCESS_PEER)
+ irias_add_string_attrib(obj, "Comp#02", "PNP8389");
+ else
+ irias_add_string_attrib(obj, "Comp#01", "PNP8294");
+
irias_add_string_attrib(obj, "Manufacturer", "Linux-IrDA Project");
irias_insert_object(obj);
}
}
/*
+ * Function irlan_run_ctrl_tx_queue (self)
+ *
+ * Try to send the next command in the control transmit queue
+ *
+ */
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+
+ if (irda_lock(&self->client.tx_busy) == FALSE)
+ return -EBUSY;
+
+ skb = skb_dequeue(&self->client.txq);
+ if (!skb) {
+ self->client.tx_busy = FALSE;
+ return 0;
+ }
+ if (self->client.tsap_ctrl == NULL) {
+ self->client.tx_busy = FALSE;
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ return irttp_data_request(self->client.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_ctrl_data_request (self, skb)
+ *
+ * This function makes sure that commands on the control channel are
+ * sent in a command/response fashion
+ */
+void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb)
+{
+ /* Queue command */
+ skb_queue_tail(&self->client.txq, skb);
+
+ /* Try to send command */
+ irlan_run_ctrl_tx_queue(self);
+}
+
+/*
* Function irlan_get_provider_info (self)
*
* Send Get Provider Information command to peer IrLAN layer
@@ -620,7 +684,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -628,7 +692,8 @@ void irlan_get_provider_info(struct irlan_cb *self)
frame[0] = CMD_GET_PROVIDER_INFO;
frame[1] = 0x00; /* Zero parameters */
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -651,7 +716,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
if (!skb)
return;
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -666,7 +731,8 @@ void irlan_open_data_channel(struct irlan_cb *self)
/* self->use_udata = TRUE; */
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
void irlan_close_data_channel(struct irlan_cb *self)
@@ -679,11 +745,15 @@ void irlan_close_data_channel(struct irlan_cb *self)
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
+ /* Check if the TSAP is still there */
+ if (self->client.tsap_ctrl == NULL)
+ return;
+
skb = dev_alloc_skb(64);
if (!skb)
return;
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -694,7 +764,8 @@ void irlan_close_data_channel(struct irlan_cb *self)
irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -719,7 +790,7 @@ void irlan_open_unicast_addr(struct irlan_cb *self)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -730,7 +801,8 @@ void irlan_open_unicast_addr(struct irlan_cb *self)
irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -757,7 +829,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -770,8 +842,9 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
else
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
-
- irttp_data_request(self->client.tsap_ctrl, skb);
+
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -796,7 +869,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -809,8 +882,9 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
else
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
-
- irttp_data_request(self->client.tsap_ctrl, skb);
+
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -836,7 +910,7 @@ void irlan_get_unicast_addr(struct irlan_cb *self)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -847,7 +921,8 @@ void irlan_get_unicast_addr(struct irlan_cb *self)
irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -871,7 +946,7 @@ void irlan_get_media_char(struct irlan_cb *self)
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->client.max_header_size);
skb_put(skb, 2);
frame = skb->data;
@@ -882,7 +957,8 @@ void irlan_get_media_char(struct irlan_cb *self)
irlan_insert_string_param(skb, "MEDIA", "802.3");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
@@ -1033,7 +1109,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
/* get parameter name */
memcpy(name, buf+n, name_len);
- name[ name_len] = '\0';
+ name[name_len] = '\0';
n+=name_len;
/*
@@ -1051,7 +1127,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
/* get parameter value */
memcpy(value, buf+n, val_len);
- value[ val_len] = '\0';
+ value[val_len] = '\0';
n+=val_len;
DEBUG(4, "Parameter: %s ", name);
@@ -1085,31 +1161,35 @@ static int irlan_proc_read(char *buf, char **start, off_t offset, int len,
while (self != NULL) {
ASSERT(self->magic == IRLAN_MAGIC, return len;);
- len += sprintf(buf+len, "ifname: %s,\n",
- self->ifname);
- len += sprintf(buf+len, "client state: %s, ",
- irlan_state[ self->client.state]);
- len += sprintf(buf+len, "provider state: %s,\n",
- irlan_state[ self->provider.state]);
- len += sprintf(buf+len, "saddr: %#08x, ",
- self->saddr);
- len += sprintf(buf+len, "daddr: %#08x\n",
- self->daddr);
- len += sprintf(buf+len, "version: %d.%d,\n",
- self->version[1], self->version[0]);
- len += sprintf(buf+len, "access type: %s\n",
- irlan_access[ self->access_type]);
- len += sprintf(buf+len, "media: %s\n",
- irlan_media[ self->media]);
-
- len += sprintf(buf+len, "local filter:\n");
- len += sprintf(buf+len, "remote filter: ");
- len += irlan_print_filter(self->client.filter_type, buf+len);
-
- len += sprintf(buf+len, "tx busy: %s\n", self->dev.tbusy ?
- "TRUE" : "FALSE");
-
- len += sprintf(buf+len, "\n");
+ /* Don't display the master server */
+ if (self->master == 0) {
+ len += sprintf(buf+len, "ifname: %s,\n",
+ self->ifname);
+ len += sprintf(buf+len, "client state: %s, ",
+ irlan_state[self->client.state]);
+ len += sprintf(buf+len, "provider state: %s,\n",
+ irlan_state[self->provider.state]);
+ len += sprintf(buf+len, "saddr: %#08x, ",
+ self->saddr);
+ len += sprintf(buf+len, "daddr: %#08x\n",
+ self->daddr);
+ len += sprintf(buf+len, "version: %d.%d,\n",
+ self->version[1], self->version[0]);
+ len += sprintf(buf+len, "access type: %s\n",
+ irlan_access[self->client.access_type]);
+ len += sprintf(buf+len, "media: %s\n",
+ irlan_media[self->media]);
+
+ len += sprintf(buf+len, "local filter:\n");
+ len += sprintf(buf+len, "remote filter: ");
+ len += irlan_print_filter(self->client.filter_type,
+ buf+len);
+
+ len += sprintf(buf+len, "tx busy: %s\n",
+ self->dev.tbusy ? "TRUE" : "FALSE");
+
+ len += sprintf(buf+len, "\n");
+ }
self = (struct irlan_cb *) hashbin_get_next(irlan);
}
@@ -1132,34 +1212,34 @@ void print_ret_code(__u8 code)
printk(KERN_INFO "Success\n");
break;
case 1:
- printk(KERN_WARNING "Insufficient resources\n");
+ WARNING("IrLAN: Insufficient resources\n");
break;
case 2:
- printk(KERN_WARNING "Invalid command format\n");
+ WARNING("IrLAN: Invalid command format\n");
break;
case 3:
- printk(KERN_WARNING "Command not supported\n");
+ WARNING("IrLAN: Command not supported\n");
break;
case 4:
- printk(KERN_WARNING "Parameter not supported\n");
+ WARNING("IrLAN: Parameter not supported\n");
break;
case 5:
- printk(KERN_WARNING "Value not supported\n");
+ WARNING("IrLAN: Value not supported\n");
break;
case 6:
- printk(KERN_WARNING "Not open\n");
+ WARNING("IrLAN: Not open\n");
break;
case 7:
- printk(KERN_WARNING "Authentication required\n");
+ WARNING("IrLAN: Authentication required\n");
break;
case 8:
- printk(KERN_WARNING "Invalid password\n");
+ WARNING("IrLAN: Invalid password\n");
break;
case 9:
- printk(KERN_WARNING "Protocol error\n");
+ WARNING("IrLAN: Protocol error\n");
break;
case 255:
- printk(KERN_WARNING "Asynchronous status\n");
+ WARNING("IrLAN: Asynchronous status\n");
break;
}
}
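
The txq/tx_busy machinery introduced above serializes control-channel
traffic: every command now goes through irlan_ctrl_data_request(), and the
next queued command is kicked off from irlan_client_ctrl_data_indication()
once the response arrives. A minimal sketch of issuing a command under the
new scheme, mirroring irlan_get_provider_info() above:

static void example_send_command(struct irlan_cb *self)
{
	struct sk_buff *skb;
	__u8 *frame;

	skb = dev_alloc_skb(64);
	if (!skb)
		return;

	/* Reserve space for TTP, LMP, and LAP headers */
	skb_reserve(skb, self->client.max_header_size);
	skb_put(skb, 2);

	frame = skb->data;
	frame[0] = CMD_GET_PROVIDER_INFO;
	frame[1] = 0x00;	/* zero parameters */

	/* Queued: sent at once if the channel is idle, otherwise when
	 * the pending response clears client.tx_busy */
	irlan_ctrl_data_request(self, skb);
}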
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index c1965a117..c2e2453db 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -6,13 +6,13 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 15 08:37:58 1998
- * Modified at: Thu Apr 22 14:26:39 1999
+ * Modified at: Mon May 31 19:57:08 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
* Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
+#include <linux/random.h>
#include <net/arp.h>
#include <net/irda/irda.h>
@@ -49,7 +50,7 @@ int irlan_eth_init(struct device *dev)
struct irmanager_event mgr_event;
struct irlan_cb *self;
- DEBUG(0, __FUNCTION__"()\n");
+ DEBUG(2, __FUNCTION__"()\n");
ASSERT(dev != NULL, return -1;);
@@ -65,21 +66,26 @@ int irlan_eth_init(struct device *dev)
ether_setup(dev);
- dev->tx_queue_len = TTP_MAX_QUEUE;
-
-#if 0
- /*
- * OK, since we are emulating an IrLAN sever we will have to give
- * ourself an ethernet address!
- * FIXME: this must be more dynamically
+ /*
+ * Let's do all queueing in IrTTP instead of this device driver.
+ * Queueing here as well can introduce some strange latency
+ * problems, which we will avoid by setting the queue size to 0.
*/
- dev->dev_addr[0] = 0x40;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x00;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x23;
- dev->dev_addr[5] = 0x45;
-#endif
+ dev->tx_queue_len = 0;
+
+ if (self->provider.access_type == ACCESS_DIRECT) {
+ /*
+ * Since we are emulating an IrLAN server we will have to
+ * give ourselves an Ethernet address!
+ */
+ dev->dev_addr[0] = 0x40;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x00;
+ dev->dev_addr[3] = 0x00;
+ get_random_bytes(dev->dev_addr+4, 1);
+ get_random_bytes(dev->dev_addr+5, 1);
+ }
+
/*
* Network device has now been registered, so tell irmanager about
* it, so it can be configured with network parameters
@@ -109,7 +115,7 @@ int irlan_eth_open(struct device *dev)
{
struct irlan_cb *self;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(dev != NULL, return -1;);
@@ -144,7 +150,7 @@ int irlan_eth_close(struct device *dev)
{
struct irlan_cb *self = (struct irlan_cb *) dev->priv;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
/* Stop device */
dev->tbusy = 1;
@@ -179,76 +185,58 @@ int irlan_eth_close(struct device *dev)
int irlan_eth_xmit(struct sk_buff *skb, struct device *dev)
{
struct irlan_cb *self;
+ int ret;
- DEBUG(4, __FUNCTION__ "()\n");
-
self = (struct irlan_cb *) dev->priv;
ASSERT(self != NULL, return 0;);
ASSERT(self->magic == IRLAN_MAGIC, return 0;);
- /* Lock transmit buffer */
- if (irda_lock((void *) &dev->tbusy) == FALSE) {
- /*
- * If we get here, some higher level has decided we are broken.
- * There should really be a "kick me" function call instead.
- */
- int tickssofar = jiffies - dev->trans_start;
-
- if (tickssofar < 5)
- return -EBUSY;
-
- dev->tbusy = 0;
- dev->trans_start = jiffies;
- }
+ /* Check if IrTTP can accept more frames */
+ if (dev->tbusy)
+ return -EBUSY;
- DEBUG(4, "Room left at head: %d\n", skb_headroom(skb));
- DEBUG(4, "Room left at tail: %d\n", skb_tailroom(skb));
- DEBUG(4, "Required room: %d\n", IRLAN_MAX_HEADER);
-
- /* skb headroom large enough to contain IR-headers? */
- if ((skb_headroom(skb) < IRLAN_MAX_HEADER) || (skb_shared(skb))) {
+ /* skb headroom large enough to contain all IrDA-headers? */
+ if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
struct sk_buff *new_skb =
- skb_realloc_headroom(skb, IRLAN_MAX_HEADER);
- ASSERT(new_skb != NULL, return 0;);
- ASSERT(skb_headroom(new_skb) >= IRLAN_MAX_HEADER, return 0;);
+ skb_realloc_headroom(skb, self->max_header_size);
- /* Free original skb, and use the new one */
+ /* We have to free the original skb anyway */
dev_kfree_skb(skb);
+
+ /* Did the realloc succeed? */
+ if (new_skb == NULL)
+ return 0;
+
+ /* Use the new skb instead */
skb = new_skb;
}
dev->trans_start = jiffies;
- self->stats.tx_packets++;
- self->stats.tx_bytes += skb->len;
- /*
- * Now queue the packet in the transport layer
- * FIXME: clean up the code below! DB
- */
- if (self->use_udata) {
- irttp_udata_request(self->tsap_data, skb);
- dev->tbusy = 0;
-
- return 0;
- }
-
- if (irttp_data_request(self->tsap_data, skb) == -1) {
- /*
- * IrTTPs tx queue is full, so we just have to drop the
- * frame! You might think that we should just return -1
- * and don't deallocate the frame, but that is dangerous
- * since it's possible that we have replaced the original
- * skb with a new one with larger headroom, and that would
- * really confuse do_dev_queue_xmit() in dev.c! I have
- * tried :-) DB
+ /* Now queue the packet in the transport layer */
+ if (self->use_udata)
+ ret = irttp_udata_request(self->tsap_data, skb);
+ else
+ ret = irttp_data_request(self->tsap_data, skb);
+
+ if (ret < 0) {
+ /*
+ * IrTTP's tx queue is full, so we just have to
+ * drop the frame! You might think that we should
+ * just return -1 and not deallocate the frame,
+ * but that is dangerous since it's possible that
+ * we have replaced the original skb with a new
+ * one with larger headroom, and that would really
+ * confuse do_dev_queue_xmit() in dev.c! I have
+ * tried :-) DB
*/
dev_kfree_skb(skb);
- ++self->stats.tx_dropped;
-
- return 0;
+ self->stats.tx_dropped++;
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += skb->len;
}
- dev->tbusy = 0; /* Finished! */
return 0;
}
@@ -282,11 +270,11 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
skb->dev = &self->dev;
skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */
- netif_rx(skb); /* Eat it! */
-
self->stats.rx_packets++;
self->stats.rx_bytes += skb->len;
+ netif_rx(skb); /* Eat it! */
+
return 0;
}
@@ -301,8 +289,6 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
struct irlan_cb *self;
struct device *dev;
- DEBUG(4, __FUNCTION__ "()\n");
-
self = (struct irlan_cb *) instance;
ASSERT(self != NULL, return;);
@@ -314,26 +300,16 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
switch (flow) {
case FLOW_STOP:
- DEBUG(4, "IrLAN, stopping Ethernet layer\n");
-
dev->tbusy = 1;
break;
case FLOW_START:
- /*
- * Tell upper layers that its time to transmit frames again
- */
- DEBUG(4, "IrLAN, starting Ethernet layer\n");
-
+ default:
+ /* Tell upper layers that it's time to transmit frames again */
dev->tbusy = 0;
- /*
- * Ready to receive more frames, so schedule the network
- * layer
- */
+ /* Schedule network layer */
mark_bh(NET_BH);
break;
- default:
- DEBUG(0, __FUNCTION__ "(), Unknown flow command!\n");
}
}
@@ -360,7 +336,7 @@ void irlan_eth_rebuild_header(void *buff, struct device *dev,
* Send gratuitous ARP to announce that we have changed
* hardware address, so that all peers updates their ARP tables
*/
-void irlan_etc_send_gratuitous_arp(struct device *dev)
+void irlan_eth_send_gratuitous_arp(struct device *dev)
{
struct in_device *in_dev;
@@ -373,7 +349,7 @@ void irlan_etc_send_gratuitous_arp(struct device *dev)
in_dev = dev->ip_ptr;
arp_send(ARPOP_REQUEST, ETH_P_ARP,
in_dev->ifa_list->ifa_address,
- &dev,
+ dev,
in_dev->ifa_list->ifa_address,
NULL, dev->dev_addr, NULL);
}
@@ -391,16 +367,21 @@ void irlan_eth_set_multicast_list(struct device *dev)
self = dev->priv;
- DEBUG(0, __FUNCTION__ "()\n");
- return;
+ DEBUG(2, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
- if (dev->flags&IFF_PROMISC) {
- /* Enable promiscuous mode */
- DEBUG(0, "Promiscous mode not implemented\n");
- /* outw(MULTICAST|PROMISC, ioaddr); */
+ /* Check if data channel has been connected yet */
+ if (self->client.state != IRLAN_DATA) {
+ DEBUG(1, __FUNCTION__ "(), delaying!\n");
+ return;
}
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ WARNING("Promiscous mode not implemented by IrLAN!\n");
+ }
else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n");
@@ -420,13 +401,10 @@ void irlan_eth_set_multicast_list(struct device *dev)
irlan_set_multicast_filter(self, FALSE);
}
- if (dev->flags & IFF_BROADCAST) {
- DEBUG(4, __FUNCTION__ "(), Setting broadcast filter\n");
+ if (dev->flags & IFF_BROADCAST)
irlan_set_broadcast_filter(self, TRUE);
- } else {
- DEBUG(4, __FUNCTION__ "(), Clearing broadcast filter\n");
+ else
irlan_set_broadcast_filter(self, FALSE);
- }
}
/*
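
Note the one-character bug fix in irlan_eth_send_gratuitous_arp() above:
arp_send() takes the struct device pointer itself, not its address. A
minimal sketch of the corrected announcement; the NULL checks are added
here for illustration and are not part of the patch.

#include <linux/inetdevice.h>
#include <net/arp.h>

static void example_announce_hw_addr(struct device *dev)
{
	struct in_device *in_dev = dev->ip_ptr;

	if (in_dev == NULL || in_dev->ifa_list == NULL)
		return;	/* no IP address configured yet */

	/* Sender and target IP are both ours: a gratuitous request that
	 * makes peers refresh their ARP entries for our new address */
	arp_send(ARPOP_REQUEST, ETH_P_ARP,
		 in_dev->ifa_list->ifa_address, dev,
		 in_dev->ifa_list->ifa_address,
		 NULL, dev->dev_addr, NULL);
}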
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index d54e7cc12..93d7c4efe 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Oct 20 09:10:16 1998
- * Modified at: Wed Feb 3 21:42:27 1999
+ * Modified at: Sun May 9 21:17:44 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -38,22 +38,22 @@ char *irlan_state[] = {
"IRLAN_SYNC",
};
-void irlan_next_client_state( struct irlan_cb *self, IRLAN_STATE state)
+void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
{
DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
self->client.state = state;
}
-void irlan_next_provider_state( struct irlan_cb *self, IRLAN_STATE state)
+void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state)
{
DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
self->provider.state = state;
}
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index c4c1079dd..ec7178db4 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri Jan 29 11:16:38 1999
- * Modified at: Thu Feb 25 15:10:54 1999
+ * Modified at: Fri May 14 23:11:01 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -23,6 +23,7 @@
********************************************************************/
#include <linux/skbuff.h>
+#include <linux/random.h>
#include <net/irda/irlan_common.h>
@@ -41,29 +42,29 @@ void handle_filter_request(struct irlan_cb *self, struct sk_buff *skb)
(self->provider.filter_operation == DYNAMIC))
{
DEBUG(0, "Giving peer a dynamic Ethernet address\n");
-
self->provider.mac_address[0] = 0x40;
self->provider.mac_address[1] = 0x00;
self->provider.mac_address[2] = 0x00;
self->provider.mac_address[3] = 0x00;
/* Use arbitration value to generate MAC address */
- if (self->access_type == ACCESS_PEER) {
+ if (self->provider.access_type == ACCESS_PEER) {
self->provider.mac_address[4] =
self->provider.send_arb_val & 0xff;
self->provider.mac_address[5] =
(self->provider.send_arb_val >> 8) & 0xff;
} else {
/* Just generate something for now */
- self->provider.mac_address[4] = jiffies & 0xff;
- self->provider.mac_address[5] = (jiffies >> 8) & 0xff;
+ get_random_bytes(self->provider.mac_address+4, 1);
+ get_random_bytes(self->provider.mac_address+5, 1);
}
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x03;
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001);
- irlan_insert_array_param(skb, "FILTER_ENTRY", self->provider.mac_address, 6);
+ irlan_insert_array_param(skb, "FILTER_ENTRY",
+ self->provider.mac_address, 6);
return;
}
@@ -138,8 +139,7 @@ void handle_filter_request(struct irlan_cb *self, struct sk_buff *skb)
* Check parameters in request from peer device
*
*/
-void irlan_check_command_param(struct irlan_cb *self, char *param,
- char *value)
+void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
{
__u8 *bytes;
@@ -210,6 +210,12 @@ void irlan_check_command_param(struct irlan_cb *self, char *param,
}
}
+/*
+ * Function irlan_print_filter (filter_type, buf)
+ *
+ * Print status of filter. Used by /proc file system
+ *
+ */
int irlan_print_filter(int filter_type, char *buf)
{
int len = 0;
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 8e2c3c25a..947141872 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -6,13 +6,14 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 14:28:52 1999
+ * Modified at: Sun May 9 12:22:56 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
* Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -31,6 +32,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <asm/system.h>
#include <asm/bitops.h>
@@ -50,14 +52,20 @@
#include <net/irda/irlan_filter.h>
#include <net/irda/irlan_client.h>
+static void irlan_provider_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb);
+
/*
* Function irlan_provider_control_data_indication (handle, skb)
*
* This function gets the data that is received on the control channel
*
*/
-int irlan_provider_data_indication(void *instance, void *sap,
- struct sk_buff *skb)
+static int irlan_provider_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
struct irlan_cb *self;
__u8 code;
@@ -111,14 +119,17 @@ int irlan_provider_data_indication(void *instance, void *sap,
* Got connection from peer IrLAN layer
*
*/
-void irlan_provider_connect_indication(void *instance, void *sap,
- struct qos_info *qos,
- __u32 max_sdu_size, struct sk_buff *skb)
+static void irlan_provider_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ __u8 max_header_size,
+ struct sk_buff *skb)
{
- struct irlan_cb *self, *entry, *new;
+ struct irlan_cb *self, *new;
struct tsap_cb *tsap;
+ __u32 saddr, daddr;
- DEBUG(2, __FUNCTION__ "()\n");
+ DEBUG(0, __FUNCTION__ "()\n");
self = (struct irlan_cb *) instance;
tsap = (struct tsap_cb *) sap;
@@ -126,34 +137,69 @@ void irlan_provider_connect_indication(void *instance, void *sap,
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
+ self->provider.max_sdu_size = max_sdu_size;
+ self->provider.max_header_size = max_header_size;
+
ASSERT(tsap == self->provider.tsap_ctrl,return;);
ASSERT(self->provider.state == IRLAN_IDLE, return;);
- /* Check if this provider is currently unused */
- if (self->daddr == DEV_ADDR_ANY) {
- /*
- * Rehash instance, now we have a client (daddr) to serve.
- */
- entry = hashbin_remove(irlan, self->daddr, NULL);
- ASSERT( entry == self, return;);
-
- self->daddr = irttp_get_daddr(tsap);
- DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
- hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
+ daddr = irttp_get_daddr(tsap);
+ saddr = irttp_get_saddr(tsap);
+
+ /* Check if we are already dealing with this client or peer */
+ new = (struct irlan_cb *) hashbin_find(irlan, daddr, NULL);
+ if (new) {
+ ASSERT(new->magic == IRLAN_MAGIC, return;);
+ DEBUG(0, __FUNCTION__ "(), found instance!\n");
+
+ /* Update saddr, since client may have moved to a new link */
+ new->saddr = saddr;
+ DEBUG(2, __FUNCTION__ "(), saddr=%08x\n", new->saddr);
+
+ /* Make sure that any old provider control TSAP is removed */
+ if ((new != self) && new->provider.tsap_ctrl) {
+ irttp_disconnect_request(new->provider.tsap_ctrl,
+ NULL, P_NORMAL);
+ irttp_close_tsap(new->provider.tsap_ctrl);
+ new->provider.tsap_ctrl = NULL;
+ }
} else {
- /*
- * If we already have the daddr set, this means that the
- * client must already have started (peer mode). We must
- * make sure that this connection attempt is from the same
- * device as the client is dealing with!
+ /* This must be the master instance, so start a new instance */
+ DEBUG(0, __FUNCTION__ "(), starting new provider!\n");
+
+ new = irlan_open(saddr, daddr, TRUE);
+ }
+
+ /*
+ * Check if the connection came in on the master server, or the
+ * slave server. If it came in on the slave, then everything is
+ * really OK (a reconnect); if not, we need to dup the connection
+ * and hand it over to the slave.
+ */
+ if (new != self) {
+
+ /* Now attach up the new "socket" */
+ new->provider.tsap_ctrl = irttp_dup(self->provider.tsap_ctrl,
+ new);
+ if (!new->provider.tsap_ctrl) {
+ DEBUG(0, __FUNCTION__ "(), dup failed!\n");
+ return;
+ }
+
+ /* new->stsap_sel = new->tsap->stsap_sel; */
+ new->dtsap_sel_ctrl = new->provider.tsap_ctrl->dtsap_sel;
+
+ /* Clean up the original one to keep it in listen state */
+ self->provider.tsap_ctrl->dtsap_sel = LSAP_ANY;
+ self->provider.tsap_ctrl->lsap->dlsap_sel = LSAP_ANY;
+ self->provider.tsap_ctrl->lsap->lsap_state = LSAP_DISCONNECTED;
+
+ /*
+ * Use the new instance from here instead of the master
+ * struct!
*/
- ASSERT(self->daddr == irttp_get_daddr(tsap), return;);
+ self = new;
}
-
- /* Update saddr, since client may have moved to a new link */
- self->saddr = irttp_get_saddr(tsap);
- DEBUG(2, __FUNCTION__ "(), saddr=%08x\n", self->saddr);
-
/* Check if network device has been registered */
if (!self->netdev_registered)
irlan_register_netdev(self);
@@ -165,9 +211,10 @@ void irlan_provider_connect_indication(void *instance, void *sap,
* indication it needs to make progress. If the client is still in
* IDLE state, we must kick it awake
*/
- if ((self->access_type == ACCESS_PEER) &&
- (self->client.state == IRLAN_IDLE))
+ if ((self->provider.access_type == ACCESS_PEER) &&
+ (self->client.state == IRLAN_IDLE)) {
irlan_client_wakeup(self, self->saddr, self->daddr);
+ }
}
/*
@@ -225,6 +272,9 @@ int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);
+ /* Open data channel */
+ irlan_open_data_tsap(self);
+
return ret;
}
@@ -314,7 +364,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
return;
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_reserve(skb, self->provider.max_header_size);
skb_put(skb, 2);
switch (command) {
@@ -334,6 +384,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
}
irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
break;
+
case CMD_GET_MEDIA_CHAR:
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x05; /* 5 parameters */
@@ -341,7 +392,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
- switch(self->access_type) {
+ switch (self->provider.access_type) {
case ACCESS_DIRECT:
irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
break;
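
[Editor's note] The irttp_dup() handling added in irlan_provider_connect_indication() above follows the same pattern as accept(2) on a TCP socket: the listening endpoint is duplicated for the new peer, then reset so it keeps listening. A rough userspace analogy is sketched below; the loopback address and port 5678 are arbitrary demo choices, not anything from the patch.

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in sa;
    int master = socket(AF_INET, SOCK_STREAM, 0);

    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = htons(5678);                   /* arbitrary demo port */
    sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    if (bind(master, (struct sockaddr *) &sa, sizeof(sa)) < 0 ||
        listen(master, 1) < 0) {
        perror("listen");
        return 1;
    }

    /* like irttp_dup(): master keeps listening, the duplicate serves */
    int slave = accept(master, NULL, NULL);
    if (slave >= 0)
        close(slave);
    close(master);
    return 0;
}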
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c
index 6bdf503f1..29e660fa7 100644
--- a/net/irda/irlan/irlan_provider_event.c
+++ b/net/irda/irlan/irlan_provider_event.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 10:46:28 1999
+ * Modified at: Fri May 7 10:53:58 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -108,7 +108,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
switch(event) {
case IRLAN_GET_INFO_CMD:
/* Be sure to use 802.3 in case of peer mode */
- if (self->access_type == ACCESS_PEER) {
+ if (self->provider.access_type == ACCESS_PEER) {
self->media = MEDIA_802_3;
/* Check if client has started yet */
@@ -129,7 +129,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
break;
case IRLAN_OPEN_DATA_CMD:
ret = irlan_parse_open_data_cmd(self, skb);
- if (self->access_type == ACCESS_PEER) {
+ if (self->provider.access_type == ACCESS_PEER) {
/* FIXME: make use of random functions! */
self->provider.send_arb_val = (jiffies & 0xffff);
}
@@ -205,8 +205,6 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- struct irmanager_event mgr_event;
-
DEBUG(4, __FUNCTION__ "()\n");
ASSERT(self != NULL, return -1;);
@@ -220,10 +218,6 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
break;
case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
case IRLAN_LAP_DISCONNECT:
- mgr_event.event = EVENT_IRLAN_STOP;
- sprintf(mgr_event.devname, "%s", self->ifname);
- irmanager_notify(&mgr_event);
-
irlan_next_provider_state(self, IRLAN_IDLE);
break;
default:
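
[Editor's note] Each irlan_provider_state_*() handler above is one row of a switch-based finite state machine: an (event, state) pair selects an action and the next state via irlan_next_provider_state(). A stripped-down sketch of that dispatch shape follows; the state and event names are invented placeholders, not the real IRLAN enums.

#include <stdio.h>

typedef enum { ST_IDLE, ST_DATA, ST_MAX } state_t;
typedef enum { EV_OPEN_DATA_CMD, EV_LAP_DISCONNECT } event_t;

typedef state_t (*handler_t)(event_t ev);

static state_t state_idle(event_t ev)
{
    /* a data-channel open moves us to the DATA state */
    return (ev == EV_OPEN_DATA_CMD) ? ST_DATA : ST_IDLE;
}

static state_t state_data(event_t ev)
{
    /* any disconnect drops us back to IDLE, as in the hunk above */
    return (ev == EV_LAP_DISCONNECT) ? ST_IDLE : ST_DATA;
}

static handler_t handlers[ST_MAX] = { state_idle, state_data };

int main(void)
{
    state_t s = ST_IDLE;

    s = handlers[s](EV_OPEN_DATA_CMD);   /* IDLE -> DATA */
    s = handlers[s](EV_LAP_DISCONNECT);  /* DATA -> IDLE */
    printf("final state: %d\n", s);
    return 0;
}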
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index d24923652..245884cf7 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -1,26 +1,31 @@
/*********************************************************************
*
* Filename: irlap.c
- * Version: 0.9
- * Description: An IrDA LAP driver for Linux
- * Status: Stable.
+ * Version: 1.0
+ * Description: IrLAP implementation for Linux
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Fri Apr 23 10:12:29 1999
+ * Modified at: Mon May 31 21:43:55 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
- * This program is free software; you can redistribute iyt and/or
+ * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
********************************************************************/
#include <linux/config.h>
@@ -60,23 +65,23 @@ static char *lap_reasons[] = {
};
#ifdef CONFIG_PROC_FS
-int irlap_proc_read( char *, char **, off_t, int, int);
+int irlap_proc_read(char *, char **, off_t, int, int);
#endif /* CONFIG_PROC_FS */
-__initfunc(int irlap_init( void))
+__initfunc(int irlap_init(void))
{
/* Allocate master array */
- irlap = hashbin_new( HB_LOCAL);
- if ( irlap == NULL) {
- printk( KERN_WARNING "IrLAP: Can't allocate irlap hashbin!\n");
+ irlap = hashbin_new(HB_LOCAL);
+ if (irlap == NULL) {
+ printk(KERN_WARNING "IrLAP: Can't allocate irlap hashbin!\n");
return -ENOMEM;
}
#ifdef CONFIG_IRDA_COMPRESSION
- irlap_compressors = hashbin_new( HB_LOCAL);
- if ( irlap_compressors == NULL) {
- printk( KERN_WARNING "IrLAP: Can't allocate compressors hashbin!\n");
+ irlap_compressors = hashbin_new(HB_LOCAL);
+ if (irlap_compressors == NULL) {
+ printk(KERN_WARNING "IrLAP: Can't allocate compressors hashbin!\n");
return -ENOMEM;
}
#endif
@@ -86,12 +91,12 @@ __initfunc(int irlap_init( void))
void irlap_cleanup(void)
{
- ASSERT( irlap != NULL, return;);
+ ASSERT(irlap != NULL, return;);
- hashbin_delete( irlap, (FREE_FUNC) __irlap_close);
+ hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
#ifdef CONFIG_IRDA_COMPRESSION
- hashbin_delete( irlap_compressors, (FREE_FUNC) kfree);
+ hashbin_delete(irlap_compressors, (FREE_FUNC) kfree);
#endif
}
@@ -101,32 +106,32 @@ void irlap_cleanup(void)
* Initialize IrLAP layer
*
*/
-struct irlap_cb *irlap_open( struct irda_device *irdev)
+struct irlap_cb *irlap_open(struct irda_device *irdev)
{
struct irlap_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( irdev != NULL, return NULL;);
- ASSERT( irdev->magic == IRDA_DEVICE_MAGIC, return NULL;);
+ ASSERT(irdev != NULL, return NULL;);
+ ASSERT(irdev->magic == IRDA_DEVICE_MAGIC, return NULL;);
/* Initialize the irlap structure. */
- self = kmalloc( sizeof( struct irlap_cb), GFP_KERNEL);
- if ( self == NULL)
+ self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
+ if (self == NULL)
return NULL;
- memset( self, 0, sizeof(struct irlap_cb));
+ memset(self, 0, sizeof(struct irlap_cb));
self->magic = LAP_MAGIC;
/* Make a binding between the layers */
self->irdev = irdev;
self->netdev = &irdev->netdev;
- irlap_next_state( self, LAP_OFFLINE);
+ irlap_next_state(self, LAP_OFFLINE);
/* Initialize transmit queue */
- skb_queue_head_init( &self->tx_list);
- skb_queue_head_init( &self->wx_list);
+ skb_queue_head_init(&self->tx_list);
+ skb_queue_head_init(&self->wx_list);
/* My unique IrLAP device address! */
get_random_bytes(&self->saddr, sizeof(self->saddr));
@@ -140,21 +145,21 @@ struct irlap_cb *irlap_open( struct irda_device *irdev)
self->caddr &= 0xfe;
}
- init_timer( &self->slot_timer);
- init_timer( &self->query_timer);
- init_timer( &self->discovery_timer);
- init_timer( &self->final_timer);
- init_timer( &self->poll_timer);
- init_timer( &self->wd_timer);
- init_timer( &self->backoff_timer);
+ init_timer(&self->slot_timer);
+ init_timer(&self->query_timer);
+ init_timer(&self->discovery_timer);
+ init_timer(&self->final_timer);
+ init_timer(&self->poll_timer);
+ init_timer(&self->wd_timer);
+ init_timer(&self->backoff_timer);
- irlap_apply_default_connection_parameters( self);
+ irlap_apply_default_connection_parameters(self);
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
- hashbin_insert( irlap, (QUEUE *) self, self->saddr, NULL);
+ hashbin_insert(irlap, (QUEUE *) self, self->saddr, NULL);
- irlmp_register_link( self, self->saddr, &self->notify);
+ irlmp_register_link(self, self->saddr, &self->notify);
return self;
}
@@ -165,26 +170,26 @@ struct irlap_cb *irlap_open( struct irda_device *irdev)
* Remove IrLAP and all allocated memory. Stop any pending timers.
*
*/
-static void __irlap_close( struct irlap_cb *self)
+static void __irlap_close(struct irlap_cb *self)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Stop timers */
- del_timer( &self->slot_timer);
- del_timer( &self->query_timer);
- del_timer( &self->discovery_timer);
- del_timer( &self->final_timer);
- del_timer( &self->poll_timer);
- del_timer( &self->wd_timer);
- del_timer( &self->backoff_timer);
-
- irlap_flush_all_queues( self);
+ del_timer(&self->slot_timer);
+ del_timer(&self->query_timer);
+ del_timer(&self->discovery_timer);
+ del_timer(&self->final_timer);
+ del_timer(&self->poll_timer);
+ del_timer(&self->wd_timer);
+ del_timer(&self->backoff_timer);
+
+ irlap_flush_all_queues(self);
self->irdev = NULL;
self->magic = 0;
- kfree( self);
+ kfree(self);
}
/*
@@ -193,27 +198,27 @@ static void __irlap_close( struct irlap_cb *self)
* Remove IrLAP instance
*
*/
-void irlap_close( struct irlap_cb *self)
+void irlap_close(struct irlap_cb *self)
{
struct irlap_cb *lap;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_disconnect_indication( self, LAP_DISC_INDICATION);
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
irlmp_unregister_link(self->saddr);
self->notify.instance = NULL;
/* Be sure that we manage to remove ourselves from the hash */
- lap = hashbin_remove( irlap, self->saddr, NULL);
- if ( !lap) {
- DEBUG( 1, __FUNCTION__ "(), Didn't find myself!\n");
+ lap = hashbin_remove(irlap, self->saddr, NULL);
+ if (!lap) {
+ DEBUG(1, __FUNCTION__ "(), Didn't find myself!\n");
return;
}
- __irlap_close( lap);
+ __irlap_close(lap);
}
/*
@@ -243,7 +248,7 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
*/
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
irlap_do_event(self, CONNECT_RESPONSE, skb, NULL);
}
@@ -324,23 +329,23 @@ inline void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb)
* Received some data that was sent unreliable
*
*/
-void irlap_unit_data_indication( struct irlap_cb *self, struct sk_buff *skb)
+void irlap_unit_data_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
/* Hide LAP header from IrLMP layer */
- skb_pull( skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
#ifdef CONFIG_IRDA_COMPRESSION
- if ( self->qos_tx.compression.value) {
+ if (self->qos_tx.compression.value) {
- skb = irlap_decompress_frame( self, skb);
- if ( !skb) {
- DEBUG( 1, __FUNCTION__ "(), Decompress error!\n");
+ skb = irlap_decompress_frame(self, skb);
+ if (!skb) {
+ DEBUG(1, __FUNCTION__ "(), Decompress error!\n");
return;
}
}
@@ -354,40 +359,35 @@ void irlap_unit_data_indication( struct irlap_cb *self, struct sk_buff *skb)
* Queue data for transmission, must wait until XMIT state
*
*/
-inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
+inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
int reliable)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "(), tx_list=%d\n",
- skb_queue_len( &self->tx_list));
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
#ifdef CONFIG_IRDA_COMPRESSION
- if ( self->qos_tx.compression.value) {
- skb = irlap_compress_frame( self, skb);
- if ( !skb) {
- DEBUG( 1, __FUNCTION__ "(), Compress error!\n");
+ if (self->qos_tx.compression.value) {
+ skb = irlap_compress_frame(self, skb);
+ if (!skb) {
+ DEBUG(1, __FUNCTION__ "(), Compress error!\n");
return;
}
}
#endif
- ASSERT( skb_headroom( skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
- return;);
- skb_push( skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+ ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
+ return;);
+ skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
/*
* Must set frame format now so that the rest of the code knows
* if it's dealing with an I or a UI frame
*/
- if ( reliable)
+ if (reliable)
skb->data[1] = I_FRAME;
else {
- DEBUG( 4, __FUNCTION__ "(), queueing unreliable frame\n");
+ DEBUG(4, __FUNCTION__ "(), queueing unreliable frame\n");
skb->data[1] = UI_FRAME;
}
@@ -395,20 +395,20 @@ inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
* Send event for this frame only if we are in the right state
* FIXME: udata should be sent first! (skb_queue_head?)
*/
- if (( self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
+ if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
/*
* Check if the transmit queue contains some unsent frames,
* and if so, make sure they are sent first
*/
- if ( !skb_queue_empty( &self->tx_list)) {
- skb_queue_tail( &self->tx_list, skb);
- skb = skb_dequeue( &self->tx_list);
+ if (!skb_queue_empty(&self->tx_list)) {
+ skb_queue_tail(&self->tx_list, skb);
+ skb = skb_dequeue(&self->tx_list);
- ASSERT( skb != NULL, return;);
+ ASSERT(skb != NULL, return;);
}
- irlap_do_event( self, SEND_I_CMD, skb, NULL);
+ irlap_do_event(self, SEND_I_CMD, skb, NULL);
} else
- skb_queue_tail( &self->tx_list, skb);
+ skb_queue_tail(&self->tx_list, skb);
}
/*
@@ -444,33 +444,33 @@ void irlap_disconnect_request(struct irlap_cb *self)
* Disconnect request from other device
*
*/
-void irlap_disconnect_indication( struct irlap_cb *self, LAP_REASON reason)
+void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
- DEBUG( 1, __FUNCTION__ "(), reason=%s\n", lap_reasons[reason]);
+ DEBUG(1, __FUNCTION__ "(), reason=%s\n", lap_reasons[reason]);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
#ifdef CONFIG_IRDA_COMPRESSION
- irda_free_compression( self);
+ irda_free_compression(self);
#endif
/* Flush queues */
- irlap_flush_all_queues( self);
+ irlap_flush_all_queues(self);
- switch( reason) {
+ switch(reason) {
case LAP_RESET_INDICATION:
- DEBUG( 1, __FUNCTION__ "(), Sending reset request!\n");
- irlap_do_event( self, RESET_REQUEST, NULL, NULL);
+ DEBUG(1, __FUNCTION__ "(), Sending reset request!\n");
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
break;
case LAP_NO_RESPONSE: /* FALLTHROUGH */
case LAP_DISC_INDICATION: /* FALLTHROUGH */
case LAP_FOUND_NONE: /* FALLTHROUGH */
case LAP_MEDIA_BUSY:
- irlmp_link_disconnect_indication( self->notify.instance,
+ irlmp_link_disconnect_indication(self->notify.instance,
self, reason, NULL);
break;
default:
- DEBUG( 1, __FUNCTION__ "(), Reason %d not implemented!\n",
+ DEBUG(1, __FUNCTION__ "(), Reason %d not implemented!\n",
reason);
}
}
@@ -485,22 +485,22 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
struct irlap_info info;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( discovery != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(discovery != NULL, return;);
- DEBUG( 4, __FUNCTION__ "(), nslots = %d\n", discovery->nslots);
+ DEBUG(4, __FUNCTION__ "(), nslots = %d\n", discovery->nslots);
- ASSERT(( discovery->nslots == 1) || ( discovery->nslots == 6) ||
- ( discovery->nslots == 8) || ( discovery->nslots == 16),
+ ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
+ (discovery->nslots == 8) || (discovery->nslots == 16),
return;);
/*
* Discovery is only possible in NDM mode
*/
- if ( self->state == LAP_NDM) {
- ASSERT( self->discovery_log == NULL, return;);
- self->discovery_log= hashbin_new( HB_LOCAL);
+ if (self->state == LAP_NDM) {
+ ASSERT(self->discovery_log == NULL, return;);
+ self->discovery_log = hashbin_new(HB_LOCAL);
info.S = discovery->nslots; /* Number of slots */
info.s = 0; /* Current slot */
@@ -526,11 +526,11 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
- irlap_do_event( self, DISCOVERY_REQUEST, NULL, &info);
+ irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
} else {
- DEBUG( 4, __FUNCTION__
+ DEBUG(4, __FUNCTION__
"(), discovery only possible in NDM mode\n");
- irlap_discovery_confirm( self, NULL);
+ irlap_discovery_confirm(self, NULL);
}
}
@@ -540,12 +540,12 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
* A device has been discovered in front of this station, we
* report directly to LMP.
*/
-void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
+void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- ASSERT( self->notify.instance != NULL, return;);
+ ASSERT(self->notify.instance != NULL, return;);
/*
* Check for successful discovery, since we are then allowed to clear
@@ -556,7 +556,7 @@ void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
irda_device_set_media_busy(self->irdev, FALSE);
/* Inform IrLMP */
- irlmp_link_discovery_confirm( self->notify.instance, discovery_log);
+ irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
/*
* IrLMP has now the responsibilities for the discovery_log
@@ -572,13 +572,13 @@ void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
*/
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( discovery != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(discovery != NULL, return;);
- ASSERT( self->notify.instance != NULL, return;);
+ ASSERT(self->notify.instance != NULL, return;);
irlmp_link_discovery_indication(self->notify.instance, discovery);
}
@@ -591,12 +591,12 @@ void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
*/
void irlap_status_indication(int quality_of_link)
{
- switch( quality_of_link) {
+ switch(quality_of_link) {
case STATUS_NO_ACTIVITY:
- printk( KERN_INFO "IrLAP, no activity on link!\n");
+ printk(KERN_INFO "IrLAP, no activity on link!\n");
break;
case STATUS_NOISY:
- printk( KERN_INFO "IrLAP, noisy link!\n");
+ printk(KERN_INFO "IrLAP, noisy link!\n");
break;
default:
break;
@@ -610,17 +610,17 @@ void irlap_status_indication(int quality_of_link)
*
*
*/
-void irlap_reset_indication( struct irlap_cb *self)
+void irlap_reset_indication(struct irlap_cb *self)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- if ( self->state == LAP_RESET_WAIT)
- irlap_do_event( self, RESET_REQUEST, NULL, NULL);
+ if (self->state == LAP_RESET_WAIT)
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
else
- irlap_do_event( self, RESET_RESPONSE, NULL, NULL);
+ irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}
/*
@@ -631,7 +631,7 @@ void irlap_reset_indication( struct irlap_cb *self)
*/
void irlap_reset_confirm(void)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
}
/*
@@ -641,15 +641,15 @@ void irlap_reset_confirm(void)
* S = Number of slots (0 -> S-1)
* s = Current slot
*/
-int irlap_generate_rand_time_slot( int S, int s)
+int irlap_generate_rand_time_slot(int S, int s)
{
int slot;
- ASSERT(( S - s) > 0, return 0;);
+ ASSERT((S - s) > 0, return 0;);
slot = s + jiffies % (S-s);
- ASSERT(( slot >= s) || ( slot < S), return 0;);
+ ASSERT((slot >= s) && (slot < S), return 0;);
return slot;
}
@@ -661,51 +661,51 @@ int irlap_generate_rand_time_slot( int S, int s)
* not intuitive and you should not try to change it. If you think it
* contains bugs, please mail a patch to the author instead.
*/
-void irlap_update_nr_received( struct irlap_cb *self, int nr)
+void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
struct sk_buff *skb = NULL;
int count = 0;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/*
* Remove all the ack-ed frames from the window queue.
*/
- DEBUG( 4, "--> wx_list=%d, va=%d, nr=%d\n",
- skb_queue_len( &self->wx_list), self->va, nr);
+ DEBUG(4, "--> wx_list=%d, va=%d, nr=%d\n",
+ skb_queue_len(&self->wx_list), self->va, nr);
/*
* Optimize for the common case. It is most likely that the receiver
* will acknowledge all the frames we have sent! So in that case we
* delete all frames stored in window.
*/
- if ( nr == self->vs) {
- while (( skb = skb_dequeue( &self->wx_list)) != NULL) {
+ if (nr == self->vs) {
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
dev_kfree_skb(skb);
}
/* The last acked frame is the next to send minus one */
self->va = nr - 1;
} else {
/* Remove all acknowledged frames in current window */
- while (( skb_peek( &self->wx_list) != NULL) &&
- ((( self->va+1) % 8) != nr))
+ while ((skb_peek(&self->wx_list) != NULL) &&
+ (((self->va+1) % 8) != nr))
{
- skb = skb_dequeue( &self->wx_list);
+ skb = skb_dequeue(&self->wx_list);
dev_kfree_skb(skb);
self->va = (self->va + 1) % 8;
count++;
}
- DEBUG( 4, "irlap_update_nr_received(), removed %d\n", count);
- DEBUG( 4, "wx_list=%d, va=%d, nr=%d -->\n",
- skb_queue_len( &self->wx_list), self->va, nr);
+ DEBUG(4, "irlap_update_nr_received(), removed %d\n", count);
+ DEBUG(4, "wx_list=%d, va=%d, nr=%d -->\n",
+ skb_queue_len(&self->wx_list), self->va, nr);
}
/* Advance window */
- self->window = self->window_size - skb_queue_len( &self->wx_list);
+ self->window = self->window_size - skb_queue_len(&self->wx_list);
}
/*
@@ -713,14 +713,14 @@ void irlap_update_nr_received( struct irlap_cb *self, int nr)
*
* Validate the next to send (ns) field from received frame.
*/
-int irlap_validate_ns_received( struct irlap_cb *self, int ns)
+int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
/* ns as expected? */
- if ( ns == self->vr) {
- DEBUG( 4, __FUNCTION__ "(), expected!\n");
+ if (ns == self->vr) {
+ DEBUG(4, __FUNCTION__ "(), expected!\n");
return NS_EXPECTED;
}
/*
@@ -737,14 +737,14 @@ int irlap_validate_ns_received( struct irlap_cb *self, int ns)
* Validate the next to receive (nr) field from received frame.
*
*/
-int irlap_validate_nr_received( struct irlap_cb *self, int nr)
+int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
/* nr as expected? */
- if ( nr == self->vs) {
- DEBUG( 4, __FUNCTION__ "(), expected!\n");
+ if (nr == self->vs) {
+ DEBUG(4, __FUNCTION__ "(), expected!\n");
return NR_EXPECTED;
}
@@ -752,11 +752,11 @@ int irlap_validate_nr_received( struct irlap_cb *self, int nr)
* unexpected nr? (but within current window), first we check if the
* ns numbers of the frames in the current window wrap.
*/
- if ( self->va < self->vs) {
- if (( nr >= self->va) && ( nr <= self->vs))
+ if (self->va < self->vs) {
+ if ((nr >= self->va) && (nr <= self->vs))
return NR_UNEXPECTED;
} else {
- if (( nr >= self->va) || ( nr <= self->vs))
+ if ((nr >= self->va) || (nr <= self->vs))
return NR_UNEXPECTED;
}
@@ -770,12 +770,12 @@ int irlap_validate_nr_received( struct irlap_cb *self, int nr)
* Initialize the connection state parameters
*
*/
-void irlap_initiate_connection_state( struct irlap_cb *self)
+void irlap_initiate_connection_state(struct irlap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Next to send and next to receive */
self->vs = self->vr = 0;
@@ -829,24 +829,24 @@ void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
* Flush all queues
*
*/
-void irlap_flush_all_queues( struct irlap_cb *self)
+void irlap_flush_all_queues(struct irlap_cb *self)
{
struct sk_buff* skb;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Free transmission queue */
- while (( skb = skb_dequeue( &self->tx_list)) != NULL)
- dev_kfree_skb( skb);
+ while ((skb = skb_dequeue(&self->tx_list)) != NULL)
+ dev_kfree_skb(skb);
/* Free sliding window buffered packets */
- while (( skb = skb_dequeue( &self->wx_list)) != NULL)
- dev_kfree_skb( skb);
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL)
+ dev_kfree_skb(skb);
#ifdef CONFIG_IRDA_RECYCLE_RR
- if ( self->recycle_rr_skb) {
- dev_kfree_skb( self->recycle_rr_skb);
+ if (self->recycle_rr_skb) {
+ dev_kfree_skb(self->recycle_rr_skb);
self->recycle_rr_skb = NULL;
}
#endif
@@ -866,7 +866,7 @@ void irlap_change_speed(struct irlap_cb *self, int speed)
ASSERT(self->magic == LAP_MAGIC, return;);
if (!self->irdev) {
- DEBUG( 1, __FUNCTION__ "(), driver missing!\n");
+ DEBUG(1, __FUNCTION__ "(), driver missing!\n");
return;
}
@@ -883,8 +883,8 @@ void irlap_init_comp_qos_capabilities(struct irlap_cb *self)
__u8 mask; /* Current bit tested */
int i;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/*
* Find out which compressors we support. We do this by checking that
* the compressor has actually been loaded. This is sort of hairy code
* but that is what you get when you do a little bit flicking :-)
* you get when you do a little bit flicking :-)
*/
- DEBUG( 4, __FUNCTION__ "(), comp bits 0x%02x\n",
+ DEBUG(4, __FUNCTION__ "(), comp bits 0x%02x\n",
self->qos_rx.compression.bits);
mask = 0x80; /* Start with testing MSB */
- for ( i=0;i<8;i++) {
- DEBUG( 4, __FUNCTION__ "(), testing bit %d\n", 8-i);
- if ( self->qos_rx.compression.bits & mask) {
- DEBUG( 4, __FUNCTION__ "(), bit %d is set by defalt\n",
+ for (i=0;i<8;i++) {
+ DEBUG(4, __FUNCTION__ "(), testing bit %d\n", 8-i);
+ if (self->qos_rx.compression.bits & mask) {
+ DEBUG(4, __FUNCTION__ "(), bit %d is set by default\n",
8-i);
- comp = hashbin_find( irlap_compressors,
+ comp = hashbin_find(irlap_compressors,
compression[ msb_index(mask)],
NULL);
- if ( !comp) {
+ if (!comp) {
/* Protocol not supported, so clear the bit */
- DEBUG( 4, __FUNCTION__ "(), Compression "
+ DEBUG(4, __FUNCTION__ "(), Compression "
"protocol %d has not been loaded!\n",
compression[msb_index(mask)]);
self->qos_rx.compression.bits &= ~mask;
- DEBUG( 4, __FUNCTION__
+ DEBUG(4, __FUNCTION__
"(), comp bits 0x%02x\n",
self->qos_rx.compression.bits);
}
@@ -931,20 +931,20 @@ void irlap_init_comp_qos_capabilities(struct irlap_cb *self)
void irlap_init_qos_capabilities(struct irlap_cb *self,
struct qos_info *qos_user)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( self->irdev != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(self->irdev != NULL, return;);
/* Start out with the maximum QoS support possible */
- irda_init_max_qos_capabilies( &self->qos_rx);
+ irda_init_max_qos_capabilies(&self->qos_rx);
#ifdef CONFIG_IRDA_COMPRESSION
- irlap_init_comp_qos_capabilities( self);
+ irlap_init_comp_qos_capabilities(self);
#endif
/* Apply drivers QoS capabilities */
- irda_qos_compute_intersection( &self->qos_rx,
- irda_device_get_qos( self->irdev));
+ irda_qos_compute_intersection(&self->qos_rx,
+ irda_device_get_qos(self->irdev));
/*
* Check for user supplied QoS parameters. The service user is only
@@ -952,17 +952,17 @@ void irlap_init_qos_capabilities(struct irlap_cb *self,
* user may not have set all of them.
*/
if (qos_user) {
- DEBUG( 1, __FUNCTION__ "(), Found user specified QoS!\n");
+ DEBUG(1, __FUNCTION__ "(), Found user specified QoS!\n");
- if ( qos_user->baud_rate.bits)
+ if (qos_user->baud_rate.bits)
self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
- if ( qos_user->max_turn_time.bits)
+ if (qos_user->max_turn_time.bits)
self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
- if ( qos_user->data_size.bits)
+ if (qos_user->data_size.bits)
self->qos_rx.data_size.bits &= qos_user->data_size.bits;
- if ( qos_user->link_disc_time.bits)
+ if (qos_user->link_disc_time.bits)
self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
#ifdef CONFIG_IRDA_COMPRESSION
self->qos_rx.compression.bits &= qos_user->compression.bits;
@@ -984,7 +984,7 @@ void irlap_init_qos_capabilities(struct irlap_cb *self,
/* Set disconnect time */
self->qos_rx.link_disc_time.bits &= 0x07;
- irda_qos_bits_to_value( &self->qos_rx);
+ irda_qos_bits_to_value(&self->qos_rx);
}
/*
@@ -993,14 +993,14 @@ void irlap_init_qos_capabilities(struct irlap_cb *self,
* Use the default connection and transmission parameters
*
*/
-void irlap_apply_default_connection_parameters( struct irlap_cb *self)
+void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_change_speed( self, 9600);
+ irlap_change_speed(self, 9600);
/* Default value in NDM */
self->bofs_count = 11;
@@ -1028,12 +1028,12 @@ void irlap_apply_default_connection_parameters( struct irlap_cb *self)
void irlap_apply_connection_parameters(struct irlap_cb *self,
struct qos_info *qos)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_change_speed( self, qos->baud_rate.value);
+ irlap_change_speed(self, qos->baud_rate.value);
self->window_size = qos->window_size.value;
self->window = qos->window_size.value;
@@ -1045,7 +1045,7 @@ void irlap_apply_connection_parameters(struct irlap_cb *self,
*/
self->window_bytes = qos->baud_rate.value
* qos->max_turn_time.value / 10000;
- DEBUG( 4, "Setting window_bytes = %d\n", self->window_bytes);
+ DEBUG(4, "Setting window_bytes = %d\n", self->window_bytes);
/*
* Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
@@ -1058,10 +1058,10 @@ void irlap_apply_connection_parameters(struct irlap_cb *self,
else
self->N1 = 3000 / qos->max_turn_time.value;
- DEBUG( 4, "Setting N1 = %d\n", self->N1);
+ DEBUG(4, "Setting N1 = %d\n", self->N1);
self->N2 = qos->link_disc_time.value * 1000 / qos->max_turn_time.value;
- DEBUG( 4, "Setting N2 = %d\n", self->N2);
+ DEBUG(4, "Setting N2 = %d\n", self->N2);
/*
* Initialize timeout values, some of the rules are listed on
@@ -1072,11 +1072,11 @@ void irlap_apply_connection_parameters(struct irlap_cb *self,
self->wd_timeout = self->poll_timeout * 2;
#ifdef CONFIG_IRDA_COMPRESSION
- if ( qos->compression.value) {
- DEBUG( 1, __FUNCTION__ "(), Initializing compression\n");
- irda_set_compression( self, qos->compression.value);
+ if (qos->compression.value) {
+ DEBUG(1, __FUNCTION__ "(), Initializing compression\n");
+ irda_set_compression(self, qos->compression.value);
- irlap_compressor_init( self, 0);
+ irlap_compressor_init(self, 0);
}
#endif
}
@@ -1088,7 +1088,7 @@ void irlap_apply_connection_parameters(struct irlap_cb *self,
* Give some info to the /proc file system
*
*/
-int irlap_proc_read( char *buf, char **start, off_t offset, int len,
+int irlap_proc_read(char *buf, char **start, off_t offset, int len,
int unused)
{
struct irlap_cb *self;
@@ -1100,81 +1100,81 @@ int irlap_proc_read( char *buf, char **start, off_t offset, int len,
len = 0;
- self = (struct irlap_cb *) hashbin_get_first( irlap);
- while ( self != NULL) {
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ self = (struct irlap_cb *) hashbin_get_first(irlap);
+ while (self != NULL) {
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
- len += sprintf( buf+len, "irlap%d <-> %s ",
+ len += sprintf(buf+len, "irlap%d <-> %s ",
i++, self->irdev->name);
- len += sprintf( buf+len, "state: %s\n",
+ len += sprintf(buf+len, "state: %s\n",
irlap_state[ self->state]);
- len += sprintf( buf+len, " caddr: %#02x, ", self->caddr);
- len += sprintf( buf+len, "saddr: %#08x, ", self->saddr);
- len += sprintf( buf+len, "daddr: %#08x\n", self->daddr);
+ len += sprintf(buf+len, " caddr: %#02x, ", self->caddr);
+ len += sprintf(buf+len, "saddr: %#08x, ", self->saddr);
+ len += sprintf(buf+len, "daddr: %#08x\n", self->daddr);
- len += sprintf( buf+len, " win size: %d, ",
+ len += sprintf(buf+len, " win size: %d, ",
self->window_size);
- len += sprintf( buf+len, "win: %d, ", self->window);
- len += sprintf( buf+len, "win bytes: %d, ", self->window_bytes);
- len += sprintf( buf+len, "bytes left: %d\n", self->bytes_left);
-
- len += sprintf( buf+len, " tx queue len: %d ",
- skb_queue_len( &self->tx_list));
- len += sprintf( buf+len, "win queue len: %d ",
- skb_queue_len( &self->wx_list));
- len += sprintf( buf+len, "rbusy: %s\n", self->remote_busy ?
+ len += sprintf(buf+len, "win: %d, ", self->window);
+ len += sprintf(buf+len, "win bytes: %d, ", self->window_bytes);
+ len += sprintf(buf+len, "bytes left: %d\n", self->bytes_left);
+
+ len += sprintf(buf+len, " tx queue len: %d ",
+ skb_queue_len(&self->tx_list));
+ len += sprintf(buf+len, "win queue len: %d ",
+ skb_queue_len(&self->wx_list));
+ len += sprintf(buf+len, "rbusy: %s\n", self->remote_busy ?
"TRUE" : "FALSE");
- len += sprintf( buf+len, " retrans: %d ", self->retry_count);
- len += sprintf( buf+len, "vs: %d ", self->vs);
- len += sprintf( buf+len, "vr: %d ", self->vr);
- len += sprintf( buf+len, "va: %d\n", self->va);
+ len += sprintf(buf+len, " retrans: %d ", self->retry_count);
+ len += sprintf(buf+len, "vs: %d ", self->vs);
+ len += sprintf(buf+len, "vr: %d ", self->vr);
+ len += sprintf(buf+len, "va: %d\n", self->va);
- len += sprintf( buf+len, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
+ len += sprintf(buf+len, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
- len += sprintf( buf+len, " tx\t%d\t",
+ len += sprintf(buf+len, " tx\t%d\t",
self->qos_tx.baud_rate.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.max_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.data_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.window_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.additional_bofs.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.min_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
- len += sprintf( buf+len, "%d",
+ len += sprintf(buf+len, "%d",
self->qos_tx.compression.value);
#endif
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
- len += sprintf( buf+len, " rx\t%d\t",
+ len += sprintf(buf+len, " rx\t%d\t",
self->qos_rx.baud_rate.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.max_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.data_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.window_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.additional_bofs.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.min_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
- len += sprintf( buf+len, "%d",
+ len += sprintf(buf+len, "%d",
self->qos_rx.compression.value);
#endif
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
- self = (struct irlap_cb *) hashbin_get_next( irlap);
+ self = (struct irlap_cb *) hashbin_get_next(irlap);
}
restore_flags(flags);
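
[Editor's note] irlap_update_nr_received() above implements the classic go-back-N window purge: every frame from va+1 up to (but not including) nr, modulo 8, has been acknowledged and can be freed, after which the send window reopens by the same amount. A self-contained sketch of the arithmetic follows, with the skb queue reduced to a plain counter for illustration.

#include <stdio.h>

/*
 * Drop acknowledged frames: advance va toward nr (mod 8) and shrink
 * the count of frames still held in the window queue.  Returns how
 * many frames were freed, mirroring the "removed %d" debug value.
 */
static int update_nr_received(int *va, int nr, int *queued)
{
    int freed = 0;

    while (*queued > 0 && ((*va + 1) % 8) != nr) {
        *va = (*va + 1) % 8;
        (*queued)--;
        freed++;
    }
    return freed;
}

int main(void)
{
    int va = 6, queued = 4;

    /* peer acks up to nr = 2: frames 7, 0 and 1 leave the window */
    printf("freed %d, va now %d, still queued %d\n",
           update_nr_received(&va, 2, &queued), va, queued);
    return 0;
}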
diff --git a/net/irda/irlap_comp.c b/net/irda/irlap_comp.c
index 9959b64bc..299d1705c 100644
--- a/net/irda/irlap_comp.c
+++ b/net/irda/irlap_comp.c
@@ -6,11 +6,13 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri Oct 9 09:18:07 1998
- * Modified at: Mon Feb 8 01:23:52 1999
+ * Modified at: Sun May 9 11:37:06 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
* Sources: ppp.c, isdn_ppp.c
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -23,6 +25,8 @@
*
********************************************************************/
+#include <linux/string.h>
+
#include <net/irda/irda.h>
#include <net/irda/irqueue.h>
#include <net/irda/irlap.h>
@@ -255,11 +259,11 @@ struct sk_buff *irlap_compress_frame( struct irlap_cb *self,
}
/* FIXME: Find out what is the max overhead (not 10) */
- new_skb = dev_alloc_skb( skb->len+LAP_HEADER+10);
+ new_skb = dev_alloc_skb( skb->len+LAP_MAX_HEADER+10);
if(!new_skb)
return skb;
- skb_reserve( new_skb, LAP_HEADER);
+ skb_reserve( new_skb, LAP_MAX_HEADER);
skb_put( new_skb, skb->len+10);
count = (self->compressor.cp->compress)( self->compressor.state,
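
[Editor's note] The LAP_HEADER to LAP_MAX_HEADER change above reserves worst-case headroom before the compressed payload is written. The reserve-then-put discipline of skb_reserve()/skb_put()/skb_push() can be mimicked with a plain buffer and two cursors, as in this sketch; the buffer and header sizes are illustrative, not the real IrLAP values.

#include <stdio.h>
#include <string.h>

#define MAX_HEADER 4   /* stand-in for LAP_MAX_HEADER */

struct buf {
    unsigned char data[64];
    int head;   /* start of payload */
    int tail;   /* end of payload   */
};

/* skb_reserve(): move both cursors forward to leave header room */
static void buf_reserve(struct buf *b, int n) { b->head += n; b->tail += n; }

/* skb_put(): extend the payload area and return where to write */
static unsigned char *buf_put(struct buf *b, int n)
{
    unsigned char *p = b->data + b->tail;
    b->tail += n;
    return p;
}

/* skb_push(): claim some of the reserved headroom for a header */
static unsigned char *buf_push(struct buf *b, int n)
{
    b->head -= n;
    return b->data + b->head;
}

int main(void)
{
    struct buf b = { {0}, 0, 0 };

    buf_reserve(&b, MAX_HEADER);
    memcpy(buf_put(&b, 5), "hello", 5);     /* payload */
    memset(buf_push(&b, 2), 0xff, 2);       /* prepend a 2-byte header */
    printf("frame is %d bytes at offset %d\n", b.tail - b.head, b.head);
    return 0;
}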
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index a2fbadf65..aeb8ff678 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Fri Apr 23 11:55:12 1999
+ * Modified at: Mon May 31 21:55:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* Thomas Davis <ratbert@radiks.net>
* All Rights Reserved.
*
@@ -209,8 +209,8 @@ void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
* Rushes through the state machine without any delay. If state == XMIT
* then send queued data frames.
*/
-void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret;
@@ -218,7 +218,7 @@ void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
return;
DEBUG(4, __FUNCTION__ "(), event = %s, state = %s\n",
- irlap_event[ event], irlap_state[ self->state]);
+ irlap_event[event], irlap_state[self->state]);
ret = (*state[ self->state]) (self, event, skb, info);
@@ -236,13 +236,12 @@ void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
if (skb_queue_len(&self->tx_list)) {
/* Try to send away all queued data frames */
while ((skb = skb_dequeue(&self->tx_list)) != NULL) {
- ret = (*state[ self->state])(self, SEND_I_CMD,
- skb, NULL);
+ ret = (*state[self->state])(self, SEND_I_CMD,
+ skb, NULL);
if ( ret == -EPROTO)
break; /* Try again later! */
}
} else if (self->disconnect_pending) {
- DEBUG(0, __FUNCTION__ "(), disconnecting!\n");
self->disconnect_pending = FALSE;
ret = (*state[self->state])(self, DISCONNECT_REQUEST,
@@ -274,22 +273,22 @@ void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
* Switches state and provides debug information
*
*/
-void irlap_next_state( struct irlap_cb *self, IRLAP_STATE state)
+void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
{
- if ( !self || self->magic != LAP_MAGIC)
+ if (!self || self->magic != LAP_MAGIC)
return;
- DEBUG( 4, "next LAP state = %s\n", irlap_state[ state]);
+ DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
self->state = state;
/*
* If we are switching away from an XMIT state then we are allowed to
* transmit a maximum number of bytes again when we enter the XMIT
- * state again. Since its possible to "switch" from XMIT to XMIT and
+ * state again. Since it's possible to "switch" from XMIT to XMIT,
* we cannot do this when switching into the XMIT state :-)
*/
- if (( state != LAP_XMIT_P) && ( state != LAP_XMIT_S))
+ if ((state != LAP_XMIT_P) && (state != LAP_XMIT_S))
self->bytes_left = self->window_bytes;
}
@@ -310,7 +309,7 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == LAP_MAGIC, return -1;);
- switch( event) {
+ switch(event) {
case CONNECT_REQUEST:
ASSERT( self->irdev != NULL, return -1;);
@@ -393,7 +392,6 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_query_timer( self, QUERY_TIMEOUT);
irlap_next_state( self, LAP_REPLY);
}
-
dev_kfree_skb(skb);
break;
@@ -530,7 +528,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
irlap_send_discovery_xid_frame(self, info->S,
self->slot, FALSE,
discovery_rsp);
-
+
self->frame_sent = TRUE;
irlap_next_state(self, LAP_REPLY);
}
@@ -568,27 +566,28 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
switch (event) {
case CONNECT_RESPONSE:
- skb_pull( skb, 11);
+ /* skb_pull(skb, 11); */
+ skb_pull(skb, sizeof(struct snrm_frame));
- ASSERT( self->irdev != NULL, return -1;);
- irda_qos_negotiate( &self->qos_rx, &self->qos_tx, skb);
+ ASSERT(self->irdev != NULL, return -1;);
+ irda_qos_negotiate(&self->qos_rx, &self->qos_tx, skb);
irlap_initiate_connection_state( self);
/*
* We are allowed to send two frames!
*/
- irlap_send_ua_response_frame( self, &self->qos_rx);
- irlap_send_ua_response_frame( self, &self->qos_rx);
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+ irlap_send_ua_response_frame(self, &self->qos_rx);
- irlap_apply_connection_parameters( self, &self->qos_tx);
+ irlap_apply_connection_parameters(self, &self->qos_tx);
/*
* The WD-timer could be set to the duration of the P-timer
- * for this case, but it is recommomended to use twice the
+ * for this case, but it is recommended to use twice the
* value (note 3 IrLAP p. 60).
*/
- irlap_start_wd_timer( self, self->wd_timeout);
+ irlap_start_wd_timer(self, self->wd_timeout);
irlap_next_state( self, LAP_NRM_S);
break;
@@ -669,28 +668,30 @@ static int irlap_state_setup( struct irlap_cb *self, IRLAP_EVENT event,
* The device with the largest device address wins the battle
* (both have sent a SNRM command!)
*/
- if ( info->daddr > self->saddr) {
- del_timer( &self->final_timer);
- irlap_initiate_connection_state( self);
+ if (info->daddr > self->saddr) {
+ del_timer(&self->final_timer);
+ irlap_initiate_connection_state(self);
- ASSERT( self->irdev != NULL, return -1;);
- irda_qos_negotiate( &self->qos_rx, &self->qos_tx, skb);
+ ASSERT(self->irdev != NULL, return -1;);
+ /* skb_pull(skb, 11); */
+ skb_pull(skb, sizeof(struct snrm_frame));
+ irda_qos_negotiate(&self->qos_rx, &self->qos_tx, skb);
irlap_send_ua_response_frame(self, &self->qos_rx);
- irlap_apply_connection_parameters( self, &self->qos_tx);
- irlap_connect_confirm( self, skb);
+ irlap_apply_connection_parameters(self, &self->qos_tx);
+ irlap_connect_confirm(self, skb);
/*
* The WD-timer could be set to the duration of the
- * P-timer for this case, but it is recommomended
+ * P-timer for this case, but it is recommended
* to use twice the value (note 3 IrLAP p. 60).
*/
- irlap_start_wd_timer( self, self->wd_timeout);
+ irlap_start_wd_timer(self, self->wd_timeout);
- irlap_next_state( self, LAP_NRM_S);
+ irlap_next_state(self, LAP_NRM_S);
} else {
/* We just ignore the other device! */
- irlap_next_state( self, LAP_SETUP);
+ irlap_next_state(self, LAP_SETUP);
}
break;
case RECV_UA_RSP:
@@ -702,9 +703,10 @@ static int irlap_state_setup( struct irlap_cb *self, IRLAP_EVENT event,
/* Negotiate connection parameters */
ASSERT( skb->len > 10, return -1;);
- skb_pull( skb, 10);
+ /* skb_pull(skb, 10); */
+ skb_pull(skb, sizeof(struct ua_frame));
- ASSERT( self->irdev != NULL, return -1;);
+ ASSERT(self->irdev != NULL, return -1;);
irda_qos_negotiate( &self->qos_rx, &self->qos_tx, skb);
irlap_apply_connection_parameters( self, &self->qos_tx);
@@ -758,36 +760,30 @@ static int irlap_state_offline( struct irlap_cb *self, IRLAP_EVENT event,
* stations.
*
*/
-static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
-
- DEBUG( 4, __FUNCTION__ "(), event=%s, vs=%d, vr=%d",
- irlap_event[ event], self->vs, self->vr);
+ DEBUG(4, __FUNCTION__ "(), event=%s, vs=%d, vr=%d",
+ irlap_event[event], self->vs, self->vr);
switch (event) {
case SEND_I_CMD:
- ASSERT( skb != NULL, return -1;);
- DEBUG( 4, __FUNCTION__ "(), Window=%d\n", self->window);
-
/*
* Only send frame if send-window > 0.
*/
- if (( self->window > 0) && ( !self->remote_busy)) {
+ if ((self->window > 0) && (!self->remote_busy)) {
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if (( skb->len+self->bofs_count) > self->bytes_left) {
- DEBUG( 4, __FUNCTION__ "(), Not allowed to "
- "transmit more bytes!\n");
- skb_queue_head( &self->tx_list, skb);
+ if ((skb->len+self->bofs_count) > self->bytes_left) {
+ DEBUG(4, __FUNCTION__ "(), Not allowed to "
+ "transmit more bytes!\n");
+ skb_queue_head(&self->tx_list, skb);
/*
* We should switch state to LAP_NRM_P, but
@@ -799,7 +795,7 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
*/
return -EPROTO;
}
- self->bytes_left -= ( skb->len + self->bofs_count);
+ self->bytes_left -= (skb->len + self->bofs_count);
/*
* Send data with poll bit cleared only if window > 1
@@ -808,11 +804,9 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
if (( self->window > 1) &&
skb_queue_len( &self->tx_list) > 0)
{
- DEBUG( 4, __FUNCTION__ "(), window > 1\n");
irlap_send_data_primary( self, skb);
irlap_next_state( self, LAP_XMIT_P);
} else {
- DEBUG( 4, __FUNCTION__ "(), window <= 1\n");
irlap_send_data_primary_poll( self, skb);
irlap_next_state( self, LAP_NRM_P);
@@ -930,9 +924,6 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
int ns_status;
int nr_status;
- ASSERT(self != NULL, return -1;);
- ASSERT(self->magic == LAP_MAGIC, return -1;);
-
switch (event) {
case RECV_I_RSP: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
@@ -944,7 +935,6 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
*/
self->fast_RR = FALSE;
#endif
-
ASSERT( info != NULL, return -1;);
ns_status = irlap_validate_ns_received(self, info->ns);
@@ -1138,13 +1128,6 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
case RECV_RR_RSP:
- DEBUG(4, __FUNCTION__ "(), RECV_RR_FRAME: "
- "Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va, self->vs,
- self->vr);
-
- ASSERT(info != NULL, return -1;);
-
/*
* If you get a RR, the remote isn't busy anymore,
* no matter what the NR
@@ -1191,14 +1174,6 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Resend rejected frames */
irlap_resend_rejected_frames( self, CMD_FRAME);
- /*
- * Start only if not running, DB
- * TODO: Should this one be here?
- */
- /* if ( !self->final_timer.prev) */
-/* irda_start_timer( FINAL_TIMER, self->final_timeout); */
-
- /* Keep state */
irlap_next_state( self, LAP_NRM_P);
} else if (ret == NR_INVALID) {
DEBUG(1, "irlap_state_nrm_p: received RR with "
@@ -1207,8 +1182,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_RESET_WAIT);
- irlap_disconnect_indication( self,
- LAP_RESET_INDICATION);
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = TRUE;
}
if (skb)
@@ -1476,13 +1450,13 @@ static int irlap_state_xmit_s( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Send frame only if send window > 1
*/
- if (( self->window > 0) && ( !self->remote_busy)) {
+ if ((self->window > 0) && (!self->remote_busy)) {
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if (( skb->len+self->bofs_count) > self->bytes_left) {
+ if ((skb->len+self->bofs_count) > self->bytes_left) {
DEBUG( 4, "IrDA: Not allowed to transmit more bytes!\n");
skb_queue_head( &self->tx_list, skb);
/*
@@ -1504,11 +1478,9 @@ static int irlap_state_xmit_s( struct irlap_cb *self, IRLAP_EVENT event,
if (( self->window > 1) &&
skb_queue_len( &self->tx_list) > 0)
{
- DEBUG( 4, __FUNCTION__ "(), window > 1\n");
irlap_send_data_secondary( self, skb);
irlap_next_state( self, LAP_XMIT_S);
} else {
- DEBUG( 4, "(), window <= 1\n");
irlap_send_data_secondary_final( self, skb);
irlap_next_state( self, LAP_NRM_S);
@@ -1570,7 +1542,7 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
/*
* poll bit cleared?
*/
- if ( !info->pf) {
+ if (!info->pf) {
self->vr = (self->vr + 1) % 8;
/* Update Nr received */
@@ -1600,35 +1572,39 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
* also before changing to XMIT_S
* state. (note 1, IrLAP p. 82)
*/
- irlap_wait_min_turn_around( self, &self->qos_tx);
- /*
- * Any pending data requests?
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /*
+ * Give higher layers a chance to
+ * immediately reply with some data before
+ * we decide if we should send a RR frame
+ * or not
*/
- if (( skb_queue_len( &self->tx_list) > 0) &&
- ( self->window > 0))
+ irlap_data_indication(self, skb);
+
+ /* Any pending data requests? */
+ if ((skb_queue_len(&self->tx_list) > 0) &&
+ (self->window > 0))
{
self->ack_required = TRUE;
- del_timer( &self->wd_timer);
+ del_timer(&self->wd_timer);
- irlap_next_state( self, LAP_XMIT_S);
+ irlap_next_state(self, LAP_XMIT_S);
} else {
- irlap_send_rr_frame( self, RSP_FRAME);
- irlap_start_wd_timer( self, self->wd_timeout);
+ irlap_send_rr_frame(self, RSP_FRAME);
+ irlap_start_wd_timer(self, self->wd_timeout);
/* Keep the state */
- irlap_next_state( self, LAP_NRM_S);
+ irlap_next_state(self, LAP_NRM_S);
}
- irlap_data_indication( self, skb);
-
break;
}
}
/*
* Check for Unexpected next to send (Ns)
*/
- if (( ns_status == NS_UNEXPECTED) &&
- ( nr_status == NR_EXPECTED))
+ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
{
/* Unexpected next to send, with final bit cleared */
if ( !info->pf) {
@@ -1651,8 +1627,7 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Unexpected Next to Receive(NR) ?
*/
- if (( ns_status == NS_EXPECTED) &&
- ( nr_status == NR_UNEXPECTED))
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
{
if ( info->pf) {
DEBUG( 4, "RECV_I_RSP: frame(s) lost\n");
@@ -1748,20 +1723,20 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
irlap_update_nr_received( self, info->nr);
del_timer( &self->wd_timer);
- irlap_wait_min_turn_around( self, &self->qos_tx);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
irlap_next_state( self, LAP_XMIT_S);
} else {
self->remote_busy = FALSE;
/* Update Nr received */
- irlap_update_nr_received( self, info->nr);
- irlap_wait_min_turn_around( self, &self->qos_tx);
+ irlap_update_nr_received(self, info->nr);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
- irlap_send_rr_frame( self, RSP_FRAME);
+ irlap_send_rr_frame(self, RSP_FRAME);
- irlap_start_wd_timer( self, self->wd_timeout);
- irlap_next_state( self, LAP_NRM_S);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_NRM_S);
}
- } else if ( nr_status == NR_UNEXPECTED) {
+ } else if (nr_status == NR_UNEXPECTED) {
self->remote_busy = FALSE;
irlap_update_nr_received( self, info->nr);
irlap_resend_rejected_frames( self, RSP_FRAME);
@@ -1773,8 +1748,8 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
} else {
DEBUG(1, __FUNCTION__ "(), invalid nr not implemented!\n");
}
- if ( skb)
- dev_kfree_skb( skb);
+ if (skb)
+ dev_kfree_skb(skb);
break;
case RECV_SNRM_CMD:
@@ -1886,7 +1861,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
ASSERT( self != NULL, return -ENODEV;);
ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
- switch( event) {
+ switch(event) {
case RESET_RESPONSE:
irlap_send_ua_response_frame( self, &self->qos_rx);
irlap_initiate_connection_state( self);
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index cda78e7f1..3011284d1 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 10:27:26 1997
- * Modified at: Fri Apr 23 09:30:42 1999
+ * Modified at: Mon May 31 09:29:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Resrved.
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -1001,10 +1001,6 @@ void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
{
__u8 *frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
frame = skb->data;
/* Insert connection address */
@@ -1014,15 +1010,6 @@ void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
/* Insert next to receive (Vr) */
frame[1] |= (self->vr << 5); /* insert nr */
-#if 0
- {
- int ns;
- ns = (frame[1] >> 1) & 0x07; /* Next to send */
-
- DEBUG(0, __FUNCTION__ "(), ns=%d\n", ns);
- }
-#endif
-
irlap_queue_xmit(self, skb);
}
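
For reference, the two OR operations left in irlap_send_i_frame() fill in the standard IrLAP I-frame control field: bit 0 clear marks an I-frame, bits 1-3 carry Ns, bit 4 the P/F bit, and bits 5-7 Nr. Only the Nr insertion, (self->vr << 5), is visible in this hunk, so the helper below is a sketch of the full layout rather than code from the patch:

/* Sketch of IrLAP I-frame control-field packing (HDLC-style layout).
 * Only the Nr step, (vr << 5), appears verbatim in the patch. */
static inline __u8 irlap_i_frame_control(int vs, int vr, int pf)
{
	__u8 control = 0;		/* bit 0 = 0 -> I-frame */

	control |= (vs & 0x07) << 1;	/* Ns, next to send     */
	if (pf)
		control |= 0x10;	/* P/F bit              */
	control |= (vr & 0x07) << 5;	/* Nr, next to receive  */

	return control;
}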
@@ -1056,8 +1043,8 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self,
* Receive and parse an Unnumbered Information (UI) frame
*
*/
-static void irlap_recv_ui_frame( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info)
+static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
{
__u8 *frame;
@@ -1240,7 +1227,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct device *dev,
* Optimize for the common case and check if the frame is an
* I(nformation) frame. Only I-frames have bit 0 set to 0
*/
- if(~control & 0x01) {
+ if (~control & 0x01) {
irlap_recv_i_frame(self, skb, &info, command);
self->stats.rx_packets++;
return 0;
@@ -1254,7 +1241,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct device *dev,
* Received S(upervisory) frame, check which frame type it is
* only the first nibble is of interest
*/
- switch(control & 0x0f) {
+ switch (control & 0x0f) {
case RR:
irlap_recv_rr_frame( self, skb, &info, command);
self->stats.rx_packets++;
@@ -1279,7 +1266,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct device *dev,
/*
* This must be a C(ontrol) frame
*/
- switch(control) {
+ switch (control) {
case XID_RSP:
irlap_recv_discovery_xid_rsp(self, skb, &info);
break;
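
The dispatch in irlap_driver_rcv() classifies a frame from the low bits of the control byte before switching on it: bit 0 clear is the common I-frame case, only the first nibble matters for an S(upervisory) frame, and the whole byte is the opcode for a U (control) frame. A condensed classifier under those assumptions (the S/U bit patterns follow HDLC conventions; only the two tests shown above come from the patch):

/* Minimal frame classifier mirroring the dispatch above. */
enum frame_kind { FRAME_I, FRAME_S, FRAME_U };

static enum frame_kind irlap_frame_kind(__u8 control)
{
	if (~control & 0x01)		/* bit 0 clear: I-frame    */
		return FRAME_I;
	if ((control & 0x03) == 0x01)	/* bits 1-0 == 01: S-frame */
		return FRAME_S;
	return FRAME_U;			/* otherwise a U frame     */
}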
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index d76661b6c..7df849c2a 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -6,10 +6,10 @@
* Status: Stable.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 17 20:54:32 1997
- * Modified at: Fri Apr 23 09:13:24 1999
+ * Modified at: Mon May 31 21:49:41 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -197,7 +197,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, struct notify_t *notify)
}
/*
- * Function irlmp_close_lsap (self)
+ * Function __irlmp_close_lsap (self)
*
* Remove an instance of LSAP
*/
@@ -369,11 +369,11 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
if (!skb)
return -ENOMEM;
- skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, LMP_MAX_HEADER);
} else
skb = userdata;
- /* Make room for MUX control header ( 3 bytes) */
+ /* Make room for MUX control header (3 bytes) */
ASSERT(skb_headroom(skb) >= LMP_CONTROL_HEADER, return -1;);
skb_push(skb, LMP_CONTROL_HEADER);
@@ -443,25 +443,35 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
{
int max_seg_size;
-
- DEBUG(3, __FUNCTION__ "()\n");
+ int lap_header_size;
+ int max_header_size;
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
ASSERT(skb != NULL, return;);
ASSERT(self->lap != NULL, return;);
+ DEBUG(2, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ self->slsap_sel, self->dlsap_sel);
+
self->qos = *self->lap->qos;
- max_seg_size = self->lap->qos->data_size.value;
- DEBUG(4, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
+ DEBUG(2, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+ lap_header_size = irlap_get_header_size(self->lap->irlap);
+
+ max_header_size = LMP_HEADER + lap_header_size;
+
+ DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n", max_header_size);
+
/* Hide LMP_CONTROL_HEADER header from layer above */
skb_pull(skb, LMP_CONTROL_HEADER);
if (self->notify.connect_indication)
self->notify.connect_indication(self->notify.instance, self,
- &self->qos, max_seg_size, skb);
+ &self->qos, max_seg_size,
+ max_header_size, skb);
}
/*
@@ -470,24 +480,22 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
* Service user is accepting connection
*
*/
-void irlmp_connect_response( struct lsap_cb *self, struct sk_buff *userdata)
+void irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
{
- DEBUG(3, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( userdata != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(userdata != NULL, return;);
self->connected = TRUE;
- DEBUG( 4, "irlmp_connect_response: slsap_sel=%02x, dlsap_sel=%02x\n",
- self->slsap_sel, self->dlsap_sel);
+ DEBUG(2, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ self->slsap_sel, self->dlsap_sel);
/* Make room for MUX control header ( 3 bytes) */
- ASSERT( skb_headroom( userdata) >= LMP_CONTROL_HEADER, return;);
- skb_push( userdata, LMP_CONTROL_HEADER);
+ ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return;);
+ skb_push(userdata, LMP_CONTROL_HEADER);
- irlmp_do_lsap_event( self, LM_CONNECT_RESPONSE, userdata);
+ irlmp_do_lsap_event(self, LM_CONNECT_RESPONSE, userdata);
}
/*
@@ -498,25 +506,34 @@ void irlmp_connect_response( struct lsap_cb *self, struct sk_buff *userdata)
void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
{
int max_seg_size;
+ int max_header_size;
+ int lap_header_size;
DEBUG(3, __FUNCTION__ "()\n");
- ASSERT( skb != NULL, return;);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( self->lap != NULL, return;);
+ ASSERT(self->lap != NULL, return;);
self->qos = *self->lap->qos;
- max_seg_size = self->qos.data_size.value;
- DEBUG( 4, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
+ DEBUG(2, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+
+ lap_header_size = irlap_get_header_size(self->lap->irlap);
+ max_header_size = LMP_HEADER + lap_header_size;
+
+ DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n", max_header_size);
+
/* Hide LMP_CONTROL_HEADER header from layer above */
- skb_pull( skb, LMP_CONTROL_HEADER);
+ skb_pull(skb, LMP_CONTROL_HEADER);
- if ( self->notify.connect_confirm) {
- self->notify.connect_confirm( self->notify.instance, self,
- &self->qos, max_seg_size, skb);
+ if (self->notify.connect_confirm) {
+ self->notify.connect_confirm(self->notify.instance, self,
+ &self->qos, max_seg_size,
+ max_header_size, skb);
}
}
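
Both irlmp_connect_indication() and irlmp_connect_confirm() now report a max_seg_size with LMP_HEADER already subtracted, plus a max_header_size of LMP_HEADER + irlap_get_header_size(), so clients can reserve headroom from the negotiated value instead of hard-coding LMP_CONTROL_HEADER+LAP_HEADER. A hypothetical client callback showing the intended use (the control-block fields are invented; the signature matches the new connect_confirm):

struct client_cb {
	__u32 max_data_size;	/* largest payload per frame */
	__u8  max_header_size;	/* headroom to skb_reserve() */
};

static void client_connect_confirm(void *instance, void *sap,
				   struct qos_info *qos, __u32 max_seg_size,
				   __u8 max_header_size, struct sk_buff *skb)
{
	struct client_cb *self = (struct client_cb *) instance;

	self->max_data_size   = max_seg_size;
	self->max_header_size = max_header_size;
}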
@@ -620,8 +637,8 @@ void irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
*
* LSAP is being closed!
*/
-void irlmp_disconnect_indication( struct lsap_cb *self, LM_REASON reason,
- struct sk_buff *userdata)
+void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
+ struct sk_buff *userdata)
{
struct lsap_cb *lsap;
@@ -637,6 +654,10 @@ void irlmp_disconnect_indication( struct lsap_cb *self, LM_REASON reason,
self->connected = FALSE;
self->dlsap_sel = LSAP_ANY;
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ irlmp->cache.valid = FALSE;
+#endif
+
/*
* Remove association between this LSAP and the link it used
*/
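
Invalidating irlmp->cache here matters because the cache presumably short-circuits irlmp_find_lsap() on the receive hot path; a stale entry would keep routing frames to the LSAP that just disconnected. A sketch of the fast path being protected (cache.valid is from the patch; the other cache fields are assumptions):

#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	/* Assumed fast path in irlmp_find_lsap(): reuse the last hit
	 * only while the cache is still marked valid. */
	if (irlmp->cache.valid &&
	    (irlmp->cache.slsap_sel == slsap_sel) &&
	    (irlmp->cache.dlsap_sel == dlsap_sel))
		return irlmp->cache.lsap;
#endif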
@@ -975,7 +996,7 @@ void irlmp_status_request(void)
DEBUG( 1, "irlmp_status_request(), Not implemented\n");
}
-void irlmp_status_indication( LINK_STATUS link, LOCK_STATUS lock)
+void irlmp_status_indication(LINK_STATUS link, LOCK_STATUS lock)
{
DEBUG( 4, "irlmp_status_indication(), Not implemented\n");
}
@@ -1418,14 +1439,14 @@ __u32 irlmp_get_daddr(struct lsap_cb *self)
* Give some info to the /proc file system
*
*/
-int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
- int unused)
+int irlmp_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused)
{
struct lsap_cb *self;
struct lap_cb *lap;
unsigned long flags;
- ASSERT( irlmp != NULL, return 0;);
+ ASSERT(irlmp != NULL, return 0;);
save_flags( flags);
cli();
@@ -1449,35 +1470,34 @@ int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
}
 	len += sprintf( buf+len, "\nRegistered Link Layers:\n");
- lap = (struct lap_cb *) hashbin_get_first( irlmp->links);
- while ( lap != NULL) {
- ASSERT( lap->magic == LMP_LAP_MAGIC, return 0;);
- len += sprintf( buf+len, "lap state: %s, ",
- irlmp_state[ lap->lap_state]);
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ len += sprintf(buf+len, "lap state: %s, ",
+ irlmp_state[lap->lap_state]);
- len += sprintf( buf+len, "saddr: %#08x, daddr: %#08x, ",
- lap->saddr, lap->daddr);
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "saddr: %#08x, daddr: %#08x, ",
+ lap->saddr, lap->daddr);
+ len += sprintf(buf+len, "\n");
len += sprintf( buf+len, "\nConnected LSAPs:\n");
self = (struct lsap_cb *) hashbin_get_first( lap->lsaps);
- while ( self != NULL) {
- ASSERT( self->magic == LMP_LSAP_MAGIC, return 0;);
- len += sprintf( buf+len, "lsap state: %s, ",
- irlsap_state[ self->lsap_state]);
- len += sprintf( buf+len,
- "slsap_sel: %#02x, dlsap_sel: %#02x, ",
- self->slsap_sel, self->dlsap_sel);
- len += sprintf( buf+len, "(%s)", self->notify.name);
- len += sprintf( buf+len, "\n");
+ while (self != NULL) {
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return 0;);
+ len += sprintf(buf+len, "lsap state: %s, ",
+ irlsap_state[ self->lsap_state]);
+ len += sprintf(buf+len,
+ "slsap_sel: %#02x, dlsap_sel: %#02x, ",
+ self->slsap_sel, self->dlsap_sel);
+ len += sprintf(buf+len, "(%s)", self->notify.name);
+ len += sprintf(buf+len, "\n");
- self = ( struct lsap_cb *) hashbin_get_next(
+ self = (struct lsap_cb *) hashbin_get_next(
lap->lsaps);
}
+ len += sprintf(buf+len, "\n");
- lap = ( struct lap_cb *) hashbin_get_next(
- irlmp->links);
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
}
restore_flags( flags);
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index bf1bab31e..95a707a7f 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -1,15 +1,15 @@
/*********************************************************************
*
* Filename: irlmp_frame.c
- * Version: 0.8
+ * Version: 0.9
* Description: IrLMP frame implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 02:09:59 1997
- * Modified at: Fri Apr 23 09:12:23 1999
+ * Modified at: Mon May 31 09:53:16 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -59,16 +59,16 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
*
* Send Link Control Frame to IrLAP
*/
-void irlmp_send_lcf_pdu( struct lap_cb *self, __u8 dlsap, __u8 slsap,
- __u8 opcode, struct sk_buff *skb)
+void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ __u8 opcode, struct sk_buff *skb)
{
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
frame = skb->data;
@@ -82,8 +82,8 @@ void irlmp_send_lcf_pdu( struct lap_cb *self, __u8 dlsap, __u8 slsap,
else
frame[3] = 0x00; /* rsvd */
- ASSERT( self->irlap != NULL, return;);
- irlap_data_request( self->irlap, skb, TRUE);
+ ASSERT(self->irlap != NULL, return;);
+ irlap_data_request(self->irlap, skb, TRUE);
}
/*
@@ -112,7 +112,7 @@ void irlmp_link_data_indication(struct lap_cb *self, int reliable,
*/
slsap_sel = fp[0] & LSAP_MASK;
dlsap_sel = fp[1];
-
+
/*
* Check if this is an incoming connection, since we must deal with
* it in a different way than other established connections.
@@ -134,17 +134,17 @@ void irlmp_link_data_indication(struct lap_cb *self, int reliable,
self->lsaps);
if (lsap == NULL) {
- DEBUG(0, "IrLMP, Sorry, no LSAP for received frame!\n");
- DEBUG(0, __FUNCTION__
+ DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
+ DEBUG(2, __FUNCTION__
"(), slsap_sel = %02x, dlsap_sel = %02x\n", slsap_sel,
dlsap_sel);
if (fp[0] & CONTROL_BIT) {
- DEBUG(0, __FUNCTION__
+ DEBUG(2, __FUNCTION__
"(), received control frame %02x\n", fp[2]);
} else {
- DEBUG(0, __FUNCTION__ "(), received data frame\n");
+ DEBUG(2, __FUNCTION__ "(), received data frame\n");
}
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
return;
}
@@ -224,11 +224,11 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
* Incoming LAP connection!
*
*/
-void irlmp_link_connect_indication( struct lap_cb *self, __u32 saddr,
- __u32 daddr, struct qos_info *qos,
- struct sk_buff *skb)
+void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
+ __u32 daddr, struct qos_info *qos,
+ struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
/* Copy QoS settings for this session */
self->qos = qos;
@@ -237,7 +237,7 @@ void irlmp_link_connect_indication( struct lap_cb *self, __u32 saddr,
self->daddr = daddr;
ASSERT(self->saddr == saddr, return;);
- irlmp_do_lap_event( self, LM_LAP_CONNECT_INDICATION, skb);
+ irlmp_do_lap_event(self, LM_LAP_CONNECT_INDICATION, skb);
}
/*
@@ -246,19 +246,19 @@ void irlmp_link_connect_indication( struct lap_cb *self, __u32 saddr,
* LAP connection confirmed!
*
*/
-void irlmp_link_connect_confirm( struct lap_cb *self, struct qos_info *qos,
- struct sk_buff *userdata)
+void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
+ struct sk_buff *userdata)
{
- DEBUG( 4, "irlmp_link_connect_confirm()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
- ASSERT( qos != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(qos != NULL, return;);
/* Copy QoS settings for this session */
self->qos = qos;
- irlmp_do_lap_event( self, LM_LAP_CONNECT_CONFIRM, NULL);
+ irlmp_do_lap_event(self, LM_LAP_CONNECT_CONFIRM, NULL);
}
/*
@@ -276,7 +276,9 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
irlmp_add_discovery(irlmp->cachelog, discovery);
/* Just handle it the same way as a discovery confirm */
+#if 0
irlmp_do_lap_event(self, LM_LAP_DISCOVERY_CONFIRM, NULL);
+#endif
}
/*
@@ -365,7 +367,7 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
#endif
return lsap;
}
- lsap = ( struct lsap_cb *) hashbin_get_next(queue);
+ lsap = (struct lsap_cb *) hashbin_get_next(queue);
}
/* Sorry not found! */
diff --git a/net/irda/irlpt/irlpt_cli.c b/net/irda/irlpt/irlpt_cli.c
index a0fbe23d6..9e02465cc 100644
--- a/net/irda/irlpt/irlpt_cli.c
+++ b/net/irda/irlpt/irlpt_cli.c
@@ -51,10 +51,11 @@ static void irlpt_client_discovery_indication(discovery_t *);
static void irlpt_client_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb);
-static void irlpt_client_disconnect_indication( void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *userdata);
+static void irlpt_client_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata);
static void irlpt_client_expired(unsigned long data);
#if 0
@@ -187,7 +188,7 @@ __initfunc(int irlpt_client_init(void))
#ifdef CONFIG_PROC_FS
create_proc_entry("irlpt_client", 0, proc_irda)->get_info
- = irlpt_client_proc_read;
+ = irlpt_client_proc_read;
#endif /* CONFIG_PROC_FS */
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
@@ -215,7 +216,6 @@ static void irlpt_client_cleanup(void)
#ifdef CONFIG_PROC_FS
remove_proc_entry("irlpt_client", proc_irda);
#endif
-
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
#endif /* MODULE */
@@ -403,9 +403,8 @@ static void irlpt_client_disconnect_indication( void *instance,
irlpt_client_do_event( self, LMP_DISCONNECT, NULL, NULL);
- if (skb) {
+ if (skb)
dev_kfree_skb( skb);
- }
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
@@ -417,7 +416,8 @@ static void irlpt_client_disconnect_indication( void *instance,
*/
static void irlpt_client_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
- __u32 max_sdu_size,
+ __u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb)
{
struct irlpt_info info;
@@ -443,14 +443,14 @@ static void irlpt_client_connect_confirm(void *instance, void *sap,
}
#endif
- self->irlap_data_size = (qos->data_size.value - IRLPT_MAX_HEADER);
+ self->max_data_size = max_seg_size;
+ self->max_header_size = max_header_size;
self->connected = TRUE;
irlpt_client_do_event( self, LMP_CONNECT, NULL, NULL);
- if (skb) {
+ if (skb)
dev_kfree_skb( skb);
- }
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
@@ -603,7 +603,7 @@ static void irlpt_client_expired(unsigned long data)
return;
}
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, LMP_MAX_HEADER);
irlmp_disconnect_request(self->lsap, skb);
DEBUG(irlpt_client_debug, __FUNCTION__
": irlmp_close_slap(self->lsap)\n");
diff --git a/net/irda/irlpt/irlpt_cli_fsm.c b/net/irda/irlpt/irlpt_cli_fsm.c
index 75598742a..83a2e6991 100644
--- a/net/irda/irlpt/irlpt_cli_fsm.c
+++ b/net/irda/irlpt/irlpt_cli_fsm.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Jan 12 11:06:00 1999
- * Modified at: Tue Jan 26 12:02:31 1999
+ * Modified at: Sun May 9 13:36:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>
+ * Copyright (c) 1998-1999, Thomas Davis, <ratbert@radiks.net>
* Copyright (c) 1998, Dag Brattli, <dagb@cs.uit.no>
* All Rights Reserved.
*
@@ -43,10 +43,10 @@ static int irlpt_client_state_query ( struct irlpt_cb *self,
IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
-static int irlpt_client_state_ready ( struct irlpt_cb *self,
- IRLPT_EVENT event,
- struct sk_buff *skb,
- struct irlpt_info *info);
+static int irlpt_client_state_ready ( struct irlpt_cb *self,
+ IRLPT_EVENT event,
+ struct sk_buff *skb,
+ struct irlpt_info *info);
static int irlpt_client_state_waiti ( struct irlpt_cb *self,
IRLPT_EVENT event,
struct sk_buff *skb,
diff --git a/net/irda/irlpt/irlpt_common.c b/net/irda/irlpt/irlpt_common.c
index b4512736a..c6401416b 100644
--- a/net/irda/irlpt/irlpt_common.c
+++ b/net/irda/irlpt/irlpt_common.c
@@ -251,18 +251,18 @@ ssize_t irlpt_write(struct file *file, const char *buffer,
}
DEBUG( irlpt_common_debug, __FUNCTION__
- ": count = %d, irlap_data_size = %d, IRLPT_MAX_HEADER = %d\n",
- count, self->irlap_data_size, IRLPT_MAX_HEADER);
+ ": count = %d, max_data_size = %d, IRLPT_MAX_HEADER = %d\n",
+ count, self->max_data_size, IRLPT_MAX_HEADER);
- if (count > (self->irlap_data_size - IRLPT_MAX_HEADER)) {
- count = (self->irlap_data_size - IRLPT_MAX_HEADER);
+ if (count > self->max_data_size) {
+ count = self->max_data_size;
DEBUG(irlpt_common_debug, __FUNCTION__
": setting count to %d\n", count);
}
DEBUG( irlpt_common_debug, __FUNCTION__ ": count = %d\n", count);
- skb = dev_alloc_skb(count + IRLPT_MAX_HEADER);
+ skb = dev_alloc_skb(count + self->max_header_size);
if ( skb == NULL) {
printk( KERN_INFO
__FUNCTION__ ": couldn't allocate skbuff!\n");
@@ -417,7 +417,7 @@ int irlpt_close(struct inode *inode,
return 0;
}
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve( skb, LMP_MAX_HEADER);
irlmp_disconnect_request(self->lsap, skb);
DEBUG(irlpt_common_debug, __FUNCTION__
": irlmp_close_slap(self->lsap)\n");
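
With max_data_size and max_header_size recorded at connect time, irlpt_write() clamps each chunk to the peer's segment size and sizes the skb from the negotiated headroom rather than the fixed IRLPT_MAX_HEADER. A condensed sketch of that allocation pattern (the helper name is invented and the copy step is assumed, it lies outside the hunks shown):

/* Not a drop-in: the clamp/alloc/reserve pattern from irlpt_write(). */
static struct sk_buff *irlpt_alloc_tx_skb(struct irlpt_cb *self,
					  const char *buffer, int count)
{
	struct sk_buff *skb;

	if (count > self->max_data_size)
		count = self->max_data_size;	/* clamp to peer's segment */

	skb = dev_alloc_skb(count + self->max_header_size);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, self->max_header_size);	/* LMP/LAP headroom */
	copy_from_user(skb_put(skb, count), buffer, count);

	return skb;
}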
diff --git a/net/irda/irlpt/irlpt_srvr.c b/net/irda/irlpt/irlpt_srvr.c
index a1362d0dc..12e5867a5 100644
--- a/net/irda/irlpt/irlpt_srvr.c
+++ b/net/irda/irlpt/irlpt_srvr.c
@@ -51,15 +51,21 @@ int irlpt_server_init(void);
static void irlpt_server_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *skb);
+
+#if 0
static void irlpt_server_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb);
static void irlpt_server_connect_indication(void *instance,
void *sap,
struct qos_info *qos,
__u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb);
+#endif
+
static int irlpt_server_data_indication(void *instance, void *sap,
struct sk_buff *skb);
static void register_irlpt_server(void);
@@ -161,7 +167,6 @@ static int irlpt_server_proc_read(char *buf, char **start, off_t offset,
}
extern struct proc_dir_entry *proc_irda;
-
#endif /* CONFIG_PROC_FS */
/*
@@ -171,9 +176,9 @@ extern struct proc_dir_entry *proc_irda;
*
*/
-/*int irlpt_init( struct device *dev) {*/
__initfunc(int irlpt_server_init(void))
{
+ struct irmanager_event mgr_event;
__u16 hints;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
@@ -212,6 +217,10 @@ __initfunc(int irlpt_server_init(void))
= irlpt_server_proc_read;
#endif /* CONFIG_PROC_FS */
+ mgr_event.event = EVENT_IRLPT_START;
+ sprintf(mgr_event.devname, "%s", irlpt_server->ifname);
+ irmanager_notify(&mgr_event);
+
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
return 0;
@@ -225,6 +234,7 @@ __initfunc(int irlpt_server_init(void))
*/
static void irlpt_server_cleanup(void)
{
+ struct irmanager_event mgr_event;
struct sk_buff *skb;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
@@ -245,6 +255,10 @@ static void irlpt_server_cleanup(void)
remove_proc_entry("irlpt_server", proc_irda);
#endif
+ mgr_event.event = EVENT_IRLPT_STOP;
+ sprintf( mgr_event.devname, "%s", irlpt_server->ifname);
+ irmanager_notify( &mgr_event);
+
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
}
@@ -304,6 +318,7 @@ static void irlpt_server_connect_confirm(void *instance,
void *sap,
struct qos_info *qos,
__u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb)
{
struct irlpt_cb *self;
@@ -314,6 +329,9 @@ static void irlpt_server_connect_confirm(void *instance,
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
+ self->max_data_size = max_seg_size;
+ self->max_header_size = max_header_size;
+
self->connected = TRUE;
irlpt_server_do_event( self, LMP_CONNECT, NULL, NULL);
@@ -329,6 +347,7 @@ static void irlpt_server_connect_indication(void *instance,
void *sap,
struct qos_info *qos,
__u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *skb)
{
struct irlpt_cb *self;
@@ -343,14 +362,16 @@ static void irlpt_server_connect_indication(void *instance,
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
+ self->max_data_size = max_seg_size;
+ self->max_header_size = max_header_size;
+
self->connected = IRLPT_CONNECTED;
self->eof = FALSE;
irlpt_server_do_event( self, LMP_CONNECT, NULL, &info);
- if (skb) {
+ if (skb)
dev_kfree_skb( skb);
- }
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
}
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 88d61c2cd..ab7354e1d 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Dec 15 13:55:39 1997
- * Modified at: Mon Apr 12 11:31:01 1999
+ * Modified at: Fri May 14 13:46:02 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -45,7 +45,7 @@
#include <net/irda/wrapper.h>
#include <net/irda/timer.h>
-extern struct proc_dir_entry proc_irda;
+extern struct proc_dir_entry *proc_irda;
struct irda_cb irda; /* One global instance */
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(irttp_disconnect_request);
EXPORT_SYMBOL(irttp_flow_request);
EXPORT_SYMBOL(irttp_connect_request);
EXPORT_SYMBOL(irttp_udata_request);
+EXPORT_SYMBOL(irttp_dup);
/* Main IrDA module */
#ifdef CONFIG_IRDA_DEBUG
@@ -151,6 +152,7 @@ EXPORT_SYMBOL(irlmp_connect_response);
EXPORT_SYMBOL(irlmp_disconnect_request);
EXPORT_SYMBOL(irlmp_get_daddr);
EXPORT_SYMBOL(irlmp_get_saddr);
+EXPORT_SYMBOL(irlmp_dup);
EXPORT_SYMBOL(lmp_reasons);
/* Queue */
@@ -174,10 +176,15 @@ EXPORT_SYMBOL(irda_device_close);
EXPORT_SYMBOL(irda_device_setup);
EXPORT_SYMBOL(irda_device_set_media_busy);
EXPORT_SYMBOL(irda_device_txqueue_empty);
+
+EXPORT_SYMBOL(irda_device_init_dongle);
+EXPORT_SYMBOL(irda_device_register_dongle);
+EXPORT_SYMBOL(irda_device_unregister_dongle);
+
EXPORT_SYMBOL(async_wrap_skb);
EXPORT_SYMBOL(async_unwrap_char);
EXPORT_SYMBOL(irda_start_timer);
-EXPORT_SYMBOL(irda_get_mtt);
+/* EXPORT_SYMBOL(irda_get_mtt); */
EXPORT_SYMBOL(setup_dma);
#ifdef CONFIG_IRTTY
@@ -505,19 +512,28 @@ void irda_mod_dec_use_count(void)
#endif
}
-#ifdef MODULE
-#ifdef CONFIG_PROC_FS
+/*
+ * Function irda_proc_modcount (inode, fill)
+ *
+ *    Used by the proc file system functions to prevent the irda module
+ *    from being removed while the user is standing in the net/irda directory
+ */
void irda_proc_modcount(struct inode *inode, int fill)
{
+#ifdef MODULE
+#ifdef CONFIG_PROC_FS
if (fill)
MOD_INC_USE_COUNT;
else
MOD_DEC_USE_COUNT;
-}
#endif /* CONFIG_PROC_FS */
+#endif /* MODULE */
+}
+
+#ifdef MODULE
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
-MODULE_DESCRIPTION("The Linux IrDA protocol subsystem");
+MODULE_DESCRIPTION("The Linux IrDA Protocol Subsystem");
MODULE_PARM(irda_debug, "1l");
/*
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index f3b710b95..a04951694 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Thomas Davis, <ratbert@radiks.net>
* Created at: Sat Feb 21 21:33:24 1998
- * Modified at: Tue Apr 6 19:07:06 1999
+ * Modified at: Fri May 7 08:06:49 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 1998-1999, Thomas Davis, <ratbert@radiks.net>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -20,8 +20,6 @@
* I, Thomas Davis, provide no warranty for any of this software.
* This material is provided "AS-IS" and at no charge.
*
- * Portions lifted from the linux/fs/procfs/ files.
- *
********************************************************************/
#include <linux/miscdevice.h>
@@ -44,28 +42,27 @@ extern int irias_proc_read(char *buf, char **start, off_t offset, int len,
int unused);
extern int discovery_proc_read(char *buf, char **start, off_t offset, int len,
int unused);
+static int proc_discovery_read(char *buf, char **start, off_t offset, int len,
+ int unused);
-enum irda_directory_inos {
- PROC_IRDA_LAP = 1,
- PROC_IRDA_LMP,
- PROC_IRDA_TTP,
- PROC_IRDA_LPT,
- PROC_IRDA_COMM,
- PROC_IRDA_IRDA_DEVICE,
- PROC_IRDA_IRIAS
-};
+/* enum irda_directory_inos { */
+/* PROC_IRDA_LAP = 1, */
+/* PROC_IRDA_LMP, */
+/* PROC_IRDA_TTP, */
+/* PROC_IRDA_LPT, */
+/* PROC_IRDA_COMM, */
+/* PROC_IRDA_IRDA_DEVICE, */
+/* PROC_IRDA_IRIAS */
+/* }; */
struct irda_entry {
char *name;
- int (*fn)(char*,char**,off_t,int,int);
+ int (*fn)(char*, char**, off_t, int, int);
};
struct proc_dir_entry *proc_irda;
-
+
static struct irda_entry dir[] = {
-#if 0
- {"lpt", irlpt_proc_read},
-#endif
{"discovery", discovery_proc_read},
{"irda_device", irda_device_proc_read},
{"irttp", irttp_proc_read},
@@ -75,19 +72,22 @@ static struct irda_entry dir[] = {
};
#define IRDA_ENTRIES_NUM (sizeof(dir)/sizeof(dir[0]))
-
+
/*
* Function irda_proc_register (void)
*
* Register irda entry in /proc file system
*
*/
-void irda_proc_register(void) {
+void irda_proc_register(void)
+{
int i;
+
proc_irda = create_proc_entry("net/irda", S_IFDIR, NULL);
#ifdef MODULE
proc_irda->fill_inode = &irda_proc_modcount;
#endif /* MODULE */
+
for (i=0;i<IRDA_ENTRIES_NUM;i++)
create_proc_entry(dir[i].name,0,proc_irda)->get_info=dir[i].fn;
}
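
Each dir[] entry is wired into the old get_info interface: the handler fills buf and returns the number of bytes written. For reference, a minimal handler in that style (name and body invented; the signature matches the fn field of struct irda_entry):

/* Hypothetical get_info-style handler matching dir[].fn */
static int example_proc_read(char *buf, char **start, off_t offset,
			     int len, int unused)
{
	len = 0;
	len += sprintf(buf+len, "example: nothing to report\n");

	return len;
}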
@@ -98,9 +98,14 @@ void irda_proc_register(void) {
* Unregister irda entry in /proc file system
*
*/
-void irda_proc_unregister(void) {
+void irda_proc_unregister(void)
+{
int i;
+
for (i=0;i<IRDA_ENTRIES_NUM;i++)
remove_proc_entry(dir[i].name, proc_irda);
+
remove_proc_entry("net/irda", NULL);
}
+
+
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 0b9a4f189..e82c2edd3 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -1,15 +1,15 @@
/*********************************************************************
*
* Filename: irsysctl.c
- * Version:
- * Description:
+ * Version: 1.0
+ * Description: Sysctl interface for IrDA
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 24 22:12:06 1998
- * Modified at: Fri Apr 23 09:46:38 1999
+ * Modified at: Thu May 6 21:32:46 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index bf0624eee..8d5e31f41 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
- * Modified at: Sat Apr 10 10:32:21 1999
+ * Modified at: Mon May 31 10:29:56 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -35,7 +35,7 @@
#include <net/irda/irlmp.h>
#include <net/irda/irttp.h>
-struct irttp_cb *irttp = NULL;
+static struct irttp_cb *irttp = NULL;
static void __irttp_close_tsap(struct tsap_cb *self);
@@ -44,19 +44,20 @@ static int irttp_data_indication(void *instance, void *sap,
static int irttp_udata_indication(void *instance, void *sap,
struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *);
+ LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
- struct sk_buff *skb);
-
+ __u8 header_size, struct sk_buff *skb);
+static void irttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ __u8 header_size, struct sk_buff *skb);
static void irttp_run_tx_queue(struct tsap_cb *self);
static void irttp_run_rx_queue(struct tsap_cb *self);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
-static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_start_todo_timer(struct tsap_cb *self, int timeout);
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
/*
* Function irttp_init (void)
@@ -296,7 +297,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
/* Check that nothing bad happens */
if ((skb->len == 0) || (!self->connected)) {
- DEBUG(4, __FUNCTION__ "(), No data, or not connected\n");
+ ERROR(__FUNCTION__ "(), No data, or not connected\n");
return -ENOTCONN;
}
@@ -305,8 +306,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
* inside an IrLAP frame
*/
if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
- DEBUG(1, __FUNCTION__
- "(), SAR disabled, and data is to large for IrLAP!\n");
+ ERROR(__FUNCTION__
+ "(), SAR disabled, and data is to large for IrLAP!\n");
return -EMSGSIZE;
}
@@ -318,8 +319,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
(self->tx_max_sdu_size != SAR_UNBOUND) &&
(skb->len > self->tx_max_sdu_size))
{
- DEBUG(1, __FUNCTION__ "(), SAR enabled, "
- "but data is larger than TxMaxSduSize!\n");
+ ERROR(__FUNCTION__ "(), SAR enabled, "
+ "but data is larger than TxMaxSduSize!\n");
return -EMSGSIZE;
}
/*
@@ -337,10 +338,10 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
/* Queue frame, or queue frame segments */
if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
/* Queue frame */
+ ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
frame = skb_push(skb, TTP_HEADER);
frame[0] = 0x00; /* Clear more bit */
- DEBUG(4, __FUNCTION__ "(), queueing original skb\n");
skb_queue_tail(&self->tx_queue, skb);
} else {
/*
@@ -360,8 +361,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
self->tx_sdu_busy = TRUE;
if (self->notify.flow_indication) {
- self->notify.flow_indication(
- self->notify.instance, self, FLOW_STOP);
+ self->notify.flow_indication(self->notify.instance,
+ self, FLOW_STOP);
}
}
@@ -381,12 +382,8 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
{
struct sk_buff *skb = NULL;
unsigned long flags;
- __u8 *frame;
int n;
- ASSERT(self != NULL, return;);
- ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
-
if (irda_lock(&self->tx_queue_lock) == FALSE)
return;
@@ -421,12 +418,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* More bit must be set by the data_request() or fragment()
* functions
*/
- frame = skb->data;
-
- DEBUG(4, __FUNCTION__ "(), More=%s\n", frame[0] & 0x80 ?
- "TRUE" : "FALSE" );
-
- frame[0] |= (__u8) (n & 0x7f);
+ skb->data[0] |= (n & 0x7f);
irlmp_data_request(self->lsap, skb);
self->stats.tx_packets++;
@@ -434,12 +426,12 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
/* Check if we can accept more frames from client */
if ((self->tx_sdu_busy) &&
(skb_queue_len(&self->tx_queue) < LOW_THRESHOLD))
- {
+ {
self->tx_sdu_busy = FALSE;
if (self->notify.flow_indication)
self->notify.flow_indication(
- self->notify.instance, self,
+ self->notify.instance, self,
FLOW_START);
}
}
@@ -472,7 +464,7 @@ void irttp_give_credit(struct tsap_cb *self)
return;
/* Reserve space for LMP, and LAP header */
- skb_reserve(tx_skb, LMP_HEADER+LAP_HEADER);
+ skb_reserve(tx_skb, self->max_header_size);
/*
* Since we can transmit and receive frames concurrently,
@@ -538,23 +530,14 @@ static int irttp_data_indication(void *instance, void *sap,
struct sk_buff *skb)
{
struct tsap_cb *self;
- int more;
int n;
- __u8 *frame;
-
+
self = (struct tsap_cb *) instance;
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
- ASSERT(skb != NULL, return -1;);
- frame = skb->data;
-
- n = frame[0] & 0x7f; /* Extract the credits */
- more = frame[0] & 0x80;
-
- DEBUG(3, __FUNCTION__"(), got %d credits, TSAP sel=%02x\n",
- n, self->stsap_sel);
+ n = skb->data[0] & 0x7f; /* Extract the credits */
self->stats.rx_packets++;
@@ -562,10 +545,9 @@ static int irttp_data_indication(void *instance, void *sap,
* Data or dataless frame? Dataless frames only contain the
* TTP_HEADER
*/
- if (skb->len == 1) {
- /* Dataless flowdata TTP-PDU */
- self->send_credit += n;
- } else {
+ if (skb->len == 1)
+ self->send_credit += n; /* Dataless flowdata TTP-PDU */
+ else {
/* Deal with inbound credit */
self->send_credit += n;
self->remote_credit--;
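
Both sides of this hunk agree on the wire format being decoded: the first TTP byte carries the credit count in bits 0-6 and the More (fragmentation) bit in bit 7, and a one-byte skb is a dataless flow-control PDU. The same masks, spelled out (the helper and macro names are invented; the 0x7f/0x80 masks are straight from the code):

/* TTP header byte, as masked above:
 *   bit 7      More bit (frame is one fragment of a larger SDU)
 *   bits 6..0  credits handed to the peer
 */
#define TTP_MORE_BIT	0x80
#define TTP_CREDIT_MASK	0x7f

static inline int ttp_credits(__u8 hdr) { return hdr & TTP_CREDIT_MASK; }
static inline int ttp_more(__u8 hdr)    { return hdr & TTP_MORE_BIT; }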
@@ -655,15 +637,14 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
return -ENOMEM;
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve(skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
+ skb_reserve(skb, TTP_MAX_HEADER);
} else {
skb = userdata;
/*
* Check that the client has reserved enough space for
* headers
*/
- ASSERT(skb_headroom(userdata) >=
- (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER), return -1;);
+ ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER, return -1;);
}
/* Initialize connection parameters */
@@ -691,12 +672,11 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
/* SAR enabled? */
if (max_sdu_size > 0) {
- ASSERT(skb_headroom(skb) >=
- (TTP_HEADER_WITH_SAR+LMP_CONTROL_HEADER+LAP_HEADER),
- return -1;);
+ ASSERT(skb_headroom(skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
+ return -1;);
/* Insert SAR parameters */
- frame = skb_push(skb, TTP_HEADER_WITH_SAR);
+ frame = skb_push(skb, TTP_HEADER+TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -724,8 +704,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
 * Service user confirms TSAP connection with peer.
*
*/
-void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
- __u32 max_seg_size, struct sk_buff *skb)
+static void irttp_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size, __u8 max_header_size,
+ struct sk_buff *skb)
{
struct tsap_cb *self;
int parameters;
@@ -741,7 +723,8 @@ void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
ASSERT(skb != NULL, return;);
- self->max_seg_size = max_seg_size-LMP_HEADER-LAP_HEADER;
+ self->max_seg_size = max_seg_size;
+ self->max_header_size = max_header_size + TTP_HEADER;
/*
* Check if we have got some QoS parameters back! This should be the
@@ -764,6 +747,10 @@ void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
self->connected = TRUE;
parameters = frame[0] & 0x80;
+
+ ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
if (parameters) {
plen = frame[1];
pi = frame[2];
@@ -789,17 +776,19 @@ void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
DEBUG(4, __FUNCTION__ "(), RxMaxSduSize=%d\n",
self->tx_max_sdu_size);
+
+ /* Remove parameters */
+ ASSERT(skb->len >= (plen+1), return;);
+ skb_pull(skb, plen+1);
}
DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
self->send_credit, self->avail_credit, self->remote_credit);
- skb_pull(skb, TTP_HEADER);
-
if (self->notify.connect_confirm) {
- self->notify.connect_confirm(self->notify.instance, self,
- qos, self->tx_max_sdu_size,
- skb);
+ self->notify.connect_confirm(self->notify.instance, self, qos,
+ self->tx_max_sdu_size,
+ self->max_header_size, skb);
}
}
@@ -809,8 +798,8 @@ void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
* Some other device is connecting to this TSAP
*
*/
-void irttp_connect_indication(void *instance, void *sap,
- struct qos_info *qos, __u32 max_seg_size,
+void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
+ __u32 max_seg_size, __u8 max_header_size,
struct sk_buff *skb)
{
struct tsap_cb *self;
@@ -828,7 +817,9 @@ void irttp_connect_indication(void *instance, void *sap,
lsap = (struct lsap_cb *) sap;
- self->max_seg_size = max_seg_size-LMP_HEADER-LAP_HEADER;
+ self->max_seg_size = max_seg_size;
+
+ self->max_header_size = max_header_size+TTP_HEADER;
DEBUG(4, __FUNCTION__ "(), TSAP sel=%02x\n", self->stsap_sel);
@@ -841,7 +832,11 @@ void irttp_connect_indication(void *instance, void *sap,
self->send_credit = n;
self->tx_max_sdu_size = 0;
- parameters = frame[0] & 0x80;
+ parameters = frame[0] & 0x80;
+
+ ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
if (parameters) {
DEBUG(3, __FUNCTION__ "(), Contains parameters!\n");
plen = frame[1];
@@ -850,7 +845,7 @@ void irttp_connect_indication(void *instance, void *sap,
switch (pl) {
case 1:
- self->tx_max_sdu_size = *(frame+4);
+ self->tx_max_sdu_size = frame[4];
break;
case 2:
self->tx_max_sdu_size =
@@ -865,7 +860,10 @@ void irttp_connect_indication(void *instance, void *sap,
"() illegal value length for max_sdu_size!\n");
self->tx_max_sdu_size = 0;
};
-
+
+ /* Remove parameters */
+ ASSERT(skb->len >= (plen+1), return;);
+ skb_pull(skb, plen+1);
DEBUG(3, __FUNCTION__ "(), MaxSduSize=%d\n",
self->tx_max_sdu_size);
@@ -873,12 +871,10 @@ void irttp_connect_indication(void *instance, void *sap,
DEBUG(4, __FUNCTION__ "(), initial send_credit=%d\n", n);
- skb_pull(skb, 1); /* Remove TTP header */
-
if (self->notify.connect_indication) {
self->notify.connect_indication(self->notify.instance, self,
qos, self->rx_max_sdu_size,
- skb);
+ self->max_header_size, skb);
}
}
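
The connect paths now strip the TTP header, and when the parameter bit is set also the plen+1 parameter bytes, before the skb reaches the callback, so the upper layer sees only user data. The parameter block itself is a PI/PL/PV triple: frame[1] holds the total parameter length, frame[2] the identifier, frame[3] the value length, and frame[4..] the value. A worked decode of the MaxSduSize case under those assumptions (the 16-bit conversion is a guess at the line elided above):

/* Sketch of the MaxSduSize decode; frame points at the TTP PDU. */
static int ttp_param_max_sdu_size(__u8 *frame)
{
	int pl = frame[3];			/* value length */

	switch (pl) {
	case 1:
		return frame[4];			/* 8-bit value  */
	case 2:
		return ntohs(*(__u16 *)(frame+4));	/* 16-bit value */
	default:
		return 0;			/* illegal value length */
	}
}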
@@ -909,15 +905,14 @@ void irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
return;
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve(skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
+ skb_reserve(skb, TTP_MAX_HEADER);
} else {
skb = userdata;
/*
* Check that the client has reserved enough space for
* headers
*/
- ASSERT(skb_headroom(skb) >=
- (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER), return;);
+ ASSERT(skb_headroom(skb) >= TTP_MAX_HEADER, return;);
}
self->avail_credit = 0;
@@ -939,12 +934,11 @@ void irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
/* SAR enabled? */
if (max_sdu_size > 0) {
- ASSERT(skb_headroom(skb) >=
- (TTP_HEADER_WITH_SAR+LMP_CONTROL_HEADER+LAP_HEADER),
+ ASSERT(skb_headroom(skb) >= (TTP_MAX_HEADER+TTP_SAR_HEADER),
return;);
/* Insert TTP header with SAR parameters */
- frame = skb_push(skb, TTP_HEADER_WITH_SAR);
+ frame = skb_push(skb, TTP_HEADER+TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1079,7 +1073,7 @@ void irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
/*
* Reserve space for MUX and LAP header
*/
- skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve(skb, TTP_MAX_HEADER);
userdata = skb;
}
@@ -1357,13 +1351,11 @@ static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb)
}
/* Make new segment */
- frag = dev_alloc_skb(self->max_seg_size+
- TTP_HEADER+LMP_HEADER+
- LAP_HEADER);
+ frag = dev_alloc_skb(self->max_seg_size+self->max_header_size);
if (!frag)
return;
- skb_reserve(frag, LMP_HEADER+LAP_HEADER);
+ skb_reserve(frag, self->max_header_size);
/*
* Copy data from the original skb into this fragment. We
@@ -1401,11 +1393,9 @@ static void irttp_todo_expired(unsigned long data)
irttp_run_tx_queue(self);
 	/* Give away some credits to peer? */
- if ((skb_queue_empty(&self->tx_queue)) &&
- (self->remote_credit < LOW_THRESHOLD) &&
- (self->avail_credit > 0))
+ if ((self->remote_credit < LOW_THRESHOLD) &&
+ (self->avail_credit > 0) && (skb_queue_empty(&self->tx_queue)))
{
- DEBUG(4, __FUNCTION__ "(), sending credit!\n");
irttp_give_credit(self);
}
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 7b226dfa6..82f7fc28a 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -6,10 +6,11 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Sep 9 00:00:26 1997
- * Modified at: Mon Apr 12 11:49:24 1999
+ * Modified at: Mon May 3 21:15:08 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -52,10 +53,10 @@ int compression[] = { CI_BZIP2, CI_DEFLATE, CI_DEFLATE_DRAFT };
* Compute the intersection of the old QoS capabilites with new ones
*
*/
-void irda_qos_compute_intersection( struct qos_info *qos, struct qos_info *new)
+void irda_qos_compute_intersection(struct qos_info *qos, struct qos_info *new)
{
- ASSERT( qos != NULL, return;);
- ASSERT( new != NULL, return;);
+ ASSERT(qos != NULL, return;);
+ ASSERT(new != NULL, return;);
/* Apply */
qos->baud_rate.bits &= new->baud_rate.bits;
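
The intersection is pure bit arithmetic: each QoS field is a bitmask of supported values, so AND-ing the local and remote masks leaves exactly the values both peers can do, and the negotiated setting is then picked from what survives. Illustrative values only:

	__u16 ours   = 0x003f;		/* we offer six baud classes    */
	__u16 theirs = 0x000f;		/* peer offers the lowest four  */
	__u16 common = ours & theirs;	/* 0x000f: negotiate from these */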
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index c4822e2c6..2ddf84efa 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -1,15 +1,17 @@
/*********************************************************************
*
* Filename: wrapper.c
- * Version: 1.1
- * Description: SIR wrapper layer
+ * Version: 1.2
+ * Description: IrDA SIR async wrapper layer
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Wed Apr 21 12:45:55 1999
+ * Modified at: Fri May 28 20:30:24 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Modified at: Fri May 28 3:11 CST 1999
+ * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,6 +26,7 @@
********************************************************************/
#include <linux/skbuff.h>
+#include <linux/string.h>
#include <asm/byteorder.h>
#include <net/irda/irda.h>
@@ -34,7 +37,20 @@
#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>
-inline static int stuff_byte(__u8 byte, __u8 *buf);
+static inline int stuff_byte(__u8 byte, __u8 *buf);
+
+static void state_outside_frame(struct irda_device *idev, __u8 byte);
+static void state_begin_frame(struct irda_device *idev, __u8 byte);
+static void state_link_escape(struct irda_device *idev, __u8 byte);
+static void state_inside_frame(struct irda_device *idev, __u8 byte);
+
+static void (*state[])(struct irda_device *idev, __u8 byte) =
+{
+ state_outside_frame,
+ state_begin_frame,
+ state_link_escape,
+ state_inside_frame,
+};
/*
* Function async_wrap (skb, *tx_buff)
@@ -52,8 +68,6 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
__u8 bytes[2];
} fcs;
- ASSERT(skb != NULL, return 0;);
-
/* Initialize variables */
fcs.value = INIT_FCS;
n = 0;
@@ -74,13 +88,9 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
} else
xbofs = ((struct irlap_skb_cb *)(skb->cb))->xbofs;
-#if 0
- for (i=0; i<xbofs; i++)
- tx_buff[n++] = XBOF;
-#else
memset(tx_buff+n, XBOF, xbofs);
n += xbofs;
-#endif
+
/* Start of packet character BOF */
tx_buff[n++] = BOF;
@@ -94,7 +104,7 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
ASSERT(n < (buffsize-5), return n;);
n += stuff_byte(skb->data[i], tx_buff+n);
- fcs.value = IR_FCS(fcs.value, skb->data[i]);
+ fcs.value = irda_fcs(fcs.value, skb->data[i]);
}
/* Insert CRC in little endian format (LSB first) */
@@ -108,15 +118,6 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
#endif
tx_buff[n++] = EOF;
-#if 0
- {
- int i;
-
- for (i=0;i<n;i++)
- printk("%02x", tx_buff[i]);
- printk("\n");
- }
-#endif
return n;
}
@@ -155,147 +156,13 @@ static inline void async_bump(struct irda_device *idev, __u8 *buf, int len)
}
/*
- * Function async_unwrap (skb)
- *
- * Parse and de-stuff frame received from the IR-port
- *
- */
-void async_unwrap_char(struct irda_device *idev, __u8 byte)
-{
- /* State machine for receiving frames */
- switch (idev->rx_buff.state) {
- case OUTSIDE_FRAME:
- switch(byte) {
- case BOF:
- idev->rx_buff.state = BEGIN_FRAME;
- idev->rx_buff.in_frame = TRUE;
- break;
- case XBOF:
- /* idev->xbofs++; */
- break;
- case EOF:
- irda_device_set_media_busy( idev, TRUE);
- break;
- default:
- break;
- }
- break;
- case BEGIN_FRAME:
- switch (byte) {
- case BOF:
- /* Continue */
- break;
- case CE:
- /* Stuffed byte */
- idev->rx_buff.state = LINK_ESCAPE;
- break;
- case EOF:
- /* Abort frame */
- idev->rx_buff.state = OUTSIDE_FRAME;
-
- idev->stats.rx_errors++;
- idev->stats.rx_frame_errors++;
- break;
- default:
- /* Got first byte of frame */
- idev->rx_buff.data = idev->rx_buff.head;
- idev->rx_buff.len = 0;
-
- idev->rx_buff.data[idev->rx_buff.len++] = byte;
-
- idev->rx_buff.fcs = IR_FCS(INIT_FCS, byte);
- idev->rx_buff.state = INSIDE_FRAME;
- break;
- }
- break;
- case LINK_ESCAPE:
- switch (byte) {
- case BOF:
- /* New frame? */
- idev->rx_buff.state = BEGIN_FRAME;
- irda_device_set_media_busy(idev, TRUE);
- break;
- case CE:
- DEBUG(4, "WARNING: State not defined\n");
- break;
- case EOF:
- /* Abort frame */
- idev->rx_buff.state = OUTSIDE_FRAME;
- break;
- default:
- /*
- * Stuffed char, complement bit 5 of byte
- * following CE, IrLAP p.114
- */
- byte ^= IR_TRANS;
- if (idev->rx_buff.len < idev->rx_buff.truesize) {
- idev->rx_buff.data[idev->rx_buff.len++] = byte;
- idev->rx_buff.fcs = IR_FCS(idev->rx_buff.fcs,
- byte);
- idev->rx_buff.state = INSIDE_FRAME;
- } else {
- DEBUG(1, __FUNCTION__
- "(), Rx buffer overflow, aborting\n");
- idev->rx_buff.state = OUTSIDE_FRAME;
- }
- break;
- }
- break;
- case INSIDE_FRAME:
- switch (byte) {
- case BOF:
- /* New frame? */
- idev->rx_buff.state = BEGIN_FRAME;
- irda_device_set_media_busy(idev, TRUE);
- break;
- case CE:
- /* Stuffed char */
- idev->rx_buff.state = LINK_ESCAPE;
- break;
- case EOF:
- /* End of frame */
- idev->rx_buff.state = OUTSIDE_FRAME;
- idev->rx_buff.in_frame = FALSE;
-
- /*
- * Test FCS and deliver frame if it's good
- */
- if (idev->rx_buff.fcs == GOOD_FCS) {
- async_bump(idev, idev->rx_buff.data,
- idev->rx_buff.len);
- } else {
- /* Wrong CRC, discard frame! */
- irda_device_set_media_busy(idev, TRUE);
-
- idev->stats.rx_errors++;
- idev->stats.rx_crc_errors++;
- }
- break;
- default:
- /* Next byte of frame */
- if (idev->rx_buff.len < idev->rx_buff.truesize) {
- idev->rx_buff.data[idev->rx_buff.len++] = byte;
- idev->rx_buff.fcs = IR_FCS(idev->rx_buff.fcs,
- byte);
- } else {
- DEBUG(1, __FUNCTION__
- "(), Rx buffer overflow, aborting\n");
- idev->rx_buff.state = OUTSIDE_FRAME;
- }
- break;
- }
- break;
- }
-}
-
-/*
* Function stuff_byte (byte, buf)
*
* Byte stuff one single byte and put the result in buffer pointed to by
* buf. The buffer must at all times be able to have two bytes inserted.
*
*/
-inline static int stuff_byte(__u8 byte, __u8 *buf)
+static inline int stuff_byte(__u8 byte, __u8 *buf)
{
switch (byte) {
case BOF: /* FALLTHROUGH */
@@ -303,7 +170,7 @@ inline static int stuff_byte(__u8 byte, __u8 *buf)
case CE:
/* Insert transparently coded */
buf[0] = CE; /* Send link escape */
- buf[1] = byte^IR_TRANS; /* Complement bit 5 */
+ buf[1] = byte^IRDA_TRANS; /* Complement bit 5 */
return 2;
/* break; */
default:
@@ -313,7 +180,163 @@ inline static int stuff_byte(__u8 byte, __u8 *buf)
/* break; */
}
}
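
stuff_byte() keeps the wire free of in-band framing bytes: BOF, EOF and CE themselves are sent as CE followed by the byte with bit 5 complemented, and every other value passes through unchanged. A worked round trip, assuming the conventional async framing values BOF=0xc0, CE=0x7d, IRDA_TRANS=0x20 (none of those constants are visible in this hunk):

	__u8 buf[2];
	int n = stuff_byte(0xc0, buf);	/* n == 2, buf = { 0x7d, 0xe0 } */
	/* receiver, in state_link_escape: 0xe0 ^ IRDA_TRANS == 0xc0 */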
+
+/*
+ * Function async_unwrap (skb)
+ *
+ * Parse and de-stuff frame received from the IrDA-port
+ *
+ */
+inline void async_unwrap_char(struct irda_device *idev, __u8 byte)
+{
+ (*state[idev->rx_buff.state]) (idev, byte);
+}
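
With the switch-of-switches replaced by a table, rx_buff.state must enumerate OUTSIDE_FRAME..INSIDE_FRAME as 0..3 in exactly the order of state[]. A SIR driver's receive path then just feeds bytes through one at a time, roughly as below (the loop is illustrative; only async_unwrap_char's signature comes from this patch):

/* Typical SIR driver rx path (sketch). */
static void sir_rx_bytes(struct irda_device *idev, const __u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		async_unwrap_char(idev, buf[i]);
}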
+/*
+ * Function state_outside_frame (idev, byte)
+ *
+ *
+ *
+ */
+static void state_outside_frame(struct irda_device *idev, __u8 byte)
+{
+ switch (byte) {
+ case BOF:
+ idev->rx_buff.state = BEGIN_FRAME;
+ idev->rx_buff.in_frame = TRUE;
+ break;
+ case XBOF:
+ /* idev->xbofs++; */
+ break;
+ case EOF:
+ irda_device_set_media_busy( idev, TRUE);
+ break;
+ default:
+ break;
+ }
+}
+/*
+ * Function state_begin_frame (idev, byte)
+ *
+ * Begin of frame detected
+ *
+ */
+static void state_begin_frame(struct irda_device *idev, __u8 byte)
+{
+ switch (byte) {
+ case BOF:
+ /* Continue */
+ break;
+ case CE:
+ /* Stuffed byte */
+ idev->rx_buff.state = LINK_ESCAPE;
+
+ /* Time to initialize receive buffer */
+ idev->rx_buff.data = idev->rx_buff.head;
+ idev->rx_buff.len = 0;
+ break;
+ case EOF:
+ /* Abort frame */
+ idev->rx_buff.state = OUTSIDE_FRAME;
+
+ idev->stats.rx_errors++;
+ idev->stats.rx_frame_errors++;
+ break;
+ default:
+ /* Time to initialize receive buffer */
+ idev->rx_buff.data = idev->rx_buff.head;
+ idev->rx_buff.len = 0;
+
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
+
+ idev->rx_buff.fcs = irda_fcs(INIT_FCS, byte);
+ idev->rx_buff.state = INSIDE_FRAME;
+ break;
+ }
+}
+
+/*
+ * Function state_link_escape (idev, byte)
+ *
+ *
+ *
+ */
+static void state_link_escape(struct irda_device *idev, __u8 byte)
+{
+ switch (byte) {
+ case BOF: /* New frame? */
+ idev->rx_buff.state = BEGIN_FRAME;
+ irda_device_set_media_busy(idev, TRUE);
+ break;
+ case CE:
+ DEBUG(4, "WARNING: State not defined\n");
+ break;
+ case EOF: /* Abort frame */
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ break;
+ default:
+ /*
+ * Stuffed char, complement bit 5 of byte
+ * following CE, IrLAP p.114
+ */
+ byte ^= IRDA_TRANS;
+ if (idev->rx_buff.len < idev->rx_buff.truesize) {
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
+ idev->rx_buff.fcs = irda_fcs(idev->rx_buff.fcs, byte);
+ idev->rx_buff.state = INSIDE_FRAME;
+ } else {
+ DEBUG(1, __FUNCTION__
+ "(), Rx buffer overflow, aborting\n");
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ }
+ break;
+ }
+}
+
+/*
+ * Function state_inside_frame (idev, byte)
+ *
+ * Handle bytes received within a frame
+ *
+ */
+static void state_inside_frame(struct irda_device *idev, __u8 byte)
+{
+ switch (byte) {
+ case BOF: /* New frame? */
+ idev->rx_buff.state = BEGIN_FRAME;
+ irda_device_set_media_busy(idev, TRUE);
+ break;
+ case CE: /* Stuffed char */
+ idev->rx_buff.state = LINK_ESCAPE;
+ break;
+ case EOF: /* End of frame */
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ idev->rx_buff.in_frame = FALSE;
+
+ /* Test FCS and deliver frame if it's good */
+ if (idev->rx_buff.fcs == GOOD_FCS) {
+ async_bump(idev, idev->rx_buff.data,
+ idev->rx_buff.len);
+ } else {
+ /* Wrong CRC, discard frame! */
+ irda_device_set_media_busy(idev, TRUE);
+
+ idev->stats.rx_errors++;
+ idev->stats.rx_crc_errors++;
+ }
+ break;
+ default: /* Must be the next byte of the frame */
+ if (idev->rx_buff.len < idev->rx_buff.truesize) {
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
+ idev->rx_buff.fcs = irda_fcs(idev->rx_buff.fcs, byte);
+ } else {
+ DEBUG(1, __FUNCTION__
+ "(), Rx buffer overflow, aborting\n");
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ }
+ break;
+ }
+}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9247bf99c..eed55f8ac 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1,7 +1,7 @@
/*
* NETLINK Kernel-user communication protocol.
*
- * Authors: Alan Cox <alan@cymru.net>
+ * Authors: Alan Cox <alan@redhat.com>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
@@ -45,7 +45,7 @@
static struct sock *nl_table[MAX_LINKS];
static atomic_t nl_table_lock[MAX_LINKS];
-static struct wait_queue *nl_table_wait;
+static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
#ifdef NL_EMULATE_DEV
static struct socket *netlink_kernel[MAX_LINKS];
@@ -203,7 +203,7 @@ static int netlink_release(struct socket *sock, struct socket *peer)
*/
while (netlink_locked(sk)) {
- current->counter = 0;
+ current->policy |= SCHED_YIELD;
schedule();
}
@@ -331,7 +331,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
struct sock *sk;
int len = skb->len;
int protocol = ssk->protocol;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
retry:
for (sk = nl_table[protocol]; sk; sk = sk->next) {
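
The two conversions match the split in the 2.3 wait-queue API: DECLARE_WAIT_QUEUE_HEAD() builds an initialized wait_queue_head_t for a static queue such as nl_table_wait, while DECLARE_WAITQUEUE() builds the on-stack entry a sleeping task hangs on that head. The usual pairing, as a generic sketch (the function name and sleep loop are illustrative, not lifted from this file):

static void wait_on_nl_table(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&nl_table_wait, &wait);
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(&nl_table_wait, &wait);
}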
diff --git a/net/netlink/netlink_dev.c b/net/netlink/netlink_dev.c
index b127137b2..c4b600729 100644
--- a/net/netlink/netlink_dev.c
+++ b/net/netlink/netlink_dev.c
@@ -2,7 +2,7 @@
* NETLINK An implementation of a loadable kernel mode driver providing
* multiple kernel/user space bidirectional communications links.
*
- * Author: Alan Cox <alan@cymru.net>
+ * Author: Alan Cox <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index d46e45eb6..fa2167a46 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -564,10 +564,13 @@ struct device *nr_dev_first(void)
{
struct device *dev, *first = NULL;
- for (dev = dev_base; dev != NULL; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
+ }
+ read_unlock(&dev_base_lock);
return first;
}
@@ -579,11 +582,14 @@ struct device *nr_dev_get(ax25_address *addr)
{
struct device *dev;
- for (dev = dev_base; dev != NULL; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0)
- return dev;
-
- return NULL;
+ goto out;
+ }
+out:
+ read_unlock(&dev_base_lock);
+ return dev;
}
static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
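
Both device searches now walk dev_base under dev_base_lock and leave through a single label, so the read lock is dropped on every path; when the loop runs off the end, dev is NULL and that is exactly what gets returned. The shape, reduced to a sketch with an illustrative node type:

struct node { struct node *next; int key; };

static struct node *find_node(struct node *head, rwlock_t *lock, int key)
{
	struct node *n;

	read_lock(lock);
	for (n = head; n != NULL; n = n->next) {
		if (n->key == key)
			goto out;	/* found: fall through to the unlock */
	}
out:					/* n == NULL when nothing matched */
	read_unlock(lock);
	return n;
}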
diff --git a/net/netsyms.c b/net/netsyms.c
index 764900d50..021e17ced 100644
--- a/net/netsyms.c
+++ b/net/netsyms.c
@@ -106,10 +106,17 @@ EXPORT_SYMBOL(dev_lockct);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_under_panic);
+/* Socket layer global data */
+EXPORT_SYMBOL(sockhash_lock);
+
/* Socket layer registration */
EXPORT_SYMBOL(sock_register);
EXPORT_SYMBOL(sock_unregister);
+/* Socket locking */
+EXPORT_SYMBOL(lock_sock);
+EXPORT_SYMBOL(release_sock);
+
/* Socket layer support routines */
EXPORT_SYMBOL(memcpy_fromiovec);
EXPORT_SYMBOL(memcpy_tokerneliovec);
@@ -156,7 +163,6 @@ EXPORT_SYMBOL(put_cmsg);
EXPORT_SYMBOL(net_families);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_kfree_s);
-EXPORT_SYMBOL(skb_queue_lock);
#ifdef CONFIG_FILTER
EXPORT_SYMBOL(sk_run_filter);
@@ -164,7 +170,6 @@ EXPORT_SYMBOL(sk_run_filter);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_clear);
-EXPORT_SYMBOL(__neigh_lookup);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_update);
@@ -187,7 +192,6 @@ EXPORT_SYMBOL(neigh_rand_reach_time);
/* dst_entry */
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(__dst_free);
-EXPORT_SYMBOL(dst_total);
EXPORT_SYMBOL(dst_destroy);
/* misc. support routines */
@@ -243,7 +247,6 @@ EXPORT_SYMBOL(ip_mc_dec_group);
EXPORT_SYMBOL(__ip_finish_output);
EXPORT_SYMBOL(inet_dgram_ops);
EXPORT_SYMBOL(ip_cmsg_recv);
-EXPORT_SYMBOL(__release_sock);
/* Route manipulation */
EXPORT_SYMBOL(ip_rt_ioctl);
@@ -279,9 +282,11 @@ EXPORT_SYMBOL(inet_recvmsg);
/* Socket demultiplexing. */
EXPORT_SYMBOL(tcp_good_socknum);
-EXPORT_SYMBOL(tcp_established_hash);
+EXPORT_SYMBOL(tcp_ehash);
+EXPORT_SYMBOL(tcp_ehash_size);
EXPORT_SYMBOL(tcp_listening_hash);
-EXPORT_SYMBOL(tcp_bound_hash);
+EXPORT_SYMBOL(tcp_bhash);
+EXPORT_SYMBOL(tcp_bhash_size);
EXPORT_SYMBOL(udp_good_socknum);
EXPORT_SYMBOL(udp_hash);
@@ -382,8 +387,7 @@ EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(sklist_remove_socket);
-EXPORT_SYMBOL(rtnl_wait);
-EXPORT_SYMBOL(rtnl_rlockct);
+EXPORT_SYMBOL(rtnl_sem);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_unlock);
@@ -470,6 +474,7 @@ EXPORT_SYMBOL(netdev_unregister_fc);
EXPORT_SYMBOL(netdev_fc_xoff);
#endif
EXPORT_SYMBOL(dev_base);
+EXPORT_SYMBOL(dev_base_lock);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_mc_add);
EXPORT_SYMBOL(dev_mc_delete);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e78e41352..fbfe95482 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,7 +5,7 @@
*
* PACKET - implements raw packet sockets.
*
- * Version: $Id: af_packet.c,v 1.19 1999/03/21 05:23:03 davem Exp $
+ * Version: $Id: af_packet.c,v 1.20 1999/06/09 10:11:32 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -286,26 +286,27 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
else
return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
+ dev_lock_list();
+
/*
* Find the device first to size check it
*/
saddr->spkt_device[13] = 0;
dev = dev_get(saddr->spkt_device);
- if (dev == NULL)
- {
- return(-ENODEV);
- }
+ err = -ENODEV;
+ if (dev == NULL)
+ goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
- if(len>dev->mtu+dev->hard_header_len)
- return -EMSGSIZE;
+ err = -EMSGSIZE;
+ if(len>dev->mtu+dev->hard_header_len)
+ goto out_unlock;
- dev_lock_list();
err = -ENOBUFS;
skb = sock_wmalloc(sk, len+dev->hard_header_len+15, 0, GFP_KERNEL);
@@ -351,8 +352,8 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
* Now send it
*/
- dev_unlock_list();
dev_queue_xmit(skb);
+ dev_unlock_list();
return(len);
out_free:
@@ -455,16 +456,18 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
addr = saddr->sll_addr;
}
+ dev_lock_list();
dev = dev_get_by_index(ifindex);
+ err = -ENXIO;
if (dev == NULL)
- return -ENXIO;
+ goto out_unlock;
if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len;
+ err = -EMSGSIZE;
if (len > dev->mtu+reserve)
- return -EMSGSIZE;
+ goto out_unlock;
- dev_lock_list();
skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15, 0,
msg->msg_flags & MSG_DONTWAIT, &err);
@@ -501,8 +504,8 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
* Now send it
*/
- dev_unlock_list();
dev_queue_xmit(skb);
+ dev_unlock_list();
return(len);
out_free:
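
Moving dev_lock_list() to the top of packet_sendmsg_spkt() forces every failure after it through an unlock, so the patch adopts the usual "set err, jump to one label" shape. Generically, with hypothetical request fields and a hypothetical do_transmit() helper standing in for the real work:

static int send_one(struct sock *sk, struct request *req)
{
	int err;

	dev_lock_list();

	err = -ENODEV;
	if (req->dev == NULL)
		goto out_unlock;

	err = -EMSGSIZE;
	if (req->len > req->dev->mtu + req->dev->hard_header_len)
		goto out_unlock;

	err = do_transmit(sk, req);	/* success and failure share the unlock */

out_unlock:
	dev_unlock_list();
	return err;
}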
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 1fad6b7cc..28a6f8adb 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -543,10 +543,13 @@ struct device *rose_dev_first(void)
{
struct device *dev, *first = NULL;
- for (dev = dev_base; dev != NULL; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
+ }
+ read_unlock(&dev_base_lock);
return first;
}
@@ -558,11 +561,14 @@ struct device *rose_dev_get(rose_address *addr)
{
struct device *dev;
- for (dev = dev_base; dev != NULL; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
- return dev;
-
- return NULL;
+ goto out;
+ }
+out:
+ read_unlock(&dev_base_lock);
+ return dev;
}
struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 683063137..9d2a95ea6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -39,20 +39,24 @@
static struct tcf_proto_ops *tcf_proto_base;
+/* Protects list of registered TC modules. It is pure SMP lock. */
+static rwlock_t cls_mod_lock = RW_LOCK_UNLOCKED;
/* Find classifier type by string name */
struct tcf_proto_ops * tcf_proto_lookup_ops(struct rtattr *kind)
{
- struct tcf_proto_ops *t;
+ struct tcf_proto_ops *t = NULL;
if (kind) {
+ read_lock(&cls_mod_lock);
for (t = tcf_proto_base; t; t = t->next) {
if (rtattr_strcmp(kind, t->kind) == 0)
- return t;
+ break;
}
+ read_unlock(&cls_mod_lock);
}
- return NULL;
+ return t;
}
/* Register(unregister) new classifier type */
@@ -61,12 +65,17 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t, **tp;
- for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
- if (strcmp(ops->kind, t->kind) == 0)
+ write_lock(&cls_mod_lock);
+ for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) {
+ if (strcmp(ops->kind, t->kind) == 0) {
+ write_unlock(&cls_mod_lock);
return -EEXIST;
+ }
+ }
ops->next = NULL;
*tp = ops;
+ write_unlock(&cls_mod_lock);
return 0;
}
@@ -74,13 +83,17 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t, **tp;
+ write_lock(&cls_mod_lock);
for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
if (t == ops)
break;
- if (!t)
+ if (!t) {
+ write_unlock(&cls_mod_lock);
return -ENOENT;
+ }
*tp = t->next;
+ write_unlock(&cls_mod_lock);
return 0;
}
@@ -217,8 +230,12 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
kfree(tp);
goto errout;
}
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
tp->next = *back;
*back = tp;
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
goto errout;
@@ -226,8 +243,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
*back = tp->next;
- synchronize_bh();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
tp->ops->destroy(tp);
kfree(tp);
@@ -344,12 +364,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
+
+ read_lock(&qdisc_tree_lock);
if (!tcm->tcm_parent)
q = dev->qdisc_sleeping;
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
- if (q == NULL)
+ if (q == NULL) {
+ read_unlock(&qdisc_tree_lock);
return skb->len;
+ }
if ((cops = q->ops->cl_ops) == NULL)
goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
@@ -400,6 +424,7 @@ errout:
if (cl)
cops->put(q, cl);
+ read_unlock(&qdisc_tree_lock);
return skb->len;
}
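
The classifier-module list gets the canonical rwlock treatment: lookups take the read side and break out with the match (or NULL) in hand, while register/unregister take the write side around the list surgery. A condensed sketch of the pair, assuming an illustrative singly linked ops list:

static struct my_ops *ops_base;
static rwlock_t mod_lock = RW_LOCK_UNLOCKED;

struct my_ops *ops_lookup(const char *kind)
{
	struct my_ops *t;

	read_lock(&mod_lock);
	for (t = ops_base; t; t = t->next)
		if (strcmp(kind, t->kind) == 0)
			break;		/* t is the match, or NULL */
	read_unlock(&mod_lock);
	return t;
}

int ops_register(struct my_ops *ops)
{
	struct my_ops *t, **tp;

	write_lock(&mod_lock);
	for (tp = &ops_base; (t = *tp) != NULL; tp = &t->next) {
		if (strcmp(ops->kind, t->kind) == 0) {
			write_unlock(&mod_lock);
			return -EEXIST;	/* duplicate name */
		}
	}
	ops->next = NULL;
	*tp = ops;			/* append at the tail */
	write_unlock(&mod_lock);
	return 0;
}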
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index e92b846ee..598867187 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -136,7 +136,7 @@ static void fw_destroy(struct tcf_proto *tp)
unsigned long cl;
head->ht[h] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
@@ -161,10 +161,11 @@ static int fw_delete(struct tcf_proto *tp, unsigned long arg)
if (*fp == f) {
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
@@ -203,7 +204,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid);
- cl = cls_set_class(&f->res.class, cl);
+ cl = cls_set_class(tp, &f->res.class, cl);
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
@@ -211,8 +212,9 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
if (tb[TCA_FW_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
@@ -229,8 +231,9 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
return -ENOBUFS;
memset(head, 0, sizeof(*head));
+ tcf_tree_lock(tp);
tp->root = head;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
@@ -245,7 +248,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != 4)
goto errout;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
}
#ifdef CONFIG_NET_CLS_POLICE
@@ -254,8 +257,9 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
#endif
f->next = head->ht[fw_hash(handle)];
- wmb();
+ tcf_tree_lock(tp);
head->ht[fw_hash(handle)] = f;
+ tcf_tree_unlock(tp);
*arg = (unsigned long)f;
return 0;
@@ -335,7 +339,8 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
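
The recurring edit in the classifier files replaces "update the pointer, then synchronize_bh()" with doing the update inside tcf_tree_lock(), which readers in the packet path now also take. Reduced to the idiom itself, with an illustrative payload type:

/* Swap a shared pointer so readers see either the old or the new value,
 * never a half-updated one; the old object is freed outside the lock. */
static struct thing *swap_thing(struct tcf_proto *tp,
				struct thing **slot, struct thing *new)
{
	struct thing *old;

	tcf_tree_lock(tp);
	old = *slot;
	*slot = new;
	tcf_tree_unlock(tp);
	return old;		/* caller: release old after the unlock */
}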
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index f83e79134..cb0d21d0f 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -83,11 +83,11 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
return id&0xF;
}
-static void route4_reset_fastmap(struct route4_head *head, u32 id)
+static void route4_reset_fastmap(struct device *dev, struct route4_head *head, u32 id)
{
- start_bh_atomic();
+ spin_lock_bh(&dev->queue_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
}
static void __inline__
@@ -297,7 +297,7 @@ static void route4_destroy(struct tcf_proto *tp)
unsigned long cl;
b->ht[h2] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
@@ -329,12 +329,13 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
if (*fp == f) {
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- route4_reset_fastmap(head, f->id);
+ route4_reset_fastmap(tp->q->dev, head, f->id);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
@@ -349,8 +350,9 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
/* OK, session has no flows */
+ tcf_tree_lock(tp);
head->table[to_hash(h)] = NULL;
- synchronize_bh();
+ tcf_tree_unlock(tp);
kfree(b);
return 0;
@@ -387,7 +389,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
unsigned long cl;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
@@ -395,8 +397,9 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (tb[TCA_ROUTE4_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
@@ -412,8 +415,9 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
return -ENOBUFS;
memset(head, 0, sizeof(struct route4_head));
+ tcf_tree_lock(tp);
tp->root = head;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
@@ -475,8 +479,9 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
goto errout;
memset(b, 0, sizeof(*b));
+ tcf_tree_lock(tp);
head->table[h1] = b;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f->bkt = b;
@@ -489,17 +494,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
goto errout;
}
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_ROUTE4_POLICE-1])
f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
#endif
f->next = f1;
- wmb();
+ tcf_tree_lock(tp);
*ins_f = f;
+ tcf_tree_unlock(tp);
- route4_reset_fastmap(head, f->id);
+ route4_reset_fastmap(tp->q->dev, head, f->id);
*arg = (unsigned long)f;
return 0;
@@ -589,7 +595,8 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 48142c6e7..be4471d78 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -282,7 +282,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
unsigned long cl;
s->ht[h2] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
@@ -310,10 +310,11 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
@@ -332,8 +333,9 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
+ tcf_tree_lock(tp);
*sp = s->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
kfree(s);
return 0;
@@ -446,7 +448,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
unsigned long cl;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_RSVP_CLASSID-1]);
- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
@@ -454,8 +456,9 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (tb[TCA_RSVP_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
@@ -536,7 +539,7 @@ insert:
f->sess = s;
if (f->tunnelhdr == 0)
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_RSVP_POLICE-1])
f->police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
@@ -659,7 +662,8 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 98d4e1f7b..f759d150a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -307,7 +307,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
unsigned long cl;
- if ((cl = cls_set_class(&n->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&n->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(n->police);
@@ -326,8 +326,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
if (ht) {
for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
if (*kp == key) {
+ tcf_tree_lock(tp);
*kp = key->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
u32_destroy_key(tp, key);
return 0;
@@ -346,7 +347,6 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
for (h=0; h<=ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
- synchronize_bh();
u32_destroy_key(tp, n);
}
@@ -465,8 +465,9 @@ static int u32_set_parms(struct Qdisc *q, unsigned long base,
ht_down->refcnt++;
}
+ sch_tree_lock(q);
ht_down = xchg(&n->ht_down, ht_down);
- synchronize_bh();
+ sch_tree_unlock(q);
if (ht_down)
ht_down->refcnt--;
@@ -475,7 +476,9 @@ static int u32_set_parms(struct Qdisc *q, unsigned long base,
unsigned long cl;
n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
- cl = cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
+ sch_tree_lock(q);
+ cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
+ sch_tree_unlock(q);
if (cl)
q->ops->cl_ops->unbind_tcf(q, cl);
}
@@ -483,8 +486,9 @@ static int u32_set_parms(struct Qdisc *q, unsigned long base,
if (tb[TCA_U32_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
+ sch_tree_lock(q);
police = xchg(&n->police, police);
- synchronize_bh();
+		sch_tree_unlock(q);
tcf_police_release(police);
}
@@ -682,7 +686,8 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (TC_U32_KEY(n->handle) && n->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &n->police->stats);
+ if (qdisc_copy_stats(skb, &n->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index d51017c84..e70066f9c 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -97,29 +97,38 @@ struct qdisc_estimator_head
static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
+/* Estimator array lock */
+static rwlock_t est_lock = RW_LOCK_UNLOCKED;
+
static void est_timer(unsigned long arg)
{
int idx = (int)arg;
struct qdisc_estimator *e;
+ read_lock(&est_lock);
for (e = elist[idx].list; e; e = e->next) {
- u64 nbytes = e->stats->bytes;
- u32 npackets = e->stats->packets;
+ struct tc_stats *st = e->stats;
+ u64 nbytes;
+ u32 npackets;
u32 rate;
-
+
+ spin_lock(st->lock);
+ nbytes = st->bytes;
+ npackets = st->packets;
rate = (nbytes - e->last_bytes)<<(7 - idx);
e->last_bytes = nbytes;
e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
- e->stats->bps = (e->avbps+0xF)>>5;
+ st->bps = (e->avbps+0xF)>>5;
rate = (npackets - e->last_packets)<<(12 - idx);
e->last_packets = npackets;
e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
e->stats->pps = (e->avpps+0x1FF)>>10;
+ spin_unlock(st->lock);
}
- elist[idx].timer.expires = jiffies + ((HZ/4)<<idx);
- add_timer(&elist[idx].timer);
+ mod_timer(&elist[idx].timer, jiffies + ((HZ/4)<<idx));
+ read_unlock(&est_lock);
}
int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt)
@@ -154,7 +163,9 @@ int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt)
elist[est->interval].timer.function = est_timer;
add_timer(&elist[est->interval].timer);
}
+ write_lock_bh(&est_lock);
elist[est->interval].list = est;
+ write_unlock_bh(&est_lock);
return 0;
}
@@ -172,8 +183,9 @@ void qdisc_kill_estimator(struct tc_stats *stats)
continue;
}
+ write_lock_bh(&est_lock);
*pest = est->next;
- synchronize_bh();
+ write_unlock_bh(&est_lock);
kfree(est);
killed++;
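
est_timer() above is a fixed-point EWMA: each tick it converts the byte delta to a scaled rate, folds it into the running average with a shift, and publishes the average rescaled to bytes per second. A userspace rendition of the byte-rate half, using the same shifts (field names are illustrative):

#include <stdint.h>

struct rate_est {
	uint64_t last_bytes;
	int32_t  avbps;		/* scaled running average */
	int      ewma_log;	/* smoothing: larger = smoother, slower */
	int      idx;		/* interval index; period is (HZ/4)<<idx */
};

static uint32_t est_update(struct rate_est *e, uint64_t nbytes)
{
	uint32_t rate = (uint32_t)((nbytes - e->last_bytes) << (7 - e->idx));

	e->last_bytes = nbytes;
	e->avbps += ((int32_t)rate - e->avbps) >> e->ewma_log;
	return ((uint32_t)e->avbps + 0xF) >> 5;	/* bytes/sec estimate */
}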
diff --git a/net/sched/police.c b/net/sched/police.c
index 89e58d8be..f81f89aed 100644
--- a/net/sched/police.c
+++ b/net/sched/police.c
@@ -38,6 +38,10 @@
static u32 idx_gen;
static struct tcf_police *tcf_police_ht[16];
+/* Policer hash table lock */
+static rwlock_t police_lock = RW_LOCK_UNLOCKED;
+
+/* Each policer is serialized by its individual spinlock */
static __inline__ unsigned tcf_police_hash(u32 index)
{
@@ -48,11 +52,13 @@ static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
{
struct tcf_police *p;
+ read_lock(&police_lock);
for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
if (p->index == index)
- return p;
+ break;
}
- return NULL;
+ read_unlock(&police_lock);
+ return p;
}
static __inline__ u32 tcf_police_new_index(void)
@@ -73,7 +79,9 @@ void tcf_police_destroy(struct tcf_police *p)
for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
if (*p1p == p) {
+ write_lock_bh(&police_lock);
*p1p = p->next;
+ write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&p->stats);
#endif
@@ -114,6 +122,8 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
memset(p, 0, sizeof(*p));
p->refcnt = 1;
+ spin_lock_init(&p->lock);
+ p->stats.lock = &p->lock;
if (parm->rate.rate) {
if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
goto failure;
@@ -144,8 +154,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
qdisc_new_estimator(&p->stats, est);
#endif
h = tcf_police_hash(p->index);
+ write_lock_bh(&police_lock);
p->next = tcf_police_ht[h];
tcf_police_ht[h] = p;
+ write_unlock_bh(&police_lock);
return p;
failure:
@@ -161,19 +173,24 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
long toks;
long ptoks = 0;
+ spin_lock(&p->lock);
+
p->stats.bytes += skb->len;
p->stats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
p->stats.overlimits++;
+ spin_unlock(&p->lock);
return p->action;
}
#endif
if (skb->len <= p->mtu) {
- if (p->R_tab == NULL)
+ if (p->R_tab == NULL) {
+ spin_unlock(&p->lock);
return p->result;
+ }
PSCHED_GET_TIME(now);
@@ -194,11 +211,13 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
p->t_c = now;
p->toks = toks;
p->ptoks = ptoks;
+ spin_unlock(&p->lock);
return p->result;
}
}
p->stats.overlimits++;
+ spin_unlock(&p->lock);
return p->action;
}
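
tcf_police() is a token bucket: tokens accumulate with elapsed time up to the configured burst, each packet spends its length in tokens, and a negative balance marks the packet as out of profile. Stripped of the peak-rate leg and the new per-policer spinlock, the decision reads roughly as below; the time unit is deliberately abstract here:

struct bucket {
	long          toks;	/* current balance, capped at burst */
	long          burst;	/* maximum accumulation */
	long          rate;	/* tokens credited per time unit */
	unsigned long t_c;	/* time of last update */
};

/* Returns 1 when the packet conforms (p->result above), 0 when it must
 * be policed (p->action above). */
static int tbf_conforms(struct bucket *b, unsigned long now, int len)
{
	long toks = b->toks + (long)(now - b->t_c) * b->rate;

	if (toks > b->burst)
		toks = b->burst;
	toks -= len;
	if (toks < 0)
		return 0;	/* over limit; bucket state untouched */

	b->t_c  = now;
	b->toks = toks;
	return 1;
}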
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0ced70bbc..fec6faefe 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -124,6 +124,10 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
changes qdisc parameters.
*/
+/* Protects list of registered TC modules. It is pure SMP lock. */
+static rwlock_t qdisc_mod_lock = RW_LOCK_UNLOCKED;
+
+
/************************************************
* Queueing disciplines manipulation. *
************************************************/
@@ -139,9 +143,13 @@ int register_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
- if (strcmp(qops->id, q->id) == 0)
+ write_lock(&qdisc_mod_lock);
+ for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) {
+ if (strcmp(qops->id, q->id) == 0) {
+ write_unlock(&qdisc_mod_lock);
return -EEXIST;
+ }
+ }
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
@@ -152,20 +160,26 @@ int register_qdisc(struct Qdisc_ops *qops)
qops->next = NULL;
*qp = qops;
+ write_unlock(&qdisc_mod_lock);
return 0;
}
int unregister_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
+ int err = -ENOENT;
+
+ write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
if (q == qops)
break;
- if (!q)
- return -ENOENT;
- *qp = q->next;
- q->next = NULL;
- return 0;
+ if (q) {
+ *qp = q->next;
+ q->next = NULL;
+ err = 0;
+ }
+ write_unlock(&qdisc_mod_lock);
+ return err;
}
/* We know handle. Find qdisc among all qdisc's attached to device
@@ -203,15 +217,17 @@ struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
{
- struct Qdisc_ops *q;
+ struct Qdisc_ops *q = NULL;
if (kind) {
+ read_lock(&qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) {
if (rtattr_strcmp(kind, q->id) == 0)
- return q;
+ break;
}
+ read_unlock(&qdisc_mod_lock);
}
- return NULL;
+ return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;
@@ -284,7 +300,8 @@ dev_graft_qdisc(struct device *dev, struct Qdisc *qdisc)
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- start_bh_atomic();
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
oqdisc = dev->qdisc_sleeping;
/* Prune old scheduler */
@@ -296,7 +313,8 @@ dev_graft_qdisc(struct device *dev, struct Qdisc *qdisc)
qdisc = &noop_qdisc;
dev->qdisc_sleeping = qdisc;
dev->qdisc = &noop_qdisc;
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
if (dev->flags & IFF_UP)
dev_activate(dev);
@@ -376,7 +394,7 @@ qdisc_create(struct device *dev, u32 handle, struct rtattr **tca, int *errp)
goto err_out;
/* Grrr... Resolve race condition with module unload */
-
+
err = -EINVAL;
if (ops != qdisc_lookup_ops(kind))
goto err_out;
@@ -389,6 +407,7 @@ qdisc_create(struct device *dev, u32 handle, struct rtattr **tca, int *errp)
sch->dequeue = ops->dequeue;
sch->dev = dev;
atomic_set(&sch->refcnt, 1);
+ sch->stats.lock = &dev->queue_lock;
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
@@ -398,8 +417,10 @@ qdisc_create(struct device *dev, u32 handle, struct rtattr **tca, int *errp)
sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
+ write_lock(&qdisc_tree_lock);
sch->next = dev->qdisc_list;
dev->qdisc_list = sch;
+ write_unlock(&qdisc_tree_lock);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
@@ -521,7 +542,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return err;
if (q) {
qdisc_notify(skb, n, clid, q, NULL);
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(q);
+ spin_unlock_bh(&dev->queue_lock);
}
} else {
qdisc_notify(skb, n, clid, NULL, q);
@@ -637,17 +660,36 @@ graft:
struct Qdisc *old_q = NULL;
err = qdisc_graft(dev, p, clid, q, &old_q);
if (err) {
- if (q)
+ if (q) {
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(q);
+ spin_unlock_bh(&dev->queue_lock);
+ }
return err;
}
qdisc_notify(skb, n, clid, old_q, q);
- if (old_q)
+ if (old_q) {
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(old_q);
+ spin_unlock_bh(&dev->queue_lock);
+ }
}
return 0;
}
+int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st)
+{
+ spin_lock_bh(st->lock);
+ RTA_PUT(skb, TCA_STATS, (char*)&st->lock - (char*)st, st);
+ spin_unlock_bh(st->lock);
+ return 0;
+
+rtattr_failure:
+ spin_unlock_bh(st->lock);
+ return -1;
+}
+
+
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 pid, u32 seq, unsigned flags, int event)
{
@@ -667,7 +709,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto rtattr_failure;
q->stats.qlen = q->q.qlen;
- RTA_PUT(skb, TCA_STATS, sizeof(q->stats), &q->stats);
+ if (qdisc_copy_stats(skb, &q->stats))
+ goto rtattr_failure;
nlh->nlmsg_len = skb->tail - b;
return skb->len;
@@ -713,22 +756,29 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
if (idx > s_idx)
s_q_idx = 0;
+ read_lock(&qdisc_tree_lock);
for (q = dev->qdisc_list, q_idx = 0; q;
q = q->next, q_idx++) {
if (q_idx < s_q_idx)
continue;
if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
+ read_unlock(&qdisc_tree_lock);
goto done;
+ }
}
+ read_unlock(&qdisc_tree_lock);
}
done:
+ read_unlock(&dev_base_lock);
+
cb->args[0] = idx;
cb->args[1] = q_idx;
@@ -933,6 +983,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
+ read_lock(&qdisc_tree_lock);
for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
if (t < s_t) continue;
if (!q->ops->cl_ops) continue;
@@ -951,6 +1002,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (arg.w.stop)
break;
}
+ read_unlock(&qdisc_tree_lock);
cb->args[0] = t;
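
qdisc_copy_stats() above dumps only the part of tc_stats that precedes the new lock member, computing the size as (char*)&st->lock - (char*)st so the kernel pointer never reaches userspace. That measurement is what offsetof() expresses; a sketch with an illustrative struct makes the intent plain:

#include <stddef.h>
#include <string.h>

struct stats_like {
	unsigned long bytes;
	unsigned long packets;
	unsigned long drops;
	void         *lock;	/* kernel-internal; not part of the dump */
};

static size_t dump_stats(void *dst, const struct stats_like *st)
{
	size_t n = offsetof(struct stats_like, lock);

	memcpy(dst, st, n);	/* everything up to, but excluding, lock */
	return n;
}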
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c8094a882..2244b68ed 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1417,6 +1417,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
+ q->link.stats.lock = &sch->dev->queue_lock;
init_timer(&q->wd_timer);
q->wd_timer.data = (unsigned long)sch;
@@ -1558,6 +1559,16 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
return 0;
}
+int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
+{
+ RTA_PUT(skb, TCA_STATS, sizeof(*st), st);
+ return 0;
+
+rtattr_failure:
+ return -1;
+}
+
+
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
@@ -1569,8 +1580,13 @@ static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
if (cbq_dump_attr(skb, &q->link) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
+ spin_lock_bh(&sch->dev->queue_lock);
q->link.xstats.avgidle = q->link.avgidle;
- RTA_PUT(skb, TCA_XSTATS, sizeof(q->link.xstats), &q->link.xstats);
+ if (cbq_copy_xstats(skb, &q->link.xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
@@ -1600,12 +1616,19 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
goto rtattr_failure;
rta->rta_len = skb->tail - b;
cl->stats.qlen = cl->q->q.qlen;
- RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
+ if (qdisc_copy_stats(skb, &cl->stats))
+ goto rtattr_failure;
+ spin_lock_bh(&sch->dev->queue_lock);
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
if (!PSCHED_IS_PASTPERFECT(cl->undertime))
cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
- RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
+ q->link.xstats.avgidle = q->link.avgidle;
+ if (cbq_copy_xstats(skb, &cl->xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
@@ -1631,8 +1654,11 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new->reshape_fail = cbq_reshape_fail;
#endif
}
- if ((*old = xchg(&cl->q, new)) != NULL)
- qdisc_reset(*old);
+ sch_tree_lock(sch);
+ *old = cl->q;
+ cl->q = new;
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
@@ -1710,16 +1736,16 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
- start_bh_atomic();
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
+ spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
+ spin_unlock_bh(&sch->dev->queue_lock);
#endif
+
cbq_destroy_class(cl);
}
- end_bh_atomic();
- return;
}
static int
@@ -1780,7 +1806,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
}
/* Change class parameters */
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive != NULL)
cbq_deactivate_class(cl);
@@ -1812,7 +1838,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
if (cl->q->q.qlen)
cbq_activate_class(cl);
- end_bh_atomic();
+	sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
@@ -1878,8 +1904,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
+ cl->stats.lock = &sch->dev->queue_lock;
- start_bh_atomic();
+ sch_tree_lock(sch);
cbq_link_class(cl);
cl->borrow = cl->tparent;
if (cl->tparent != &q->link)
@@ -1903,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
#endif
if (tb[TCA_CBQ_FOPT-1])
cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
- end_bh_atomic();
+ sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
@@ -1926,7 +1953,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive)
cbq_deactivate_class(cl);
@@ -1948,12 +1975,11 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
cbq_sync_defmap(cl);
cbq_rmprio(q, cl);
+ sch_tree_unlock(sch);
if (--cl->refcnt == 0)
cbq_destroy_class(cl);
- end_bh_atomic();
-
return 0;
}
diff --git a/net/sched/sch_csz.c b/net/sched/sch_csz.c
index 2202fd81a..c1be3729e 100644
--- a/net/sched/sch_csz.c
+++ b/net/sched/sch_csz.c
@@ -885,7 +885,7 @@ static int csz_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr *
a = &q->flow[cl];
- start_bh_atomic();
+ spin_lock_bh(&sch->dev->queue_lock);
#if 0
a->rate_log = copt->rate_log;
#endif
@@ -899,7 +899,7 @@ static int csz_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr *
if (tb[TCA_CSZ_RTAB-1])
memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024);
- end_bh_atomic();
+ spin_unlock_bh(&sch->dev->queue_lock);
return 0;
}
/* NI */
@@ -920,14 +920,14 @@ static int csz_delete(struct Qdisc *sch, unsigned long cl)
a = &q->flow[cl];
- start_bh_atomic();
+ spin_lock_bh(&sch->dev->queue_lock);
a->fprev->fnext = a->fnext;
a->fnext->fprev = a->fprev;
a->sprev->snext = a->snext;
a->snext->sprev = a->sprev;
a->start = a->finish = 0;
kfree(xchg(&q->flow[cl].L_tab, NULL));
- end_bh_atomic();
+ spin_unlock_bh(&sch->dev->queue_lock);
return 0;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ba40033e5..2dc1ed327 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -34,7 +34,45 @@
/* Main transmission queue. */
-struct Qdisc_head qdisc_head = { &qdisc_head };
+struct Qdisc_head qdisc_head = { &qdisc_head, &qdisc_head };
+spinlock_t qdisc_runqueue_lock = SPIN_LOCK_UNLOCKED;
+
+/* Main qdisc structure lock.
+
+   Modifications to data participating in scheduling
+   must additionally be protected with the dev->queue_lock
+   spinlock.
+
+ The idea is the following:
+ - enqueue, dequeue are serialized via top level device
+ spinlock dev->queue_lock.
+ - tree walking is protected by read_lock(qdisc_tree_lock)
+ and this lock is used only in process context.
+ - updates to tree are made only under rtnl semaphore,
+     hence this lock may be taken without local bh disabling.
+
+ qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+ */
+rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
+
+/* Anti-deadlock rules:
+
+ qdisc_runqueue_lock protects main transmission list qdisc_head.
+ Run list is accessed only under this spinlock.
+
+ dev->queue_lock serializes queue accesses for this device
+ AND dev->qdisc pointer itself.
+
+ dev->xmit_lock serializes accesses to device driver.
+
+   dev->queue_lock and dev->xmit_lock are mutually exclusive:
+   if one is held, the other must be free.
+
+ qdisc_runqueue_lock may be requested under dev->queue_lock,
+ but neither dev->queue_lock nor dev->xmit_lock may be requested
+ under qdisc_runqueue_lock.
+ */
+
/* Kick device.
Note, that this procedure can be called by a watchdog timer, so that
@@ -44,7 +82,7 @@ struct Qdisc_head qdisc_head = { &qdisc_head };
>0 - queue is not empty, but throttled.
<0 - queue is not empty. Device is throttled, if dev->tbusy != 0.
- NOTE: Called only from NET BH
+ NOTE: Called under dev->queue_lock with locally disabled BH.
*/
int qdisc_restart(struct device *dev)
@@ -53,27 +91,97 @@ int qdisc_restart(struct device *dev)
struct sk_buff *skb;
if ((skb = q->dequeue(q)) != NULL) {
+ /* Dequeue packet and release queue */
+ spin_unlock(&dev->queue_lock);
+
if (netdev_nit)
dev_queue_xmit_nit(skb, dev);
- if (dev->hard_start_xmit(skb, dev) == 0) {
- q->tx_last = jiffies;
- return -1;
+ if (spin_trylock(&dev->xmit_lock)) {
+ /* Remember that the driver is grabbed by us. */
+ dev->xmit_lock_owner = smp_processor_id();
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+
+ spin_lock(&dev->queue_lock);
+ dev->qdisc->tx_last = jiffies;
+ return -1;
+ }
+ /* Release the driver */
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+ } else {
+ /* So, someone grabbed the driver. */
+
+			/* It may be a transient configuration error,
+			   when hard_start_xmit() recurses. We detect
+			   it by checking the xmit lock owner and drop
+			   the packet when a dead loop is detected.
+ */
+ if (dev->xmit_lock_owner == smp_processor_id()) {
+ kfree_skb(skb);
+ if (net_ratelimit())
+					printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name);
+ spin_lock(&dev->queue_lock);
+ return -1;
+ }
+
+ /* Otherwise, packet is requeued
+ and will be sent by the next net_bh run.
+ */
+ mark_bh(NET_BH);
}
/* Device kicked us out :(
This is possible in three cases:
+ 0. driver is locked
1. fastroute is enabled
2. device cannot determine busy state
before start of transmission (f.e. dialout)
3. device is buggy (ppp)
*/
+ spin_lock(&dev->queue_lock);
+ q = dev->qdisc;
q->ops->requeue(skb, q);
return -1;
}
- return q->q.qlen;
+ return dev->qdisc->q.qlen;
+}
+
+static __inline__ void
+qdisc_stop_run(struct Qdisc *q)
+{
+ q->h.forw->back = q->h.back;
+ q->h.back->forw = q->h.forw;
+ q->h.forw = NULL;
+}
+
+extern __inline__ void
+qdisc_continue_run(struct Qdisc *q)
+{
+ if (!qdisc_on_runqueue(q) && q->dev) {
+ q->h.forw = &qdisc_head;
+ q->h.back = qdisc_head.back;
+ qdisc_head.back->forw = &q->h;
+ qdisc_head.back = &q->h;
+ }
+}
+
+static __inline__ int
+qdisc_init_run(struct Qdisc_head *lh)
+{
+ if (qdisc_head.forw != &qdisc_head) {
+ *lh = qdisc_head;
+ lh->forw->back = lh;
+ lh->back->forw = lh;
+ qdisc_head.forw = &qdisc_head;
+ qdisc_head.back = &qdisc_head;
+ return 1;
+ }
+ return 0;
}
/* Scan transmission queue and kick devices.
@@ -84,58 +192,90 @@ int qdisc_restart(struct device *dev)
I have no idea how to solve it using only "anonymous" Linux mark_bh().
To change queue from device interrupt? Ough... only not this...
+
+ This function is called only from net_bh.
*/
void qdisc_run_queues(void)
{
- struct Qdisc_head **hp, *h;
+ struct Qdisc_head lh, *h;
+
+ spin_lock(&qdisc_runqueue_lock);
+ if (!qdisc_init_run(&lh))
+ goto out;
- hp = &qdisc_head.forw;
- while ((h = *hp) != &qdisc_head) {
- int res = -1;
+ while ((h = lh.forw) != &lh) {
+ int res;
+ struct device *dev;
struct Qdisc *q = (struct Qdisc*)h;
- struct device *dev = q->dev;
-
- while (!dev->tbusy && (res = qdisc_restart(dev)) < 0)
- /* NOTHING */;
-
- /* An explanation is necessary here.
- qdisc_restart called dev->hard_start_xmit,
- if device is virtual, it could trigger one more
- dev_queue_xmit and a new device could appear
- in the active chain. In this case we cannot unlink
- the empty queue, because we lost the back pointer.
- No problem, we will unlink it during the next round.
- */
- if (res == 0 && *hp == h) {
- *hp = h->forw;
- h->forw = NULL;
- continue;
+ qdisc_stop_run(q);
+
+ dev = q->dev;
+ spin_unlock(&qdisc_runqueue_lock);
+
+ res = -1;
+ if (spin_trylock(&dev->queue_lock)) {
+ while (!dev->tbusy && (res = qdisc_restart(dev)) < 0)
+ /* NOTHING */;
+ spin_unlock(&dev->queue_lock);
}
- hp = &h->forw;
+
+ spin_lock(&qdisc_runqueue_lock);
+		/* If the qdisc is not empty, add it back to the tail of the list */
+ if (res)
+ qdisc_continue_run(q);
}
+out:
+ spin_unlock(&qdisc_runqueue_lock);
}
-/* Periodic watchdoc timer to recover from hard/soft device bugs. */
+/* Periodic watchdog timer to recover from hard/soft device bugs. */
static void dev_do_watchdog(unsigned long dummy);
static struct timer_list dev_watchdog =
{ NULL, NULL, 0L, 0L, &dev_do_watchdog };
+/* This function is called only from the timer */
+
static void dev_do_watchdog(unsigned long dummy)
{
- struct Qdisc_head *h;
+ struct Qdisc_head lh, *h;
+
+ if (!spin_trylock(&qdisc_runqueue_lock)) {
+ /* No hurry with watchdog. */
+ mod_timer(&dev_watchdog, jiffies + HZ/10);
+ return;
+ }
- for (h = qdisc_head.forw; h != &qdisc_head; h = h->forw) {
+ if (!qdisc_init_run(&lh))
+ goto out;
+
+ while ((h = lh.forw) != &lh) {
+ struct device *dev;
struct Qdisc *q = (struct Qdisc*)h;
- struct device *dev = q->dev;
- if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo)
- qdisc_restart(dev);
+
+ qdisc_stop_run(q);
+
+ dev = q->dev;
+ spin_unlock(&qdisc_runqueue_lock);
+
+ if (spin_trylock(&dev->queue_lock)) {
+ q = dev->qdisc;
+ if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo)
+ qdisc_restart(dev);
+ spin_unlock(&dev->queue_lock);
+ }
+
+ spin_lock(&qdisc_runqueue_lock);
+
+ qdisc_continue_run(dev->qdisc);
}
- dev_watchdog.expires = jiffies + 5*HZ;
- add_timer(&dev_watchdog);
+
+out:
+ mod_timer(&dev_watchdog, jiffies + 5*HZ);
+ spin_unlock(&qdisc_runqueue_lock);
}
@@ -206,7 +346,7 @@ struct Qdisc noqueue_qdisc =
{
{ NULL },
NULL,
- NULL,
+ noop_dequeue,
TCQ_F_BUILTIN,
&noqueue_qdisc_ops,
};
@@ -322,6 +462,7 @@ struct Qdisc * qdisc_create_dflt(struct device *dev, struct Qdisc_ops *ops)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
+ sch->stats.lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
@@ -330,42 +471,45 @@ struct Qdisc * qdisc_create_dflt(struct device *dev, struct Qdisc_ops *ops)
return NULL;
}
+/* Under dev->queue_lock and BH! */
+
void qdisc_reset(struct Qdisc *qdisc)
{
struct Qdisc_ops *ops = qdisc->ops;
- start_bh_atomic();
if (ops->reset)
ops->reset(qdisc);
- end_bh_atomic();
}
+/* Under dev->queue_lock and BH! */
+
void qdisc_destroy(struct Qdisc *qdisc)
{
struct Qdisc_ops *ops = qdisc->ops;
+ struct device *dev;
if (!atomic_dec_and_test(&qdisc->refcnt))
return;
+ dev = qdisc->dev;
+
#ifdef CONFIG_NET_SCHED
- if (qdisc->dev) {
+ if (dev) {
struct Qdisc *q, **qp;
- for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next)
+ for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
if (q == qdisc) {
*qp = q->next;
- q->next = NULL;
break;
}
+ }
}
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&qdisc->stats);
#endif
#endif
- start_bh_atomic();
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
- end_bh_atomic();
if (!(qdisc->flags&TCQ_F_BUILTIN))
kfree(qdisc);
}
@@ -380,19 +524,23 @@ void dev_activate(struct device *dev)
*/
if (dev->qdisc_sleeping == &noop_qdisc) {
+ struct Qdisc *qdisc;
if (dev->tx_queue_len) {
- struct Qdisc *qdisc;
qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
if (qdisc == NULL) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- dev->qdisc_sleeping = qdisc;
- } else
- dev->qdisc_sleeping = &noqueue_qdisc;
+ } else {
+ qdisc = &noqueue_qdisc;
+ }
+ write_lock(&qdisc_tree_lock);
+ dev->qdisc_sleeping = qdisc;
+ write_unlock(&qdisc_tree_lock);
}
- start_bh_atomic();
+ spin_lock_bh(&dev->queue_lock);
+ spin_lock(&qdisc_runqueue_lock);
if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
dev->qdisc->tx_timeo = 5*HZ;
dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo;
@@ -400,51 +548,50 @@ void dev_activate(struct device *dev)
dev_watchdog.expires = jiffies + 5*HZ;
add_timer(&dev_watchdog);
}
- end_bh_atomic();
+ spin_unlock(&qdisc_runqueue_lock);
+ spin_unlock_bh(&dev->queue_lock);
}
void dev_deactivate(struct device *dev)
{
struct Qdisc *qdisc;
- start_bh_atomic();
-
- qdisc = xchg(&dev->qdisc, &noop_qdisc);
+ spin_lock_bh(&dev->queue_lock);
+ qdisc = dev->qdisc;
+ dev->qdisc = &noop_qdisc;
qdisc_reset(qdisc);
- if (qdisc->h.forw) {
- struct Qdisc_head **hp, *h;
-
- for (hp = &qdisc_head.forw; (h = *hp) != &qdisc_head; hp = &h->forw) {
- if (h == &qdisc->h) {
- *hp = h->forw;
- break;
- }
- }
- }
-
- end_bh_atomic();
+ spin_lock(&qdisc_runqueue_lock);
+ if (qdisc_on_runqueue(qdisc))
+ qdisc_stop_run(qdisc);
+ spin_unlock(&qdisc_runqueue_lock);
+ spin_unlock_bh(&dev->queue_lock);
}
void dev_init_scheduler(struct device *dev)
{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
dev->qdisc = &noop_qdisc;
+ spin_unlock_bh(&dev->queue_lock);
dev->qdisc_sleeping = &noop_qdisc;
dev->qdisc_list = NULL;
+ write_unlock(&qdisc_tree_lock);
}
void dev_shutdown(struct device *dev)
{
struct Qdisc *qdisc;
- start_bh_atomic();
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
qdisc = dev->qdisc_sleeping;
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
BUG_TRAP(dev->qdisc_list == NULL);
dev->qdisc_list = NULL;
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
}
-
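
qdisc_init_run() above empties the shared run list onto a caller-owned head in O(1), so qdisc_run_queues() can walk and requeue entries while dropping qdisc_runqueue_lock around the actual transmit work. The splice itself, sketched on the same forw/back layout:

struct run_head { struct run_head *forw, *back; };

/* Move the whole chain from 'global' onto 'local'; both heads must be
 * manipulated under the run-queue spinlock. Returns 0 when empty. */
static int splice_run_list(struct run_head *global, struct run_head *local)
{
	if (global->forw == global)
		return 0;			/* nothing queued */

	*local = *global;			/* local adopts first/last */
	local->forw->back = local;		/* chain now points at local */
	local->back->forw = local;
	global->forw = global->back = global;	/* shared list is empty */
	return 1;
}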
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5222d149d..a7069dec7 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -178,7 +178,7 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
return -EINVAL;
}
- start_bh_atomic();
+ sch_tree_lock(sch);
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
@@ -187,7 +187,7 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
if (child != &noop_qdisc)
qdisc_destroy(child);
}
- end_bh_atomic();
+ sch_tree_unlock(sch);
for (i=0; i<=TC_PRIO_MAX; i++) {
int band = q->prio2band[i];
@@ -195,11 +195,12 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
struct Qdisc *child;
child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (child) {
+ sch_tree_lock(sch);
child = xchg(&q->queues[band], child);
- synchronize_bh();
if (child != &noop_qdisc)
qdisc_destroy(child);
+ sch_tree_unlock(sch);
}
}
}
@@ -265,7 +266,11 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- *old = xchg(&q->queues[band], new);
+ sch_tree_lock(sch);
+ *old = q->queues[band];
+ q->queues[band] = new;
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8baf254eb..dfde116f3 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -387,7 +387,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
return -EINVAL;
- start_bh_atomic();
+ sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
q->perturb_period = ctl->perturb_period*HZ;
@@ -396,7 +396,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
q->perturb_timer.expires = jiffies + q->perturb_period;
add_timer(&q->perturb_timer);
}
- end_bh_atomic();
+ sch_tree_unlock(sch);
return 0;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a4d13b628..90e469b02 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -308,7 +308,7 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
if (rtab->data[max_size>>qopt->rate.cell_log] > qopt->buffer)
goto done;
- start_bh_atomic();
+ sch_tree_lock(sch);
q->limit = qopt->limit;
q->mtu = qopt->mtu;
q->max_size = max_size;
@@ -317,7 +317,7 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
q->ptokens = q->mtu;
rtab = xchg(&q->R_tab, rtab);
ptab = xchg(&q->P_tab, ptab);
- end_bh_atomic();
+ sch_tree_unlock(sch);
err = 0;
done:
if (rtab)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 66040d5e9..ffed0de11 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -125,9 +125,11 @@ teql_dequeue(struct Qdisc* sch)
if (skb == NULL) {
struct device *m = dat->m->dev.qdisc->dev;
if (m) {
- m->tbusy = 0;
dat->m->slaves = sch;
+ spin_lock(&m->queue_lock);
+ m->tbusy = 0;
qdisc_restart(m);
+ spin_unlock(&m->queue_lock);
}
}
sch->q.qlen = dat->q.qlen + dat->m->dev.qdisc->q.qlen;
@@ -167,7 +169,9 @@ teql_destroy(struct Qdisc* sch)
master->slaves = NEXT_SLAVE(q);
if (q == master->slaves) {
master->slaves = NULL;
+ spin_lock_bh(&master->dev.queue_lock);
qdisc_reset(master->dev.qdisc);
+ spin_unlock_bh(&master->dev.queue_lock);
}
}
skb_queue_purge(&dat->q);
@@ -190,6 +194,9 @@ static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt)
if (dev->hard_header_len > m->dev.hard_header_len)
return -EINVAL;
+ if (&m->dev == dev)
+ return -ELOOP;
+
q->m = m;
skb_queue_head_init(&q->q);
@@ -244,7 +251,11 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct device *dev)
return -ENOBUFS;
}
if (neigh_event_send(n, skb_res) == 0) {
- if (dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len) < 0) {
+ int err;
+ read_lock(&n->lock);
+ err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len);
+ read_unlock(&n->lock);
+ if (err < 0) {
neigh_release(n);
return -EINVAL;
}
@@ -295,19 +306,24 @@ restart:
continue;
}
- if (q->h.forw == NULL) {
- q->h.forw = qdisc_head.forw;
- qdisc_head.forw = &q->h;
- }
+ if (!qdisc_on_runqueue(q))
+ qdisc_run(q);
switch (teql_resolve(skb, skb_res, slave)) {
case 0:
- if (slave->hard_start_xmit(skb, slave) == 0) {
- master->slaves = NEXT_SLAVE(q);
- dev->tbusy = 0;
- master->stats.tx_packets++;
- master->stats.tx_bytes += len;
+ if (spin_trylock(&slave->xmit_lock)) {
+ slave->xmit_lock_owner = smp_processor_id();
+ if (slave->hard_start_xmit(skb, slave) == 0) {
+ slave->xmit_lock_owner = -1;
+ spin_unlock(&slave->xmit_lock);
+ master->slaves = NEXT_SLAVE(q);
+ dev->tbusy = 0;
+ master->stats.tx_packets++;
+ master->stats.tx_bytes += len;
return 0;
+ }
+ slave->xmit_lock_owner = -1;
+ spin_unlock(&slave->xmit_lock);
}
if (dev->tbusy)
busy = 1;
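
Both qdisc_restart() and the teql transmit path guard the driver with a trylock plus an owner CPU id, so a recursive dev_queue_xmit() from within hard_start_xmit() is distinguishable from plain contention. The skeleton, as a sketch with an assumed return convention:

/* rc > 0: sent; rc == 0: dropped (recursion); rc < 0: busy, requeue */
static int guarded_xmit(struct device *dev, struct sk_buff *skb)
{
	int rc;

	if (!spin_trylock(&dev->xmit_lock)) {
		if (dev->xmit_lock_owner == smp_processor_id()) {
			kfree_skb(skb);	/* we hold it: dead loop, not contention */
			return 0;
		}
		return -1;		/* another CPU owns the driver */
	}

	dev->xmit_lock_owner = smp_processor_id();
	rc = dev->hard_start_xmit(skb, dev) == 0 ? 1 : -1;
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->xmit_lock);
	return rc;
}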
diff --git a/net/socket.c b/net/socket.c
index 181effb79..41499da08 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -131,6 +131,11 @@ struct net_proto_family *net_families[NPROTO];
static int sockets_in_use = 0;
/*
+ * Socket hashing lock.
+ */
+rwlock_t sockhash_lock = RW_LOCK_UNLOCKED;
+
+/*
* Support routines. Move socket addresses back and forth across the kernel/user
* divide and look after the messy bits.
*/
@@ -199,7 +204,7 @@ static int get_fd(struct inode *inode)
return -ENFILE;
}
- file->f_dentry = d_alloc_root(inode, NULL);
+ file->f_dentry = d_alloc_root(inode);
if (!file->f_dentry) {
put_filp(file);
put_unused_fd(fd);
@@ -283,7 +288,7 @@ struct socket *sock_alloc(void)
inode->i_gid = current->fsgid;
sock->inode = inode;
- init_waitqueue(&sock->wait);
+ init_waitqueue_head(&sock->wait);
sock->fasync_list = NULL;
sock->state = SS_UNCONNECTED;
sock->flags = 0;
@@ -561,7 +566,8 @@ int sock_wake_async(struct socket *sock, int how)
/* fall through */
case 0:
call_kill:
- kill_fasync(sock->fasync_list, SIGIO);
+ if(sock->fasync_list != NULL)
+ kill_fasync(sock->fasync_list, SIGIO);
break;
}
return 0;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 2b8db00cc..7c966779b 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -4,9 +4,12 @@
* Generic RPC authentication API.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ *
+ * Modified May 1999, Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*/
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/sched.h>
#include <linux/malloc.h>
#include <linux/errno.h>
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 6912c229d..6596085b3 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -4,9 +4,12 @@
* UNIX-style authentication; no AUTH_SHORT support
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ *
+ * Modified May 1999 Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*/
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/socket.h>
#include <linux/in.h>
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dc06be6b0..6ffcc187b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -39,7 +39,7 @@
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
-static struct wait_queue * destroy_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_bind(struct rpc_task *task);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 222a3f9ec..b4e0b4b76 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -60,9 +60,9 @@ static struct rpc_task * all_tasks = NULL;
/*
* rpciod-related stuff
*/
-static struct wait_queue * rpciod_idle = NULL;
-static struct wait_queue * rpciod_killer = NULL;
-static struct semaphore rpciod_sema = MUTEX;
+static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
+static DECLARE_WAIT_QUEUE_HEAD(rpciod_killer);
+static DECLARE_MUTEX(rpciod_sema);
static unsigned int rpciod_users = 0;
static pid_t rpciod_pid = 0;
static int rpc_inhibit = 0;
@@ -616,6 +616,7 @@ rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
task->tk_client = clnt;
task->tk_flags = RPC_TASK_RUNNING | flags;
task->tk_exit = callback;
+ init_waitqueue_head(&task->tk_wait);
if (current->uid != current->fsuid || current->gid != current->fsgid)
task->tk_flags |= RPC_TASK_SETUID;
@@ -800,7 +801,7 @@ rpc_killall_tasks(struct rpc_clnt *clnt)
rpc_inhibit--;
}
-static struct semaphore rpciod_running = MUTEX_LOCKED;
+static DECLARE_MUTEX_LOCKED(rpciod_running);
/*
* This is the rpciod kernel thread
@@ -808,7 +809,7 @@ static struct semaphore rpciod_running = MUTEX_LOCKED;
static int
rpciod(void *ptr)
{
- struct wait_queue **assassin = (struct wait_queue **) ptr;
+ wait_queue_head_t *assassin = (wait_queue_head_t*) ptr;
unsigned long oldflags;
int rounds = 0;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2353c2e27..d98adb31c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -123,6 +123,8 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
goto out;
memset(rqstp, 0, sizeof(*rqstp));
+ init_waitqueue_head(&rqstp->rq_wait);
+
if (!(rqstp->rq_argp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
|| !(rqstp->rq_resp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
|| !svc_init_buffer(&rqstp->rq_defbuf, serv->sv_bufsz))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d2248ad74..0fb992f47 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -738,7 +738,7 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
struct svc_sock *svsk;
int len;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
dprintk("svc: server %p waiting for data (to = %ld)\n",
rqstp, timeout);
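
DECLARE_WAITQUEUE(wait, current) replaces the open-coded { current, NULL } initializer; the sleep logic around it in svc_recv() keeps the classic add/set-state/schedule/remove shape. A sketch of that idiom under the new API, with an illustrative condition variable:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_queue);
    static int my_data_ready;                   /* illustrative condition */

    static int wait_for_data(long timeout)
    {
        DECLARE_WAITQUEUE(wait, current);       /* this task's queue entry */

        add_wait_queue(&my_queue, &wait);
        current->state = TASK_INTERRUPTIBLE;
        if (!my_data_ready)                     /* re-check after queueing */
            timeout = schedule_timeout(timeout);
        current->state = TASK_RUNNING;
        remove_wait_queue(&my_queue, &wait);
        return my_data_ready ? 0 : -EAGAIN;
    }
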
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1e5ad01c3..aa1bfa8f5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -42,6 +42,7 @@
#define __KERNEL_SYSCALLS__
#include <linux/version.h>
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/sched.h>
@@ -56,6 +57,8 @@
#include <linux/file.h>
#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/udp.h>
#include <asm/uaccess.h>
@@ -356,6 +359,7 @@ xprt_close(struct rpc_xprt *xprt)
sk->user_data = NULL;
#endif
sk->data_ready = xprt->old_data_ready;
+ sk->no_check = 0;
sk->state_change = xprt->old_state_change;
sk->write_space = xprt->old_write_space;
@@ -563,18 +567,61 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
return;
}
-/*
- * Input handler for RPC replies. Called from a bottom half and hence
+/* We have set things up such that we perform the checksum of the UDP
+ * packet in parallel with the copies into the RPC client iovec. -DaveM
+ */
+static int csum_partial_copy_to_page_cache(struct iovec *iov,
+ struct sk_buff *skb,
+ int copied)
+{
+ __u8 *pkt_data = skb->data + sizeof(struct udphdr);
+ __u8 *cur_ptr = iov->iov_base;
+ __kernel_size_t cur_len = iov->iov_len;
+ unsigned int csum = skb->csum;
+ int need_csum = (skb->ip_summed != CHECKSUM_UNNECESSARY);
+ int slack = skb->len - copied - sizeof(struct udphdr);
+
+ if (need_csum)
+ csum = csum_partial(skb->h.raw, sizeof(struct udphdr), csum);
+ while (copied > 0) {
+ if (cur_len) {
+ int to_move = cur_len;
+ if (to_move > copied)
+ to_move = copied;
+ if (need_csum)
+ csum = csum_partial_copy_nocheck(pkt_data, cur_ptr,
+ to_move, csum);
+ else
+ memcpy(cur_ptr, pkt_data, to_move);
+ pkt_data += to_move;
+ copied -= to_move;
+ cur_ptr += to_move;
+ cur_len -= to_move;
+ }
+ if (cur_len <= 0) {
+ iov++;
+ cur_len = iov->iov_len;
+ cur_ptr = iov->iov_base;
+ }
+ }
+ if (need_csum) {
+ if (slack > 0)
+ csum = csum_partial(pkt_data, slack, csum);
+ if ((unsigned short)csum_fold(csum))
+ return -1;
+ }
+ return 0;
+}
+
+/* Input handler for RPC replies. Called from a bottom half and hence
* atomic.
*/
static inline void
udp_data_ready(struct sock *sk, int len)
{
- struct rpc_task *task;
struct rpc_xprt *xprt;
struct rpc_rqst *rovr;
struct sk_buff *skb;
- struct iovec iov[MAX_IOVEC];
int err, repsize, copied;
dprintk("RPC: udp_data_ready...\n");
@@ -584,28 +631,31 @@ udp_data_ready(struct sock *sk, int len)
if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
return;
- repsize = skb->len - 8; /* don't account for UDP header */
+ repsize = skb->len - sizeof(struct udphdr);
if (repsize < 4) {
printk("RPC: impossible RPC reply size %d!\n", repsize);
goto dropit;
}
/* Look up the request corresponding to the given XID */
- if (!(rovr = xprt_lookup_rqst(xprt, *(u32 *) (skb->h.raw + 8))))
+ if (!(rovr = xprt_lookup_rqst(xprt,
+ *(u32 *) (skb->h.raw + sizeof(struct udphdr)))))
goto dropit;
- task = rovr->rq_task;
- dprintk("RPC: %4d received reply\n", task->tk_pid);
- xprt_pktdump("packet data:", (u32 *) (skb->h.raw+8), repsize);
+ dprintk("RPC: %4d received reply\n", rovr->rq_task->tk_pid);
+ xprt_pktdump("packet data:",
+ (u32 *) (skb->h.raw + sizeof(struct udphdr)), repsize);
if ((copied = rovr->rq_rlen) > repsize)
copied = repsize;
- /* Okay, we have it. Copy datagram... */
- memcpy(iov, rovr->rq_rvec, rovr->rq_rnr * sizeof(iov[0]));
- /* This needs to stay tied with the usermode skb_copy_datagram... */
- memcpy_tokerneliovec(iov, skb->data+8, copied);
+ /* Suck it into the iovec, verify checksum if not done by hw. */
+ if (csum_partial_copy_to_page_cache(rovr->rq_rvec, skb, copied))
+ goto dropit;
+
+ /* Something worked... */
+ dst_confirm(skb->dst);
xprt_complete_rqst(xprt, rovr, copied);
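
dst_confirm() feeds positive reachability information back into the neighbour (ARP) cache: a valid RPC reply proves the peer's link-layer address still works, so ARP need not re-probe it. In this era the helpers amount to the following; paraphrased from memory of include/net/dst.h and the neighbour code, so verify against the tree:

    /* Timestamp the neighbour entry as "seen alive". */
    static __inline__ void neigh_confirm(struct neighbour *neigh)
    {
        if (neigh)
            neigh->confirmed = jiffies;
    }

    static __inline__ void dst_confirm(struct dst_entry *dst)
    {
        if (dst)
            neigh_confirm(dst->neighbour);
    }
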
@@ -1341,6 +1391,7 @@ xprt_setup(struct socket *sock, int proto,
xprt->old_write_space = inet->write_space;
if (proto == IPPROTO_UDP) {
inet->data_ready = udp_data_ready;
+ inet->no_check = UDP_CSUM_NORCV;
} else {
inet->data_ready = tcp_data_ready;
inet->state_change = tcp_state_change;
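
Setting inet->no_check = UDP_CSUM_NORCV is the other half of the checksum-during-copy scheme: it tells the UDP receive path to queue datagrams without verifying their checksum, since xprt.c now verifies while copying and drops bad replies itself (the goto dropit above). The flag values as believed to be defined in include/net/udp.h of this tree; worth double-checking:

    /* sk->no_check values understood by the UDP layer. */
    #define UDP_CSUM_DEFAULT 0  /* normal behaviour */
    #define UDP_CSUM_NOXMIT  1  /* don't generate checksums on send */
    #define UDP_CSUM_NORCV   2  /* don't verify checksums on receive */
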
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 21614a3c6..1d12037da 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.76 1999/05/08 05:54:55 davem Exp $
+ * Version: $Id: af_unix.c,v 1.78 1999/05/27 00:38:41 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
@@ -114,8 +114,8 @@ int sysctl_unix_max_dgram_qlen = 10;
unix_socket *unix_socket_table[UNIX_HASH_SIZE+1];
static atomic_t unix_nr_socks = ATOMIC_INIT(0);
-static struct wait_queue * unix_ack_wqueue = NULL;
-static struct wait_queue * unix_dgram_wqueue = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(unix_ack_wqueue);
+static DECLARE_WAIT_QUEUE_HEAD(unix_dgram_wqueue);
#define unix_sockets_unbound (unix_socket_table[UNIX_HASH_SIZE])
@@ -144,19 +144,21 @@ extern __inline__ int unix_may_send(unix_socket *sk, unix_socket *osk)
return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
}
+#define ulock(sk) (&((sk)->protinfo.af_unix.user_count))
+
extern __inline__ void unix_lock(unix_socket *sk)
{
- atomic_inc(&sk->sock_readers);
+ atomic_inc(ulock(sk));
}
extern __inline__ void unix_unlock(unix_socket *sk)
{
- atomic_dec(&sk->sock_readers);
+ atomic_dec(ulock(sk));
}
extern __inline__ int unix_locked(unix_socket *sk)
{
- return atomic_read(&sk->sock_readers);
+ return (atomic_read(ulock(sk)) != 0);
}
extern __inline__ void unix_release_addr(struct unix_address *addr)
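
The unix_lock()/unix_unlock()/unix_locked() trio is a plain atomic reference count; the ulock() macro just centralizes where the counter lives now that sk->sock_readers is gone (and should parenthesize its argument, as done above, so pointer expressions expand safely). Usage is unchanged; a sketch, where do_blocking_io() is illustrative only:

    /* Pin the peer across an operation that may sleep; the count
     * keeps unix_destroy_socket() from freeing it underneath us. */
    unix_lock(other);
    err = do_blocking_io(other);
    unix_unlock(other);
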
@@ -433,7 +435,7 @@ static struct sock * unix_create1(struct socket *sock, int stream)
sk->destruct = unix_destruct_addr;
sk->protinfo.af_unix.family=PF_UNIX;
sk->protinfo.af_unix.dentry=NULL;
- sk->protinfo.af_unix.readsem=MUTEX; /* single task reading lock */
+ init_MUTEX(&sk->protinfo.af_unix.readsem);/* single task reading lock */
sk->protinfo.af_unix.list=&unix_sockets_unbound;
unix_insert_socket(sk);
@@ -1511,7 +1513,7 @@ static int unix_read_proc(char *buffer, char **start, off_t offset,
{
len+=sprintf(buffer+len,"%p: %08X %08X %08lX %04X %02X %5ld",
s,
- atomic_read(&s->sock_readers),
+ atomic_read(ulock(s)),
0,
s->socket ? s->socket->flags : 0,
s->type,
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 18942bd20..0c91e4c44 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -30,6 +30,7 @@
* Dec 22, 1998 Arnaldo Melo vmalloc/vfree used in device_setup to allocate
* kernel memory and copy configuration data to
* kernel space (for big firmwares)
+* May 19, 1999 Arnaldo Melo __initfunc in wanrouter_init
*****************************************************************************/
#include <linux/config.h>
@@ -104,17 +105,18 @@ static unsigned char oui_802_2[] = { 0x00, 0x80, 0xC2 };
#endif
#ifndef MODULE
-
-int wanrouter_init(void)
+__initfunc(int wanrouter_init(void))
{
int err;
- extern void wanpipe_init(void);
+ extern int wanpipe_init(void),
+ cyclomx_init(void);
printk(KERN_INFO "%s v%u.%u %s\n",
fullname, ROUTER_VERSION, ROUTER_RELEASE, copyright);
err = wanrouter_proc_init();
if (err)
- printk(KERN_ERR "%s: can't create entry in proc filesystem!\n", modname);
+ printk(KERN_ERR "%s: can't create entry in proc filesystem!\n",
+ modname);
/*
* Initialise compiled in boards
@@ -123,6 +125,9 @@ int wanrouter_init(void)
#ifdef CONFIG_VENDOR_SANGOMA
wanpipe_init();
#endif
+#ifdef CONFIG_CYCLADES_SYNC
+ cyclomx_init();
+#endif
return err;
}
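
__initfunc() places the routine in the .text.init section, which the kernel frees once boot completes (the "Freeing unused kernel memory" message), so one-shot setup code like wanrouter_init() stops occupying memory afterwards. The usage pattern for this pre-__init spelling, with a hypothetical init function:

    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Wrap the whole definition in __initfunc().  The body is
     * discarded after the boot-time init calls have run, so it must
     * never be reachable later; hence the #ifndef MODULE guard
     * around wanrouter_init() above. */
    __initfunc(int my_subsys_init(void))
    {
        printk(KERN_INFO "my_subsys: initialized\n");
        return 0;
    }
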
@@ -187,7 +192,6 @@ void cleanup_module (void)
* Context: process
*/
-
int register_wan_device(wan_device_t* wandev)
{
int err, namelen;
@@ -207,7 +211,6 @@ int register_wan_device(wan_device_t* wandev)
printk(KERN_INFO "%s: registering WAN device %s\n",
modname, wandev->name);
#endif
-
/*
* Register /proc directory entry
*/
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e7f894e8e..a4f070023 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1336,14 +1336,17 @@ int init_module(void)
/*
* Register any pre existing devices.
*/
- for (dev = dev_base; dev != NULL; dev = dev->next)
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
|| dev->type == ARPHRD_ETHER
#endif
- ))
- x25_link_device_up(dev);
-
+ ))
+ x25_link_device_up(dev);
+ }
+ read_unlock(&dev_base_lock);
+
return 0;
}
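
This dev_base walk is the canonical 2.3 pattern: the device list is now guarded by the global dev_base_lock rwlock, and any iteration over it, including a module registering pre-existing interfaces at load time as here, must take the read side. The generic shape, assuming <linux/netdevice.h> of this tree and a caller-supplied callback:

    #include <linux/netdevice.h>

    /* Apply fn to every interface that is administratively up.
     * fn must not sleep: the read lock is held across the walk. */
    static void for_each_up_device(void (*fn)(struct device *))
    {
        struct device *dev;

        read_lock(&dev_base_lock);
        for (dev = dev_base; dev != NULL; dev = dev->next) {
            if (dev->flags & IFF_UP)
                fn(dev);
        }
        read_unlock(&dev_base_lock);
    }
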