summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-02-05 06:47:02 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-02-05 06:47:02 +0000
commit99a7e12f34b3661a0d1354eef83a0eef4df5e34c (patch)
tree3560aca9ca86792f9ab7bd87861ea143a1b3c7a3 /net
parente73a04659c0b8cdee4dd40e58630e2cf63afb316 (diff)
Merge with Linux 2.3.38.
Diffstat (limited to 'net')
-rw-r--r--net/Config.in5
-rw-r--r--net/README23
-rw-r--r--net/bridge/br.c656
-rw-r--r--net/bridge/br_tree.c3
-rw-r--r--net/core/dev.c38
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/netfilter.c10
-rw-r--r--net/core/skbuff.c122
-rw-r--r--net/core/sock.c2
-rw-r--r--net/decnet/dn_route.c136
-rw-r--r--net/ipv4/Config.in14
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/icmp.c8
-rw-r--r--net/ipv4/igmp.c6
-rw-r--r--net/ipv4/inetpeer.c447
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_output.c91
-rw-r--r--net/ipv4/ipip.c4
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c300
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/Config.in2
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ip6_output.c16
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c32
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/ipx/af_spx.c2
-rw-r--r--net/irda/Config.in2
-rw-r--r--net/irda/af_irda.c918
-rw-r--r--net/irda/discovery.c3
-rw-r--r--net/irda/ircomm/ircomm_core.c43
-rw-r--r--net/irda/ircomm/ircomm_event.c12
-rw-r--r--net/irda/ircomm/ircomm_lmp.c17
-rw-r--r--net/irda/ircomm/ircomm_param.c141
-rw-r--r--net/irda/ircomm/ircomm_ttp.c28
-rw-r--r--net/irda/ircomm/ircomm_tty.c292
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c143
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c106
-rw-r--r--net/irda/irda_device.c98
-rw-r--r--net/irda/iriap.c204
-rw-r--r--net/irda/iriap_event.c54
-rw-r--r--net/irda/irias_object.c29
-rw-r--r--net/irda/irlan/irlan_client.c13
-rw-r--r--net/irda/irlan/irlan_client_event.c4
-rw-r--r--net/irda/irlan/irlan_common.c3
-rw-r--r--net/irda/irlan/irlan_eth.c3
-rw-r--r--net/irda/irlap.c362
-rw-r--r--net/irda/irlap_event.c502
-rw-r--r--net/irda/irlap_frame.c363
-rw-r--r--net/irda/irlmp.c216
-rw-r--r--net/irda/irlmp_event.c175
-rw-r--r--net/irda/irlmp_frame.c133
-rw-r--r--net/irda/irmod.c28
-rw-r--r--net/irda/irqueue.c16
-rw-r--r--net/irda/irttp.c130
-rw-r--r--net/irda/parameters.c4
-rw-r--r--net/irda/qos.c184
-rw-r--r--net/irda/timer.c6
-rw-r--r--net/irda/wrapper.c31
-rw-r--r--net/netsyms.c14
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/Config.in42
-rw-r--r--net/sched/Makefile32
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_tcindex.c503
-rw-r--r--net/sched/sch_api.c87
-rw-r--r--net/sched/sch_dsmark.c476
-rw-r--r--net/sched/sch_generic.c8
-rw-r--r--net/sched/sch_gred.c606
-rw-r--r--net/sched/sch_ingress.c392
-rw-r--r--net/sched/sch_prio.c13
-rw-r--r--net/sched/sch_teql.c2
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/unix/sysctl_net_unix.c2
89 files changed, 6701 insertions, 1765 deletions
diff --git a/net/Config.in b/net/Config.in
index ead9fc816..82f4fe6d9 100644
--- a/net/Config.in
+++ b/net/Config.in
@@ -62,7 +62,10 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
bool 'Bridging (EXPERIMENTAL)' CONFIG_BRIDGE
- bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
+ if [ "$CONFIG_BRIDGE" != "n" ]; then
+ int ' Maximum number of bridged interfaces' CONFIG_BRIDGE_NUM_PORTS 8
+ fi
+bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
# if [ "$CONFIG_LLC" = "y" ]; then
# bool ' Netbeui (EXPERIMENTAL)' CONFIG_NETBEUI
# fi
diff --git a/net/README b/net/README
index 8b3543f8b..d54d8526b 100644
--- a/net/README
+++ b/net/README
@@ -5,14 +5,14 @@ Code Section Bug Report Contact
-------------------+-------------------------------------------
802 [other ] alan@lxorguk.ukuu.org.uk
[token ring ] p.norton@computer.org
-appletalk Jay.Schulist@spacs.k12.wi.us
+appletalk jschlst@turbolinux.com
ax25 g4klx@g4klx.demon.co.uk
core alan@lxorguk.ukuu.org.uk
decnet SteveW@ACM.org
ethernet alan@lxorguk.ukuu.org.uk
ipv4 davem@caip.rutgers.edu,Eric.Schenk@dna.lth.se
ipv6 davem@caip.rutgers.edu,Eric.Schenk@dna.lth.se
-ipx/spx Jay.Schulist@spacs.k12.wi.us
+ipx/spx jschlst@turbolinux.com
irda dagb@cs.uit.no
lapb g4klx@g4klx.demon.co.uk
netrom g4klx@g4klx.demon.co.uk
@@ -22,22 +22,3 @@ unix alan@lxorguk.ukuu.org.uk
x25 g4klx@g4klx.demon.co.uk
- If in doubt contact me <alan@lxorguk.ukuu.org.uk> first.
-
----------------------------------------------------------------------------
-
-For commercial UK custom Linux networking projects, drivers and development
-(but not free support!) I can be contacted via
-
- CymruNET Ltd, The Innovation Centre, University Of Wales
- Swansea SA2 8PP.
- Fax: +44 1792 295811
- Tel: +44 1792 295213
-
- Email: alan@cymru.net
-
-Please don't send commercial queries to my .ac.uk email address as I have that
-in an academic and _not_ commercial capacity. On the other hand feel
-free to send bug reports, queries and enhancements that way.
-
-Alan
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 4947a9b89..d2279e56d 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -40,10 +40,19 @@
* so blame me first if its broken ;)
*
* Robert Pintarelli: fixed bug in bpdu time values
+ *
+ * Matthew Grant: start ports disabled.
+ * auto-promiscuous mode on port enable/disable
+ * fleshed out interface event handling, interfaces
+ * now register with bridge on module load as well as ifup
+ * port control ioctls with ifindex support
+ * brg0 logical ethernet interface
+ * reworked brcfg to take interface arguments
+ * added support for changing the hardware address
+ * generally made bridge a lot more usable.
*
* Todo:
- * Don't bring up devices automatically. Start ports disabled
- * and use a netlink notifier so a daemon can maintain the bridge
+ * Use a netlink notifier so a daemon can maintain the bridge
* port group (could we also do multiple groups ????).
* A nice /proc file interface.
* Put the path costs in the port info and devices.
@@ -52,6 +61,7 @@
*
*/
+#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -60,10 +70,13 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
+#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -71,8 +84,10 @@
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/system.h>
+#include <linux/rtnetlink.h>
#include <net/br.h>
#include <linux/proc_fs.h>
+#include <linux/delay.h>
#ifndef min
#define min(a, b) (((a) <= (b)) ? (a) : (b))
@@ -144,11 +159,20 @@ static void br_add_local_mac(unsigned char *mac);
static int br_flood(struct sk_buff *skb, int port);
static int br_drop(struct sk_buff *skb);
static int br_learn(struct sk_buff *skb, int port); /* 3.8 */
+static int br_protocol_ok(unsigned short protocol);
+static int br_find_port(int ifindex);
+static void br_get_ifnames(void);
+static int brg_rx(struct sk_buff *skb, int port);
static unsigned char bridge_ula[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static Bridge_data bridge_info; /* (4.5.3) */
Port_data port_info[All_ports]; /* (4.5.5) */
+/* MAG: Maximum port registered - used to speed up flooding and to make
+ * have a large ports array more efficient
+ */
+static int max_port_used = 0;
+
/* JRP: fdb cache 1/port save kmalloc/kfree on every frame */
struct fdb *newfdb[All_ports];
int allocated_fdb_cnt = 0;
@@ -189,6 +213,30 @@ static struct notifier_block br_dev_notifier={
0
};
+
+/*
+ * the following data is for the bridge network device
+ */
+struct brg_if {
+ struct net_device dev;
+ char name[IFNAMSIZ];
+};
+static struct brg_if brg_if;
+
+/*
+ * Here to save linkage? problems
+ */
+
+static inline int find_port(struct net_device *dev)
+{
+ int i;
+
+ for (i = One; i <= No_of_ports; i++)
+ if (port_info[i].dev == dev)
+ return(i);
+ return(0);
+}
+
/*
* Implementation of Protocol specific bridging
*
@@ -201,7 +249,7 @@ static struct notifier_block br_dev_notifier={
/* Checks if that protocol type is to be bridged */
-int br_protocol_ok(unsigned short protocol)
+static int inline br_protocol_ok(unsigned short protocol)
{
unsigned x;
@@ -830,12 +878,19 @@ static int br_tree_get_info(char *buffer, char **start, off_t offset, int length
return len;
}
+
void __init br_init(void)
{ /* (4.8.1) */
int port_no;
- printk(KERN_INFO "NET4: Ethernet Bridge 005 for NET4.0\n");
+ printk(KERN_INFO "NET4: Ethernet Bridge 006 for NET4.0\n");
+ /* Set up brg device information */
+ bridge_info.instance = 0;
+ brg_init();
+
+ max_port_used = 0;
+
/*
* Form initial topology change time.
* The topology change timer is only used if this is the root bridge.
@@ -865,8 +920,8 @@ void __init br_init(void)
stop_topology_change_timer();
memset(newfdb, 0, sizeof(newfdb));
for (port_no = One; port_no <= No_of_ports; port_no++) { /* (4.8.1.4) */
- /* initial state = Enable */
- user_port_state[port_no] = ~Disabled;
+ /* initial state = Disable */
+ user_port_state[port_no] = Disabled;
port_priority[port_no] = 128;
br_init_port(port_no);
disable_port(port_no);
@@ -1194,7 +1249,7 @@ static struct sk_buff *alloc_bridge_skb(int port_no, int pdu_size, char *pdu_nam
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
if (br_stats.flags & BR_DEBUG)
- printk("send_%s_bpdu: port %i src %02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(KERN_DEBUG "send_%s_bpdu: port %i src %02x:%02x:%02x:%02x:%02x:%02x\n",
pdu_name,
port_no,
eth->h_source[0],
@@ -1295,6 +1350,9 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
if (dev->flags & IFF_LOOPBACK)
return(NOTIFY_DONE);
+ if (dev == &brg_if.dev)
+ return(NOTIFY_DONE); /* Don't attach the brg device to a port! */
+
switch (event)
{
case NETDEV_DOWN:
@@ -1324,6 +1382,9 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
{
port_info[i].dev = dev;
port_info[i].port_id = i;
+ dev->bridge_port_id = i;
+ if( i > max_port_used )
+ max_port_used = i;
/* set bridge addr from 1st device addr */
if (((htonl(bridge_info.bridge_id.BRIDGE_ID[0])&0xffff) == 0) &&
(bridge_info.bridge_id.BRIDGE_ID[1] == 0))
@@ -1333,7 +1394,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
bridge_info.bridge_id.BRIDGE_PRIORITY = htons(32768);
set_bridge_priority(&bridge_info.bridge_id);
}
+ /* Add local MAC address */
br_add_local_mac(dev->dev_addr);
+ /* Save MAC address for later change address events */
+ memcpy(port_info[i].ifmac.BRIDGE_ID_ULA, dev->dev_addr, 6);
if((br_stats.flags & BR_UP) &&
(user_port_state[i] != Disabled))
{
@@ -1351,19 +1415,116 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
}
}
break;
+ case NETDEV_REGISTER:
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "br_device_event: NETDEV_REGISTER...\n");
+ /* printk(KERN_ERR "br_device_event: NETDEV_REGISTER...\n"); */
+ /* printk(KERN_ERR "br_device_event: dev->type: 0x%X\n", dev->type); */
+ /* Only handle ethernet ports */
+ if(dev->type!=ARPHRD_ETHER && dev->type!=ARPHRD_LOOPBACK)
+ return NOTIFY_DONE;
+ /* printk(KERN_ERR "br_device_event: Looking for port...\n"); */
+ for (i = One; i <= No_of_ports; i++)
+ {
+ if (port_info[i].dev == NULL || port_info[i].dev == dev)
+ {
+ /* printk(KERN_ERR "br_device_event: Found port %d\n", i); */
+ port_info[i].dev = dev;
+ port_info[i].port_id = i;
+ dev->bridge_port_id = i;
+ if( i > max_port_used )
+ max_port_used = i;
+ /* handle local MAC address manipulations */
+ br_add_local_mac(dev->dev_addr);
+ memcpy(port_info[i].ifmac.BRIDGE_ID_ULA, dev->dev_addr, 6);
+ return NOTIFY_DONE;
+ break;
+ }
+ }
+ break;
case NETDEV_UNREGISTER:
if (br_stats.flags & BR_DEBUG)
printk(KERN_DEBUG "br_device_event: NETDEV_UNREGISTER...\n");
i = find_port(dev);
if (i > 0) {
br_avl_delete_by_port(i);
+ memset(port_info[i].ifmac.BRIDGE_ID_ULA, 0, 6);
port_info[i].dev = NULL;
}
break;
+ case NETDEV_CHANGEADDR:
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "br_device_event: NETDEV_CHANGEADDR...\n");
+ i = find_port(dev);
+ if (i <= 0)
+ break;
+ if (memcmp(port_info[i].ifmac.BRIDGE_ID_ULA, dev->dev_addr, 6) != 0)
+ break; /* Don't worry about a change of hardware broadcast address! */
+ if (dev->start) {
+ printk(KERN_CRIT "br_device_event: NETDEV_CHANGEADDR on busy device %s - FIX DRIVER!\n",
+ dev->name);
+ /* return NOTIFY_BAD; It SHOULD be this, but I want to be friendly... */
+ return NOTIFY_DONE;
+ }
+ br_avl_delete_by_port(i);
+ memset(port_info[i].ifmac.BRIDGE_ID_ULA, 0, 6);
+ break;
}
return NOTIFY_DONE;
}
+/* Routine to loop over device list and register
+ * interfaces to bridge. Called from last part of net_dev_init just before
+ * bootp/rarp interface setup
+ */
+void br_spacedevice_register(void)
+{
+ struct net_device *dev;
+ for( dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ br_device_event(NULL, NETDEV_REGISTER, dev);
+ }
+}
+
+
+/* This is for SPEED in the kernel in net_bh.c */
+
+int br_call_bridge(struct sk_buff *skb, unsigned short type)
+{
+ int port;
+ struct net_device *dev;
+
+#if 0 /* Checked first in handle_bridge to save expense of function call */
+ if(!(br_stats.flags & BR_UP))
+ return 0;
+#endif
+
+ dev = skb->dev;
+ port = dev->bridge_port_id;
+
+ if(!port)
+ return 0;
+
+ /* Sanity - make sure we are not leaping off into fairy space! */
+ if ( port < 0 || port > max_port_used || port_info[port].dev != dev) {
+ if (net_ratelimit())
+ printk(KERN_CRIT "br_call_bridge: device %s has invalid port ID %d!\n",
+ dev->name,
+ dev->bridge_port_id);
+ return 0;
+ }
+
+ if(user_port_state[port] == Disabled)
+ return 0;
+
+ if (!br_protocol_ok(ntohs(type)))
+ return 0;
+
+ return 1;
+
+}
+
+
/*
* following routine is called when a frame is received
* from an interface, it returns 1 when it consumes the
@@ -1375,6 +1536,7 @@ int br_receive_frame(struct sk_buff *skb) /* 3.5 */
int port;
Port_data *p;
struct ethhdr *eth;
+ struct net_device *dev;
/* sanity */
if (!skb) {
@@ -1382,17 +1544,27 @@ int br_receive_frame(struct sk_buff *skb) /* 3.5 */
return(1);
}
+ dev = skb->dev;
+
skb->pkt_bridged = IS_BRIDGED;
/* check for loopback */
- if (skb->dev->flags & IFF_LOOPBACK)
+ if (dev->flags & IFF_LOOPBACK)
return 0 ;
- port = find_port(skb->dev);
+#if 0
+ port = find_port(dev);
+#else
+ port = dev->bridge_port_id;
+#endif
if(!port)
return 0;
+ /* Hand off to brg_rx BEFORE we screw up the skb */
+ if(brg_rx(skb, port))
+ return(1);
+
skb->nh.raw = skb->mac.raw;
eth = skb->mac.ethernet;
p = &port_info[port];
@@ -1501,7 +1673,7 @@ int br_tx_frame(struct sk_buff *skb) /* 3.5 */
eth = skb->mac.ethernet;
port = 0; /* an impossible port (locally generated) */
if (br_stats.flags & BR_DEBUG)
- printk("br_tx_fr : port %i src %02x:%02x:%02x:%02x:%02x:%02x"
+ printk(KERN_DEBUG "br_tx_fr : port %i src %02x:%02x:%02x:%02x:%02x:%02x"
" dest %02x:%02x:%02x:%02x:%02x:%02x\n",
port,
eth->h_source[0],
@@ -1739,7 +1911,7 @@ static int br_forward(struct sk_buff *skb, int port) /* 3.7 */
/* timer expired, invalidate entry */
f->flags &= ~FDB_ENT_VALID;
if (br_stats.flags & BR_DEBUG)
- printk("fdb entry expired...\n");
+ printk(KERN_DEBUG "fdb entry expired...\n");
/*
* Send flood and drop original
*/
@@ -1781,7 +1953,7 @@ static int br_forward(struct sk_buff *skb, int port) /* 3.7 */
/* timer expired, invalidate entry */
f->flags &= ~FDB_ENT_VALID;
if (br_stats.flags & BR_DEBUG)
- printk("fdb entry expired...\n");
+ printk(KERN_DEBUG "fdb entry expired...\n");
++br_stats_cnt.drop_same_port_aged;
}
else ++br_stats_cnt.drop_same_port;
@@ -1808,6 +1980,9 @@ static int br_flood(struct sk_buff *skb, int port)
{
if (i == port) /* don't send back where we got it */
continue;
+ if (i > max_port_used)
+ /* Don't go scanning empty port entries */
+ break;
if (port_info[i].state == Forwarding)
{
nskb = skb_clone(skb, GFP_ATOMIC);
@@ -1820,7 +1995,7 @@ static int br_flood(struct sk_buff *skb, int port)
/* To get here we must have done ARP already,
or have a received valid MAC header */
-/* printk("Flood to port %d\n",i);*/
+/* printk(KERN_DEBUG "Flood to port %d\n",i);*/
nskb->nh.raw = nskb->data + ETH_HLEN;
nskb->priority = 1;
dev_queue_xmit(nskb);
@@ -1829,16 +2004,6 @@ static int br_flood(struct sk_buff *skb, int port)
return(0);
}
-static int find_port(struct net_device *dev)
-{
- int i;
-
- for (i = One; i <= No_of_ports; i++)
- if (port_info[i].dev == dev)
- return(i);
- return(0);
-}
-
/*
* FIXME: This needs to come from the device structs, eg for
* 10,100,1Gbit ethernet.
@@ -1945,17 +2110,58 @@ struct fdb_info *get_fdb_info(int user_buf_size, int *copied,int *notcopied)
return fdbis;
}
+
+/* Fill in interface names in port_info structure
+ */
+static void br_get_ifnames(void) {
+ int i;
+
+ for(i=One;i<=No_of_ports; i++) {
+ /* memset IS needed. Kernel strncpy does NOT NULL terminate strings when limit
+ reached */
+ memset(port_info[i].ifname, 0, IFNAMSIZ);
+ if( port_info[i].dev == 0 )
+ continue;
+ strncpy(port_info[i].ifname, port_info[i].dev->name, IFNAMSIZ-1);
+ /* Being paranoid */
+ port_info[i].ifname[IFNAMSIZ-1] = '\0';
+ }
+}
+
+/* Given an interface index, loop over port array to see if configured. If
+ so, return port number, otherwise error */
+static int br_find_port(int ifindex)
+{
+ int i;
+
+ for(i=1; i <= No_of_ports; i++) {
+ if (port_info[i].dev == 0)
+ continue;
+ if (port_info[i].dev->ifindex == ifindex)
+ return(i);
+ }
+
+ return -EUNATCH; /* Tell me if this is incorrect error code for this case */
+}
+
+
int br_ioctl(unsigned int cmd, void *arg)
{
- int err, i;
+ int err, i, ifflags;
struct br_cf bcf;
bridge_id_t new_id;
-
+ struct net_device *dev;
+
switch(cmd)
{
case SIOCGIFBR: /* get bridging control blocks */
memcpy(&br_stats.bridge_data, &bridge_info, sizeof(Bridge_data));
- memcpy(&br_stats.port_data, &port_info, sizeof(Port_data)*No_of_ports);
+
+ /* Fill in interface names in port_info*/
+ br_get_ifnames();
+
+ br_stats.num_ports = No_of_ports;
+ memcpy(&br_stats.port_data, &port_info, sizeof(Port_data)*All_ports);
err = copy_to_user(arg, &br_stats, sizeof(struct br_stat));
if (err)
@@ -2021,16 +2227,28 @@ int br_ioctl(unsigned int cmd, void *arg)
}
br_stats.flags ^= BR_STP_DISABLED;
break;
+ case BRCMD_IF_ENABLE:
+ bcf.arg1 = br_find_port(bcf.arg1);
+ if (bcf.arg1 < 0)
+ return(bcf.arg1);
case BRCMD_PORT_ENABLE:
if (port_info[bcf.arg1].dev == 0)
return(-EINVAL);
if (user_port_state[bcf.arg1] != Disabled)
return(-EALREADY);
printk(KERN_DEBUG "br: enabling port %i\n",bcf.arg1);
+ dev = port_info[bcf.arg1].dev;
+ ifflags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI))
+ |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
+ dev_change_flags(dev, ifflags|IFF_PROMISC);
user_port_state[bcf.arg1] = ~Disabled;
if(br_stats.flags & BR_UP)
enable_port(bcf.arg1);
break;
+ case BRCMD_IF_DISABLE:
+ bcf.arg1 = br_find_port(bcf.arg1);
+ if (bcf.arg1 < 0)
+ return(bcf.arg1);
case BRCMD_PORT_DISABLE:
if (port_info[bcf.arg1].dev == 0)
return(-EINVAL);
@@ -2040,12 +2258,20 @@ int br_ioctl(unsigned int cmd, void *arg)
user_port_state[bcf.arg1] = Disabled;
if(br_stats.flags & BR_UP)
disable_port(bcf.arg1);
+ dev = port_info[bcf.arg1].dev;
+ ifflags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI))
+ |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
+ dev_change_flags(port_info[bcf.arg1].dev, ifflags & ~IFF_PROMISC);
break;
case BRCMD_SET_BRIDGE_PRIORITY:
new_id = bridge_info.bridge_id;
new_id.BRIDGE_PRIORITY = htons(bcf.arg1);
set_bridge_priority(&new_id);
break;
+ case BRCMD_SET_IF_PRIORITY:
+ bcf.arg1 = br_find_port(bcf.arg1);
+ if (bcf.arg1 < 0)
+ return(bcf.arg1);
case BRCMD_SET_PORT_PRIORITY:
if((port_info[bcf.arg1].dev == 0)
|| (bcf.arg2 & ~0xff))
@@ -2053,6 +2279,10 @@ int br_ioctl(unsigned int cmd, void *arg)
port_priority[bcf.arg1] = bcf.arg2;
set_port_priority(bcf.arg1);
break;
+ case BRCMD_SET_IF_PATH_COST:
+ bcf.arg1 = br_find_port(bcf.arg1);
+ if (bcf.arg1 < 0)
+ return(bcf.arg1);
case BRCMD_SET_PATH_COST:
if (port_info[bcf.arg1].dev == 0)
return(-EINVAL);
@@ -2134,3 +2364,377 @@ static int br_cmp(unsigned int *a, unsigned int *b)
}
return(0);
}
+
+
+
+
+/* --------------------------------------------------------------------------------
+ *
+ *
+ * Bridge network device here for future modularization - device structures
+ * must be 'static' inside bridge instance
+ * Modelled after sch_teql.c
+ *
+ */
+
+
+
+/*
+ * Index to functions.
+ */
+
+int brg_probe(struct net_device *dev);
+static int brg_open(struct net_device *dev);
+static int brg_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int brg_close(struct net_device *dev);
+static struct net_device_stats *brg_get_stats(struct net_device *dev);
+static void brg_set_multicast_list(struct net_device *dev);
+
+/*
+ * Board-specific info in dev->priv.
+ */
+
+struct net_local
+{
+ __u32 groups;
+ struct net_device_stats stats;
+};
+
+
+
+
+/*
+ * To call this a probe is a bit misleading, however for real
+ * hardware it would have to check what was present.
+ */
+
+int __init brg_probe(struct net_device *dev)
+{
+ unsigned int bogomips;
+ struct timeval utime;
+
+ printk(KERN_INFO "%s: network interface for Ethernet Bridge 006/NET4.0\n", dev->name);
+
+ /*
+ * Initialize the device structure.
+ */
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ /* Set up MAC address based on BogoMIPs figure for first CPU and time
+ */
+ bogomips = (loops_per_sec+2500)/500000 ;
+ get_fast_time(&utime);
+
+ /* Ummmm.... YES! */
+ dev->dev_addr[0] = '\xFE';
+ dev->dev_addr[1] = '\xFD';
+ dev->dev_addr[2] = (bridge_info.instance & 0x0F) << 4;
+ dev->dev_addr[2] |= ((utime.tv_sec & 0x000F0000) >> 16);
+ dev->dev_addr[3] = bogomips & 0xFF;
+ dev->dev_addr[4] = (utime.tv_sec & 0x0000FF00) >> 8;
+ dev->dev_addr[5] = (utime.tv_sec & 0x000000FF);
+
+ printk(KERN_INFO "%s: generated MAC address %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ dev->name,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+
+ printk(KERN_INFO "%s: attached to bridge instance %lu\n", dev->name, dev->base_addr);
+
+ /*
+ * The brg specific entries in the device structure.
+ */
+
+ dev->open = brg_open;
+ dev->hard_start_xmit = brg_start_xmit;
+ dev->stop = brg_close;
+ dev->get_stats = brg_get_stats;
+ dev->set_multicast_list = brg_set_multicast_list;
+
+ /*
+ * Setup the generic properties
+ */
+
+ ether_setup(dev);
+
+ dev->tx_queue_len = 0;
+
+ return 0;
+}
+
+/*
+ * Open/initialize the board.
+ */
+
+static int brg_open(struct net_device *dev)
+{
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "%s: Doing brg_open()...", dev->name);
+
+ if (memcmp(dev->dev_addr, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0)
+ return -EFAULT;
+
+ dev->start = 1;
+ dev->tbusy = 0;
+ return 0;
+}
+
+static unsigned brg_mc_hash(__u8 *dest)
+{
+ unsigned idx = 0;
+ idx ^= dest[0];
+ idx ^= dest[1];
+ idx ^= dest[2];
+ idx ^= dest[3];
+ idx ^= dest[4];
+ idx ^= dest[5];
+ return 1U << (idx&0x1F);
+}
+
+static void brg_set_multicast_list(struct net_device *dev)
+{
+ unsigned groups = ~0;
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
+ struct dev_mc_list *dmi;
+
+ groups = brg_mc_hash(dev->broadcast);
+
+ for (dmi=dev->mc_list; dmi; dmi=dmi->next) {
+ if (dmi->dmi_addrlen != 6)
+ continue;
+ groups |= brg_mc_hash(dmi->dmi_addr);
+ }
+ }
+ lp->groups = groups;
+}
+
+/*
+ * We transmit by throwing the packet at the bridge.
+ */
+
+static int brg_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ struct ethhdr *eth = (struct ethhdr*)skb->data;
+ int port;
+
+ /* Deal with the bridge being disabled */
+ if(!(br_stats.flags & BR_UP)) {
+ /* Either this */
+ /* lp->stats.tx_errors++; */ /* this condition is NOT an error */
+ /* or this (implied by RFC 2233) */
+ lp->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ lp->stats.tx_bytes+=skb->len;
+ lp->stats.tx_packets++;
+
+#if 0
+ ++br_stats_cnt.port_not_disable;
+#endif
+ skb->mac.raw = skb->nh.raw = skb->data;
+ eth = skb->mac.ethernet;
+ port = 0; /* an impossible port (locally generated) */
+
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "%s: brg_start_xmit - src %02x:%02x:%02x:%02x:%02x:%02x"
+ " dest %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ eth->h_source[0],
+ eth->h_source[1],
+ eth->h_source[2],
+ eth->h_source[3],
+ eth->h_source[4],
+ eth->h_source[5],
+ eth->h_dest[0],
+ eth->h_dest[1],
+ eth->h_dest[2],
+ eth->h_dest[3],
+ eth->h_dest[4],
+ eth->h_dest[5]);
+
+ /* Forward the packet ! */
+ if(br_forward(skb, port))
+ return(0);
+
+ /* Throw packet initially */
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+/*
+ * The typical workload of the driver:
+ * Handle the ether interface interrupts.
+ *
+ * (In this case handle the packets posted from the bridge)
+ */
+
+static int brg_rx(struct sk_buff *skb, int port)
+{
+ struct net_device *dev = &brg_if.dev;
+ struct net_local *lp = (struct net_local *)dev->priv;
+ struct ethhdr *eth = (struct ethhdr*)(skb->data);
+ int len = skb->len;
+ int clone = 0;
+
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "%s: brg_rx()\n", dev->name);
+
+ /* Get out of here if the bridge interface is not up
+ */
+ if(!(dev->flags & IFF_UP))
+ return(0);
+
+ /* Check that the port that this thing came off is in the forwarding state
+ * We should only listen to the same address scope we will transmit to.
+ */
+ if(port_info[port].state != Forwarding)
+ return(0);
+
+ /* Is this for us? - broadcast/multicast/promiscuous packets need cloning,
+ * with uni-cast we eat the packet
+ */
+ clone = 0;
+ if (dev->flags & IFF_PROMISC) {
+ clone = 1;
+ }
+ else if (eth->h_dest[0]&1) {
+ if (!(dev->flags&(IFF_ALLMULTI))
+ && !(brg_mc_hash(eth->h_dest)&lp->groups))
+ return(0);
+ clone = 1;
+ }
+ else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN) != 0) {
+ return(0);
+ }
+
+ /* Clone things here - we want to be transparent before we check packet data
+ * integrity
+ */
+ if(clone) {
+ struct sk_buff *skb2 = skb;
+ skb = skb_clone(skb2, GFP_ATOMIC);
+ if (skb == NULL) {
+ return(0);
+ }
+
+ }
+
+ /* Check packet length
+ */
+ if (len < 16) {
+ printk(KERN_DEBUG "%s : rx len = %d\n", dev->name, len);
+ kfree_skb(skb);
+ return(!clone);
+ }
+
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "%s: brg_rx - src %02x:%02x:%02x:%02x:%02x:%02x"
+ " dest %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ eth->h_source[0],
+ eth->h_source[1],
+ eth->h_source[2],
+ eth->h_source[3],
+ eth->h_source[4],
+ eth->h_source[5],
+ eth->h_dest[0],
+ eth->h_dest[1],
+ eth->h_dest[2],
+ eth->h_dest[3],
+ eth->h_dest[4],
+ eth->h_dest[5]);
+
+ /* Do it */
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = dev;
+ skb->protocol=eth_type_trans(skb,dev);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=len;
+ netif_rx(skb);
+ return(!clone);
+}
+
+static int brg_close(struct net_device *dev)
+{
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "%s: Shutting down.\n", dev->name);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ return 0;
+}
+
+static struct net_device_stats *brg_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ *
+ */
+
+int __init brg_init(void)
+{
+ int err;
+
+ memset(&brg_if, 0, sizeof(brg_if));
+
+ rtnl_lock();
+
+ brg_if.dev.base_addr = bridge_info.instance;
+ sprintf (brg_if.name, "brg%d", bridge_info.instance);
+ brg_if.dev.name = (void*)&brg_if.name;
+ if(dev_get(brg_if.name)) {
+ printk(KERN_INFO "%s already loaded.\n", brg_if.name);
+ return -EBUSY;
+ }
+ brg_if.dev.init = brg_probe;
+
+ err = register_netdevice(&brg_if.dev);
+ rtnl_unlock();
+ return err;
+}
+
+
+#if 0 /* Its here if we ever need it... */
+#ifdef MODULE
+
+void cleanup_module(void)
+{
+
+ /*
+ * Unregister the device
+ */
+ rtnl_lock();
+ unregister_netdevice(&the_master.dev);
+ rtnl_unlock();
+
+ /*
+ * Free up the private structure.
+ */
+
+ kfree(brg_if.dev.priv);
+ brg_if.dev.priv = NULL; /* gets re-allocated by brg_probe */
+}
+
+#endif /* MODULE */
+
+#endif
diff --git a/net/bridge/br_tree.c b/net/bridge/br_tree.c
index c1ed82f10..67efa8f1b 100644
--- a/net/bridge/br_tree.c
+++ b/net/bridge/br_tree.c
@@ -492,7 +492,8 @@ void br_avl_delete_by_port(int port)
port_info[port].fdb = NULL;
/* remove the local mac too */
- next = br_avl_find_addr(port_info[port].dev->dev_addr);
+/* next = br_avl_find_addr(port_info[port].dev->dev_addr); */
+ next = br_avl_find_addr(port_info[port].ifmac.BRIDGE_ID_ULA);
if (next != NULL)
br_avl_remove(next);
diff --git a/net/core/dev.c b/net/core/dev.c
index 64c13ed05..698a59cfc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -55,7 +55,7 @@
* Adam Sulmicki : Bug Fix : Network Device Unload
* A network device unload needs to purge
* the backlog queue.
- * Paul Rusty Russel : SIOCSIFNAME
+ * Paul Rusty Russell : SIOCSIFNAME
*/
#include <asm/uaccess.h>
@@ -87,9 +87,9 @@
#include <net/profile.h>
#include <linux/init.h>
#include <linux/kmod.h>
-#ifdef CONFIG_NET_RADIO
-#include <linux/wireless.h>
-#endif /* CONFIG_NET_RADIO */
+#if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
+#include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
+#endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
#ifdef CONFIG_PLIP
extern int plip_init(void);
#endif
@@ -860,7 +860,11 @@ void netif_rx(struct sk_buff *skb)
#ifdef CONFIG_BRIDGE
static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
{
- if (br_stats.flags & BR_UP && br_protocol_ok(ntohs(type)))
+ /*
+ * The br_stats.flags is checked here to save the expense of a
+ * function call.
+ */
+ if ((br_stats.flags & BR_UP) && br_call_bridge(skb, type))
{
/*
* We pass the bridge a complete frame. This means
@@ -884,7 +888,6 @@ static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
}
#endif
-
/*
* When we are called the queue is ready to grab, the interrupts are
* on and hardware can interrupt and queue to the receive queue as we
@@ -1307,7 +1310,7 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset,
#endif /* CONFIG_PROC_FS */
-#ifdef CONFIG_NET_RADIO
+#ifdef WIRELESS_EXT
#ifdef CONFIG_PROC_FS
/*
@@ -1392,7 +1395,7 @@ static int dev_get_wireless_info(char * buffer, char **start, off_t offset,
return len;
}
#endif /* CONFIG_PROC_FS */
-#endif /* CONFIG_NET_RADIO */
+#endif /* WIRELESS_EXT */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
@@ -1624,13 +1627,13 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return -EOPNOTSUPP;
}
-#ifdef CONFIG_NET_RADIO
+#ifdef WIRELESS_EXT
if(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
if (dev->do_ioctl)
return dev->do_ioctl(dev, ifr, cmd);
return -EOPNOTSUPP;
}
-#endif /* CONFIG_NET_RADIO */
+#endif /* WIRELESS_EXT */
}
return -EINVAL;
@@ -1754,7 +1757,7 @@ int dev_ioctl(unsigned int cmd, void *arg)
return -EFAULT;
return ret;
}
-#ifdef CONFIG_NET_RADIO
+#ifdef WIRELESS_EXT
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
dev_load(ifr.ifr_name);
if (IW_IS_SET(cmd)) {
@@ -1769,7 +1772,7 @@ int dev_ioctl(unsigned int cmd, void *arg)
return -EFAULT;
return ret;
}
-#endif /* CONFIG_NET_RADIO */
+#endif /* WIRELESS_EXT */
return -EINVAL;
}
}
@@ -2083,9 +2086,9 @@ int __init net_dev_init(void)
#ifdef CONFIG_PROC_FS
proc_net_create("dev", 0, dev_get_info);
create_proc_read_entry("net/dev_stat", 0, 0, dev_proc_stats, NULL);
-#ifdef CONFIG_NET_RADIO
+#ifdef WIRELESS_EXT
proc_net_create("wireless", 0, dev_get_wireless_info);
-#endif /* CONFIG_NET_RADIO */
+#endif /* WIRELESS_EXT */
#endif /* CONFIG_PROC_FS */
init_bh(NET_BH, net_bh);
@@ -2095,6 +2098,13 @@ int __init net_dev_init(void)
dst_init();
dev_mcast_init();
+#ifdef CONFIG_BRIDGE
+ /*
+ * Register any statically linked ethernet devices with the bridge
+ */
+ br_spacedevice_register();
+#endif
+
/*
* Initialise network devices
*/
diff --git a/net/core/filter.c b/net/core/filter.c
index d9939e3a4..8749e8c7b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2,7 +2,7 @@
* Linux Socket Filter - Kernel level socket filtering
*
* Author:
- * Jay Schulist <Jay.Schulist@spacs.k12.wi.us>
+ * Jay Schulist <jschlst@turbolinux.com>
*
* Based on the design of:
* - The Berkeley Packet Filter
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index 91c46a9ef..bf734a60e 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -353,11 +353,12 @@ static unsigned int nf_iterate(struct list_head *head,
int hook,
const struct net_device *indev,
const struct net_device *outdev,
- struct list_head **i)
+ struct list_head **i,
+ int (*okfn)(struct sk_buff *))
{
for (*i = (*i)->next; *i != head; *i = (*i)->next) {
struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
- switch (elem->hook(hook, skb, indev, outdev)) {
+ switch (elem->hook(hook, skb, indev, outdev, okfn)) {
case NF_QUEUE:
NFDEBUG("nf_iterate: NF_QUEUE for %p.\n", *skb);
return NF_QUEUE;
@@ -471,7 +472,7 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
read_lock_bh(&nf_lock);
elem = &nf_hooks[pf][hook];
verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
- outdev, &elem);
+ outdev, &elem, okfn);
if (verdict == NF_QUEUE) {
NFDEBUG("nf_hook: Verdict = QUEUE.\n");
nf_queue(skb, elem, pf, hook, indev, outdev, okfn);
@@ -553,7 +554,8 @@ void nf_reinject(struct sk_buff *skb, unsigned long mark, unsigned int verdict)
skb->nfmark = mark;
verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
&skb, info->hook,
- info->indev, info->outdev, &elem);
+ info->indev, info->outdev, &elem,
+ info->okfn);
}
if (verdict == NF_QUEUE) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7c8a332e2..12a8f8d72 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,7 +4,7 @@
* Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
- * Version: $Id: skbuff.c,v 1.60 1999/08/23 07:02:01 davem Exp $
+ * Version: $Id: skbuff.c,v 1.63 2000/01/02 09:15:17 davem Exp $
*
* Fixes:
* Alan Cox : Fixed the worst of the load balancer bugs.
@@ -275,14 +275,48 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
return n;
}
+static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+ /*
+ * Shift between the two data areas in bytes
+ */
+ unsigned long offset = new->data - old->data;
+
+ new->list=NULL;
+ new->sk=NULL;
+ new->dev=old->dev;
+ new->rx_dev=NULL;
+ new->priority=old->priority;
+ new->protocol=old->protocol;
+ new->dst=dst_clone(old->dst);
+ new->h.raw=old->h.raw+offset;
+ new->nh.raw=old->nh.raw+offset;
+ new->mac.raw=old->mac.raw+offset;
+ memcpy(new->cb, old->cb, sizeof(old->cb));
+ new->used=old->used;
+ new->is_clone=0;
+ atomic_set(&new->users, 1);
+ new->pkt_type=old->pkt_type;
+ new->stamp=old->stamp;
+ new->destructor = NULL;
+ new->security=old->security;
+#ifdef CONFIG_NETFILTER
+ new->nfmark=old->nfmark;
+ new->nfreason=old->nfreason;
+ new->nfcache=old->nfcache;
+#ifdef CONFIG_NETFILTER_DEBUG
+ new->nf_debug=old->nf_debug;
+#endif
+#endif
+}
+
/*
* This is slower, and copies the whole data area
*/
-struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
struct sk_buff *n;
- unsigned long offset;
/*
* Allocate the copy buffer
@@ -292,12 +326,6 @@ struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
if(n==NULL)
return NULL;
- /*
- * Shift between the two data areas in bytes
- */
-
- offset=n->head-skb->head;
-
/* Set the data pointer */
skb_reserve(n,skb->data-skb->head);
/* Set the tail pointer and length */
@@ -305,86 +333,36 @@ struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
/* Copy the bytes */
memcpy(n->head,skb->head,skb->end-skb->head);
n->csum = skb->csum;
- n->list=NULL;
- n->sk=NULL;
- n->dev=skb->dev;
- n->rx_dev=NULL;
- n->priority=skb->priority;
- n->protocol=skb->protocol;
- n->dst=dst_clone(skb->dst);
- n->h.raw=skb->h.raw+offset;
- n->nh.raw=skb->nh.raw+offset;
- n->mac.raw=skb->mac.raw+offset;
- memcpy(n->cb, skb->cb, sizeof(skb->cb));
- n->used=skb->used;
- n->is_clone=0;
- atomic_set(&n->users, 1);
- n->pkt_type=skb->pkt_type;
- n->stamp=skb->stamp;
- n->destructor = NULL;
- n->security=skb->security;
-#ifdef CONFIG_NETFILTER
- n->nfmark=skb->nfmark;
- n->nfreason=skb->nfreason;
- n->nfcache=skb->nfcache;
-#ifdef CONFIG_NETFILTER_DEBUG
- n->nf_debug=skb->nf_debug;
-#endif
-#endif
+ copy_skb_header(n, skb);
+
return n;
}
-struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, int newheadroom)
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ int newheadroom,
+ int newtailroom,
+ int gfp_mask)
{
struct sk_buff *n;
- unsigned long offset;
/*
* Allocate the copy buffer
*/
- n=alloc_skb((skb->end-skb->data)+newheadroom, GFP_ATOMIC);
+ n=alloc_skb(newheadroom + (skb->tail - skb->data) + newtailroom,
+ gfp_mask);
if(n==NULL)
return NULL;
skb_reserve(n,newheadroom);
- /*
- * Shift between the two data areas in bytes
- */
-
- offset=n->data-skb->data;
-
/* Set the tail pointer and length */
skb_put(n,skb->len);
- /* Copy the bytes */
- memcpy(n->data,skb->data,skb->len);
- n->list=NULL;
- n->sk=NULL;
- n->priority=skb->priority;
- n->protocol=skb->protocol;
- n->dev=skb->dev;
- n->rx_dev=NULL;
- n->dst=dst_clone(skb->dst);
- n->h.raw=skb->h.raw+offset;
- n->nh.raw=skb->nh.raw+offset;
- n->mac.raw=skb->mac.raw+offset;
- memcpy(n->cb, skb->cb, sizeof(skb->cb));
- n->used=skb->used;
- n->is_clone=0;
- atomic_set(&n->users, 1);
- n->pkt_type=skb->pkt_type;
- n->stamp=skb->stamp;
- n->destructor = NULL;
- n->security=skb->security;
-#ifdef CONFIG_NETFILTER
- n->nfmark=skb->nfmark;
- n->nfreason=skb->nfreason;
- n->nfcache=skb->nfcache;
-#ifdef CONFIG_NETFILTER_DEBUG
- n->nf_debug=skb->nf_debug;
-#endif
-#endif
+
+ /* Copy the data only. */
+ memcpy(n->data, skb->data, skb->len);
+
+ copy_skb_header(n, skb);
return n;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 15b0ec64f..e069ca898 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,7 +7,7 @@
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.86 1999/09/01 08:11:49 davem Exp $
+ * Version: $Id: sock.c,v 1.87 1999/11/23 08:56:59 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 679c3dfb0..c4f834157 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -22,6 +22,9 @@
* Steve Whitehouse : More SMP locking changes & dn_cache_dump()
* Steve Whitehouse : Prerouting NF hook, now really is prerouting.
* Fixed possible skb leak in rtnetlink funcs.
+ * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
+ * Alexey Kuznetsov's finer grained locking
+ * from ipv4/route.c.
*/
/******************************************************************************
@@ -44,7 +47,6 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
-#include <linux/timer.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
@@ -69,9 +71,14 @@
#include <net/dn_fib.h>
#include <net/dn_raw.h>
+struct dn_rt_hash_bucket
+{
+ struct dn_route *chain;
+ rwlock_t lock;
+} __attribute__((__aligned__(8)));
+
extern struct neigh_table dn_neigh_table;
-#define DN_HASHBUCKETS 16
static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
@@ -82,8 +89,8 @@ static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static int dn_route_input(struct sk_buff *);
-static struct dn_route *dn_route_cache[DN_HASHBUCKETS];
-static rwlock_t dn_hash_lock = RW_LOCK_UNLOCKED;
+static struct dn_rt_hash_bucket *dn_rt_hash_table;
+static unsigned dn_rt_hash_mask;
static struct timer_list dn_route_timer = { NULL, NULL, 0, 0L, NULL };
int decnet_dst_gc_interval = 2;
@@ -104,8 +111,11 @@ static struct dst_ops dn_dst_ops = {
static __inline__ unsigned dn_hash(unsigned short dest)
{
- unsigned short tmp = (dest&0xff) ^ (dest>>8);
- return (tmp&0x0f) ^ (tmp>>4);
+ unsigned short tmp = dest;
+ tmp ^= (dest >> 3);
+ tmp ^= (dest >> 5);
+ tmp ^= (dest >> 10);
+ return dn_rt_hash_mask & (unsigned)tmp;
}
static void dn_dst_check_expire(unsigned long dummy)
@@ -115,10 +125,10 @@ static void dn_dst_check_expire(unsigned long dummy)
unsigned long now = jiffies;
unsigned long expire = 120 * HZ;
- for(i = 0; i < DN_HASHBUCKETS; i++) {
- rtp = &dn_route_cache[i];
+ for(i = 0; i <= dn_rt_hash_mask; i++) {
+ rtp = &dn_rt_hash_table[i].chain;
- write_lock(&dn_hash_lock);
+ write_lock(&dn_rt_hash_table[i].lock);
for(;(rt=*rtp); rtp = &rt->u.rt_next) {
if (atomic_read(&rt->u.dst.__refcnt) ||
(now - rt->u.dst.lastuse) < expire)
@@ -127,7 +137,7 @@ static void dn_dst_check_expire(unsigned long dummy)
rt->u.rt_next = NULL;
dst_free(&rt->u.dst);
}
- write_unlock(&dn_hash_lock);
+ write_unlock(&dn_rt_hash_table[i].lock);
if ((jiffies - now) > 0)
break;
@@ -144,9 +154,9 @@ static int dn_dst_gc(void)
unsigned long now = jiffies;
unsigned long expire = 10 * HZ;
- write_lock_bh(&dn_hash_lock);
- for(i = 0; i < DN_HASHBUCKETS; i++) {
- rtp = &dn_route_cache[i];
+ for(i = 0; i <= dn_rt_hash_mask; i++) {
+ write_lock_bh(&dn_rt_hash_table[i].lock);
+ rtp = &dn_rt_hash_table[i].chain;
for(; (rt=*rtp); rtp = &rt->u.rt_next) {
if (atomic_read(&rt->u.dst.__refcnt) ||
(now - rt->u.dst.lastuse) < expire)
@@ -156,8 +166,8 @@ static int dn_dst_gc(void)
dst_free(&rt->u.dst);
break;
}
+ write_unlock_bh(&dn_rt_hash_table[i].lock);
}
- write_unlock_bh(&dn_hash_lock);
return 0;
}
@@ -194,15 +204,15 @@ static void dn_insert_route(struct dn_route *rt)
unsigned hash = dn_hash(rt->rt_daddr);
unsigned long now = jiffies;
- write_lock_bh(&dn_hash_lock);
- rt->u.rt_next = dn_route_cache[hash];
- dn_route_cache[hash] = rt;
+ write_lock_bh(&dn_rt_hash_table[hash].lock);
+ rt->u.rt_next = dn_rt_hash_table[hash].chain;
+ dn_rt_hash_table[hash].chain = rt;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
rt->u.dst.lastuse = now;
- write_unlock_bh(&dn_hash_lock);
+ write_unlock_bh(&dn_rt_hash_table[hash].lock);
}
void dn_run_flush(unsigned long dummy)
@@ -210,18 +220,21 @@ void dn_run_flush(unsigned long dummy)
int i;
struct dn_route *rt, *next;
- write_lock_bh(&dn_hash_lock);
- for(i = 0; i < DN_HASHBUCKETS; i++) {
- if ((rt = xchg(&dn_route_cache[i], NULL)) == NULL)
- continue;
+ for(i = 0; i < dn_rt_hash_mask; i++) {
+ write_lock_bh(&dn_rt_hash_table[i].lock);
+
+ if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
+ goto nothing_to_declare;
for(; rt; rt=next) {
next = rt->u.rt_next;
rt->u.rt_next = NULL;
dst_free((struct dst_entry *)rt);
}
+
+nothing_to_declare:
+ write_unlock_bh(&dn_rt_hash_table[i].lock);
}
- write_unlock_bh(&dn_hash_lock);
}
static int dn_route_rx_packet(struct sk_buff *skb)
@@ -607,8 +620,8 @@ int dn_route_output(struct dst_entry **pprt, dn_address dst, dn_address src, int
struct dn_route *rt = NULL;
if (!(flags & MSG_TRYHARD)) {
- read_lock_bh(&dn_hash_lock);
- for(rt = dn_route_cache[hash]; rt; rt = rt->u.rt_next) {
+ read_lock_bh(&dn_rt_hash_table[hash].lock);
+ for(rt = dn_rt_hash_table[hash].chain; rt; rt = rt->u.rt_next) {
if ((dst == rt->rt_daddr) &&
(src == rt->rt_saddr) &&
(rt->rt_iif == 0) &&
@@ -616,12 +629,12 @@ int dn_route_output(struct dst_entry **pprt, dn_address dst, dn_address src, int
rt->u.dst.lastuse = jiffies;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
- read_unlock_bh(&dn_hash_lock);
+ read_unlock_bh(&dn_rt_hash_table[hash].lock);
*pprt = &rt->u.dst;
return 0;
}
}
- read_unlock_bh(&dn_hash_lock);
+ read_unlock_bh(&dn_rt_hash_table[hash].lock);
}
return dn_route_output_slow(pprt, dst, src, flags);
@@ -731,8 +744,8 @@ int dn_route_input(struct sk_buff *skb)
if (skb->dst)
return 0;
- read_lock_bh(&dn_hash_lock);
- for(rt = dn_route_cache[hash]; rt != NULL; rt = rt->u.rt_next) {
+ read_lock(&dn_rt_hash_table[hash].lock);
+ for(rt = dn_rt_hash_table[hash].chain; rt != NULL; rt = rt->u.rt_next) {
if ((rt->rt_saddr == cb->dst) &&
(rt->rt_daddr == cb->src) &&
(rt->rt_oif == 0) &&
@@ -740,12 +753,12 @@ int dn_route_input(struct sk_buff *skb)
rt->u.dst.lastuse = jiffies;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
- read_unlock_bh(&dn_hash_lock);
+ read_unlock(&dn_rt_hash_table[hash].lock);
skb->dst = (struct dst_entry *)rt;
return 0;
}
}
- read_unlock_bh(&dn_hash_lock);
+ read_unlock(&dn_rt_hash_table[hash].lock);
return dn_route_input_slow(skb);
}
@@ -831,7 +844,9 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
skb->rx_dev = dev;
cb->src = src;
cb->dst = dst;
+ local_bh_disable();
err = dn_route_input(skb);
+ local_bh_enable();
memset(cb, 0, sizeof(struct dn_skb_cb));
rt = (struct dn_route *)skb->dst;
} else {
@@ -885,25 +900,25 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[0];
s_idx = idx = cb->args[1];
- for(h = 0; h < DN_HASHBUCKETS; h++) {
+ for(h = 0; h <= dn_rt_hash_mask; h++) {
if (h < s_h)
continue;
if (h > s_h)
s_idx = 0;
- read_lock_bh(&dn_hash_lock);
- for(rt = dn_route_cache[h], idx = 0; rt; rt = rt->u.rt_next, idx++) {
+ read_lock_bh(&dn_rt_hash_table[h].lock);
+ for(rt = dn_rt_hash_table[h].chain, idx = 0; rt; rt = rt->u.rt_next, idx++) {
if (idx < s_idx)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
dst_release(xchg(&skb->dst, NULL));
- read_unlock_bh(&dn_hash_lock);
+ read_unlock_bh(&dn_rt_hash_table[h].lock);
goto done;
}
dst_release(xchg(&skb->dst, NULL));
}
- read_unlock_bh(&dn_hash_lock);
+ read_unlock_bh(&dn_rt_hash_table[h].lock);
}
done:
@@ -924,9 +939,9 @@ static int decnet_cache_get_info(char *buffer, char **start, off_t offset, int l
int i;
char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
- read_lock_bh(&dn_hash_lock);
- for(i = 0; i < DN_HASHBUCKETS; i++) {
- rt = dn_route_cache[i];
+ for(i = 0; i <= dn_rt_hash_mask; i++) {
+ read_lock_bh(&dn_rt_hash_table[i].lock);
+ rt = dn_rt_hash_table[i].chain;
for(; rt != NULL; rt = rt->u.rt_next) {
len += sprintf(buffer + len, "%-8s %-7s %-7s %04d %04d %04d\n",
rt->u.dst.dev ? rt->u.dst.dev->name : "*",
@@ -937,6 +952,7 @@ static int decnet_cache_get_info(char *buffer, char **start, off_t offset, int l
(int)rt->u.dst.rtt
);
+
pos = begin + len;
if (pos < offset) {
@@ -946,10 +962,10 @@ static int decnet_cache_get_info(char *buffer, char **start, off_t offset, int l
if (pos > offset + length)
break;
}
+ read_unlock_bh(&dn_rt_hash_table[i].lock);
if (pos > offset + length)
break;
}
- read_unlock_bh(&dn_hash_lock);
*start = buffer + (offset - begin);
len -= (offset - begin);
@@ -963,17 +979,55 @@ static int decnet_cache_get_info(char *buffer, char **start, off_t offset, int l
void __init dn_route_init(void)
{
- memset(dn_route_cache, 0, sizeof(struct dn_route *) * DN_HASHBUCKETS);
+ int i, goal, order;
dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
sizeof(struct dn_route),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
+ if (!dn_dst_ops.kmem_cachep)
+ panic("DECnet: Failed to allocate dn_dst_cache\n");
+
dn_route_timer.function = dn_dst_check_expire;
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
+ goal = num_physpages >> (26 - PAGE_SHIFT);
+
+ for(order = 0; (1UL << order) < goal; order++)
+ /* NOTHING */;
+
+ /*
+ * Only want 1024 entries max, since the table is very, very unlikely
+ * to be larger than that.
+ */
+ while(order && ((((1UL << order) * PAGE_SIZE) /
+ sizeof(struct dn_rt_hash_bucket)) >= 2048))
+ order--;
+
+ do {
+ dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
+ sizeof(struct dn_rt_hash_bucket);
+ while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
+ dn_rt_hash_mask--;
+ dn_rt_hash_table = (struct dn_rt_hash_bucket *)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (dn_rt_hash_table == NULL && --order > 0);
+
+ if (!dn_rt_hash_table)
+ panic("Failed to allocate DECnet route cache hash table\n");
+
+ printk(KERN_INFO "DECnet: Routing cache hash table of %u buckets, %dKbytes\n", dn_rt_hash_mask, (dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
+
+ dn_rt_hash_mask--;
+ for(i = 0; i <= dn_rt_hash_mask; i++) {
+ dn_rt_hash_table[i].lock = RW_LOCK_UNLOCKED;
+ dn_rt_hash_table[i].chain = NULL;
+ }
+
+ dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
+
#ifdef CONFIG_PROC_FS
proc_net_create("decnet_cache",0,decnet_cache_get_info);
#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/Config.in b/net/ipv4/Config.in
index 487b7411a..32e2aca16 100644
--- a/net/ipv4/Config.in
+++ b/net/ipv4/Config.in
@@ -8,22 +8,22 @@ if [ "$CONFIG_IP_ADVANCED_ROUTER" = "y" ]; then
define_bool CONFIG_NETLINK y
bool ' IP: policy routing' CONFIG_IP_MULTIPLE_TABLES
if [ "$CONFIG_IP_MULTIPLE_TABLES" = "y" ]; then
- bool ' IP: use FWMARK value as routing key' CONFIG_IP_ROUTE_FWMARK
+ if [ "$CONFIG_NETFILTER" = "y" ]; then
+ bool ' IP: use FWMARK value as routing key' CONFIG_IP_ROUTE_FWMARK
+ fi
+ bool ' IP: fast network address translation' CONFIG_IP_ROUTE_NAT
fi
bool ' IP: equal cost multipath' CONFIG_IP_ROUTE_MULTIPATH
bool ' IP: use TOS value as routing key' CONFIG_IP_ROUTE_TOS
bool ' IP: verbose route monitoring' CONFIG_IP_ROUTE_VERBOSE
bool ' IP: large routing tables' CONFIG_IP_ROUTE_LARGE_TABLES
- if [ "$CONFIG_IP_MULTIPLE_TABLES" = "y" ]; then
- bool ' IP: fast network address translation' CONFIG_IP_ROUTE_NAT
- fi
fi
bool ' IP: kernel level autoconfiguration' CONFIG_IP_PNP
if [ "$CONFIG_IP_PNP" = "y" ]; then
- bool ' BOOTP support' CONFIG_IP_PNP_BOOTP
- bool ' RARP support' CONFIG_IP_PNP_RARP
+ bool ' IP: BOOTP support' CONFIG_IP_PNP_BOOTP
+ bool ' IP: RARP support' CONFIG_IP_PNP_RARP
# not yet ready..
-# bool ' ARP support' CONFIG_IP_PNP_ARP
+# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
fi
bool ' IP: optimize as router not host' CONFIG_IP_ROUTER
tristate ' IP: tunneling' CONFIG_NET_IPIP
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index e2f9a45b7..a0d501ec5 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,7 @@
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := ipv4.o
-IPV4_OBJS := utils.o route.o proc.o protocol.o \
+IPV4_OBJS := utils.o route.o inetpeer.o proc.o protocol.o \
ip_input.o ip_fragment.o ip_forward.o ip_options.o \
ip_output.o ip_sockglue.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o\
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5c370cc7c..b0d08ebe7 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,7 +5,7 @@
*
* PF_INET protocol family socket handler.
*
- * Version: $Id: af_inet.c,v 1.97 1999/09/08 03:46:46 davem Exp $
+ * Version: $Id: af_inet.c,v 1.100 1999/12/21 04:05:02 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -109,9 +109,9 @@
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
-#ifdef CONFIG_NET_RADIO
-#include <linux/wireless.h>
-#endif /* CONFIG_NET_RADIO */
+#if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
+#include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
+#endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
#define min(a,b) ((a)<(b)?(a):(b))
@@ -887,10 +887,10 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
(cmd <= (SIOCDEVPRIVATE + 15)))
return(dev_ioctl(cmd,(void *) arg));
-#ifdef CONFIG_NET_RADIO
+#ifdef WIRELESS_EXT
if((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST))
return(dev_ioctl(cmd,(void *) arg));
-#endif
+#endif /* WIRELESS_EXT */
if (sk->prot->ioctl==NULL || (err=sk->prot->ioctl(sk, cmd, arg))==-ENOIOCTLCMD)
return(dev_ioctl(cmd,(void *) arg));
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f4272338b..591f3cceb 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,6 +1,6 @@
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.81 1999/08/30 10:17:05 davem Exp $
+ * Version: $Id: arp.c,v 1.83 1999/12/15 22:39:03 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d48704cc6..0ae574a64 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: FIB frontend.
*
- * Version: $Id: fib_frontend.c,v 1.19 1999/08/31 07:03:23 davem Exp $
+ * Version: $Id: fib_frontend.c,v 1.21 1999/12/15 22:39:07 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4d24ed413..24ee69e05 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -3,7 +3,7 @@
*
* Alan Cox, <alan@redhat.com>
*
- * Version: $Id: icmp.c,v 1.61 1999/08/31 07:03:33 davem Exp $
+ * Version: $Id: icmp.c,v 1.62 1999/12/23 01:43:37 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -483,7 +483,7 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
unsigned long csum;
if (offset) {
- icmp_param->csum=csum_partial_copy(icmp_param->data_ptr+offset-sizeof(struct icmphdr),
+ icmp_param->csum=csum_partial_copy_nocheck(icmp_param->data_ptr+offset-sizeof(struct icmphdr),
to, fraglen,icmp_param->csum);
return 0;
}
@@ -493,10 +493,10 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
* the other fragments first, so that we get the checksum
* for the whole packet here.
*/
- csum = csum_partial_copy((void *)&icmp_param->icmph,
+ csum = csum_partial_copy_nocheck((void *)&icmp_param->icmph,
to, sizeof(struct icmphdr),
icmp_param->csum);
- csum = csum_partial_copy(icmp_param->data_ptr,
+ csum = csum_partial_copy_nocheck(icmp_param->data_ptr,
to+sizeof(struct icmphdr),
fraglen-sizeof(struct icmphdr), csum);
icmph=(struct icmphdr *)to;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5f4ab1078..93dd76391 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,7 +8,7 @@
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
- * Version: $Id: igmp.c,v 1.34 1999/08/20 11:05:12 davem Exp $
+ * Version: $Id: igmp.c,v 1.36 2000/01/06 00:41:54 davem Exp $
*
* Authors:
* Alan Cox <Alan.Cox@linux.org>
@@ -224,13 +224,13 @@ static int igmp_send_report(struct net_device *dev, u32 group, int type)
iph->version = 4;
iph->ihl = (sizeof(struct iphdr)+4)>>2;
iph->tos = 0;
- iph->frag_off = 0;
+ iph->frag_off = __constant_htons(IP_DF);
iph->ttl = 1;
iph->daddr = dst;
iph->saddr = rt->rt_src;
iph->protocol = IPPROTO_IGMP;
iph->tot_len = htons(IGMP_SIZE);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, &rt->u.dst);
((u8*)&iph[1])[0] = IPOPT_RA;
((u8*)&iph[1])[1] = 4;
((u8*)&iph[1])[2] = 0;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
new file mode 100644
index 000000000..a20273d7d
--- /dev/null
+++ b/net/ipv4/inetpeer.c
@@ -0,0 +1,447 @@
+/*
+ * INETPEER - A storage for permanent information about peers
+ *
+ * This source is covered by the GNU GPL, the same as all kernel sources.
+ *
+ * Version: $Id: inetpeer.c,v 1.1 2000/01/06 00:41:55 davem Exp $
+ *
+ * Authors: Andrey V. Savochkin <saw@msu.ru>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <net/inetpeer.h>
+
+/*
+ * Theory of operations.
+ * We keep one entry for each peer IP address. The nodes contains long-living
+ * information about the peer which doesn't depend on routes.
+ * At this moment this information consists only of ID field for the next
+ * outgoing IP packet. This field is incremented with each packet as encoded
+ * in inet_getid() function (include/net/inetpeer.h).
+ * At the moment of writing this notes identifier of IP packets is generated
+ * to be unpredictable using this code only for packets subjected
+ * (actually or potentially) to defragmentation. I.e. DF packets less than
+ * PMTU in size uses a constant ID and do not use this code (see
+ * ip_select_ident() in include/net/ip.h).
+ *
+ * Route cache entries hold references to our nodes.
+ * New cache entries get references via lookup by destination IP address in
+ * the avl tree. The reference is grabbed only when it's needed i.e. only
+ * when we try to output IP packet which needs an unpredictable ID (see
+ * __ip_select_ident() in net/ipv4/route.c).
+ * Nodes are removed only when reference counter goes to 0.
+ * When it's happened the node may be removed when a sufficient amount of
+ * time has been passed since its last use. The less-recently-used entry can
+ * also be removed if the pool is overloaded i.e. if the total amount of
+ * entries is greater-or-equal than the threshold.
+ *
+ * Node pool is organised as an AVL tree.
+ * Such an implementation has been chosen not just for fun. It's a way to
+ * prevent easy and efficient DoS attacks by creating hash collisions. A huge
+ * amount of long living nodes in a single hash slot would significantly delay
+ * lookups performed with disabled BHs.
+ *
+ * Serialisation issues.
+ * 1. Nodes may appear in the tree only with the pool write lock held.
+ * 2. Nodes may disappear from the tree only with the pool write lock held
+ * AND reference count being 0.
+ * 3. Nodes appears and disappears from unused node list only under
+ * "inet_peer_unused_lock".
+ * 4. Global variable peer_total is modified under the pool lock.
+ * 5. struct inet_peer fields modification:
+ * avl_left, avl_right, avl_parent, avl_height: pool lock
+ * unused_next, unused_prevp: unused node list lock
+ * refcnt: atomically against modifications on other CPU;
+ * usually under some other lock to prevent node disappearing
+ * dtime: unused node list lock
+ * v4daddr: unchangeable
+ * ip_id_count: idlock
+ */
+
+spinlock_t inet_peer_idlock = SPIN_LOCK_UNLOCKED;
+
+static kmem_cache_t *peer_cachep;
+
+#define node_height(x) x->avl_height
+static struct inet_peer peer_fake_node = {
+ avl_left : &peer_fake_node,
+ avl_right : &peer_fake_node,
+ avl_height : 0
+};
+#define peer_avl_empty (&peer_fake_node)
+static struct inet_peer *peer_root = peer_avl_empty;
+static rwlock_t peer_pool_lock = RW_LOCK_UNLOCKED;
+#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
+
+static volatile int peer_total = 0;
+int inet_peer_threshold = 65536 + 128; /* start to throw entries more
+ * aggressively at this stage */
+int inet_peer_minttl = 120 * HZ; /* TTL under high load: 120 sec */
+int inet_peer_maxttl = 10 * 60 * HZ; /* usual time to live: 10 min */
+struct inet_peer *inet_peer_unused_head = NULL,
+ **inet_peer_unused_tailp = &inet_peer_unused_head;
+spinlock_t inet_peer_unused_lock = SPIN_LOCK_UNLOCKED;
+#define PEER_MAX_CLEANUP_WORK 30
+
+static void peer_check_expire(unsigned long dummy);
+static struct timer_list peer_periodic_timer =
+ { NULL, NULL, 0, 0, &peer_check_expire };
+int inet_peer_gc_mintime = 10 * HZ,
+ inet_peer_gc_maxtime = 120 * HZ;
+
+
+void __init inet_initpeers(void)
+{
+ struct sysinfo si;
+
+ /* Use the straight interface to information about memory. */
+ si_meminfo(&si);
+ /* The values below were suggested by Alexey Kuznetsov
+ * <kuznet@ms2.inr.ac.ru>. I don't have any opinion about the values
+ * myself. --SAW
+ */
+ if (si.totalram <= 32768*1024)
+ inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
+ if (si.totalram <= 16384*1024)
+ inet_peer_threshold >>= 1; /* about 512KB */
+ if (si.totalram <= 8192*1024)
+ inet_peer_threshold >>= 2; /* about 128KB */
+
+ peer_cachep = kmem_cache_create("inet_peer_cache",
+ sizeof(struct inet_peer),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+
+ /* All the timers, started at system startup tend
+ to synchronize. Perturb it a bit.
+ */
+ peer_periodic_timer.expires = jiffies
+ + net_random() % inet_peer_gc_maxtime
+ + inet_peer_gc_maxtime;
+ add_timer(&peer_periodic_timer);
+}
+
+/* Called with or without local BH being disabled. */
+static void unlink_from_unused(struct inet_peer *p)
+{
+ spin_lock_bh(&inet_peer_unused_lock);
+ if (p->unused_prevp != NULL) {
+ /* On unused list. */
+ *p->unused_prevp = p->unused_next;
+ if (p->unused_next != NULL)
+ p->unused_next->unused_prevp = p->unused_prevp;
+ else
+ inet_peer_unused_tailp = p->unused_prevp;
+ p->unused_prevp = NULL; /* mark it as removed */
+ }
+ spin_unlock_bh(&inet_peer_unused_lock);
+}
+
+/* Called with local BH disabled and the pool lock held. */
+#define lookup(daddr) \
+({ \
+ struct inet_peer *u, **v; \
+ stackptr = stack; \
+ *stackptr++ = &peer_root; \
+ for (u = peer_root; u != peer_avl_empty; ) { \
+ if (daddr == u->v4daddr) \
+ break; \
+ if (daddr < u->v4daddr) \
+ v = &u->avl_left; \
+ else \
+ v = &u->avl_right; \
+ *stackptr++ = v; \
+ u = *v; \
+ } \
+ u; \
+})
+
+/* Called with local BH disabled and the pool write lock held. */
+#define lookup_rightempty(start) \
+({ \
+ struct inet_peer *u, **v; \
+ *stackptr++ = &start->avl_left; \
+ v = &start->avl_left; \
+ for (u = *v; u->avl_right != peer_avl_empty; ) { \
+ v = &u->avl_right; \
+ *stackptr++ = v; \
+ u = *v; \
+ } \
+ u; \
+})
+
+/* Called with local BH disabled and the pool write lock held.
+ * Variable names are the proof of operation correctness.
+ * Look into mm/map_avl.c for more detail description of the ideas. */
+static void peer_avl_rebalance(struct inet_peer **stack[],
+ struct inet_peer ***stackend)
+{
+ struct inet_peer **nodep, *node, *l, *r;
+ int lh, rh;
+
+ while (stackend > stack) {
+ nodep = *--stackend;
+ node = *nodep;
+ l = node->avl_left;
+ r = node->avl_right;
+ lh = node_height(l);
+ rh = node_height(r);
+ if (lh > rh + 1) { /* l: RH+2 */
+ struct inet_peer *ll, *lr, *lrl, *lrr;
+ int lrh;
+ ll = l->avl_left;
+ lr = l->avl_right;
+ lrh = node_height(lr);
+ if (lrh <= node_height(ll)) { /* ll: RH+1 */
+ node->avl_left = lr; /* lr: RH or RH+1 */
+ node->avl_right = r; /* r: RH */
+ node->avl_height = lrh + 1; /* RH+1 or RH+2 */
+ l->avl_left = ll; /* ll: RH+1 */
+ l->avl_right = node; /* node: RH+1 or RH+2 */
+ l->avl_height = node->avl_height + 1;
+ *nodep = l;
+ } else { /* ll: RH, lr: RH+1 */
+ lrl = lr->avl_left; /* lrl: RH or RH-1 */
+ lrr = lr->avl_right; /* lrr: RH or RH-1 */
+ node->avl_left = lrr; /* lrr: RH or RH-1 */
+ node->avl_right = r; /* r: RH */
+ node->avl_height = rh + 1; /* node: RH+1 */
+ l->avl_left = ll; /* ll: RH */
+ l->avl_right = lrl; /* lrl: RH or RH-1 */
+ l->avl_height = rh + 1; /* l: RH+1 */
+ lr->avl_left = l; /* l: RH+1 */
+ lr->avl_right = node; /* node: RH+1 */
+ lr->avl_height = rh + 2;
+ *nodep = lr;
+ }
+ } else if (rh > lh + 1) { /* r: LH+2 */
+ struct inet_peer *rr, *rl, *rlr, *rll;
+ int rlh;
+ rr = r->avl_right;
+ rl = r->avl_left;
+ rlh = node_height(rl);
+ if (rlh <= node_height(rr)) { /* rr: LH+1 */
+ node->avl_right = rl; /* rl: LH or LH+1 */
+ node->avl_left = l; /* l: LH */
+ node->avl_height = rlh + 1; /* LH+1 or LH+2 */
+ r->avl_right = rr; /* rr: LH+1 */
+ r->avl_left = node; /* node: LH+1 or LH+2 */
+ r->avl_height = node->avl_height + 1;
+ *nodep = r;
+ } else { /* rr: RH, rl: RH+1 */
+ rlr = rl->avl_right; /* rlr: LH or LH-1 */
+ rll = rl->avl_left; /* rll: LH or LH-1 */
+ node->avl_right = rll; /* rll: LH or LH-1 */
+ node->avl_left = l; /* l: LH */
+ node->avl_height = lh + 1; /* node: LH+1 */
+ r->avl_right = rr; /* rr: LH */
+ r->avl_left = rlr; /* rlr: LH or LH-1 */
+ r->avl_height = lh + 1; /* r: LH+1 */
+ rl->avl_right = r; /* r: LH+1 */
+ rl->avl_left = node; /* node: LH+1 */
+ rl->avl_height = lh + 2;
+ *nodep = rl;
+ }
+ } else {
+ node->avl_height = (lh > rh ? lh : rh) + 1;
+ }
+ }
+}
+
+/* Called with local BH disabled and the pool write lock held. */
+#define link_to_pool(n) \
+do { \
+ n->avl_height = 1; \
+ n->avl_left = peer_avl_empty; \
+ n->avl_right = peer_avl_empty; \
+ **--stackptr = n; \
+ peer_avl_rebalance(stack, stackptr); \
+} while(0)
+
+/* May be called with local BH enabled. */
+static void unlink_from_pool(struct inet_peer *p)
+{
+ int do_free;
+
+ do_free = 0;
+
+ write_lock_bh(&peer_pool_lock);
+ /* Check the reference counter. It was artificially incremented by 1
+ * in cleanup() function to prevent sudden disappearing. If the
+ * reference count is still 1 then the node is referenced only as `p'
+ * here and from the pool. So under the exclusive pool lock it's safe
+ * to remove the node and free it later. */
+ if (atomic_read(&p->refcnt) == 1) {
+ struct inet_peer **stack[PEER_MAXDEPTH];
+ struct inet_peer ***stackptr, ***delp;
+ if (lookup(p->v4daddr) != p)
+ BUG();
+ delp = stackptr - 1; /* *delp[0] == p */
+ if (p->avl_left == peer_avl_empty) {
+ *delp[0] = p->avl_right;
+ --stackptr;
+ } else {
+ /* look for a node to insert instead of p */
+ struct inet_peer *t;
+ t = lookup_rightempty(p);
+ if (*stackptr[-1] != t)
+ BUG();
+ **--stackptr = t->avl_left;
+ /* t is removed, t->v4daddr > x->v4daddr for any
+ * x in p->avl_left subtree.
+ * Put t in the old place of p. */
+ *delp[0] = t;
+ t->avl_left = p->avl_left;
+ t->avl_right = p->avl_right;
+ t->avl_height = p->avl_height;
+ if (delp[1] != &p->avl_left)
+ BUG();
+ delp[1] = &t->avl_left; /* was &p->avl_left */
+ }
+ peer_avl_rebalance(stack, stackptr);
+ peer_total--;
+ do_free = 1;
+ }
+ write_unlock_bh(&peer_pool_lock);
+
+ if (do_free)
+ kmem_cache_free(peer_cachep, p);
+ else
+ /* The node is used again. Decrease the reference counter
+ * back. The loop "cleanup -> unlink_from_unused
+ * -> unlink_from_pool -> putpeer -> link_to_unused
+ * -> cleanup (for the same node)"
+ * doesn't really exist because the entry will have a
+ * recent deletion time and will not be cleaned again soon. */
+ inet_putpeer(p);
+}
+
+/* May be called with local BH enabled. */
+static int cleanup_once(unsigned long ttl)
+{
+ struct inet_peer *p;
+
+ /* Remove the first entry from the list of unused nodes. */
+ spin_lock_bh(&inet_peer_unused_lock);
+ p = inet_peer_unused_head;
+ if (p != NULL) {
+ if (time_after(p->dtime + ttl, jiffies)) {
+ /* Do not prune fresh entries. */
+ spin_unlock_bh(&inet_peer_unused_lock);
+ return -1;
+ }
+ inet_peer_unused_head = p->unused_next;
+ if (p->unused_next != NULL)
+ p->unused_next->unused_prevp = p->unused_prevp;
+ else
+ inet_peer_unused_tailp = p->unused_prevp;
+ p->unused_prevp = NULL; /* mark as not on the list */
+ /* Grab an extra reference to prevent node disappearing
+ * before unlink_from_pool() call. */
+ atomic_inc(&p->refcnt);
+ }
+ spin_unlock_bh(&inet_peer_unused_lock);
+
+ if (p == NULL)
+ /* It means that the total number of USED entries has
+ * grown over inet_peer_threshold. It shouldn't really
+ * happen because of entry limits in route cache. */
+ return -1;
+
+ unlink_from_pool(p);
+ return 0;
+}
+
+/* Called with or without local BH being disabled. */
+struct inet_peer *inet_getpeer(__u32 daddr, int create)
+{
+ struct inet_peer *p, *n;
+ struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+
+ /* Look up for the address quickly. */
+ read_lock_bh(&peer_pool_lock);
+ p = lookup(daddr);
+ if (p != peer_avl_empty)
+ atomic_inc(&p->refcnt);
+ read_unlock_bh(&peer_pool_lock);
+
+ if (p != peer_avl_empty) {
+ /* The existing node has been found. */
+ /* Remove the entry from unused list if it was there. */
+ unlink_from_unused(p);
+ return p;
+ }
+
+ if (!create)
+ return NULL;
+
+ /* Allocate the space outside the locked region. */
+ n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+ if (n == NULL)
+ return NULL;
+ n->v4daddr = daddr;
+ atomic_set(&n->refcnt, 1);
+ n->ip_id_count = secure_ip_id(daddr);
+ n->tcp_ts_stamp = 0;
+
+ write_lock_bh(&peer_pool_lock);
+ /* Check if an entry has suddenly appeared. */
+ p = lookup(daddr);
+ if (p != peer_avl_empty)
+ goto out_free;
+
+ /* Link the node. */
+ link_to_pool(n);
+ n->unused_prevp = NULL; /* not on the list */
+ peer_total++;
+ write_unlock_bh(&peer_pool_lock);
+
+ if (peer_total >= inet_peer_threshold)
+ /* Remove one less-recently-used entry. */
+ cleanup_once(0);
+
+ return n;
+
+out_free:
+ /* The appropriate node is already in the pool. */
+ atomic_inc(&p->refcnt);
+ write_unlock_bh(&peer_pool_lock);
+ /* Remove the entry from unused list if it was there. */
+ unlink_from_unused(p);
 + /* Free the preallocated node. */
+ kmem_cache_free(peer_cachep, n);
+ return p;
+}
+
+/* Called with local BH disabled. */
+static void peer_check_expire(unsigned long dummy)
+{
+ int i;
+ int ttl;
+
+ if (peer_total >= inet_peer_threshold)
+ ttl = inet_peer_minttl;
+ else
+ ttl = inet_peer_maxttl
+ - (inet_peer_maxttl - inet_peer_minttl) / HZ *
+ peer_total / inet_peer_threshold * HZ;
+ for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+
+ /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
+ * interval depending on the total number of entries (more entries,
+ * less interval). */
+ peer_periodic_timer.expires = jiffies
+ + inet_peer_gc_maxtime
+ - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
+ peer_total / inet_peer_threshold * HZ;
+ add_timer(&peer_periodic_timer);
+}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0a5402030..b00584e59 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -815,7 +815,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, &rt->u.dst);
ip_send_check(iph);
stats->tx_bytes += skb->len;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 159cacb1b..f89109f15 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -5,7 +5,7 @@
*
* The Internet Protocol (IP) output module.
*
- * Version: $Id: ip_output.c,v 1.72 1999/09/07 02:31:15 davem Exp $
+ * Version: $Id: ip_output.c,v 1.76 2000/01/06 00:41:57 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -71,6 +71,7 @@
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
+#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mroute.h>
@@ -82,8 +83,6 @@
int sysctl_ip_dynaddr = 0;
-int ip_id_count = 0;
-
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
@@ -154,7 +153,7 @@ void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
{
struct rtable *rt = (struct rtable *)skb->dst;
struct iphdr *iph;
-
+
/* Build the IP header. */
if (opt)
iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
@@ -172,7 +171,7 @@ void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->saddr = rt->rt_src;
iph->protocol = sk->protocol;
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, &rt->u.dst);
skb->nh.iph = iph;
if (opt && opt->optlen) {
@@ -182,7 +181,7 @@ void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
ip_send_check(iph);
/* Send it out. */
- NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, NULL,
+ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
output_maybe_reroute);
}
@@ -356,6 +355,8 @@ static inline int ip_queue_xmit2(struct sk_buff *skb)
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off |= __constant_htons(IP_DF);
+ ip_select_ident(iph, &rt->u.dst);
+
/* Add an IP checksum. */
ip_send_check(iph);
@@ -375,6 +376,7 @@ fragment:
kfree_skb(skb);
return -EMSGSIZE;
}
+ ip_select_ident(iph, &rt->u.dst);
return ip_fragment(skb, skb->dst->output);
}
@@ -429,7 +431,6 @@ int ip_queue_xmit(struct sk_buff *skb)
}
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
ip_queue_xmit2);
@@ -475,7 +476,7 @@ static int ip_build_xmit_slow(struct sock *sk,
int err;
int offset, mf;
int mtu;
- unsigned short id;
+ u16 id = 0;
int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
int nfrags=0;
@@ -485,7 +486,7 @@ static int ip_build_xmit_slow(struct sock *sk,
mtu = rt->u.dst.pmtu;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
-
+
length -= sizeof(struct iphdr);
if (opt) {
@@ -493,12 +494,12 @@ static int ip_build_xmit_slow(struct sock *sk,
maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
} else {
fragheaderlen = sizeof(struct iphdr);
-
+
/*
* Fragheaderlen is the size of 'overhead' on each buffer. Now work
* out the size of the frames to send.
*/
-
+
maxfraglen = ((mtu-sizeof(struct iphdr)) & ~7) + fragheaderlen;
}
@@ -510,15 +511,15 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Start at the end of the frame by handling the remainder.
*/
-
+
offset = length - (length % (maxfraglen - fragheaderlen));
-
+
/*
* Amount of memory to allocate for final fragment.
*/
-
+
fraglen = length - offset + fragheaderlen;
-
+
if (length-offset==0) {
fraglen = maxfraglen;
offset -= maxfraglen-fragheaderlen;
@@ -527,14 +528,14 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* The last fragment will not have MF (more fragments) set.
*/
-
+
mf = 0;
/*
* Don't fragment packets for path mtu discovery.
*/
-
- if (offset > 0 && df) {
+
+ if (offset > 0 && sk->protinfo.af_inet.pmtudisc==IP_PMTUDISC_DO) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
return -EMSGSIZE;
}
@@ -542,15 +543,9 @@ static int ip_build_xmit_slow(struct sock *sk,
goto out;
/*
- * Get an identifier
- */
-
- id = htons(ip_id_count++);
-
- /*
* Begin outputting the bytes.
*/
-
+
do {
char *data;
struct sk_buff * skb;
@@ -566,7 +561,7 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Fill in the control structures
*/
-
+
skb->priority = sk->priority;
skb->dst = dst_clone(&rt->u.dst);
skb_reserve(skb, hh_len);
@@ -574,14 +569,14 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Find where to start putting bytes.
*/
-
+
data = skb_put(skb, fraglen);
skb->nh.iph = (struct iphdr *)data;
/*
* Only write IP header onto non-raw packets
*/
-
+
{
struct iphdr *iph = (struct iphdr *)data;
@@ -594,9 +589,23 @@ static int ip_build_xmit_slow(struct sock *sk,
}
iph->tos = sk->protinfo.af_inet.tos;
iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
+ iph->frag_off = htons(offset>>3)|mf|df;
iph->id = id;
- iph->frag_off = htons(offset>>3);
- iph->frag_off |= mf|df;
+ if (!mf) {
+ if (offset || !df) {
+ /* Select an unpredictable ident only
+ * for packets without DF or having
+ * been fragmented.
+ */
+ __ip_select_ident(iph, &rt->u.dst);
+ id = iph->id;
+ }
+
+ /*
+ * Any further fragments will have MF set.
+ */
+ mf = htons(IP_MF);
+ }
if (rt->rt_type == RTN_MULTICAST)
iph->ttl = sk->protinfo.af_inet.mc_ttl;
else
@@ -607,14 +616,8 @@ static int ip_build_xmit_slow(struct sock *sk,
iph->daddr = rt->rt_dst;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
data += iph->ihl*4;
-
- /*
- * Any further fragments will have MF set.
- */
-
- mf = htons(IP_MF);
}
-
+
/*
* User data callback
*/
@@ -712,20 +715,20 @@ int ip_build_xmit(struct sock *sk,
goto error;
skb_reserve(skb, hh_len);
}
-
+
skb->priority = sk->priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
-
+
if(!sk->protinfo.af_inet.hdrincl) {
iph->version=4;
iph->ihl=5;
iph->tos=sk->protinfo.af_inet.tos;
iph->tot_len = htons(length);
- iph->id=htons(ip_id_count++);
iph->frag_off = df;
iph->ttl=sk->protinfo.af_inet.mc_ttl;
+ ip_select_ident(iph, &rt->u.dst);
if (rt->rt_type != RTN_MULTICAST)
iph->ttl=sk->protinfo.af_inet.ttl;
iph->protocol=sk->protocol;
@@ -757,8 +760,6 @@ error:
ip_statistics.IpOutDiscards++;
return err;
}
-
-
/*
* This IP datagram is too large to be sent in one piece. Break it up into
@@ -850,6 +851,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
skb2->dst = dst_clone(skb->dst);
+ skb2->dev = skb->dev;
/*
* Copy the packet header into the new buffer.
@@ -904,7 +906,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
kfree_skb(skb);
ip_statistics.IpFragOKs++;
return err;
-
+
fail:
kfree_skb(skb);
ip_statistics.IpFragFails++;
@@ -1005,7 +1007,6 @@ static struct packet_type ip_packet_type =
NULL,
};
-
/*
* IP registers the packet type and then calls the subprotocol initialisers
*/
@@ -1015,9 +1016,9 @@ void __init ip_init(void)
dev_add_pack(&ip_packet_type);
ip_rt_init();
+ inet_initpeers();
#ifdef CONFIG_IP_MULTICAST
proc_net_create("igmp", 0, ip_mc_procinfo);
#endif
}
-
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 0a5ae3cfe..5518ec1cb 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,7 +1,7 @@
/*
* Linux NET3: IP/IP protocol decoder.
*
- * Version: $Id: ipip.c,v 1.29 1999/08/31 07:03:42 davem Exp $
+ * Version: $Id: ipip.c,v 1.30 2000/01/06 00:41:55 davem Exp $
*
* Authors:
* Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
@@ -616,7 +616,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = old_iph->ttl;
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, &rt->u.dst);
ip_send_check(iph);
stats->tx_bytes += skb->len;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86393bfda..cba4995cc 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.46 1999/08/31 07:03:44 davem Exp $
+ * Version: $Id: ipmr.c,v 1.49 2000/01/06 00:41:56 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
@@ -1095,7 +1095,7 @@ static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, skb->dst);
ip_send_check(iph);
skb->h.ipiph = skb->nh.iph;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index e2068a3ab..520f6ef81 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,7 +7,7 @@
* PROC file system. It is mainly used for debugging and
* statistics.
*
- * Version: $Id: proc.c,v 1.36 1999/07/02 11:26:34 davem Exp $
+ * Version: $Id: proc.c,v 1.37 1999/12/15 22:39:19 davem Exp $
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index d44552176..78e1455c1 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -5,7 +5,7 @@
*
* RAW - implementation of IP "raw" sockets.
*
- * Version: $Id: raw.c,v 1.43 1999/08/20 11:05:57 davem Exp $
+ * Version: $Id: raw.c,v 1.45 2000/01/06 00:41:58 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -257,6 +257,7 @@ struct rawfakehdr
{
struct iovec *iov;
u32 saddr;
+ struct dst_entry *dst;
};
/*
@@ -296,7 +297,7 @@ static int raw_getrawfrag(const void *p, char *to, unsigned int offset, unsigned
* ip_build_xmit clean (well less messy).
*/
if (!iph->id)
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, rfh->dst);
iph->check=ip_fast_csum((unsigned char *)iph, iph->ihl);
}
return 0;
@@ -416,6 +417,7 @@ back_from_confirm:
rfh.iov = msg->msg_iov;
rfh.saddr = rt->rt_src;
+ rfh.dst = &rt->u.dst;
if (!ipc.addr)
ipc.addr = rt->rt_dst;
err=ip_build_xmit(sk, sk->protinfo.af_inet.hdrincl ? raw_getrawfrag : raw_getfrag,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 33b27036f..5acfa8953 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,7 +5,7 @@
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.72 1999/08/30 10:17:12 davem Exp $
+ * Version: $Id: route.c,v 1.77 2000/01/06 00:41:59 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -83,9 +83,11 @@
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/random.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
@@ -101,8 +103,7 @@
int ip_rt_min_delay = 2*HZ;
int ip_rt_max_delay = 10*HZ;
-int ip_rt_gc_thresh = RT_HASH_DIVISOR;
-int ip_rt_max_size = RT_HASH_DIVISOR*16;
+int ip_rt_max_size;
int ip_rt_gc_timeout = RT_GC_TIMEOUT;
int ip_rt_gc_interval = 60*HZ;
int ip_rt_gc_min_interval = 5*HZ;
@@ -120,12 +121,8 @@ static unsigned long rt_deadline = 0;
#define RTprint(a...) printk(KERN_DEBUG a)
-static void rt_run_flush(unsigned long dummy);
-
-static struct timer_list rt_flush_timer =
- { NULL, NULL, 0, 0L, rt_run_flush };
-static struct timer_list rt_periodic_timer =
- { NULL, NULL, 0, 0L, NULL };
+static struct timer_list rt_flush_timer;
+static struct timer_list rt_periodic_timer;
/*
* Interface to generic destination cache.
@@ -134,6 +131,7 @@ static struct timer_list rt_periodic_timer =
static struct dst_entry * ipv4_dst_check(struct dst_entry * dst, u32);
static struct dst_entry * ipv4_dst_reroute(struct dst_entry * dst,
struct sk_buff *);
+static void ipv4_dst_destroy(struct dst_entry * dst);
static struct dst_entry * ipv4_negative_advice(struct dst_entry *);
static void ipv4_link_failure(struct sk_buff *skb);
static int rt_garbage_collect(void);
@@ -143,12 +141,12 @@ struct dst_ops ipv4_dst_ops =
{
AF_INET,
__constant_htons(ETH_P_IP),
- RT_HASH_DIVISOR,
+ 0,
rt_garbage_collect,
ipv4_dst_check,
ipv4_dst_reroute,
- NULL,
+ ipv4_dst_destroy,
ipv4_negative_advice,
ipv4_link_failure,
sizeof(struct rtable),
@@ -180,7 +178,7 @@ __u8 ip_tos2prio[16] = {
/* The locking scheme is rather straight forward:
*
- * 1) A BH protected rwlock protects the central route hash.
+ * 1) A BH protected rwlocks protect buckets of the central route hash.
* 2) Only writers remove entries, and they hold the lock
* as they look at rtable reference counts.
* 3) Only readers acquire references to rtable entries,
@@ -188,17 +186,23 @@ __u8 ip_tos2prio[16] = {
* lock held.
*/
-static struct rtable *rt_hash_table[RT_HASH_DIVISOR];
-static rwlock_t rt_hash_lock = RW_LOCK_UNLOCKED;
+struct rt_hash_bucket {
+ struct rtable *chain;
+ rwlock_t lock;
+} __attribute__((__aligned__(8)));
+
+static struct rt_hash_bucket *rt_hash_table;
+static unsigned rt_hash_mask;
+static int rt_hash_log;
static int rt_intern_hash(unsigned hash, struct rtable * rth, struct rtable ** res);
static __inline__ unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
{
unsigned hash = ((daddr&0xF0F0F0F0)>>4)|((daddr&0x0F0F0F0F)<<4);
- hash = hash^saddr^tos;
- hash = hash^(hash>>16);
- return (hash^(hash>>8)) & 0xFF;
+ hash ^= saddr^tos;
+ hash ^= (hash>>16);
+ return (hash^(hash>>8)) & rt_hash_mask;
}
#ifndef CONFIG_PROC_FS
@@ -219,11 +223,9 @@ static int rt_cache_get_info(char *buffer, char **start, off_t offset, int lengt
len = 128;
}
-
- read_lock_bh(&rt_hash_lock);
-
- for (i = 0; i<RT_HASH_DIVISOR; i++) {
- for (r = rt_hash_table[i]; r; r = r->u.rt_next) {
+ for (i = rt_hash_mask; i>=0; i--) {
+ read_lock_bh(&rt_hash_table[i].lock);
+ for (r = rt_hash_table[i].chain; r; r = r->u.rt_next) {
/*
* Spin through entries until we are ready
*/
@@ -250,14 +252,15 @@ static int rt_cache_get_info(char *buffer, char **start, off_t offset, int lengt
r->rt_spec_dst);
sprintf(buffer+len,"%-127s\n",temp);
len += 128;
- if (pos >= offset+length)
+ if (pos >= offset+length) {
+ read_unlock_bh(&rt_hash_table[i].lock);
goto done;
+ }
}
+ read_unlock_bh(&rt_hash_table[i].lock);
}
done:
- read_unlock_bh(&rt_hash_lock);
-
*start = buffer+len-(pos-offset);
len = pos-offset;
if (len>length)
@@ -312,21 +315,23 @@ static __inline__ int rt_may_expire(struct rtable *rth, int tmo1, int tmo2)
/* This runs via a timer and thus is always in BH context. */
static void rt_check_expire(unsigned long dummy)
{
- int i;
+ int i, t;
static int rover;
struct rtable *rth, **rthp;
unsigned long now = jiffies;
- for (i=0; i<RT_HASH_DIVISOR/5; i++) {
+ i = rover;
+
+ for (t=(ip_rt_gc_interval<<rt_hash_log); t>=0; t -= ip_rt_gc_timeout) {
unsigned tmo = ip_rt_gc_timeout;
- rover = (rover + 1) & (RT_HASH_DIVISOR-1);
- rthp = &rt_hash_table[rover];
+ i = (i + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[i].chain;
- write_lock(&rt_hash_lock);
+ write_lock(&rt_hash_table[i].lock);
while ((rth = *rthp) != NULL) {
if (rth->u.dst.expires) {
- /* Entrie is expired even if it is in use */
+ /* Entry is expired even if it is in use */
if ((long)(now - rth->u.dst.expires) <= 0) {
tmo >>= 1;
rthp = &rth->u.rt_next;
@@ -344,14 +349,14 @@ static void rt_check_expire(unsigned long dummy)
*rthp = rth->u.rt_next;
rt_free(rth);
}
- write_unlock(&rt_hash_lock);
+ write_unlock(&rt_hash_table[i].lock);
/* Fallback loop breaker. */
if ((jiffies - now) > 0)
break;
}
- rt_periodic_timer.expires = now + ip_rt_gc_interval;
- add_timer(&rt_periodic_timer);
+ rover = i;
+ mod_timer(&rt_periodic_timer, now + ip_rt_gc_interval);
}
/* This can run from both BH and non-BH contexts, the latter
@@ -364,11 +369,12 @@ static void rt_run_flush(unsigned long dummy)
rt_deadline = 0;
- for (i=0; i<RT_HASH_DIVISOR; i++) {
- write_lock_bh(&rt_hash_lock);
- rth = rt_hash_table[i];
- rt_hash_table[i] = NULL;
- write_unlock_bh(&rt_hash_lock);
+ for (i=rt_hash_mask; i>=0; i--) {
+ write_lock_bh(&rt_hash_table[i].lock);
+ rth = rt_hash_table[i].chain;
+ if (rth)
+ rt_hash_table[i].chain = NULL;
+ write_unlock_bh(&rt_hash_table[i].lock);
for (; rth; rth=next) {
next = rth->u.rt_next;
@@ -415,8 +421,7 @@ void rt_cache_flush(int delay)
if (rt_deadline == 0)
rt_deadline = now + ip_rt_max_delay;
- rt_flush_timer.expires = now + delay;
- add_timer(&rt_flush_timer);
+ mod_timer(&rt_flush_timer, now+delay);
spin_unlock_bh(&rt_flush_lock);
}
@@ -452,20 +457,20 @@ static int rt_garbage_collect(void)
return 0;
/* Calculate number of entries, which we want to expire now. */
- goal = atomic_read(&ipv4_dst_ops.entries) - RT_HASH_DIVISOR*ip_rt_gc_elasticity;
+ goal = atomic_read(&ipv4_dst_ops.entries) - (ip_rt_gc_elasticity<<rt_hash_log);
if (goal <= 0) {
if (equilibrium < ipv4_dst_ops.gc_thresh)
equilibrium = ipv4_dst_ops.gc_thresh;
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
if (goal > 0) {
- equilibrium += min(goal/2, RT_HASH_DIVISOR);
+ equilibrium += min(goal/2, rt_hash_mask+1);
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
}
} else {
/* We are in dangerous area. Try to reduce cache really
* aggressively.
*/
- goal = max(goal/2, RT_HASH_DIVISOR);
+ goal = max(goal/2, rt_hash_mask+1);
equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
}
@@ -480,15 +485,12 @@ static int rt_garbage_collect(void)
do {
int i, k;
- /* The write lock is held during the entire hash
- * traversal to ensure consistent state of the rover.
- */
- write_lock_bh(&rt_hash_lock);
- for (i=0, k=rover; i<RT_HASH_DIVISOR; i++) {
+ for (i=rt_hash_mask, k=rover; i>=0; i--) {
unsigned tmo = expire;
- k = (k + 1) & (RT_HASH_DIVISOR-1);
- rthp = &rt_hash_table[k];
+ k = (k + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[k].chain;
+ write_lock_bh(&rt_hash_table[k].lock);
while ((rth = *rthp) != NULL) {
if (!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
@@ -499,11 +501,11 @@ static int rt_garbage_collect(void)
rt_free(rth);
goal--;
}
+ write_unlock_bh(&rt_hash_table[k].lock);
if (goal <= 0)
break;
}
rover = k;
- write_unlock_bh(&rt_hash_lock);
if (goal <= 0)
goto work_done;
@@ -553,20 +555,20 @@ static int rt_intern_hash(unsigned hash, struct rtable * rt, struct rtable ** rp
int attempts = !in_interrupt();
restart:
- rthp = &rt_hash_table[hash];
+ rthp = &rt_hash_table[hash].chain;
- write_lock_bh(&rt_hash_lock);
+ write_lock_bh(&rt_hash_table[hash].lock);
while ((rth = *rthp) != NULL) {
if (memcmp(&rth->key, &rt->key, sizeof(rt->key)) == 0) {
/* Put it first */
*rthp = rth->u.rt_next;
- rth->u.rt_next = rt_hash_table[hash];
- rt_hash_table[hash] = rth;
+ rth->u.rt_next = rt_hash_table[hash].chain;
+ rt_hash_table[hash].chain = rth;
rth->u.dst.__use++;
dst_hold(&rth->u.dst);
rth->u.dst.lastuse = now;
- write_unlock_bh(&rt_hash_lock);
+ write_unlock_bh(&rt_hash_table[hash].lock);
rt_drop(rt);
*rp = rth;
@@ -581,7 +583,7 @@ restart:
*/
if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
if (!arp_bind_neighbour(&rt->u.dst)) {
- write_unlock_bh(&rt_hash_lock);
+ write_unlock_bh(&rt_hash_table[hash].lock);
/* Neighbour tables are full and nothing
can be released. Try to shrink route cache,
@@ -610,7 +612,7 @@ restart:
}
}
- rt->u.rt_next = rt_hash_table[hash];
+ rt->u.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
if (rt->u.rt_next) {
struct rtable * trt;
@@ -620,26 +622,85 @@ restart:
printk("\n");
}
#endif
- rt_hash_table[hash] = rt;
- write_unlock_bh(&rt_hash_lock);
+ rt_hash_table[hash].chain = rt;
+ write_unlock_bh(&rt_hash_table[hash].lock);
*rp = rt;
return 0;
}
+void rt_bind_peer(struct rtable *rt, int create)
+{
+ static spinlock_t rt_peer_lock = SPIN_LOCK_UNLOCKED;
+ struct inet_peer *peer;
+
+ peer = inet_getpeer(rt->rt_dst, create);
+
+ spin_lock_bh(&rt_peer_lock);
+ if (rt->peer == NULL) {
+ rt->peer = peer;
+ peer = NULL;
+ }
+ spin_unlock_bh(&rt_peer_lock);
+ if (peer)
+ inet_putpeer(peer);
+}
+
+/*
+ * Peer allocation may fail only in serious out-of-memory conditions. However
+ * we still can generate some output.
+ * Random ID selection looks a bit dangerous because we have no chances to
+ * select ID being unique in a reasonable period of time.
+ * But broken packet identifier may be better than no packet at all.
+ */
+static void ip_select_fb_ident(struct iphdr *iph)
+{
+ static spinlock_t ip_fb_id_lock = SPIN_LOCK_UNLOCKED;
+ static u32 ip_fallback_id;
+ u32 salt;
+
+ spin_lock_bh(&ip_fb_id_lock);
+ salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
+ iph->id = salt & 0xFFFF;
+ ip_fallback_id = salt;
+ spin_unlock_bh(&ip_fb_id_lock);
+}
+
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt) {
+ if (rt->peer == NULL)
+ rt_bind_peer(rt, 1);
+
+ /* If peer is attached to destination, it is never detached,
+ so that we need not to grab a lock to dereference it.
+ */
+ if (rt->peer) {
+ iph->id = inet_getid(rt->peer);
+ return;
+ }
+ } else {
+ printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", NET_CALLER(iph));
+ }
+
+ ip_select_fb_ident(iph);
+}
+
static void rt_del(unsigned hash, struct rtable *rt)
{
struct rtable **rthp;
- write_lock_bh(&rt_hash_lock);
+ write_lock_bh(&rt_hash_table[hash].lock);
ip_rt_put(rt);
- for (rthp = &rt_hash_table[hash]; *rthp; rthp = &(*rthp)->u.rt_next) {
+ for (rthp = &rt_hash_table[hash].chain; *rthp; rthp = &(*rthp)->u.rt_next) {
if (*rthp == rt) {
*rthp = rt->u.rt_next;
rt_free(rt);
break;
}
}
- write_unlock_bh(&rt_hash_lock);
+ write_unlock_bh(&rt_hash_table[hash].lock);
}
void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
@@ -674,9 +735,9 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
for (k=0; k<2; k++) {
unsigned hash = rt_hash_code(daddr, skeys[i]^(ikeys[k]<<5), tos);
- rthp=&rt_hash_table[hash];
+ rthp=&rt_hash_table[hash].chain;
- read_lock(&rt_hash_lock);
+ read_lock(&rt_hash_table[hash].lock);
while ( (rth = *rthp) != NULL) {
struct rtable *rt;
@@ -697,7 +758,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
break;
dst_clone(&rth->u.dst);
- read_unlock(&rt_hash_lock);
+ read_unlock(&rt_hash_table[hash].lock);
rt = dst_alloc(&ipv4_dst_ops);
if (rt == NULL) {
@@ -727,6 +788,9 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
/* Redirect received -> path was valid */
dst_confirm(&rth->u.dst);
+ if (rt->peer)
+ atomic_inc(&rt->peer->refcnt);
+
if (!arp_bind_neighbour(&rt->u.dst) ||
!(rt->u.dst.neighbour->nud_state&NUD_VALID)) {
if (rt->u.dst.neighbour)
@@ -736,12 +800,12 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
goto do_next;
}
- rt_del(hash, rt);
+ rt_del(hash, rth);
if (!rt_intern_hash(hash, rt, &rt))
ip_rt_put(rt);
goto do_next;
}
- read_unlock(&rt_hash_lock);
+ read_unlock(&rt_hash_table[hash].lock);
do_next:
;
}
@@ -909,8 +973,8 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
for (i=0; i<2; i++) {
unsigned hash = rt_hash_code(daddr, skeys[i], tos);
- read_lock(&rt_hash_lock);
- for (rth = rt_hash_table[hash]; rth; rth = rth->u.rt_next) {
+ read_lock(&rt_hash_table[hash].lock);
+ for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == skeys[i] &&
rth->rt_dst == daddr &&
@@ -943,7 +1007,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
}
}
}
- read_unlock(&rt_hash_lock);
+ read_unlock(&rt_hash_table[hash].lock);
}
return est_mtu ? : new_mtu;
}
@@ -973,6 +1037,17 @@ static struct dst_entry * ipv4_dst_reroute(struct dst_entry * dst,
return NULL;
}
+static void ipv4_dst_destroy(struct dst_entry * dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer = rt->peer;
+
+ if (peer) {
+ rt->peer = NULL;
+ inet_putpeer(peer);
+ }
+}
+
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
@@ -1474,8 +1549,8 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
tos &= IPTOS_TOS_MASK;
hash = rt_hash_code(daddr, saddr^(iif<<5), tos);
- read_lock_bh(&rt_hash_lock);
- for (rth=rt_hash_table[hash]; rth; rth=rth->u.rt_next) {
+ read_lock(&rt_hash_table[hash].lock);
+ for (rth=rt_hash_table[hash].chain; rth; rth=rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == saddr &&
rth->key.iif == iif &&
@@ -1489,12 +1564,12 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
- read_unlock_bh(&rt_hash_lock);
+ read_unlock(&rt_hash_table[hash].lock);
skb->dst = (struct dst_entry*)rth;
return 0;
}
}
- read_unlock_bh(&rt_hash_lock);
+ read_unlock(&rt_hash_table[hash].lock);
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
@@ -1809,8 +1884,8 @@ int ip_route_output(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int oif)
hash = rt_hash_code(daddr, saddr^(oif<<5), tos);
- read_lock_bh(&rt_hash_lock);
- for (rth=rt_hash_table[hash]; rth; rth=rth->u.rt_next) {
+ read_lock_bh(&rt_hash_table[hash].lock);
+ for (rth=rt_hash_table[hash].chain; rth; rth=rth->u.rt_next) {
if (rth->key.dst == daddr &&
rth->key.src == saddr &&
rth->key.iif == 0 &&
@@ -1821,12 +1896,12 @@ int ip_route_output(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int oif)
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
- read_unlock_bh(&rt_hash_lock);
+ read_unlock_bh(&rt_hash_table[hash].lock);
*rp = rth;
return 0;
}
}
- read_unlock_bh(&rt_hash_lock);
+ read_unlock_bh(&rt_hash_table[hash].lock);
return ip_route_output_slow(rp, daddr, saddr, tos, oif);
}
@@ -1885,6 +1960,16 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int no
else
ci.rta_expires = 0;
ci.rta_error = rt->u.dst.error;
+ ci.rta_id = 0;
+ ci.rta_ts = 0;
+ ci.rta_tsage = 0;
+ if (rt->peer) {
+ ci.rta_id = rt->peer->ip_id_count;
+ if (rt->peer->tcp_ts_stamp) {
+ ci.rta_ts = rt->peer->tcp_ts;
+ ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+ }
+ }
#ifdef CONFIG_IP_MROUTE
eptr = (struct rtattr*)skb->tail;
#endif
@@ -1957,7 +2042,9 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
return -ENODEV;
skb->protocol = __constant_htons(ETH_P_IP);
skb->dev = dev;
+ local_bh_disable();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
+ local_bh_enable();
rt = (struct rtable*)skb->dst;
if (!err && rt->u.dst.error)
err = -rt->u.dst.error;
@@ -1999,24 +2086,24 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[0];
s_idx = idx = cb->args[1];
- for (h=0; h < RT_HASH_DIVISOR; h++) {
+ for (h=0; h <= rt_hash_mask; h++) {
if (h < s_h) continue;
if (h > s_h)
s_idx = 0;
- read_lock_bh(&rt_hash_lock);
- for (rt = rt_hash_table[h], idx = 0; rt; rt = rt->u.rt_next, idx++) {
+ read_lock_bh(&rt_hash_table[h].lock);
+ for (rt = rt_hash_table[h].chain, idx = 0; rt; rt = rt->u.rt_next, idx++) {
if (idx < s_idx)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
dst_release(xchg(&skb->dst, NULL));
- read_unlock_bh(&rt_hash_lock);
+ read_unlock_bh(&rt_hash_table[h].lock);
goto done;
}
dst_release(xchg(&skb->dst, NULL));
}
- read_unlock_bh(&rt_hash_lock);
+ read_unlock_bh(&rt_hash_table[h].lock);
}
done:
@@ -2145,17 +2232,56 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
#endif
#endif
-
void __init ip_rt_init(void)
{
+ int i, order, goal;
+
ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
sizeof(struct rtable),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
-
+
+ if (!ipv4_dst_ops.kmem_cachep)
+ panic("IP: failed to allocate ip_dst_cache\n");
+
+ goal = num_physpages >> (26 - PAGE_SHIFT);
+
+ for (order = 0; (1UL << order) < goal; order++)
+ /* NOTHING */;
+
+ do {
+ rt_hash_mask = (1UL << order) * PAGE_SIZE /
+ sizeof(struct rt_hash_bucket);
+ while (rt_hash_mask & (rt_hash_mask-1))
+ rt_hash_mask--;
+ rt_hash_table = (struct rt_hash_bucket *)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (rt_hash_table == NULL && --order > 0);
+
+ if (!rt_hash_table)
+ panic("Failed to allocate IP route cache hash table\n");
+
+ printk("IP: routing cache hash table of %u buckets, %dKbytes\n",
+ rt_hash_mask, (rt_hash_mask*sizeof(struct rt_hash_bucket))/1024);
+
+ for (rt_hash_log=0; (1<<rt_hash_log) != rt_hash_mask; rt_hash_log++)
+ /* NOTHING */;
+
+ rt_hash_mask--;
+ for (i = 0; i <= rt_hash_mask; i++) {
+ rt_hash_table[i].lock = RW_LOCK_UNLOCKED;
+ rt_hash_table[i].chain = NULL;
+ }
+
+ ipv4_dst_ops.gc_thresh = (rt_hash_mask+1);
+ ip_rt_max_size = (rt_hash_mask+1)*16;
+
devinet_init();
ip_fib_init();
+
+ rt_flush_timer.function = rt_run_flush;
rt_periodic_timer.function = rt_check_expire;
+
/* All the timers, started at system startup tend
to synchronize. Perturb it a bit.
*/
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1ff1566af..1557974c5 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1,7 +1,7 @@
/*
* sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem.
*
- * $Id: sysctl_net_ipv4.c,v 1.40 1999/09/07 02:31:17 davem Exp $
+ * $Id: sysctl_net_ipv4.c,v 1.41 2000/01/06 00:42:03 davem Exp $
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS]
@@ -70,6 +70,13 @@ extern int sysctl_icmp_echoreply_time;
/* From igmp.c */
extern int sysctl_igmp_max_memberships;
+/* From inetpeer.c */
+extern int inet_peer_threshold;
+extern int inet_peer_minttl;
+extern int inet_peer_maxttl;
+extern int inet_peer_gc_mintime;
+extern int inet_peer_gc_maxtime;
+
int tcp_retr1_max = 255;
struct ipv4_config ipv4_config;
@@ -200,6 +207,20 @@ ctl_table ipv4_table[] = {
{NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships",
&sysctl_igmp_max_memberships, sizeof(int), 0644, NULL, &proc_dointvec},
#endif
+ {NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold",
+ &inet_peer_threshold, sizeof(int), 0644, NULL, &proc_dointvec},
+ {NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl",
+ &inet_peer_minttl, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies, &sysctl_jiffies},
+ {NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl",
+ &inet_peer_maxttl, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies, &sysctl_jiffies},
+ {NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime",
+ &inet_peer_gc_mintime, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies, &sysctl_jiffies},
+ {NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime",
+ &inet_peer_gc_maxtime, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies, &sysctl_jiffies},
{0}
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4135d4c89..a8654f0ae 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.151 1999/09/07 02:31:21 davem Exp $
+ * Version: $Id: tcp.c,v 1.152 1999/11/23 08:57:03 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 14fa4f80d..45e094b3e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.173 1999/09/07 02:31:27 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.176 1999/12/20 05:19:46 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8e2c56654..e1b6d70ab 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.189 1999/09/07 02:31:33 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.193 2000/01/06 00:42:01 davem Exp $
*
* IPv4 specific functions
*
@@ -528,7 +528,10 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i
read_lock(&tcp_lhash_lock);
sk = tcp_listening_hash[tcp_lhashfn(hnum)];
if (sk) {
- if (sk->num == hnum && sk->next == NULL)
+ if (sk->num == hnum &&
+ sk->next == NULL &&
+ (!sk->rcv_saddr || sk->rcv_saddr == daddr) &&
+ !sk->bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(sk, daddr, hnum, dif);
}
@@ -592,7 +595,8 @@ __inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
- return secure_tcp_sequence_number(sk->saddr, sk->daddr,
+ return secure_tcp_sequence_number(skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
skb->h.th->dest,
skb->h.th->source);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 518572142..27df3243d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,7 +5,7 @@
*
* The User Datagram Protocol (UDP).
*
- * Version: $Id: udp.c,v 1.74 1999/08/20 11:06:12 davem Exp $
+ * Version: $Id: udp.c,v 1.76 2000/01/05 21:27:51 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -665,10 +665,6 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return(0);
}
-#ifndef HAVE_CSUM_COPY_USER
-#undef CONFIG_UDP_DELAY_CSUM
-#endif
-
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
diff --git a/net/ipv6/Config.in b/net/ipv6/Config.in
index 6bf46e4e8..80498a9fe 100644
--- a/net/ipv6/Config.in
+++ b/net/ipv6/Config.in
@@ -3,7 +3,7 @@
#
bool ' IPv6: enable EUI-64 token format' CONFIG_IPV6_EUI64
if [ "$CONFIG_IPV6_EUI64" = "y" ]; then
- bool ' IPv6: disable provider based addresses' CONFIG_IPV6_NO_PB
+ bool ' IPv6: disable provider based addresses' CONFIG_IPV6_NO_PB
fi
if [ "$CONFIG_NETLINK" = "y" ]; then
if [ "$CONFIG_RTNETLINK" = "n" ]; then
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d7192096b..d82ef3846 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6,7 +6,7 @@
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: addrconf.c,v 1.53 1999/08/31 07:03:54 davem Exp $
+ * $Id: addrconf.c,v 1.55 1999/12/15 22:39:40 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 638c1e79d..68badee52 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/af_inet.c
*
- * $Id: af_inet6.c,v 1.47 1999/08/31 07:03:58 davem Exp $
+ * $Id: af_inet6.c,v 1.49 1999/12/15 22:39:43 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bfeff3dc9..2f1d8800e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_output.c,v 1.22 1999/08/20 11:06:21 davem Exp $
+ * $Id: ip6_output.c,v 1.23 2000/01/06 00:42:07 davem Exp $
*
* Based on linux/net/ipv4/ip_output.c
*
@@ -45,7 +45,17 @@
#include <net/rawv6.h>
#include <net/icmp.h>
-static u32 ipv6_fragmentation_id = 1;
+static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
+{
+ static u32 ipv6_fragmentation_id = 1;
+ static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED;
+
+ spin_lock_bh(&ip6_id_lock);
+ fhdr->identification = ipv6_fragmentation_id;
+ if (++ipv6_fragmentation_id == 0)
+ ipv6_fragmentation_id = 1;
+ spin_unlock_bh(&ip6_id_lock);
+}
int ip6_output(struct sk_buff *skb)
{
@@ -224,7 +234,7 @@ static __inline__ u8 * ipv6_build_fraghdr(struct sk_buff *skb, u8* prev_hdr, uns
fhdr->reserved = 0;
fhdr->frag_off = htons(offset);
- fhdr->identification = ipv6_fragmentation_id++;
+ ipv6_select_ident(skb, fhdr);
return &fhdr->nexthdr;
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1abc90346..f8be2fb92 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: mcast.c,v 1.26 1999/08/31 07:04:08 davem Exp $
+ * $Id: mcast.c,v 1.27 1999/12/09 00:52:49 davem Exp $
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 2e2603668..999ac75fe 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -7,7 +7,7 @@
* PROC file system. This is very similar to the IPv4 version,
* except it reports the sockets in the INET6 address family.
*
- * Version: $Id: proc.c,v 1.11 1999/07/02 11:26:45 davem Exp $
+ * Version: $Id: proc.c,v 1.12 1999/12/15 22:39:48 davem Exp $
*
* Authors: David S. Miller (davem@caip.rutgers.edu)
*
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d52c6a129..baa6611b4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.29 1999/08/20 11:06:26 davem Exp $
+ * $Id: raw.c,v 1.30 1999/12/15 22:39:51 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dddc12bc9..b39959f4f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: route.c,v 1.40 1999/08/31 07:04:13 davem Exp $
+ * $Id: route.c,v 1.43 2000/01/06 00:42:08 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -1544,6 +1544,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
ci.rta_used = rt->u.dst.__use;
ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
ci.rta_error = rt->u.dst.error;
+ ci.rta_id = 0;
+ ci.rta_ts = 0;
+ ci.rta_tsage = 0;
RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8691d5de7..818ad66ca 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -6,7 +6,7 @@
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: sit.c,v 1.34 1999/08/31 07:04:16 davem Exp $
+ * $Id: sit.c,v 1.35 2000/01/06 00:42:08 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -544,7 +544,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = iph6->hop_limit;
iph->tot_len = htons(skb->len);
- iph->id = htons(ip_id_count++);
+ ip_select_ident(iph, &rt->u.dst);
ip_send_check(iph);
stats->tx_bytes += skb->len;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8f7fbb7ba..6e71f5479 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.112 1999/08/31 07:04:19 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.115 2000/01/06 00:42:09 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
@@ -273,8 +273,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
}
}
}
- if (sk)
- sock_hold(sk);
+ if (result)
+ sock_hold(result);
read_unlock(&tcp_lhash_lock);
return result;
}
@@ -343,20 +343,17 @@ static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
- __u32 si;
- __u32 di;
-
if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
- si = skb->nh.ipv6h->saddr.s6_addr32[3];
- di = skb->nh.ipv6h->daddr.s6_addr32[3];
+ return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
+ skb->nh.ipv6h->saddr.s6_addr32,
+ skb->h.th->dest,
+ skb->h.th->source);
} else {
- si = skb->nh.iph->saddr;
- di = skb->nh.iph->daddr;
+ return secure_tcp_sequence_number(skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ skb->h.th->dest,
+ skb->h.th->source);
}
-
- return secure_tcp_sequence_number(di, si,
- skb->h.th->dest,
- skb->h.th->source);
}
static int tcp_v6_check_established(struct sock *sk)
@@ -622,9 +619,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
*/
if (!tp->write_seq)
- tp->write_seq = secure_tcp_sequence_number(np->saddr.s6_addr32[3],
- np->daddr.s6_addr32[3],
- sk->sport, sk->dport);
+ tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
+ np->daddr.s6_addr32,
+ sk->sport, sk->dport);
err = tcp_connect(sk, buff);
if (err == 0)
@@ -730,7 +727,6 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
if (dst == NULL) {
struct flowi fl;
- struct dst_entry *dst;
/* BUGGG_FUTURE: Again, it is not clear how
to handle rthdr case. Ignore this complexity
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6c2777b32..7bc3a3914 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,7 +7,7 @@
*
* Based on linux/ipv4/udp.c
*
- * $Id: udp.c,v 1.45 1999/08/20 11:06:32 davem Exp $
+ * $Id: udp.c,v 1.47 2000/01/05 21:27:54 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -341,10 +341,6 @@ static void udpv6_close(struct sock *sk, long timeout)
inet_sock_release(sk);
}
-#ifndef HAVE_CSUM_COPY_USER
-#undef CONFIG_UDP_DELAY_CSUM
-#endif
-
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
diff --git a/net/ipx/af_spx.c b/net/ipx/af_spx.c
index 60dbdc288..9f52dfe4e 100644
--- a/net/ipx/af_spx.c
+++ b/net/ipx/af_spx.c
@@ -6,7 +6,7 @@
* Revision Date: February 9, 1993
*
* Developers:
- * Jay Schulist <Jay.Schulist@spacs.k12.wi.us>
+ * Jay Schulist <jschlst@turbolinux.com>
* Jim Freeman <jfree@caldera.com>
*
* Changes:
diff --git a/net/irda/Config.in b/net/irda/Config.in
index 942182e35..8cf2b4a82 100644
--- a/net/irda/Config.in
+++ b/net/irda/Config.in
@@ -12,7 +12,7 @@ if [ "$CONFIG_NET" != "n" ]; then
comment 'IrDA protocols'
source net/irda/irlan/Config.in
source net/irda/ircomm/Config.in
-
+ bool ' Ultra (connectionless) protocol' CONFIG_IRDA_ULTRA
bool ' IrDA protocol options' CONFIG_IRDA_OPTIONS
if [ "$CONFIG_IRDA_OPTIONS" != "n" ]; then
comment ' IrDA options'
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 19bbae258..8bea0ce98 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1,28 +1,48 @@
/*********************************************************************
*
* Filename: af_irda.c
- * Version: 0.7
+ * Version: 0.9
* Description: IrDA sockets implementation
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 31 10:12:43 1998
- * Modified at: Sun Oct 31 19:32:37 1999
+ * Modified at: Sat Dec 25 21:10:23 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc.
*
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
+ * Copyright (c) 1999 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1999 Jean Tourrilhes <jeant@rockfort.hpl.hp.com>
+ * All Rights Reserved.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * Linux-IrDA now supports four different types of IrDA sockets:
+ *
+ * o SOCK_STREAM: TinyTP connections with SAR disabled. The
+ * max SDU size is 0 for conn. of this type
+ * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. TTP may
+ * fragment the messages, but will preserve
+ * the message boundaries
+ * o SOCK_DGRAM: IRDAPROTO_UNITDATA: TinyTP connections with Unitdata
+ * (unreliable) transfers
+ * IRDAPROTO_ULTRA: Connectionless and unreliable data
*
********************************************************************/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
@@ -39,6 +59,7 @@
#include <net/irda/irda.h>
#include <net/irda/iriap.h>
#include <net/irda/irias_object.h>
+#include <net/irda/irlmp.h>
#include <net/irda/irttp.h>
#include <net/irda/discovery.h>
@@ -50,7 +71,14 @@ extern int irlap_driver_rcv(struct sk_buff *, struct net_device *,
static int irda_create(struct socket *sock, int protocol);
static struct proto_ops irda_stream_ops;
+static struct proto_ops irda_seqpacket_ops;
static struct proto_ops irda_dgram_ops;
+
+#ifdef CONFIG_IRDA_ULTRA
+static struct proto_ops irda_ultra_ops;
+#define ULTRA_MAX_DATA 382
+#endif /* CONFIG_IRDA_ULTRA */
+
static hashbin_t *cachelog = NULL;
static DECLARE_WAIT_QUEUE_HEAD(discovery_wait); /* Wait for discovery */
@@ -133,6 +161,10 @@ static void irda_connect_confirm(void *instance, void *sap,
self = (struct irda_sock *) instance;
+ sk = self->sk;
+ if (sk == NULL)
+ return;
+
/* How much header space do we need to reserve */
self->max_header_size = max_header_size;
@@ -140,19 +172,30 @@ static void irda_connect_confirm(void *instance, void *sap,
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- if (max_sdu_size == SAR_DISABLE)
- self->max_data_size = irttp_get_max_seq_size(self->tsap);
- else
+ switch (sk->type) {
+ case SOCK_STREAM:
+ if (max_sdu_size != 0) {
+ ERROR(__FUNCTION__ "(), max_sdu_size must be 0\n");
+ return;
+ }
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ break;
+ case SOCK_SEQPACKET:
+ if (max_sdu_size == 0) {
+ ERROR(__FUNCTION__ "(), max_sdu_size cannot be 0\n");
+ return;
+ }
self->max_data_size = max_sdu_size;
+ break;
+ default:
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ };
- IRDA_DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+ IRDA_DEBUG(2, __FUNCTION__ "(), max_data_size=%d\n",
+ self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
- sk = self->sk;
- if (sk == NULL)
- return;
-
skb_queue_tail(&sk->receive_queue, skb);
/* We are now connected! */
@@ -175,7 +218,11 @@ static void irda_connect_indication(void *instance, void *sap,
IRDA_DEBUG(2, __FUNCTION__ "()\n");
- self = (struct irda_sock *) instance;
+ self = (struct irda_sock *) instance;
+
+ sk = self->sk;
+ if (sk == NULL)
+ return;
/* How much header space do we need to reserve */
self->max_header_size = max_header_size;
@@ -184,21 +231,31 @@ static void irda_connect_indication(void *instance, void *sap,
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- if (max_sdu_size == SAR_DISABLE)
- self->max_data_size = irttp_get_max_seq_size(self->tsap);
- else
+ switch (sk->type) {
+ case SOCK_STREAM:
+ if (max_sdu_size != 0) {
+ ERROR(__FUNCTION__ "(), max_sdu_size must be 0\n");
+ return;
+ }
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ break;
+ case SOCK_SEQPACKET:
+ if (max_sdu_size == 0) {
+ ERROR(__FUNCTION__ "(), max_sdu_size cannot be 0\n");
+ return;
+ }
self->max_data_size = max_sdu_size;
+ break;
+ default:
+ self->max_data_size = irttp_get_max_seg_size(self->tsap);
+ };
- IRDA_DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+ IRDA_DEBUG(2, __FUNCTION__ "(), max_data_size=%d\n",
+ self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
-
- sk = self->sk;
- if (sk == NULL)
- return;
skb_queue_tail(&sk->receive_queue, skb);
-
sk->state_change(sk);
}
@@ -212,7 +269,7 @@ void irda_connect_response(struct irda_sock *self)
{
struct sk_buff *skb;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
@@ -239,7 +296,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
struct irda_sock *self;
struct sock *sk;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
self = (struct irda_sock *) instance;
ASSERT(self != NULL, return;);
@@ -254,26 +311,30 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
break;
case FLOW_START:
self->tx_flow = flow;
- IRDA_DEBUG(1, __FUNCTION__ "(), IrTTP wants us to start again\n");
+ IRDA_DEBUG(1, __FUNCTION__
+ "(), IrTTP wants us to start again\n");
wake_up_interruptible(sk->sleep);
break;
default:
IRDA_DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n");
+ /* Unknown flow command, better stop */
+ self->tx_flow = flow;
+ break;
}
}
/*
- * Function irda_get_value_confirm (obj_id, value, priv)
+ * Function irda_getvalue_confirm (obj_id, value, priv)
*
* Got answer from remote LM-IAS
*
*/
-static void irda_get_value_confirm(int result, __u16 obj_id,
- struct ias_value *value, void *priv)
+static void irda_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
{
struct irda_sock *self;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(priv != NULL, return;);
self = (struct irda_sock *) priv;
@@ -287,12 +348,12 @@ static void irda_get_value_confirm(int result, __u16 obj_id,
iriap_close(self->iriap);
self->iriap = NULL;
+ self->errno = result;
+
/* Check if request succeeded */
if (result != IAS_SUCCESS) {
IRDA_DEBUG(0, __FUNCTION__ "(), IAS query failed!\n");
- self->errno = result;
-
/* Wake up any processes waiting for result */
wake_up_interruptible(&self->ias_wait);
@@ -312,6 +373,8 @@ static void irda_get_value_confirm(int result, __u16 obj_id,
IRDA_DEBUG(0, __FUNCTION__ "(), bad type!\n");
break;
}
+ irias_delete_value(value);
+
/* Wake up any processes waiting for result */
wake_up_interruptible(&self->ias_wait);
}
@@ -324,7 +387,7 @@ static void irda_get_value_confirm(int result, __u16 obj_id,
*/
static void irda_discovery_indication(hashbin_t *log)
{
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
cachelog = log;
@@ -353,6 +416,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
notify.connect_indication = irda_connect_indication;
notify.disconnect_indication = irda_disconnect_indication;
notify.data_indication = irda_data_indication;
+ notify.udata_indication = irda_data_indication;
notify.flow_indication = irda_flow_indication;
notify.instance = self;
strncpy(notify.name, name, NOTIFY_MAX_NAME);
@@ -370,6 +434,38 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
}
/*
+ * Function irda_open_lsap (self)
+ *
+ * Open local Link Service Access Point (LSAP). Used for opening Ultra
+ * sockets
+ */
+#ifdef CONFIG_IRDA_ULTRA
+static int irda_open_lsap(struct irda_sock *self, int pid)
+{
+ notify_t notify;
+
+ if (self->lsap) {
+ WARNING(__FUNCTION__ "(), busy!\n");
+ return -EBUSY;
+ }
+
+ /* Initialize callbacks to be used by the IrDA stack */
+ irda_notify_init(&notify);
+ notify.udata_indication = irda_data_indication;
+ notify.instance = self;
+ strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);
+
+ self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
+ if (self->lsap == NULL) {
+ IRDA_DEBUG( 0, __FUNCTION__ "(), Unable to allocate LSAP!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
* Function irda_find_lsap_sel (self, name)
*
* Try to lookup LSAP selector in remote LM-IAS
@@ -377,7 +473,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
*/
static int irda_find_lsap_sel(struct irda_sock *self, char *name)
{
- IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "(), name=%s\n", name);
ASSERT(self != NULL, return -1;);
@@ -387,7 +483,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
}
self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
- irda_get_value_confirm);
+ irda_getvalue_confirm);
/* Query remote LM-IAS */
iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
@@ -401,6 +497,104 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
return -ENETUNREACH; /* May not be true */
}
+ /*
+ * Function irda_discover_daddr_and_lsap_sel (self, name)
+ *
+ * This try to find a device with the requested service.
+ *
+ * It basically look into the discovery log. For each address in the list,
+ * it queries the LM-IAS of the device to find if this device offer
+ * the requested service.
+ * If there is more than one node supporting the service, we complain
+ * to the user (it should move devices around).
+ * The, we set both the destination address and the lsap selector to point
+ * on the service on the unique device we have found.
+ *
+ * Note : this function fails if there is more than one device in range,
+ * because IrLMP doesn't disconnect the LAP when the last LSAP is closed.
+ * Moreover, we would need to wait the LAP disconnection...
+ */
+static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
+{
+ discovery_t *discovery;
+ int err = -ENETUNREACH;
+ __u32 daddr = 0x0; /* Address we found the service on */
+ __u8 dtsap_sel = 0x0; /* TSAP associated with it */
+
+ IRDA_DEBUG(2, __FUNCTION__ "(), name=%s\n", name);
+
+ ASSERT(self != NULL, return -1;);
+
+ /* Tell IrLMP we want to be notified */
+ irlmp_update_client(self->ckey, self->mask, NULL,
+ irda_discovery_indication);
+
+ /* Do some discovery */
+ irlmp_discovery_request(self->nslots);
+
+ /* Check if the we got some results */
+ if (!cachelog)
+ /* Wait for answer */
+ /*interruptible_sleep_on(&self->discovery_wait);*/
+ return -EAGAIN;
+
+ /*
+ * Now, check all discovered devices (if any), and connect
+ * client only about the services that the client is
+ * interested in...
+ */
+ discovery = (discovery_t *) hashbin_get_first(cachelog);
+ while (discovery != NULL) {
+ /* Mask out the ones we don't want */
+ if (discovery->hints.word & self->mask) {
+ /* Try this address */
+ self->daddr = discovery->daddr;
+ self->saddr = 0x0;
+ IRDA_DEBUG(1, __FUNCTION__ "(), trying daddr = %08x\n",
+ self->daddr);
+
+ /* Query remote LM-IAS for this service */
+ err = irda_find_lsap_sel(self, name);
+ if (err == 0) {
+ /* We found the requested service */
+ if(daddr != 0x0) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), discovered service ''%s'' in two different devices !!!\n",
+ name);
+ return(-ENOTUNIQ);
+ }
+ /* First time we foun that one, save it ! */
+ daddr = self->daddr;
+ dtsap_sel = self->dtsap_sel;
+ }
+ }
+
+ /* Next node, maybe we will be more lucky... */
+ discovery = (discovery_t *) hashbin_get_next(cachelog);
+ }
+ cachelog = NULL;
+
+ /* Check out what we found */
+ if(daddr == 0x0) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), cannot discover service ''%s'' in any device !!!\n",
+ name);
+ self->daddr = 0; /* Guessing */
+ return(-ENETUNREACH);
+ }
+
+ /* Revert back to discovered device & service */
+ self->daddr = daddr;
+ self->saddr = 0x0;
+ self->dtsap_sel = dtsap_sel;
+
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), discovered requested service ''%s'' at address %08x\n",
+ name, self->daddr);
+
+ return 0;
+}
+
/*
* Function irda_getname (sock, uaddr, uaddr_len, peer)
*
@@ -446,12 +640,16 @@ static int irda_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
+ if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
+ (sk->type != SOCK_DGRAM))
+ return -EOPNOTSUPP;
- if (sk->type == SOCK_STREAM && sk->state != TCP_LISTEN) {
+ if (sk->state != TCP_LISTEN) {
sk->max_ack_backlog = backlog;
sk->state = TCP_LISTEN;
-
+
return 0;
}
@@ -472,15 +670,38 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
__u16 hints = 0;
int err;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
self = sk->protinfo.irda;
ASSERT(self != NULL, return -1;);
- if ((addr_len < sizeof(struct sockaddr_irda)) ||
- (addr_len > sizeof(struct sockaddr_irda)))
+ if (addr_len != sizeof(struct sockaddr_irda))
return -EINVAL;
+#ifdef CONFIG_IRDA_ULTRA
+ /* Special care for Ultra sockets */
+ if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA)) {
+ self->pid = addr->sir_lsap_sel;
+ if (self->pid & 0x80) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), extension in PID not supp!\n");
+ return -EOPNOTSUPP;
+ }
+ err = irda_open_lsap(self, self->pid);
+ if (err < 0)
+ return err;
+
+ self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER;
+ self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER;
+
+ /* Pretend we are connected */
+ sock->state = SS_CONNECTED;
+ sk->state = TCP_ESTABLISHED;
+
+ return 0;
+ }
+#endif /* CONFIG_IRDA_ULTRA */
+
err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
if (err < 0)
return err;
@@ -490,6 +711,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
self->stsap_sel);
irias_insert_object(self->ias_obj);
+
+#if 1 /* Will be removed in near future */
/* Fill in some default hint bits values */
if (strncmp(addr->sir_name, "OBEX", 4) == 0)
@@ -497,7 +720,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (hints)
self->skey = irlmp_register_service(hints);
-
+#endif
return 0;
}
@@ -515,7 +738,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int err;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
self = sk->protinfo.irda;
ASSERT(self != NULL, return -1;);
@@ -530,10 +753,11 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
if ((sk = sock->sk) == NULL)
return -EINVAL;
- if (sk->type != SOCK_STREAM)
+ if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
+ (sk->type != SOCK_DGRAM))
return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN)
+ if (sk->state != TCP_LISTEN)
return -EINVAL;
/*
@@ -606,9 +830,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
struct irda_sock *self;
int err;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
self = sk->protinfo.irda;
+
+ /* Don't allow connect for Ultra sockets */
+ if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA))
+ return -ESOCKTNOSUPPORT;
if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
@@ -629,18 +857,26 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr_len != sizeof(struct sockaddr_irda))
return -EINVAL;
- /* Check if user supplied the required destination device address */
- if (!addr->sir_addr)
- return -EINVAL;
-
- self->daddr = addr->sir_addr;
- IRDA_DEBUG(1, __FUNCTION__ "(), daddr = %08x\n", self->daddr);
-
- /* Query remote LM-IAS */
- err = irda_find_lsap_sel(self, addr->sir_name);
- if (err) {
- IRDA_DEBUG(0, __FUNCTION__ "(), connect failed!\n");
- return err;
+ /* Check if user supplied any destination device address */
+ if (!addr->sir_addr) {
+ /* Try to find one suitable */
+ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
+ if (err) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), auto-connect failed!\n");
+ return -EINVAL;
+ }
+ } else {
+ /* Use the one provided by the user */
+ self->daddr = addr->sir_addr;
+ IRDA_DEBUG(1, __FUNCTION__ "(), daddr = %08x\n", self->daddr);
+
+ /* Query remote LM-IAS */
+ err = irda_find_lsap_sel(self, addr->sir_name);
+ if (err) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), connect failed!\n");
+ return err;
+ }
}
/* Check if we have opened a local TSAP */
@@ -704,8 +940,9 @@ static int irda_create(struct socket *sock, int protocol)
/* Check for valid socket type */
switch (sock->type) {
- case SOCK_STREAM: /* FALLTHROUGH */
- case SOCK_SEQPACKET:
+ case SOCK_STREAM: /* For TTP connections with SAR disabled */
+ case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */
+ case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */
break;
default:
return -ESOCKTNOSUPPORT;
@@ -726,10 +963,35 @@ static int irda_create(struct socket *sock, int protocol)
sk->protinfo.irda = self;
sock_init_data(sock, sk);
- if (sock->type == SOCK_STREAM)
+ switch (sock->type) {
+ case SOCK_STREAM:
sock->ops = &irda_stream_ops;
- else
- sock->ops = &irda_dgram_ops;
+ self->max_sdu_size_rx = TTP_SAR_DISABLE;
+ break;
+ case SOCK_SEQPACKET:
+ sock->ops = &irda_seqpacket_ops;
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+ break;
+ case SOCK_DGRAM:
+ switch (protocol) {
+#ifdef CONFIG_IRDA_ULTRA
+ case IRDAPROTO_ULTRA:
+ sock->ops = &irda_ultra_ops;
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
+ case IRDAPROTO_UNITDATA:
+ sock->ops = &irda_dgram_ops;
+ /* We let Unitdata conn. be like seqpack conn. */
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+ break;
+ default:
+ ERROR(__FUNCTION__ "(), protocol not supported!\n");
+ return -ESOCKTNOSUPPORT;
+ }
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
sk->protocol = protocol;
@@ -737,8 +999,8 @@ static int irda_create(struct socket *sock, int protocol)
self->ckey = irlmp_register_client(0, NULL, NULL);
self->mask = 0xffff;
self->rx_flow = self->tx_flow = FLOW_START;
- self->max_sdu_size_rx = SAR_DISABLE; /* Default value */
self->nslots = DISCOVERY_DEFAULT_SLOTS;
+ self->daddr = DEV_ADDR_ANY;
/* Notify that we are using the irda module, so nobody removes it */
irda_mod_inc_use_count();
@@ -774,7 +1036,12 @@ void irda_destroy_socket(struct irda_sock *self)
irttp_close_tsap(self->tsap);
self->tsap = NULL;
}
-
+#ifdef CONFIG_IRDA_ULTRA
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
+ self->lsap = NULL;
+ }
+#endif /* CONFIG_IRDA_ULTRA */
kfree(self);
/* Notify that we are not using the irda module anymore */
@@ -784,7 +1051,7 @@ void irda_destroy_socket(struct irda_sock *self)
}
/*
- * Function irda_release (sock, peer)
+ * Function irda_release (sock)
*
*
*
@@ -793,7 +1060,7 @@ static int irda_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
if (sk == NULL)
return 0;
@@ -814,14 +1081,14 @@ static int irda_release(struct socket *sock)
/*
* Function irda_sendmsg (sock, msg, len, scm)
*
- * Send message down to TinyTP
- *
+ * Send message down to TinyTP. This function is used for both STREAM and
+ * SEQPACK services. This is possible since it forces the client to
+ * fragment the message if necessary
*/
static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
-/* struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; */
struct irda_sock *self;
struct sk_buff *skb;
unsigned char *asmptr;
@@ -855,9 +1122,9 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
/* Check that we don't send out to big frames */
if (len > self->max_data_size) {
- IRDA_DEBUG(0, __FUNCTION__ "(), Warning to much data! "
- "Chopping frame from %d to %d bytes!\n", len,
- self->max_data_size);
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), Chopping frame from %d to %d bytes!\n", len,
+ self->max_data_size);
len = self->max_data_size;
}
@@ -868,7 +1135,6 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
skb_reserve(skb, self->max_header_size);
- IRDA_DEBUG(4, __FUNCTION__ "(), appending user data\n");
asmptr = skb->h.raw = skb_put(skb, len);
memcpy_fromiovec(asmptr, msg->msg_iov, len);
@@ -881,14 +1147,15 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err);
return err;
}
+ /* Tell client how much data we actually sent */
return len;
}
/*
- * Function irda_recvmsg (sock, msg, size, flags, scm)
- *
- * Try to receive message and copy it to user
+ * Function irda_recvmsg_dgram (sock, msg, size, flags, scm)
*
+ * Try to receive message and copy it to user. The frame is discarded
+ * after being read, regardless of how much the user actually read
*/
static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
int size, int flags, struct scm_cookie *scm)
@@ -912,6 +1179,9 @@ static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
copied = skb->len;
if (copied > size) {
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), Received truncated frame (%d < %d)!\n",
+ copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
@@ -980,7 +1250,6 @@ static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
if (flags & MSG_WAITALL)
target = size;
-
msg->msg_namelen = 0;
do {
@@ -1060,6 +1329,132 @@ static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
}
/*
+ * Function irda_sendmsg_dgram (sock, msg, len, scm)
+ *
+ * Send message down to TinyTP for the unreliable sequenced
+ * packet service...
+ *
+ */
+static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg,
+ int len, struct scm_cookie *scm)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ struct sk_buff *skb;
+ unsigned char *asmptr;
+ int err;
+
+ IRDA_DEBUG(4, __FUNCTION__ "(), len=%d\n", len);
+
+ if (msg->msg_flags & ~MSG_DONTWAIT)
+ return -EINVAL;
+
+ if (sk->shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ return -EPIPE;
+ }
+
+ if (sk->state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
+
+ /*
+ * Check that we don't send out to big frames. This is an unreliable
+ * service, so we have no fragmentation and no coalescence
+ */
+ if (len > self->max_data_size) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), Warning to much data! "
+ "Chopping frame from %d to %d bytes!\n", len,
+ self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size, 0,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return -ENOBUFS;
+
+ skb_reserve(skb, self->max_header_size);
+
+ IRDA_DEBUG(4, __FUNCTION__ "(), appending user data\n");
+ asmptr = skb->h.raw = skb_put(skb, len);
+ memcpy_fromiovec(asmptr, msg->msg_iov, len);
+
+ /*
+ * Just send the message to TinyTP, and let it deal with possible
+ * errors. No need to duplicate all that here
+ */
+ err = irttp_udata_request(self->tsap, skb);
+ if (err) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err);
+ return err;
+ }
+ return len;
+}
+
+/*
+ * Function irda_sendmsg_ultra (sock, msg, len, scm)
+ *
+ * Send message down to IrLMP for the unreliable Ultra
+ * packet service...
+ */
+#ifdef CONFIG_IRDA_ULTRA
+static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg,
+ int len, struct scm_cookie *scm)
+{
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ struct sk_buff *skb;
+ unsigned char *asmptr;
+ int err;
+
+ IRDA_DEBUG(4, __FUNCTION__ "(), len=%d\n", len);
+
+ if (msg->msg_flags & ~MSG_DONTWAIT)
+ return -EINVAL;
+
+ if (sk->shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ return -EPIPE;
+ }
+
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
+
+ /*
+ * Check that we don't send out to big frames. This is an unreliable
+ * service, so we have no fragmentation and no coalescence
+ */
+ if (len > self->max_data_size) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), Warning to much data! "
+ "Chopping frame from %d to %d bytes!\n", len,
+ self->max_data_size);
+ len = self->max_data_size;
+ }
+
+ skb = sock_alloc_send_skb(sk, len + self->max_header_size, 0,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return -ENOBUFS;
+
+ skb_reserve(skb, self->max_header_size);
+
+ IRDA_DEBUG(4, __FUNCTION__ "(), appending user data\n");
+ asmptr = skb->h.raw = skb_put(skb, len);
+ memcpy_fromiovec(asmptr, msg->msg_iov, len);
+
+ err = irlmp_connless_data_request(self->lsap, skb);
+ if (err) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err);
+ return err;
+ }
+ return len;
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
* Function irda_shutdown (sk, how)
*
*
@@ -1070,7 +1465,7 @@ static int irda_shutdown(struct socket *sock, int how)
struct irda_sock *self;
struct sock *sk = sock->sk;
- IRDA_DEBUG( 0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
self = sk->protinfo.irda;
ASSERT(self != NULL, return -1;);
@@ -1103,7 +1498,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
poll_wait(file, sk->sleep, wait);
mask = 0;
@@ -1115,9 +1510,10 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue))
+ if (!skb_queue_empty(&sk->receive_queue)) {
+ IRDA_DEBUG(4, "Socket is readable\n");
mask |= POLLIN | POLLRDNORM;
-
+ }
/* Connection-based need to check for termination and startup */
if (sk->type == SOCK_STREAM && sk->state==TCP_CLOSE)
mask |= POLLHUP;
@@ -1208,6 +1604,8 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct irda_sock *self;
+ struct irda_ias_set ias_opt;
+ struct ias_object *ias_obj;
int opt;
self = sk->protinfo.irda;
@@ -1215,20 +1613,101 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
if (level != SOL_IRLMP)
return -ENOPROTOOPT;
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(opt, (int *)optval))
- return -EFAULT;
-
+
switch (optname) {
case IRLMP_IAS_SET:
+ if (optlen != sizeof(struct irda_ias_set))
+ return -EINVAL;
+
+ /* Copy query to the driver. */
+ if (copy_from_user(&ias_opt, (char *)optval, optlen))
+ return -EFAULT;
+
+ /* Find the object we target */
+ ias_obj = irias_find_object(ias_opt.irda_class_name);
+ if(ias_obj == (struct ias_object *) NULL) {
+ /* Create a new object */
+ ias_obj = irias_new_object(ias_opt.irda_class_name,
+ jiffies);
+ }
+
+ /* Do we have it already ? */
+ if(irias_find_attrib(ias_obj, ias_opt.irda_attrib_name))
+ return -EINVAL;
+
+ /* Look at the type */
+ switch(ias_opt.irda_attrib_type) {
+ case IAS_INTEGER:
+ /* Add an integer attribute */
+ irias_add_integer_attrib(ias_obj,
+ ias_opt.irda_attrib_name,
+ ias_opt.attribute.irda_attrib_int);
+ break;
+ case IAS_OCT_SEQ:
+ /* Check length */
+ if(ias_opt.attribute.irda_attrib_octet_seq.len >
+ IAS_MAX_OCTET_STRING)
+ return -EINVAL;
+ /* Add an octet sequence attribute */
+ irias_add_octseq_attrib(
+ ias_obj,
+ ias_opt.irda_attrib_name,
+ ias_opt.attribute.irda_attrib_octet_seq.octet_seq,
+ ias_opt.attribute.irda_attrib_octet_seq.len);
+ break;
+ case IAS_STRING:
+ /* Should check charset & co */
+ /* Check length */
+ if(ias_opt.attribute.irda_attrib_string.len >
+ IAS_MAX_STRING)
+ return -EINVAL;
+ /* NULL terminate the string (avoid troubles) */
+ ias_opt.attribute.irda_attrib_string.string[ias_opt.attribute.irda_attrib_string.len] = '\0';
+ /* Add a string attribute */
+ irias_add_string_attrib(
+ ias_obj,
+ ias_opt.irda_attrib_name,
+ ias_opt.attribute.irda_attrib_string.string);
+ break;
+ default :
+ return -EINVAL;
+ }
+ irias_insert_object(ias_obj);
+ break;
+
IRDA_DEBUG(0, __FUNCTION__ "(), sorry not impl. yet!\n");
return -ENOPROTOOPT;
- case IRTTP_MAX_SDU_SIZE:
- IRDA_DEBUG(2, __FUNCTION__ "(), setting max_sdu_size = %d\n", opt);
- self->max_sdu_size_rx = opt;
+ case IRLMP_MAX_SDU_SIZE:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(opt, (int *)optval))
+ return -EFAULT;
+
+ /* Only possible for a seqpacket service (TTP with SAR) */
+ if (sk->type != SOCK_SEQPACKET) {
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), setting max_sdu_size = %d\n", opt);
+ self->max_sdu_size_rx = opt;
+ } else {
+ WARNING(__FUNCTION__
+ "(), not allowed to set MAXSDUSIZE for this "
+ "socket type!\n");
+ return -ENOPROTOOPT;
+ }
+ break;
+ case IRLMP_HINTS_SET:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(opt, (int *)optval))
+ return -EFAULT;
+
+ /* Unregister any old registration */
+ if (self->skey)
+ irlmp_unregister_service(self->skey);
+
+ self->skey = irlmp_register_service((__u16) opt);
break;
default:
return -ENOPROTOOPT;
@@ -1236,6 +1715,104 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
return 0;
}
+ /*
+ * Function irda_simple_getvalue_confirm (obj_id, value, priv)
+ *
+ * Got answer from remote LM-IAS, just copy object to requester...
+ *
+ * Note : duplicate from above, but we need our own version that
+ * doesn't touch the dtsap_sel and save the full value structure...
+ */
+static void irda_simple_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct irda_sock *self;
+
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
+ ASSERT(priv != NULL, return;);
+ self = (struct irda_sock *) priv;
+
+ if (!self) {
+ WARNING(__FUNCTION__ "(), lost myself!\n");
+ return;
+ }
+
+ /* We probably don't need to make any more queries */
+ iriap_close(self->iriap);
+ self->iriap = NULL;
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), IAS query failed!\n");
+
+ self->errno = -EHOSTUNREACH;
+
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->ias_wait);
+
+ return;
+ }
+
+ /* Clone the object (so the requester can free it) */
+ self->ias_result = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ memcpy(self->ias_result, value, sizeof(struct ias_value));
+ irias_delete_value(value);
+
+ self->errno = 0;
+
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->ias_wait);
+}
+
+/*
+ * Function irda_extract_ias_value(ias_opt, ias_value)
+ *
+ * Translate internal IAS value structure to the user space representation
+ *
+ * The external representation of IAS values, as we exchange them with
+ * user space program is quite different from the internal representation,
+ * as stored in the IAS database (because we need a flat structure for
+ * crossing kernel boundary).
+ * This function transform the former in the latter. We also check
+ * that the value type is valid.
+ */
+static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
+ struct ias_value *ias_value)
+{
+ /* Look at the type */
+ switch (ias_value->type) {
+ case IAS_INTEGER:
+ /* Copy the integer */
+ ias_opt->attribute.irda_attrib_int = ias_value->t.integer;
+ break;
+ case IAS_OCT_SEQ:
+ /* Set length */
+ ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len;
+ /* Copy over */
+ memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
+ ias_value->t.oct_seq, ias_value->len);
+ break;
+ case IAS_STRING:
+ /* Set length */
+ ias_opt->attribute.irda_attrib_string.len = ias_value->len;
+ ias_opt->attribute.irda_attrib_string.charset = ias_value->charset;
+ /* Copy over */
+ memcpy(ias_opt->attribute.irda_attrib_string.string,
+ ias_value->t.string, ias_value->len);
+ /* NULL terminate the string (avoid troubles) */
+ ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0';
+ break;
+ default :
+ return -EINVAL;
+ }
+
+ /* Copy type over */
+ ias_opt->irda_attrib_type = ias_value->type;
+
+ return 0;
+}
+
/*
* Function irda_getsockopt (sock, level, optname, optval, optlen)
*
@@ -1250,8 +1827,12 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
struct irda_device_list list;
struct irda_device_info *info;
discovery_t *discovery;
+ struct irda_ias_set ias_opt; /* IAS get/query params */
+ struct ias_object * ias_obj; /* Object in IAS */
+ struct ias_attrib * ias_attr; /* Attribute in IAS object */
int val = 0;
int len = 0;
+ int err;
int offset, total;
self = sk->protinfo.irda;
@@ -1293,9 +1874,8 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
while (discovery != NULL) {
/* Mask out the ones we don't want */
if (discovery->hints.word & self->mask) {
-
/* Check if room for this device entry */
- if (len - total < sizeof(struct irda_device_info))
+ if (len-total<sizeof(struct irda_device_info))
break;
/* Copy discovery information */
@@ -1307,7 +1887,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
strncpy(info->info, discovery->nickname,
NICKNAME_MAX_LEN);
- if (copy_to_user(optval+offset, info,
+ if (copy_to_user(optval+total, info,
sizeof(struct irda_device_info)))
return -EFAULT;
list.len++;
@@ -1327,7 +1907,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
sizeof(struct irda_device_info)))
return -EFAULT;
break;
- case IRTTP_MAX_SDU_SIZE:
+ case IRLMP_MAX_SDU_SIZE:
val = self->max_data_size;
len = sizeof(int);
if (put_user(len, optlen))
@@ -1336,10 +1916,95 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
if (copy_to_user(optval, &val, len))
return -EFAULT;
break;
+ case IRLMP_IAS_GET:
+ /* The user want an object from our local IAS database.
+ * We just need to query the IAS and return the value
+ * that we found */
+
+ /* Check that the user has allocated the right space for us */
+ if (len != sizeof(ias_opt))
+ return -EINVAL;
+
+ /* Copy query to the driver. */
+ if (copy_from_user((char *) &ias_opt, (char *)optval, len))
+ return -EFAULT;
+
+ /* Find the object we target */
+ ias_obj = irias_find_object(ias_opt.irda_class_name);
+ if(ias_obj == (struct ias_object *) NULL)
+ return -EINVAL;
+
+ /* Find the attribute (in the object) we target */
+ ias_attr = irias_find_attrib(ias_obj,
+ ias_opt.irda_attrib_name);
+ if(ias_attr == (struct ias_attrib *) NULL)
+ return -EINVAL;
+
+ /* Translate from internal to user structure */
+ err = irda_extract_ias_value(&ias_opt, ias_attr->value);
+ if(err)
+ return err;
+
+ /* Copy reply to the user */
+ if (copy_to_user((char *)optval, (char *) &ias_opt,
+ sizeof(ias_opt)))
+ return -EFAULT;
+ /* Note : don't need to put optlen, we checked it */
+ break;
+ case IRLMP_IAS_QUERY:
+ /* The user want an object from a remote IAS database.
+ * We need to use IAP to query the remote database and
+ * then wait for the answer to come back. */
+
+ /* Check that the user has allocated the right space for us */
+ if (len != sizeof(ias_opt))
+ return -EINVAL;
+
+ /* Copy query to the driver. */
+ if (copy_from_user((char *) &ias_opt, (char *)optval, len))
+ return -EFAULT;
+
+ /* Check that we can proceed with IAP */
+ if (self->iriap) {
+ WARNING(__FUNCTION__
+ "(), busy with a previous query\n");
+ return -EBUSY;
+ }
+
+ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
+ irda_simple_getvalue_confirm);
+
+ /* Treat unexpected signals as disconnect */
+ self->errno = -EHOSTUNREACH;
+
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(self->iriap,
+ self->saddr, self->daddr,
+ ias_opt.irda_class_name,
+ ias_opt.irda_attrib_name);
+ /* Wait for answer */
+ interruptible_sleep_on(&self->ias_wait);
+ /* Check what happened */
+ if (self->errno)
+ return (self->errno);
+
+ /* Translate from internal to user structure */
+ err = irda_extract_ias_value(&ias_opt, self->ias_result);
+ if (self->ias_result)
+ kfree(self->ias_result);
+ if (err)
+ return err;
+
+ /* Copy reply to the user */
+ if (copy_to_user((char *)optval, (char *) &ias_opt,
+ sizeof(ias_opt)))
+ return -EFAULT;
+ /* Note : don't need to put optlen, we checked it */
+ break;
default:
return -ENOPROTOOPT;
}
-
+
return 0;
}
@@ -1370,7 +2035,7 @@ static struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = {
sock_no_mmap
};
-static struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
+static struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
PF_IRDA,
irda_release,
@@ -1388,12 +2053,57 @@ static struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
sock_no_fcntl,
irda_sendmsg,
irda_recvmsg_dgram,
- sock_no_mmap
+ sock_no_mmap,
+};
+
+static struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
+ PF_IRDA,
+
+ irda_release,
+ irda_bind,
+ irda_connect,
+ sock_no_socketpair,
+ irda_accept,
+ irda_getname,
+ datagram_poll,
+ irda_ioctl,
+ irda_listen,
+ irda_shutdown,
+ irda_setsockopt,
+ irda_getsockopt,
+ sock_no_fcntl,
+ irda_sendmsg_dgram,
+ irda_recvmsg_dgram,
+ sock_no_mmap,
};
+#ifdef CONFIG_IRDA_ULTRA
+static struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
+ PF_IRDA,
+
+ irda_release,
+ irda_bind,
+ sock_no_connect,
+ sock_no_socketpair,
+ sock_no_accept,
+ irda_getname,
+ datagram_poll,
+ irda_ioctl,
+ sock_no_listen,
+ irda_shutdown,
+ irda_setsockopt,
+ irda_getsockopt,
+ sock_no_fcntl,
+ irda_sendmsg_ultra,
+ irda_recvmsg_dgram,
+ sock_no_mmap,
+};
+#endif /* CONFIG_IRDA_ULTRA */
+
#include <linux/smp_lock.h>
-SOCKOPS_WRAP(irda_dgram, PF_IRDA);
SOCKOPS_WRAP(irda_stream, PF_IRDA);
+SOCKOPS_WRAP(irda_seqpacket, PF_IRDA);
+SOCKOPS_WRAP(irda_dgram, PF_IRDA);
/*
* Function irda_device_event (this, event, ptr)
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 84d0239e3..957426154 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -209,7 +209,8 @@ __u32 irlmp_find_device(hashbin_t *cachelog, char *name, __u32 *saddr)
* Print discovery information in /proc file system
*
*/
-int discovery_proc_read(char *buf, char **start, off_t offset, int len)
+int discovery_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused)
{
discovery_t *discovery;
unsigned long flags;
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index ed45a509f..805186128 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Jun 6 20:37:34 1999
- * Modified at: Sat Oct 30 12:48:14 1999
+ * Modified at: Tue Dec 21 13:26:41 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
@@ -144,16 +144,14 @@ static int __ircomm_close(struct ircomm_cb *self)
{
IRDA_DEBUG(2, __FUNCTION__"()\n");
- ASSERT(self != NULL, return -EIO;);
- ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;);
-
/* Disconnect link if any */
ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL);
/* Remove TSAP */
- if (self->tsap)
+ if (self->tsap) {
irttp_close_tsap(self->tsap);
- self->tsap = NULL;
+ self->tsap = NULL;
+ }
/* Remove LSAP */
if (self->lsap) {
@@ -177,6 +175,11 @@ int ircomm_close(struct ircomm_cb *self)
{
struct ircomm_cb *entry;
+ ASSERT(self != NULL, return -EIO;);
+ ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;);
+
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
entry = hashbin_remove(ircomm, self->line, NULL);
ASSERT(entry == self, return -1;);
@@ -236,14 +239,14 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
* deliver it first. The side effect is that the control channel
* will be removed from the skb
*/
-#if 0
- if (clen > 0)
- ircomm_control_indication(self, skb, clen);
-#endif
if (self->notify.connect_indication)
self->notify.connect_indication(self->notify.instance, self,
info->qos, info->max_data_size,
info->max_header_size, skb);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n");
+ dev_kfree_skb(skb);
+ }
}
/*
@@ -282,6 +285,10 @@ void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
self, info->qos,
info->max_data_size,
info->max_header_size, skb);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n");
+ dev_kfree_skb(skb);
+ }
}
/*
@@ -319,6 +326,10 @@ void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
if (self->notify.data_indication)
self->notify.data_indication(self->notify.instance, self, skb);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n");
+ dev_kfree_skb(skb);
+ }
}
/*
@@ -349,7 +360,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
if (skb->len)
ircomm_data_indication(self, skb);
else {
- IRDA_DEBUG(4, __FUNCTION__ "(), data was control info only!\n");
+ IRDA_DEBUG(4, __FUNCTION__
+ "(), data was control info only!\n");
dev_kfree_skb(skb);
}
}
@@ -399,6 +411,10 @@ static void ircomm_control_indication(struct ircomm_cb *self,
if (self->notify.udata_indication)
self->notify.udata_indication(self->notify.instance, self,
ctrl_skb);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n");
+ dev_kfree_skb(skb);
+ }
}
/*
@@ -438,6 +454,9 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
if (self->notify.disconnect_indication) {
self->notify.disconnect_indication(self->notify.instance, self,
info->reason, skb);
+ } else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n");
+ dev_kfree_skb(skb);
}
}
@@ -462,7 +481,7 @@ void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow)
#ifdef CONFIG_PROC_FS
/*
- * Function ircomm_proc_read (buf, start, offset, len)
+ * Function ircomm_proc_read (buf, start, offset, len, unused)
*
*
*
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index 7e2bdd50a..5b43be858 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -3,10 +3,10 @@
* Filename: ircomm_event.c
* Version: 1.0
* Description: IrCOMM layer state machine
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Jun 6 20:33:11 1999
- * Modified at: Sat Oct 30 13:05:23 1999
+ * Modified at: Sun Dec 12 13:44:32 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
@@ -109,6 +109,8 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
default:
IRDA_DEBUG(4, __FUNCTION__"(), unknown event: %s\n",
ircomm_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
return -EINVAL;
}
return ret;
@@ -139,6 +141,8 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
default:
IRDA_DEBUG(0, __FUNCTION__"(), unknown event: %s\n",
ircomm_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
ret = -EINVAL;
}
return ret;
@@ -172,6 +176,8 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
default:
IRDA_DEBUG(0, __FUNCTION__ "(), unknown event = %s\n",
ircomm_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
ret = -EINVAL;
}
return ret;
@@ -214,6 +220,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
default:
IRDA_DEBUG(0, __FUNCTION__ "(), unknown event = %s\n",
ircomm_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
ret = -EINVAL;
}
return ret;
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index bf35c75e0..ca4526260 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: ircomm_lmp.c
- * Version:
+ * Version: 1.0
* Description: Interface between IrCOMM and IrLMP
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Jun 6 20:48:27 1999
- * Modified at: Sat Oct 30 12:55:24 1999
+ * Modified at: Sun Dec 12 13:44:17 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: Previous IrLPT work by Thomas Davis
*
@@ -60,7 +60,7 @@ int ircomm_open_lsap(struct ircomm_cb *self)
notify.instance = self;
strncpy(notify.name, "IrCOMM", NOTIFY_MAX_NAME);
- self->lsap = irlmp_open_lsap(LSAP_ANY, &notify);
+ self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0);
if (!self->lsap) {
IRDA_DEBUG(0,__FUNCTION__"failed to allocate tsap\n");
return -1;
@@ -128,11 +128,8 @@ int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
if (!skb)
return -ENOMEM;
- /*
- * Reserve space for MUX and LAP header
- */
- skb_reserve(skb, LMP_MAX_HEADER);
-
+ /* Reserve space for MUX and LAP header */
+ skb_reserve(skb, LMP_MAX_HEADER);
userdata = skb;
}
ret = irlmp_disconnect_request(self->lsap, userdata);
@@ -323,5 +320,3 @@ void ircomm_lmp_disconnect_indication(void *instance, void *sap,
ircomm_do_event(self, IRCOMM_LMP_DISCONNECT_INDICATION, skb, &info);
}
-
-
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 382027892..3c9ff35da 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Jun 7 10:25:11 1999
- * Modified at: Sat Oct 30 13:05:42 1999
+ * Modified at: Tue Dec 14 15:26:30 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
@@ -28,6 +28,9 @@
*
********************************************************************/
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
#include <net/irda/irda.h>
#include <net/irda/parameters.h>
@@ -67,7 +70,7 @@ static pi_minor_info_t pi_minor_call_table_non_raw[] = {
static pi_minor_info_t pi_minor_call_table_9_wire[] = {
{ ircomm_param_dte, PV_INT_8_BITS },
{ ircomm_param_dce, PV_INT_8_BITS },
- { ircomm_param_poll, PV_INT_8_BITS },
+ { ircomm_param_poll, PV_NO_VALUE },
};
static pi_major_info_t pi_major_call_table[] = {
@@ -102,6 +105,7 @@ int ircomm_param_flush(struct ircomm_tty_cb *self)
*/
int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
{
+ struct tty_struct *tty;
unsigned long flags;
struct sk_buff *skb;
int count;
@@ -111,10 +115,9 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
- if (self->state != IRCOMM_TTY_READY) {
- IRDA_DEBUG(2, __FUNCTION__ "(), not ready yet!\n");
+ tty = self->tty;
+ if (!tty)
return 0;
- }
/* Make sure we don't send parameters for raw mode */
if (self->service_type == IRCOMM_3_WIRE_RAW)
@@ -132,8 +135,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
}
skb_reserve(skb, self->max_header_size);
-
- self->ctrl_skb = skb;
+ self->ctrl_skb = skb;
}
/*
* Inserting is a little bit tricky since we don't know how much
@@ -142,17 +144,22 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb),
&ircomm_param_info);
if (count < 0) {
- IRDA_DEBUG(0, __FUNCTION__ "(), no room for parameter!\n");
+ WARNING(__FUNCTION__ "(), no room for parameter!\n");
restore_flags(flags);
return -1;
}
skb_put(skb, count);
+
restore_flags(flags);
+ IRDA_DEBUG(2, __FUNCTION__ "(), skb->len=%d\n", skb->len);
+
if (flush) {
- ircomm_control_request(self->ircomm, skb);
- self->ctrl_skb = NULL;
+ /* ircomm_tty_do_softint will take care of the rest */
+ queue_task(&self->tqueue, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
}
+
return count;
}
@@ -172,38 +179,45 @@ static int ircomm_param_service_type(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get) {
- param->pv.b = self->session.service_type;
+ param->pv.b = self->settings.service_type;
return 0;
}
+ /* Find all common service types */
+ service_type &= self->service_type;
+ if (!service_type) {
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), No common service type to use!\n");
+ return -1;
+ }
+ IRDA_DEBUG(0, __FUNCTION__ "(), services in common=%02x\n",
+ service_type);
+
/*
* Now choose a preferred service type of those available
*/
- if (service_type & IRCOMM_3_WIRE_RAW) {
- IRDA_DEBUG(2, __FUNCTION__ "(), peer supports 3 wire raw\n");
- self->session.service_type |= IRCOMM_3_WIRE_RAW;
- }
- if (service_type & IRCOMM_3_WIRE) {
- IRDA_DEBUG(2, __FUNCTION__ "(), peer supports 3 wire\n");
- self->session.service_type |= IRCOMM_3_WIRE;
- }
- if (service_type & IRCOMM_9_WIRE) {
- IRDA_DEBUG(2, __FUNCTION__ "(), peer supports 9 wire\n");
- self->session.service_type |= IRCOMM_9_WIRE;
- }
- if (service_type & IRCOMM_CENTRONICS) {
- IRDA_DEBUG(2, __FUNCTION__ "(), peer supports Centronics\n");
- self->session.service_type |= IRCOMM_CENTRONICS;
- }
-
- self->session.service_type &= self->service_type;
- if (!self->session.service_type) {
- IRDA_DEBUG(2, __FUNCTION__"(), No common service type to use!\n");
- return -1;
+ if (service_type & IRCOMM_CENTRONICS)
+ self->settings.service_type = IRCOMM_CENTRONICS;
+ else if (service_type & IRCOMM_9_WIRE)
+ self->settings.service_type = IRCOMM_9_WIRE;
+ else if (service_type & IRCOMM_3_WIRE)
+ self->settings.service_type = IRCOMM_3_WIRE;
+ else if (service_type & IRCOMM_3_WIRE_RAW)
+ self->settings.service_type = IRCOMM_3_WIRE_RAW;
+
+ IRDA_DEBUG(0, __FUNCTION__ "(), resulting service type=0x%02x\n",
+ self->settings.service_type);
+
+ /*
+ * Now the line is ready for some communication. Check if we are a
+ * server, and send over some initial parameters
+ */
+ if (!self->client && (self->settings.service_type != IRCOMM_3_WIRE_RAW))
+ {
+ /* Init connection */
+ ircomm_tty_send_initial_parameters(self);
+ ircomm_tty_link_established(self);
}
-
- IRDA_DEBUG(2, __FUNCTION__ "(), resulting service type=0x%02x\n",
- self->session.service_type);
return 0;
}
@@ -225,10 +239,10 @@ static int ircomm_param_port_type(void *instance, param_t *param, int get)
if (get)
param->pv.b = IRCOMM_SERIAL;
else {
- self->session.port_type = param->pv.b;
+ self->settings.port_type = param->pv.b;
IRDA_DEBUG(0, __FUNCTION__ "(), port type=%d\n",
- self->session.port_type);
+ self->settings.port_type);
}
return 0;
}
@@ -250,7 +264,7 @@ static int ircomm_param_port_name(void *instance, param_t *param, int get)
IRDA_DEBUG(0, __FUNCTION__ "(), not imp!\n");
} else {
IRDA_DEBUG(0, __FUNCTION__ "(), port-name=%s\n", param->pv.c);
- strncpy(self->session.port_name, param->pv.c, 32);
+ strncpy(self->settings.port_name, param->pv.c, 32);
}
return 0;
@@ -259,7 +273,7 @@ static int ircomm_param_port_name(void *instance, param_t *param, int get)
/*
* Function ircomm_param_data_rate (self, param)
*
- * Exchange data rate to be used in this session
+ * Exchange data rate to be used in this settings
*
*/
static int ircomm_param_data_rate(void *instance, param_t *param, int get)
@@ -270,9 +284,9 @@ static int ircomm_param_data_rate(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get)
- param->pv.i = self->session.data_rate;
+ param->pv.i = self->settings.data_rate;
else
- self->session.data_rate = param->pv.i;
+ self->settings.data_rate = param->pv.i;
IRDA_DEBUG(2, __FUNCTION__ "(), data rate = %d\n", param->pv.i);
@@ -282,7 +296,7 @@ static int ircomm_param_data_rate(void *instance, param_t *param, int get)
/*
* Function ircomm_param_data_format (self, param)
*
- * Exchange data format to be used in this session
+ * Exchange data format to be used in this settings
*
*/
static int ircomm_param_data_format(void *instance, param_t *param, int get)
@@ -293,9 +307,9 @@ static int ircomm_param_data_format(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get)
- param->pv.b = self->session.data_format;
+ param->pv.b = self->settings.data_format;
else
- self->session.data_format = param->pv.b;
+ self->settings.data_format = param->pv.b;
return 0;
}
@@ -303,7 +317,7 @@ static int ircomm_param_data_format(void *instance, param_t *param, int get)
/*
* Function ircomm_param_flow_control (self, param)
*
- * Exchange flow control settings to be used in this session
+ * Exchange flow control settings to be used in this settings
*
*/
static int ircomm_param_flow_control(void *instance, param_t *param, int get)
@@ -314,9 +328,9 @@ static int ircomm_param_flow_control(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get)
- param->pv.b = self->session.flow_control;
+ param->pv.b = self->settings.flow_control;
else
- self->session.flow_control = param->pv.b;
+ self->settings.flow_control = param->pv.b;
IRDA_DEBUG(1, __FUNCTION__ "(), flow control = 0x%02x\n", param->pv.b);
@@ -337,14 +351,14 @@ static int ircomm_param_xon_xoff(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get) {
- param->pv.s = self->session.xonxoff[0];
- param->pv.s |= self->session.xonxoff[1] << 8;
+ param->pv.s = self->settings.xonxoff[0];
+ param->pv.s |= self->settings.xonxoff[1] << 8;
} else {
- self->session.xonxoff[0] = param->pv.s & 0xff;
- self->session.xonxoff[1] = param->pv.s >> 8;
+ self->settings.xonxoff[0] = param->pv.s & 0xff;
+ self->settings.xonxoff[1] = param->pv.s >> 8;
}
- IRDA_DEBUG(0, __FUNCTION__ "(), XON/XOFF = 0x%02x\n,0x%02x",
+ IRDA_DEBUG(0, __FUNCTION__ "(), XON/XOFF = 0x%02x,0x%02x\n",
param->pv.s & 0xff, param->pv.s >> 8);
return 0;
@@ -364,11 +378,11 @@ static int ircomm_param_enq_ack(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get) {
- param->pv.s = self->session.enqack[0];
- param->pv.s |= self->session.enqack[1] << 8;
+ param->pv.s = self->settings.enqack[0];
+ param->pv.s |= self->settings.enqack[1] << 8;
} else {
- self->session.enqack[0] = param->pv.s & 0xff;
- self->session.enqack[1] = param->pv.s >> 8;
+ self->settings.enqack[0] = param->pv.s & 0xff;
+ self->settings.enqack[1] = param->pv.s >> 8;
}
IRDA_DEBUG(0, __FUNCTION__ "(), ENQ/ACK = 0x%02x,0x%02x\n",
@@ -405,29 +419,29 @@ static int ircomm_param_dte(void *instance, param_t *param, int get)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get)
- param->pv.b = self->session.dte;
+ param->pv.b = self->settings.dte;
else {
dte = param->pv.b;
if (dte & IRCOMM_DELTA_DTR)
- self->session.dce |= (IRCOMM_DELTA_DSR|
+ self->settings.dce |= (IRCOMM_DELTA_DSR|
IRCOMM_DELTA_RI |
IRCOMM_DELTA_CD);
if (dte & IRCOMM_DTR)
- self->session.dce |= (IRCOMM_DSR|
+ self->settings.dce |= (IRCOMM_DSR|
IRCOMM_RI |
IRCOMM_CD);
if (dte & IRCOMM_DELTA_RTS)
- self->session.dce |= IRCOMM_DELTA_CTS;
+ self->settings.dce |= IRCOMM_DELTA_CTS;
if (dte & IRCOMM_RTS)
- self->session.dce |= IRCOMM_CTS;
+ self->settings.dce |= IRCOMM_CTS;
/* Take appropriate actions */
ircomm_tty_check_modem_status(self);
/* Null modem cable emulator */
- self->session.null_modem = TRUE;
+ self->settings.null_modem = TRUE;
}
return 0;
@@ -451,7 +465,7 @@ static int ircomm_param_dce(void *instance, param_t *param, int get)
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
- self->session.dce = dce;
+ self->settings.dce = dce;
/* Check if any of the settings have changed */
if (dce & 0x0f) {
@@ -483,7 +497,6 @@ static int ircomm_param_poll(void *instance, param_t *param, int get)
/* Respond with DTE line settings */
ircomm_param_request(self, IRCOMM_DTE, TRUE);
}
-
return 0;
}
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index 211bc14a8..642b8416a 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: ircomm_ttp.c
- * Version:
+ * Version: 1.0
* Description: Interface between IrCOMM and IrTTP
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Jun 6 20:48:27 1999
- * Modified at: Sat Oct 30 12:55:36 1999
+ * Modified at: Mon Dec 13 11:35:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
@@ -64,7 +64,7 @@ int ircomm_open_tsap(struct ircomm_cb *self)
self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
&notify);
if (!self->tsap) {
- IRDA_DEBUG(0,__FUNCTION__"failed to allocate tsap\n");
+ IRDA_DEBUG(0, __FUNCTION__"failed to allocate tsap\n");
return -1;
}
self->slsap_sel = self->tsap->stsap_sel;
@@ -95,8 +95,8 @@ int ircomm_ttp_connect_request(struct ircomm_cb *self,
IRDA_DEBUG(4, __FUNCTION__ "()\n");
ret = irttp_connect_request(self->tsap, info->dlsap_sel,
- info->saddr, info->daddr,
- NULL, SAR_DISABLE, userdata);
+ info->saddr, info->daddr, NULL,
+ TTP_SAR_DISABLE, userdata);
return ret;
}
@@ -112,7 +112,7 @@ int ircomm_ttp_connect_response(struct ircomm_cb *self, struct sk_buff *skb)
IRDA_DEBUG(4, __FUNCTION__"()\n");
- ret = irttp_connect_response(self->tsap, SAR_DISABLE, skb);
+ ret = irttp_connect_response(self->tsap, TTP_SAR_DISABLE, skb);
return ret;
}
@@ -139,7 +139,7 @@ int ircomm_ttp_data_request(struct ircomm_cb *self, struct sk_buff *skb,
* Insert clen field, currently we either send data only, or control
* only frames, to make things easier and avoid queueing
*/
- ASSERT(skb_headroom(skb) >= 1, return -1;);
+ ASSERT(skb_headroom(skb) >= IRCOMM_HEADER_SIZE, return -1;);
skb_push(skb, IRCOMM_HEADER_SIZE);
skb->data[0] = clen;
@@ -191,12 +191,13 @@ void ircomm_ttp_connect_confirm(void *instance, void *sap,
ASSERT(skb != NULL, return;);
ASSERT(qos != NULL, return;);
- if (max_sdu_size != SAR_DISABLE) {
+ if (max_sdu_size != TTP_SAR_DISABLE) {
ERROR(__FUNCTION__ "(), SAR not allowed for IrCOMM!\n");
+ dev_kfree_skb(skb);
return;
}
- info.max_data_size = irttp_get_max_seq_size(self->tsap)
+ info.max_data_size = irttp_get_max_seg_size(self->tsap)
- IRCOMM_HEADER_SIZE;
info.max_header_size = max_header_size + IRCOMM_HEADER_SIZE;
info.qos = qos;
@@ -227,12 +228,13 @@ void ircomm_ttp_connect_indication(void *instance, void *sap,
ASSERT(skb != NULL, return;);
ASSERT(qos != NULL, return;);
- if (max_sdu_size != SAR_DISABLE) {
+ if (max_sdu_size != TTP_SAR_DISABLE) {
ERROR(__FUNCTION__ "(), SAR not allowed for IrCOMM!\n");
+ dev_kfree_skb(skb);
return;
}
- info.max_data_size = irttp_get_max_seq_size(self->tsap)
+ info.max_data_size = irttp_get_max_seg_size(self->tsap)
- IRCOMM_HEADER_SIZE;
info.max_header_size = max_header_size + IRCOMM_HEADER_SIZE;
info.qos = qos;
@@ -270,7 +272,7 @@ void ircomm_ttp_disconnect_indication(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *) instance;
struct ircomm_info info;
- IRDA_DEBUG(4, __FUNCTION__"()\n");
+ IRDA_DEBUG(2, __FUNCTION__"()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_MAGIC, return;);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index e79b10bcd..b53f8a8cf 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -6,11 +6,11 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Jun 6 21:00:56 1999
- * Modified at: Sat Oct 30 12:49:26 1999
+ * Modified at: Tue Jan 4 14:12:06 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: serial.c and previous IrCOMM work by Takahide Higuchi
*
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -61,6 +61,7 @@ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static void ircomm_tty_hangup(struct tty_struct *tty);
static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
static int ircomm_tty_data_indication(void *instance, void *sap,
struct sk_buff *skb);
@@ -134,6 +135,20 @@ int __init ircomm_tty_init(void)
return 0;
}
+#ifdef MODULE
+static void __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
+{
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ ircomm_tty_shutdown(self);
+
+ self->magic = 0;
+ kfree(self);
+}
+
/*
* Function ircomm_tty_cleanup ()
*
@@ -144,7 +159,7 @@ void ircomm_tty_cleanup(void)
{
int ret;
- IRDA_DEBUG(4, __FUNCTION__"()\n");
+ IRDA_DEBUG(4, __FUNCTION__"()\n");
ret = tty_unregister_driver(&driver);
if (ret) {
@@ -152,8 +167,9 @@ void ircomm_tty_cleanup(void)
return;
}
- hashbin_delete(ircomm_tty, (FREE_FUNC) kfree);
+ hashbin_delete(ircomm_tty, (FREE_FUNC) __ircomm_tty_cleanup);
}
+#endif /* MODULE */
/*
* Function ircomm_startup (self)
@@ -166,6 +182,8 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
notify_t notify;
int ret;
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -189,9 +207,10 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
strncpy(notify.name, "ircomm_tty", NOTIFY_MAX_NAME);
notify.instance = self;
- if (!self->ircomm)
+ if (!self->ircomm) {
self->ircomm = ircomm_open(&notify, self->service_type,
self->line);
+ }
if (!self->ircomm)
return -ENODEV;
@@ -224,25 +243,23 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
unsigned long flags;
struct tty_struct *tty;
- tty = self->tty;
-
IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ tty = self->tty;
+
if (tty->driver.subtype == SERIAL_TYPE_CALLOUT) {
/* this is a callout device */
/* just verify that normal device is not in use */
if (self->flags & ASYNC_NORMAL_ACTIVE)
return -EBUSY;
-#if 0
if ((self->flags & ASYNC_CALLOUT_ACTIVE) &&
(self->flags & ASYNC_SESSION_LOCKOUT) &&
(self->session != current->session))
- return -EBUSY;
-#endif
+ return -EBUSY;
if ((self->flags & ASYNC_CALLOUT_ACTIVE) &&
(self->flags & ASYNC_PGRP_LOCKOUT) &&
(self->pgrp != current->pgrp))
- return -EBUSY;
+ return -EBUSY;
self->flags |= ASYNC_CALLOUT_ACTIVE;
return 0;
}
@@ -263,13 +280,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
}
if (self->flags & ASYNC_CALLOUT_ACTIVE) {
- if (self->normal_termios.c_cflag & CLOCAL)
+ if (self->normal_termios.c_cflag & CLOCAL) {
IRDA_DEBUG(1, __FUNCTION__ "(), doing CLOCAL!\n");
do_clocal = 1;
+ }
} else {
- if (tty->termios->c_cflag & CLOCAL)
+ if (tty->termios->c_cflag & CLOCAL) {
IRDA_DEBUG(1, __FUNCTION__ "(), doing CLOCAL!\n");
do_clocal = 1;
+ }
}
/* Wait for carrier detect and the line to become
@@ -297,13 +316,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
if (!(self->flags & ASYNC_CALLOUT_ACTIVE) &&
(tty->termios->c_cflag & CBAUD)) {
save_flags(flags); cli();
- self->session.dte |= IRCOMM_RTS + IRCOMM_DTR;
+ self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR;
ircomm_param_request(self, IRCOMM_DTE, TRUE);
restore_flags(flags);
}
- set_current_state(TASK_INTERRUPTIBLE);
+ current->state = TASK_INTERRUPTIBLE;
if (tty_hung_up_p(filp) || !(self->flags & ASYNC_INITIALIZED)){
retval = (self->flags & ASYNC_HUP_NOTIFY) ?
@@ -318,7 +337,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
*/
if (!(self->flags & ASYNC_CALLOUT_ACTIVE) &&
!(self->flags & ASYNC_CLOSING) &&
- (do_clocal || (self->session.dce & IRCOMM_CD)) &&
+ (do_clocal || (self->settings.dce & IRCOMM_CD)) &&
self->state == IRCOMM_TTY_READY)
{
break;
@@ -386,6 +405,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
memset(self, 0, sizeof(struct ircomm_tty_cb));
self->magic = IRCOMM_TTY_MAGIC;
+ self->flow = FLOW_STOP;
+
self->line = line;
self->tqueue.routine = ircomm_tty_do_softint;
self->tqueue.data = self;
@@ -397,7 +418,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
/* Init some important stuff */
init_timer(&self->watchdog_timer);
init_waitqueue_head(&self->open_wait);
- init_waitqueue_head(&self->close_wait);
+ init_waitqueue_head(&self->close_wait);
/*
* Force TTY into raw mode by default which is usually what
@@ -440,10 +461,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
/* Check if this is a "normal" ircomm device, or an irlpt device */
if (line < 0x10) {
self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE;
+ self->settings.service_type = IRCOMM_9_WIRE; /* Default */
IRDA_DEBUG(2, __FUNCTION__ "(), IrCOMM device\n");
} else {
IRDA_DEBUG(2, __FUNCTION__ "(), IrLPT device\n");
self->service_type = IRCOMM_3_WIRE_RAW;
+ self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */
}
ret = ircomm_tty_startup(self);
@@ -453,15 +476,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
ret = ircomm_tty_block_til_ready(self, filp);
if (ret) {
/* MOD_DEC_USE_COUNT; "info->tty" will cause this? */
- IRDA_DEBUG(0, __FUNCTION__
+ IRDA_DEBUG(2, __FUNCTION__
"(), returning after block_til_ready with %d\n",
ret);
return ret;
}
-#if 0
+
self->session = current->session;
-#endif
self->pgrp = current->pgrp;
return 0;
@@ -478,14 +500,11 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned long flags;
- IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
if (!tty)
return;
- ASSERT(self != NULL, return;);
- ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
-
save_flags(flags);
cli();
@@ -493,10 +512,27 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
MOD_DEC_USE_COUNT;
restore_flags(flags);
- IRDA_DEBUG(2, __FUNCTION__ "(), returning 1\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), returning 1\n");
return;
}
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ if ((tty->count == 1) && (self->open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ IRDA_DEBUG(0, __FUNCTION__ "(), bad serial port count; "
+ "tty->count is 1, state->count is %d\n",
+ self->open_count);
+ self->open_count = 1;
+ }
+
if (--self->open_count < 0) {
ERROR(__FUNCTION__
"(), bad serial port count for ttys%d: %d\n",
@@ -507,7 +543,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
MOD_DEC_USE_COUNT;
restore_flags(flags);
- IRDA_DEBUG(2, __FUNCTION__ "(), open count > 0\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), open count > 0\n");
return;
}
self->flags |= ASYNC_CLOSING;
@@ -520,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, self->closing_wait);
- self->flags &= ~ASYNC_INITIALIZED;
+ ircomm_tty_shutdown(self);
if (tty->driver.flush_buffer)
tty->driver.flush_buffer(tty);
@@ -543,34 +579,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&self->close_wait);
MOD_DEC_USE_COUNT;
-
- del_timer(&self->watchdog_timer);
-
- /* Free parameter buffer */
- if (self->ctrl_skb) {
- dev_kfree_skb(self->ctrl_skb);
- self->ctrl_skb = NULL;
- }
-
- /* Free transmit buffer */
- if (self->tx_skb) {
- dev_kfree_skb(self->tx_skb);
- self->tx_skb = NULL;
- }
-
restore_flags(flags);
-
- ircomm_tty_detach_cable(self);
- ircomm_close(self->ircomm);
- self->ircomm = NULL;
-
-#if IRCOMM_TTY_CLOSE_WILL_DEALLOC
- self->magic = 0;
-
- hashbin_remove(ircomm_tty, self->line, NULL);
-
- kfree(self);
-#endif
}
/*
@@ -606,7 +615,9 @@ static void ircomm_tty_do_softint(void *private_)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
struct tty_struct *tty;
unsigned long flags;
- struct sk_buff *skb;
+ struct sk_buff *skb, *ctrl_skb;
+
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
if (!self || self->magic != IRCOMM_TTY_MAGIC)
return;
@@ -615,6 +626,19 @@ static void ircomm_tty_do_softint(void *private_)
if (!tty)
return;
+ /* Unlink control buffer */
+ save_flags(flags);
+ cli();
+
+ ctrl_skb = self->ctrl_skb;
+ self->ctrl_skb = NULL;
+
+ restore_flags(flags);
+
+ /* Flush control buffer if any */
+ if (ctrl_skb && self->flow == FLOW_START)
+ ircomm_control_request(self->ircomm, ctrl_skb);
+
if (tty->hw_stopped)
return;
@@ -625,7 +649,7 @@ static void ircomm_tty_do_softint(void *private_)
skb = self->tx_skb;
self->tx_skb = NULL;
- restore_flags(flags);
+ restore_flags(flags);
/* Flush transmit buffer if any */
if (skb)
@@ -658,7 +682,7 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
int len = 0;
int size;
- IRDA_DEBUG(3, __FUNCTION__ "(), count=%d, hw_stopped=%d\n", count,
+ IRDA_DEBUG(2, __FUNCTION__ "(), count=%d, hw_stopped=%d\n", count,
tty->hw_stopped);
ASSERT(self != NULL, return -1;);
@@ -788,7 +812,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned long orig_jiffies, poll_time;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -831,8 +855,8 @@ static void ircomm_tty_throttle(struct tty_struct *tty)
/* Hardware flow control? */
if (tty->termios->c_cflag & CRTSCTS) {
- self->session.dte &= ~IRCOMM_RTS;
- self->session.dte |= IRCOMM_DELTA_RTS;
+ self->settings.dte &= ~IRCOMM_RTS;
+ self->settings.dte |= IRCOMM_DELTA_RTS;
ircomm_param_request(self, IRCOMM_DTE, TRUE);
}
@@ -863,7 +887,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
/* Using hardware flow control? */
if (tty->termios->c_cflag & CRTSCTS) {
- self->session.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
+ self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
ircomm_param_request(self, IRCOMM_DTE, TRUE);
IRDA_DEBUG(1, __FUNCTION__"(), FLOW_START\n");
@@ -897,6 +921,46 @@ static int ircomm_tty_chars_in_buffer(struct tty_struct *tty)
return len;
}
+static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
+{
+ unsigned long flags;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
+ if (!(self->flags & ASYNC_INITIALIZED))
+ return;
+
+ save_flags(flags);
+ cli();
+
+ del_timer(&self->watchdog_timer);
+
+ /* Free parameter buffer */
+ if (self->ctrl_skb) {
+ dev_kfree_skb(self->ctrl_skb);
+ self->ctrl_skb = NULL;
+ }
+
+ /* Free transmit buffer */
+ if (self->tx_skb) {
+ dev_kfree_skb(self->tx_skb);
+ self->tx_skb = NULL;
+ }
+
+ ircomm_tty_detach_cable(self);
+
+ if (self->ircomm) {
+ ircomm_close(self->ircomm);
+ self->ircomm = NULL;
+ }
+ self->flags &= ~ASYNC_INITIALIZED;
+
+ restore_flags(flags);
+}
+
/*
* Function ircomm_tty_hangup (tty)
*
@@ -908,7 +972,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
- IRDA_DEBUG(2, __FUNCTION__"()\n");
+ IRDA_DEBUG(0, __FUNCTION__"()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -917,17 +981,11 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
return;
/* ircomm_tty_flush_buffer(tty); */
- ircomm_tty_detach_cable(self);
- ircomm_close(self->ircomm);
+ ircomm_tty_shutdown(self);
- self->ircomm = NULL;
-
- self->flags &= ~ASYNC_INITIALIZED;
-
- self->open_count = 0;
self->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE);
self->tty = 0;
-
+ self->open_count = 0;
wake_up_interruptible(&self->open_wait);
}
@@ -983,19 +1041,21 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
struct tty_struct *tty;
int status;
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
tty = self->tty;
- status = self->session.dce;
+ status = self->settings.dce;
if (status & IRCOMM_DCE_DELTA_ANY) {
/*wake_up_interruptible(&self->delta_msr_wait);*/
}
if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) {
IRDA_DEBUG(2, __FUNCTION__
- "(), ttys%d CD now %s...\n", self->line,
+ "(), ircomm%d CD now %s...\n", self->line,
(status & IRCOMM_CD) ? "on" : "off");
if (status & IRCOMM_CD) {
@@ -1003,27 +1063,30 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
} else if (!((self->flags & ASYNC_CALLOUT_ACTIVE) &&
(self->flags & ASYNC_CALLOUT_NOHUP)))
{
- IRDA_DEBUG(2, __FUNCTION__ "(), Doing serial hangup..\n");
-
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), Doing serial hangup..\n");
if (tty)
tty_hangup(tty);
+
+ /* Hangup will remote the tty, so better break out */
+ return;
}
}
if (self->flags & ASYNC_CTS_FLOW) {
if (tty->hw_stopped) {
if (status & IRCOMM_CTS) {
- IRDA_DEBUG(2, __FUNCTION__ "(), CTS tx start...\n");
-
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), CTS tx start...\n");
tty->hw_stopped = 0;
-
+
queue_task(&self->tqueue, &tq_immediate);
mark_bh(IMMEDIATE_BH);
return;
}
} else {
if (!(status & IRCOMM_CTS)) {
- IRDA_DEBUG(2, __FUNCTION__ "(), CTS tx stop...\n");
-
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), CTS tx stop...\n");
tty->hw_stopped = 1;
}
}
@@ -1047,8 +1110,26 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
ASSERT(skb != NULL, return -1;);
- if (!self->tty)
+ if (!self->tty) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), no tty!\n");
+ dev_kfree_skb(skb);
return 0;
+ }
+
+ /*
+ * If we receive data when hardware is stopped then something is wrong.
+ * We try to poll the peers line settings to check if we are up todate.
+ * Devices like WinCE can do this, and since they don't send any
+ * params, we can just as well declare the hardware for running.
+ */
+ if (self->tty->hw_stopped && (self->flow == FLOW_START)) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), polling for line settings!\n");
+ ircomm_param_request(self, IRCOMM_POLL, TRUE);
+
+ /* We can just as well declare the hardware for running */
+ ircomm_tty_send_initial_parameters(self);
+ ircomm_tty_link_established(self);
+ }
/*
* Just give it over to the line discipline. There is no need to
@@ -1113,15 +1194,15 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
/* ircomm_tty_do_softint will take care of the rest */
queue_task(&self->tqueue, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ mark_bh(IMMEDIATE_BH);
break;
- default:
- /* If we get here, something is very wrong, better stop */
+ default: /* If we get here, something is very wrong, better stop */
case FLOW_STOP:
IRDA_DEBUG(2, __FUNCTION__ "(), hw stopped!\n");
tty->hw_stopped = 1;
break;
}
+ self->flow = cmd;
}
static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
@@ -1141,57 +1222,57 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
ret += sprintf(buf+ret, "No common service type!\n");
ret += sprintf(buf+ret, "\n");
- ret += sprintf(buf+ret, "Port name: %s\n", self->session.port_name);
+ ret += sprintf(buf+ret, "Port name: %s\n", self->settings.port_name);
ret += sprintf(buf+ret, "DTE status: ");
- if (self->session.dte & IRCOMM_RTS)
+ if (self->settings.dte & IRCOMM_RTS)
ret += sprintf(buf+ret, "RTS|");
- if (self->session.dte & IRCOMM_DTR)
+ if (self->settings.dte & IRCOMM_DTR)
ret += sprintf(buf+ret, "DTR|");
- if (self->session.dte)
+ if (self->settings.dte)
ret--; /* remove the last | */
ret += sprintf(buf+ret, "\n");
ret += sprintf(buf+ret, "DCE status: ");
- if (self->session.dce & IRCOMM_CTS)
+ if (self->settings.dce & IRCOMM_CTS)
ret += sprintf(buf+ret, "CTS|");
- if (self->session.dce & IRCOMM_DSR)
+ if (self->settings.dce & IRCOMM_DSR)
ret += sprintf(buf+ret, "DSR|");
- if (self->session.dce & IRCOMM_CD)
+ if (self->settings.dce & IRCOMM_CD)
ret += sprintf(buf+ret, "CD|");
- if (self->session.dce & IRCOMM_RI)
+ if (self->settings.dce & IRCOMM_RI)
ret += sprintf(buf+ret, "RI|");
- if (self->session.dce)
+ if (self->settings.dce)
ret--; /* remove the last | */
ret += sprintf(buf+ret, "\n");
ret += sprintf(buf+ret, "Configuration: ");
- if (!self->session.null_modem)
+ if (!self->settings.null_modem)
ret += sprintf(buf+ret, "DTE <-> DCE\n");
else
ret += sprintf(buf+ret,
"DTE <-> DTE (null modem emulation)\n");
- ret += sprintf(buf+ret, "Data rate: %d\n", self->session.data_rate);
+ ret += sprintf(buf+ret, "Data rate: %d\n", self->settings.data_rate);
ret += sprintf(buf+ret, "Flow control: ");
- if (self->session.flow_control & IRCOMM_XON_XOFF_IN)
+ if (self->settings.flow_control & IRCOMM_XON_XOFF_IN)
ret += sprintf(buf+ret, "XON_XOFF_IN|");
- if (self->session.flow_control & IRCOMM_XON_XOFF_OUT)
+ if (self->settings.flow_control & IRCOMM_XON_XOFF_OUT)
ret += sprintf(buf+ret, "XON_XOFF_OUT|");
- if (self->session.flow_control & IRCOMM_RTS_CTS_IN)
+ if (self->settings.flow_control & IRCOMM_RTS_CTS_IN)
ret += sprintf(buf+ret, "RTS_CTS_IN|");
- if (self->session.flow_control & IRCOMM_RTS_CTS_OUT)
+ if (self->settings.flow_control & IRCOMM_RTS_CTS_OUT)
ret += sprintf(buf+ret, "RTS_CTS_OUT|");
- if (self->session.flow_control & IRCOMM_DSR_DTR_IN)
+ if (self->settings.flow_control & IRCOMM_DSR_DTR_IN)
ret += sprintf(buf+ret, "DSR_DTR_IN|");
- if (self->session.flow_control & IRCOMM_DSR_DTR_OUT)
+ if (self->settings.flow_control & IRCOMM_DSR_DTR_OUT)
ret += sprintf(buf+ret, "DSR_DTR_OUT|");
- if (self->session.flow_control & IRCOMM_ENQ_ACK_IN)
+ if (self->settings.flow_control & IRCOMM_ENQ_ACK_IN)
ret += sprintf(buf+ret, "ENQ_ACK_IN|");
- if (self->session.flow_control & IRCOMM_ENQ_ACK_OUT)
+ if (self->settings.flow_control & IRCOMM_ENQ_ACK_OUT)
ret += sprintf(buf+ret, "ENQ_ACK_OUT|");
- if (self->session.flow_control)
+ if (self->settings.flow_control)
ret--; /* remove the last | */
ret += sprintf(buf+ret, "\n");
@@ -1214,7 +1295,12 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
ret--; /* remove the last | */
ret += sprintf(buf+ret, "\n");
+ ret += sprintf(buf+ret, "Role: %s\n", self->client ?
+ "client" : "server");
ret += sprintf(buf+ret, "Open count: %d\n", self->open_count);
+ ret += sprintf(buf+ret, "Max data size: %d\n", self->max_data_size);
+ ret += sprintf(buf+ret, "Max header size: %d\n", self->max_header_size);
+
if (self->tty)
ret += sprintf(buf+ret, "Hardware: %s\n",
self->tty->hw_stopped ? "Stopped" : "Running");
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index db8f382dc..94c9fee09 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Jun 5 17:42:00 1999
- * Modified at: Sun Oct 31 22:19:37 1999
+ * Modified at: Tue Jan 4 14:20:49 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -124,7 +124,7 @@ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
*/
int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
{
- IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -159,17 +159,21 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
*/
void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
{
- IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+ del_timer(&self->watchdog_timer);
+
/* Remove IrCOMM hint bits */
irlmp_unregister_client(self->ckey);
irlmp_unregister_service(self->skey);
- if (self->iriap)
+ if (self->iriap) {
iriap_close(self->iriap);
+ self->iriap = NULL;
+ }
/* Remove LM-IAS object */
if (self->obj) {
@@ -183,7 +187,7 @@ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
self->daddr = self->saddr = 0;
self->dlsap_sel = self->slsap_sel = 0;
- memset(&self->session, 0, sizeof(struct ircomm_params));
+ memset(&self->settings, 0, sizeof(struct ircomm_params));
}
/*
@@ -197,6 +201,8 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
__u8 oct_seq[6];
__u16 hints;
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -237,7 +243,7 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
* Send initial parameters to the remote IrCOMM device. These parameters
* must be sent before any data.
*/
-static int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
+int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
{
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -250,27 +256,29 @@ static int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
* haven't set them already
*/
IRDA_DEBUG(2, __FUNCTION__ "(), data-rate = %d\n",
- self->session.data_rate);
- if (!self->session.data_rate)
- self->session.data_rate = 9600;
+ self->settings.data_rate);
+ if (!self->settings.data_rate)
+ self->settings.data_rate = 9600;
IRDA_DEBUG(2, __FUNCTION__ "(), data-format = %d\n",
- self->session.data_format);
- if (!self->session.data_format)
- self->session.data_format = IRCOMM_WSIZE_8; /* 8N1 */
+ self->settings.data_format);
+ if (!self->settings.data_format)
+ self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
IRDA_DEBUG(2, __FUNCTION__ "(), flow-control = %d\n",
- self->session.flow_control);
- /*self->session.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
+ self->settings.flow_control);
+ /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
/* Do not set delta values for the initial parameters */
- self->session.dte = IRCOMM_DTR | IRCOMM_RTS;
+ self->settings.dte = IRCOMM_DTR | IRCOMM_RTS;
- ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
+ /* Only send service type parameter when we are the client */
+ if (self->client)
+ ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
/* For a 3 wire service, we just flush the last parameter and return */
- if (self->session.service_type == IRCOMM_3_WIRE) {
+ if (self->settings.service_type == IRCOMM_3_WIRE) {
ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
return 0;
}
@@ -335,6 +343,12 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap,
if (!self->tty)
return;
+ /* This will stop control data transfers */
+ self->flow = FLOW_STOP;
+
+ /* Stop data transfers */
+ self->tty->hw_stopped = 1;
+
ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL,
NULL);
}
@@ -395,6 +409,7 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
IRDA_DEBUG(0, __FUNCTION__"(), got unknown type!\n");
break;
}
+ irias_delete_value(value);
}
/*
@@ -416,10 +431,14 @@ void ircomm_tty_connect_confirm(void *instance, void *sap,
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+ self->client = TRUE;
self->max_data_size = max_data_size;
self->max_header_size = max_header_size;
+ self->flow = FLOW_START;
ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_CONFIRM, NULL, NULL);
+
+ dev_kfree_skb(skb);
}
/*
@@ -443,8 +462,10 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+ self->client = FALSE;
self->max_data_size = max_data_size;
self->max_header_size = max_header_size;
+ self->flow = FLOW_START;
clen = skb->data[0];
if (clen)
@@ -453,6 +474,8 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
&ircomm_param_info);
ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL);
+
+ dev_kfree_skb(skb);
}
/*
@@ -465,26 +488,32 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
{
IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
+ if (!self->tty)
+ return;
+
del_timer(&self->watchdog_timer);
- /*
+ /*
* IrCOMM link is now up, and if we are not using hardware
- * flow-control, then declare the hardware as running. Otherwise
- * the client will have to wait for the CD to be set.
+ * flow-control, then declare the hardware as running. Otherwise we
+ * will have to wait for the peer device (DCE) to raise the CTS
+ * line.
*/
- if (!(self->flags & ASYNC_CTS_FLOW)) {
+ if (self->flags & ASYNC_CTS_FLOW) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), waiting for CTS ...\n");
+ return;
+ } else {
IRDA_DEBUG(2, __FUNCTION__ "(), starting hardware!\n");
- if (!self->tty)
- return;
+
self->tty->hw_stopped = 0;
+
+ /* Wake up processes blocked on open */
+ wake_up_interruptible(&self->open_wait);
}
- /* Wake up processes blocked on open */
- wake_up_interruptible(&self->open_wait);
- /*
- * Wake up processes blocked on write, or waiting for a write
- * wakeup notification
- */
queue_task(&self->tqueue, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
@@ -498,6 +527,9 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
*/
void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self, int timeout)
{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
ircomm_tty_watchdog_timer_expired);
}
@@ -512,7 +544,7 @@ void ircomm_tty_watchdog_timer_expired(void *data)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
- IRDA_DEBUG(4, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -535,13 +567,12 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n",
ircomm_tty_state[self->state], ircomm_tty_event[event]);
-
switch (event) {
case IRCOMM_TTY_ATTACH_CABLE:
/* Try to discover any remote devices */
ircomm_tty_start_watchdog_timer(self, 3*HZ);
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
-
+
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
break;
case IRCOMM_TTY_DISCOVERY_INDICATION:
@@ -570,10 +601,6 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
-
- /* Init connection */
- ircomm_tty_send_initial_parameters(self);
- ircomm_tty_link_established(self);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Just stay idle */
@@ -640,15 +667,15 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
-
- /* Init connection */
- ircomm_tty_send_initial_parameters(self);
- ircomm_tty_link_established(self);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
+#if 1
+ /* Give up */
+#else
/* Try to discover any remote devices */
ircomm_tty_start_watchdog_timer(self, 3*HZ);
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
+#endif
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
@@ -706,10 +733,6 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
-
- /* Init connection */
- ircomm_tty_send_initial_parameters(self);
- ircomm_tty_link_established(self);
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
@@ -758,10 +781,6 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
-
- /* Init connection */
- ircomm_tty_send_initial_parameters(self);
- ircomm_tty_link_established(self);
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
@@ -808,10 +827,6 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
-
- /* Init connection */
- ircomm_tty_send_initial_parameters(self);
- ircomm_tty_link_established(self);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Go back to search mode */
@@ -819,7 +834,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
ircomm_tty_start_watchdog_timer(self, 3*HZ);
break;
case IRCOMM_TTY_DETACH_CABLE:
- ircomm_disconnect_request(self->ircomm, NULL);
+ /* ircomm_disconnect_request(self->ircomm, NULL); */
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
@@ -855,9 +870,15 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
- /* Drop carrier */
- self->session.dce = IRCOMM_DELTA_CD;
- ircomm_tty_check_modem_status(self);
+ if (self->flags & ASYNC_CHECK_CD) {
+ /* Drop carrier */
+ self->settings.dce = IRCOMM_DELTA_CD;
+ ircomm_tty_check_modem_status(self);
+ } else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), hanging up!\n");
+ if (self->tty)
+ tty_hangup(self->tty);
+ }
break;
default:
IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n",
@@ -876,9 +897,12 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
struct sk_buff *skb, struct ircomm_tty_info *info)
{
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+
IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n",
ircomm_tty_state[self->state], ircomm_tty_event[event]);
-
+
return (*state[self->state])(self, event, skb, info);
}
@@ -890,6 +914,9 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
*/
void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_STATE state)
{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
+
self->state = state;
IRDA_DEBUG(2, __FUNCTION__": next state=%s, service type=%d\n",
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 5262af954..107ceb612 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Jun 10 14:39:09 1999
- * Modified at: Sat Oct 30 12:50:41 1999
+ * Modified at: Wed Jan 5 14:45:43 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -59,6 +59,8 @@ void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
unsigned cflag, cval;
int baud;
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
if (!self->tty || !self->tty->termios || !self->ircomm)
return;
@@ -85,16 +87,17 @@ void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
if (!baud)
baud = 9600; /* B0 transition handled in rs_set_termios */
- self->session.data_rate = baud;
+ self->settings.data_rate = baud;
ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
/* CTS flow control flag and modem status interrupts */
if (cflag & CRTSCTS) {
self->flags |= ASYNC_CTS_FLOW;
- self->session.flow_control |= IRCOMM_RTS_CTS_IN;
- } else
+ self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
+ } else {
self->flags &= ~ASYNC_CTS_FLOW;
-
+ self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
+ }
if (cflag & CLOCAL)
self->flags &= ~ASYNC_CHECK_CD;
else
@@ -126,7 +129,7 @@ void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
self->ignore_status_mask |= LSR_OE;
}
#endif
- self->session.data_format = cval;
+ self->settings.data_format = cval;
ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
@@ -146,6 +149,8 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
if ((cflag == old_termios->c_cflag) &&
(RELEVANT_IFLAG(tty->termios->c_iflag) ==
RELEVANT_IFLAG(old_termios->c_iflag)))
@@ -158,17 +163,17 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) &&
!(cflag & CBAUD)) {
- self->session.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
+ self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
ircomm_param_request(self, IRCOMM_DTE, TRUE);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
(cflag & CBAUD)) {
- self->session.dte |= IRCOMM_DTR;
+ self->settings.dte |= IRCOMM_DTR;
if (!(tty->termios->c_cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags)) {
- self->session.dte |= IRCOMM_RTS;
+ self->settings.dte |= IRCOMM_RTS;
}
ircomm_param_request(self, IRCOMM_DTE, TRUE);
}
@@ -193,14 +198,14 @@ static int ircomm_tty_get_modem_info(struct ircomm_tty_cb *self,
{
unsigned int result;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
- result = ((self->session.dte & IRCOMM_RTS) ? TIOCM_RTS : 0)
- | ((self->session.dte & IRCOMM_DTR) ? TIOCM_DTR : 0)
- | ((self->session.dce & IRCOMM_CD) ? TIOCM_CAR : 0)
- | ((self->session.dce & IRCOMM_RI) ? TIOCM_RNG : 0)
- | ((self->session.dce & IRCOMM_DSR) ? TIOCM_DSR : 0)
- | ((self->session.dce & IRCOMM_CTS) ? TIOCM_CTS : 0);
+ result = ((self->settings.dte & IRCOMM_RTS) ? TIOCM_RTS : 0)
+ | ((self->settings.dte & IRCOMM_DTR) ? TIOCM_DTR : 0)
+ | ((self->settings.dce & IRCOMM_CD) ? TIOCM_CAR : 0)
+ | ((self->settings.dce & IRCOMM_RI) ? TIOCM_RNG : 0)
+ | ((self->settings.dce & IRCOMM_DSR) ? TIOCM_DSR : 0)
+ | ((self->settings.dce & IRCOMM_CTS) ? TIOCM_CTS : 0);
return put_user(result, value);
}
@@ -227,27 +232,27 @@ static int ircomm_tty_set_modem_info(struct ircomm_tty_cb *self,
if (error)
return error;
- old_rts = self->session.dte & IRCOMM_RTS;
- old_dtr = self->session.dte & IRCOMM_DTR;
+ old_rts = self->settings.dte & IRCOMM_RTS;
+ old_dtr = self->settings.dte & IRCOMM_DTR;
switch (cmd) {
case TIOCMBIS:
if (arg & TIOCM_RTS)
- self->session.dte |= IRCOMM_RTS;
+ self->settings.dte |= IRCOMM_RTS;
if (arg & TIOCM_DTR)
- self->session.dte |= IRCOMM_DTR;
+ self->settings.dte |= IRCOMM_DTR;
break;
case TIOCMBIC:
if (arg & TIOCM_RTS)
- self->session.dte &= ~IRCOMM_RTS;
+ self->settings.dte &= ~IRCOMM_RTS;
if (arg & TIOCM_DTR)
- self->session.dte &= ~IRCOMM_DTR;
+ self->settings.dte &= ~IRCOMM_DTR;
break;
case TIOCMSET:
- self->session.dte =
- ((self->session.dte & ~(IRCOMM_RTS | IRCOMM_DTR))
+ self->settings.dte =
+ ((self->settings.dte & ~(IRCOMM_RTS | IRCOMM_DTR))
| ((arg & TIOCM_RTS) ? IRCOMM_RTS : 0)
| ((arg & TIOCM_DTR) ? IRCOMM_DTR : 0));
break;
@@ -256,11 +261,11 @@ static int ircomm_tty_set_modem_info(struct ircomm_tty_cb *self,
return -EINVAL;
}
- if ((self->session.dte & IRCOMM_RTS) != old_rts)
- self->session.dte |= IRCOMM_DELTA_RTS;
+ if ((self->settings.dte & IRCOMM_RTS) != old_rts)
+ self->settings.dte |= IRCOMM_DELTA_RTS;
- if ((self->session.dte & IRCOMM_DTR) != old_dtr)
- self->session.dte |= IRCOMM_DELTA_DTR;
+ if ((self->settings.dte & IRCOMM_DTR) != old_dtr)
+ self->settings.dte |= IRCOMM_DELTA_DTR;
ircomm_param_request(self, IRCOMM_DTE, TRUE);
@@ -281,12 +286,12 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
if (!retinfo)
return -EFAULT;
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
memset(&info, 0, sizeof(info));
info.line = self->line;
info.flags = self->flags;
- info.baud_base = self->session.data_rate;
+ info.baud_base = self->settings.data_rate;
info.close_delay = self->close_delay;
info.closing_wait = self->closing_wait;
@@ -310,29 +315,33 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
*
*
*/
-static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *tty,
+static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
struct serial_struct *new_info)
{
#if 0
struct serial_struct new_serial;
- struct ircomm_tty_cb old_driver;
+ struct ircomm_tty_cb old_state, *state;
- IRDA_DEBUG(2, __FUNCTION__ "()\n");
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
- old_driver = *driver;
+
+	state = self;
+ old_state = *self;
if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != driver->comm->data_rate) ||
- (new_serial.close_delay != driver->close_delay) ||
+ if ((new_serial.baud_base != state->settings.data_rate) ||
+ (new_serial.close_delay != state->close_delay) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
- (driver->flags & ~ASYNC_USR_MASK)))
+ (self->flags & ~ASYNC_USR_MASK)))
return -EPERM;
- driver->flags = ((driver->flags & ~ASYNC_USR_MASK) |
+ state->flags = ((state->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK));
- driver->custom_divisor = new_serial.custom_divisor;
+ self->flags = ((self->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ /* self->custom_divisor = new_serial.custom_divisor; */
goto check_and_exit;
}
@@ -341,15 +350,14 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *tty,
* At this point, we start making changes.....
*/
- if (self->session.data_rate != new_serial.baud_base) {
- self->session.data_rate.data_rate = new_serial.baud_base;
- if (driver->comm->state == IRCOMM_CONN)
- ircomm_control_request(driver->comm, DATA_RATE);
+ if (self->settings.data_rate != new_serial.baud_base) {
+ self->settings.data_rate = new_serial.baud_base;
+ ircomm_param_request(self, IRCOMM_DATA_RATE, TRUE);
}
- driver->close_delay = new_serial.close_delay * HZ/100;
- driver->closing_wait = new_serial.closing_wait * HZ/100;
- driver->custom_divisor = new_serial.custom_divisor;
+ self->close_delay = new_serial.close_delay * HZ/100;
+ self->closing_wait = new_serial.closing_wait * HZ/100;
+ /* self->custom_divisor = new_serial.custom_divisor; */
self->flags = ((self->flags & ~ASYNC_FLAGS) |
(new_serial.flags & ASYNC_FLAGS));
@@ -358,7 +366,7 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *tty,
check_and_exit:
if (self->flags & ASYNC_INITIALIZED) {
- if (((old_driver.flags & ASYNC_SPD_MASK) !=
+ if (((old_state.flags & ASYNC_SPD_MASK) !=
(self->flags & ASYNC_SPD_MASK)) ||
(old_driver.custom_divisor != driver->custom_divisor)) {
if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
@@ -388,6 +396,8 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
int ret = 0;
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index dd17e2dd3..0e272947d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Oct 9 09:22:27 1999
- * Modified at: Tue Nov 16 12:54:13 1999
+ * Modified at: Wed Jan 5 14:17:16 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -62,6 +62,8 @@ extern int tekram_init(void);
extern int actisys_init(void);
extern int girbil_init(void);
+static void __irda_task_delete(struct irda_task *task);
+
static hashbin_t *dongles = NULL;
static hashbin_t *tasks = NULL;
@@ -94,14 +96,14 @@ int irda_device_proc_read(char *buf, char **start, off_t offset, int len,
int __init irda_device_init( void)
{
- dongles = hashbin_new(HB_LOCAL);
+ dongles = hashbin_new(HB_GLOBAL);
if (dongles == NULL) {
printk(KERN_WARNING
"IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
- tasks = hashbin_new(HB_LOCAL);
+ tasks = hashbin_new(HB_GLOBAL);
if (tasks == NULL) {
printk(KERN_WARNING
"IrDA: Can't allocate tasks hashbin!\n");
@@ -145,6 +147,9 @@ int __init irda_device_init( void)
#ifdef CONFIG_AIRPORT_DONGLE
airport_init();
#endif
+#ifdef CONFIG_OLD_BELKIN
+ old_belkin_init();
+#endif
return 0;
}
@@ -152,7 +157,7 @@ void irda_device_cleanup(void)
{
IRDA_DEBUG(4, __FUNCTION__ "()\n");
- hashbin_delete(tasks, NULL);
+ hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete);
hashbin_delete(dongles, NULL);
}
@@ -188,7 +193,7 @@ int irda_device_set_dtr_rts(struct net_device *dev, int dtr, int rts)
struct if_irda_req req;
int ret;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
if (!dev->do_ioctl) {
ERROR(__FUNCTION__ "(), do_ioctl not impl. by "
@@ -209,7 +214,7 @@ int irda_device_change_speed(struct net_device *dev, __u32 speed)
struct if_irda_req req;
int ret;
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
if (!dev->do_ioctl) {
ERROR(__FUNCTION__ "(), do_ioctl not impl. by "
@@ -257,6 +262,21 @@ void irda_task_next_state(struct irda_task *task, TASK_STATE state)
task->state = state;
}
+static void __irda_task_delete(struct irda_task *task)
+{
+ del_timer(&task->timer);
+
+ kfree(task);
+}
+
+void irda_task_delete(struct irda_task *task)
+{
+ /* Unregister task */
+ hashbin_remove(tasks, (int) task, NULL);
+
+ __irda_task_delete(task);
+}
+
/*
* Function irda_task_kick (task)
*
@@ -267,9 +287,9 @@ void irda_task_next_state(struct irda_task *task, TASK_STATE state)
*/
int irda_task_kick(struct irda_task *task)
{
- int timeout;
- int ret = 0;
+ int finished = TRUE;
int count = 0;
+ int timeout;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
@@ -281,13 +301,15 @@ int irda_task_kick(struct irda_task *task)
timeout = task->function(task);
if (count++ > 100) {
ERROR(__FUNCTION__ "(), error in task handler!\n");
- return -1;
+ irda_task_delete(task);
+ return TRUE;
}
} while ((timeout == 0) && (task->state != IRDA_TASK_DONE));
if (timeout < 0) {
ERROR(__FUNCTION__ "(), Error executing task!\n");
- return -1;
+ irda_task_delete(task);
+ return TRUE;
}
/* Check if we are finished */
@@ -311,20 +333,18 @@ int irda_task_kick(struct irda_task *task)
irda_task_kick(task->parent);
}
}
- /* Unregister task */
- hashbin_remove(tasks, (int) task, NULL);
-
- kfree(task);
+ irda_task_delete(task);
} else if (timeout > 0) {
irda_start_timer(&task->timer, timeout, (void *) task,
irda_task_timer_expired);
- ret = 1;
+ finished = FALSE;
} else {
- IRDA_DEBUG(0, __FUNCTION__ "(), not finished, and no timeout!\n");
- ret = 1;
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), not finished, and no timeout!\n");
+ finished = FALSE;
}
- return ret;
+ return finished;
}
/*
@@ -335,25 +355,26 @@ int irda_task_kick(struct irda_task *task)
* called from interrupt context, so it's not possible to use
* schedule_timeout()
*/
-int irda_task_execute(void *instance, TASK_CALLBACK function,
- TASK_CALLBACK finished, struct irda_task *parent,
- void *param)
+struct irda_task *irda_task_execute(void *instance, TASK_CALLBACK function,
+ TASK_CALLBACK finished,
+ struct irda_task *parent, void *param)
{
struct irda_task *task;
+ int ret;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
task = kmalloc(sizeof(struct irda_task), GFP_ATOMIC);
if (!task)
- return -ENOMEM;
+ return NULL;
- task->state = IRDA_TASK_INIT;
+ task->state = IRDA_TASK_INIT;
task->instance = instance;
task->function = function;
task->finished = finished;
- task->parent = parent;
- task->param = param;
- task->magic = IRDA_TASK_MAGIC;
+ task->parent = parent;
+ task->param = param;
+ task->magic = IRDA_TASK_MAGIC;
init_timer(&task->timer);
@@ -361,7 +382,11 @@ int irda_task_execute(void *instance, TASK_CALLBACK function,
hashbin_insert(tasks, (queue_t *) task, (int) task, NULL);
/* No time to waste, so lets get going! */
- return irda_task_kick(task);
+ ret = irda_task_kick(task);
+ if (ret)
+ return NULL;
+ else
+ return task;
}
/*
@@ -456,6 +481,8 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
if (!dongle)
return NULL;
+ memset(dongle, 0, sizeof(dongle_t));
+
/* Bind the registration info to this particular instance */
dongle->issue = reg;
dongle->dev = dev;
@@ -518,12 +545,13 @@ void irda_device_unregister_dongle(struct dongle_reg *dongle)
}
/*
- * Function irda_device_set_raw_mode (self, mode)
- *
- *
+ * Function irda_device_set_mode (self, mode)
*
+ * Set the Infrared device driver into mode where it sends and receives
+ * data without using IrLAP framing. Check out the particular device
+ *    driver to find out which modes it supports.
*/
-int irda_device_set_raw_mode(struct net_device* dev, int mode)
+int irda_device_set_mode(struct net_device* dev, int mode)
{
struct if_irda_req req;
int ret;
@@ -536,9 +564,9 @@ int irda_device_set_raw_mode(struct net_device* dev, int mode)
return -1;
}
- req.ifr_raw_mode = mode;
+ req.ifr_mode = mode;
- ret = dev->do_ioctl(dev, (struct ifreq *) &req, SIOCSRAWMODE);
+ ret = dev->do_ioctl(dev, (struct ifreq *) &req, SIOCSMODE);
return ret;
}
@@ -546,7 +574,7 @@ int irda_device_set_raw_mode(struct net_device* dev, int mode)
/*
* Function setup_dma (idev, buffer, count, mode)
*
- * Setup the DMA channel
+ * Setup the DMA channel. Commonly used by ISA FIR drivers
*
*/
void setup_dma(int channel, char *buffer, int count, int mode)
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index ad6c01183..5f1140525 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Fri Nov 5 20:25:42 1999
+ * Modified at: Sat Dec 25 16:42:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
@@ -61,6 +61,7 @@ static __u32 service_handle;
extern char *lmp_reasons[];
static void __iriap_close(struct iriap_cb *self);
+static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode);
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *skb);
static void iriap_connect_indication(void *instance, void *sap,
@@ -82,8 +83,10 @@ static int iriap_data_indication(void *instance, void *sap,
*/
int __init iriap_init(void)
{
- __u16 hints;
struct ias_object *obj;
+ struct iriap_cb *server;
+ __u8 oct_seq[6];
+ __u16 hints;
/* Allocate master array */
iriap = hashbin_new(HB_LOCAL);
@@ -100,22 +103,32 @@ int __init iriap_init(void)
* Register some default services for IrLMP
*/
hints = irlmp_service_to_hint(S_COMPUTER);
- /*hints |= irlmp_service_to_hint(S_PNP);*/
service_handle = irlmp_register_service(hints);
- /*
- * Register the Device object with LM-IAS
- */
+ /* Register the Device object with LM-IAS */
obj = irias_new_object("Device", IAS_DEVICE_ID);
irias_add_string_attrib(obj, "DeviceName", "Linux");
+
+ oct_seq[0] = 0x01; /* Version 1 */
+ oct_seq[1] = 0x00; /* IAS support bits */
+ oct_seq[2] = 0x00; /* LM-MUX support bits */
+#ifdef CONFIG_IRDA_ULTRA
+ oct_seq[2] |= 0x04; /* Connectionless Data support */
+#endif
+ irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3);
irias_insert_object(obj);
/*
* Register server support with IrLMP so we can accept incoming
* connections
*/
- iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
-
+ server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
+ if (!server) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), unable to open server\n");
+ return -1;
+ }
+ iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
+
return 0;
}
@@ -142,10 +155,8 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
CONFIRM_CALLBACK callback)
{
struct iriap_cb *self;
- struct lsap_cb *lsap;
- notify_t notify;
- IRDA_DEBUG(4, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
self = kmalloc(sizeof(struct iriap_cb), GFP_ATOMIC);
if (!self) {
@@ -157,36 +168,18 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
* Initialize instance
*/
memset(self, 0, sizeof(struct iriap_cb));
-
- irda_notify_init(&notify);
- notify.connect_confirm = iriap_connect_confirm;
- notify.connect_indication = iriap_connect_indication;
- notify.disconnect_indication = iriap_disconnect_indication;
- notify.data_indication = iriap_data_indication;
- notify.instance = self;
- if (mode == IAS_CLIENT)
- strcpy(notify.name, "IrIAS cli");
- else
- strcpy(notify.name, "IrIAS srv");
-
- lsap = irlmp_open_lsap(slsap_sel, &notify);
- if (lsap == NULL) {
- ERROR(__FUNCTION__ "(), Unable to allocated LSAP!\n");
- return NULL;
- }
- slsap_sel = lsap->slsap_sel;
self->magic = IAS_MAGIC;
- self->lsap = lsap;
- self->slsap_sel = slsap_sel;
self->mode = mode;
+ if (mode == IAS_CLIENT)
+ iriap_register_lsap(self, slsap_sel, mode);
self->confirm = callback;
self->priv = priv;
init_timer(&self->watchdog_timer);
- hashbin_insert(iriap, (queue_t *) self, slsap_sel, NULL);
+ hashbin_insert(iriap, (queue_t *) self, (int) self, NULL);
/* Initialize state machines */
iriap_next_client_state(self, S_DISCONNECT);
@@ -212,6 +205,9 @@ static void __iriap_close(struct iriap_cb *self)
del_timer(&self->watchdog_timer);
+ if (self->skb)
+ dev_kfree_skb(self->skb);
+
self->magic = 0;
kfree(self);
@@ -226,6 +222,8 @@ void iriap_close(struct iriap_cb *self)
{
struct iriap_cb *entry;
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IAS_MAGIC, return;);
@@ -234,13 +232,39 @@ void iriap_close(struct iriap_cb *self)
self->lsap = NULL;
}
- entry = (struct iriap_cb *) hashbin_remove(iriap, self->slsap_sel,
- NULL);
+ entry = (struct iriap_cb *) hashbin_remove(iriap, (int) self, NULL);
ASSERT(entry == self, return;);
__iriap_close(self);
}
+static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
+{
+ notify_t notify;
+
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
+ irda_notify_init(&notify);
+ notify.connect_confirm = iriap_connect_confirm;
+ notify.connect_indication = iriap_connect_indication;
+ notify.disconnect_indication = iriap_disconnect_indication;
+ notify.data_indication = iriap_data_indication;
+ notify.instance = self;
+ if (mode == IAS_CLIENT)
+ strcpy(notify.name, "IrIAS cli");
+ else
+ strcpy(notify.name, "IrIAS srv");
+
+ self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
+ if (self->lsap == NULL) {
+ ERROR(__FUNCTION__ "(), Unable to allocated LSAP!\n");
+ return -1;
+ }
+ self->slsap_sel = self->lsap->slsap_sel;
+
+ return 0;
+}
+
/*
* Function iriap_disconnect_indication (handle, reason)
*
@@ -281,6 +305,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
IRDA_DEBUG(4, __FUNCTION__ "(), disconnect as server\n");
iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
+ iriap_close(self);
}
if (userdata)
@@ -465,6 +490,10 @@ void iriap_getvaluebyclass_confirm(struct iriap_cb *self, struct sk_buff *skb)
IRDA_DEBUG(0, __FUNCTION__
"(), charset %s, not supported\n",
ias_charset_types[charset]);
+
+ /* Aborting, close connection! */
+ iriap_disconnect_request(self);
+ dev_kfree_skb(skb);
return;
/* break; */
}
@@ -487,7 +516,7 @@ void iriap_getvaluebyclass_confirm(struct iriap_cb *self, struct sk_buff *skb)
value = irias_new_octseq_value(fp+n, value_len);
break;
default:
- value = &missing;
+ value = irias_new_missing_value();
break;
}
@@ -499,6 +528,11 @@ void iriap_getvaluebyclass_confirm(struct iriap_cb *self, struct sk_buff *skb)
*/
if (self->confirm)
self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), missing handler!\n");
+ irias_delete_value(value);
+ }
+ dev_kfree_skb(skb);
}
/*
@@ -527,10 +561,10 @@ void iriap_getvaluebyclass_response(struct iriap_cb *self, __u16 obj_id,
/*
* We must adjust the size of the response after the length of the
- * value. We add 9 bytes because of the 6 bytes for the frame and
- * max 3 bytes for the value coding.
+ * value. We add 32 bytes because of the 6 bytes for the frame and
+ * max 5 bytes for the value coding.
*/
- skb = dev_alloc_skb(value->len + self->max_header_size + 9);
+ skb = dev_alloc_skb(value->len + self->max_header_size + 32);
if (!skb)
return;
@@ -552,7 +586,7 @@ void iriap_getvaluebyclass_response(struct iriap_cb *self, __u16 obj_id,
tmp_be16 = cpu_to_be16(obj_id);
memcpy(fp+n, &tmp_be16, 2); n += 2;
- switch(value->type) {
+ switch (value->type) {
case IAS_STRING:
skb_put(skb, 3 + value->len);
fp[n++] = value->type;
@@ -577,7 +611,7 @@ void iriap_getvaluebyclass_response(struct iriap_cb *self, __u16 obj_id,
break;
case IAS_MISSING:
IRDA_DEBUG( 3, __FUNCTION__ ": sending IAS_MISSING\n");
- skb_put( skb, 1);
+ skb_put(skb, 1);
fp[n++] = value->type;
break;
default:
@@ -622,16 +656,14 @@ void iriap_getvaluebyclass_indication(struct iriap_cb *self,
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
+ /* We do not need the buffer anymore */
dev_kfree_skb(skb);
- /*
- * Now, do some advanced parsing! :-)
- */
IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
- IRDA_DEBUG(0, "LM-IAS: Object not found\n");
+ IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&missing);
return;
@@ -640,20 +672,16 @@ void iriap_getvaluebyclass_indication(struct iriap_cb *self,
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
- IRDA_DEBUG(0, "LM-IAS: Attribute %s not found\n", attr);
+ IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN, &missing);
return;
}
- IRDA_DEBUG(4, "LM-IAS: found %s\n", attrib->name);
-
- /*
- * We have a match; send the value.
- */
+ /* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
-
+
return;
}
@@ -668,7 +696,7 @@ void iriap_send_ack(struct iriap_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(6, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IAS_MAGIC, return;);
@@ -683,7 +711,25 @@ void iriap_send_ack(struct iriap_cb *self)
frame = skb->data;
/* Build frame */
- frame[0] = IAP_LST | self->operation;
+ frame[0] = IAP_LST | IAP_ACK | self->operation;
+
+ irlmp_data_request(self->lsap, skb);
+}
+
+void iriap_connect_request(struct iriap_cb *self)
+{
+ int ret;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
+
+ ret = irlmp_connect_request(self->lsap, LSAP_IAS,
+ self->saddr, self->daddr,
+ NULL, NULL);
+ if (ret < 0) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), connect failed!\n");
+ self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
+ }
}
/*
@@ -693,8 +739,8 @@ void iriap_send_ack(struct iriap_cb *self)
*
*/
static void iriap_connect_confirm(void *instance, void *sap,
- struct qos_info *qos,
- __u32 max_sdu_size, __u8 header_size,
+ struct qos_info *qos, __u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *userdata)
{
struct iriap_cb *self;
@@ -705,7 +751,8 @@ static void iriap_connect_confirm(void *instance, void *sap,
ASSERT(self->magic == IAS_MAGIC, return;);
ASSERT(userdata != NULL, return;);
- IRDA_DEBUG(4, __FUNCTION__ "()\n");
+ self->max_data_size = max_seg_size;
+ self->max_header_size = max_header_size;
del_timer(&self->watchdog_timer);
@@ -719,18 +766,43 @@ static void iriap_connect_confirm(void *instance, void *sap,
*
*/
static void iriap_connect_indication(void *instance, void *sap,
- struct qos_info *qos, __u32 max_sdu_size,
- __u8 header_size,
+ struct qos_info *qos, __u32 max_seg_size,
+ __u8 max_header_size,
struct sk_buff *userdata)
{
- struct iriap_cb *self;
+ struct iriap_cb *self, *new;
+
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
self = (struct iriap_cb *) instance;
ASSERT(self != NULL, return;);
ASSERT(self->magic == IAS_MAGIC, return;);
+
+ /* Start new server */
+ new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
+ if (!new) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), open failed\n");
+ dev_kfree_skb(userdata);
+ return;
+ }
+
+ /* Now attach up the new "socket" */
+ new->lsap = irlmp_dup(self->lsap, new);
+ if (!new->lsap) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), dup failed!\n");
+ return;
+ }
+
+ new->max_data_size = max_seg_size;
+ new->max_header_size = max_header_size;
- iriap_do_server_event(self, IAP_LM_CONNECT_INDICATION, userdata);
+ /* Clean up the original one to keep it in listen state */
+ self->lsap->dlsap_sel = LSAP_ANY;
+ self->lsap->lsap_state = LSAP_DISCONNECTED;
+ /* FIXME: refcount in irlmp might get wrong */
+
+ iriap_do_server_event(new, IAP_LM_CONNECT_INDICATION, userdata);
}
/*
@@ -746,7 +818,7 @@ static int iriap_data_indication(void *instance, void *sap,
__u8 *frame;
__u8 opcode;
- IRDA_DEBUG( 4, __FUNCTION__ "()\n");
+ IRDA_DEBUG(3, __FUNCTION__ "()\n");
self = (struct iriap_cb *) instance;
@@ -768,12 +840,14 @@ static int iriap_data_indication(void *instance, void *sap,
if (~opcode & IAP_LST) {
WARNING(__FUNCTION__ "(), IrIAS multiframe commands or "
"results is not implemented yet!\n");
+ dev_kfree_skb(skb);
return 0;
}
/* Check for ack frames since they don't contain any data */
if (opcode & IAP_ACK) {
IRDA_DEBUG(0, __FUNCTION__ "() Got ack frame!\n");
+ dev_kfree_skb(skb);
return 0;
}
@@ -782,9 +856,10 @@ static int iriap_data_indication(void *instance, void *sap,
switch (opcode) {
case GET_INFO_BASE:
IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n");
+ dev_kfree_skb(skb);
break;
case GET_VALUE_BY_CLASS:
- iriap_do_call_event(self, IAP_RECV_F_LST, skb);
+ iriap_do_call_event(self, IAP_RECV_F_LST, NULL);
switch (frame[1]) {
case IAS_SUCCESS:
@@ -800,8 +875,9 @@ static int iriap_data_indication(void *instance, void *sap,
* no to use self anymore after calling confirm
*/
if (self->confirm)
- self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
+ self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
self->priv);
+ dev_kfree_skb(skb);
break;
case IAS_ATTRIB_UNKNOWN:
WARNING(__FUNCTION__ "(), No such attribute!\n");
@@ -815,12 +891,14 @@ static int iriap_data_indication(void *instance, void *sap,
if (self->confirm)
self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
self->priv);
+ dev_kfree_skb(skb);
break;
}
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), Unknown op-code: %02x\n",
opcode);
+ dev_kfree_skb(skb);
break;
}
return 0;
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index cc9d0c11a..eb8463b73 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Sun Oct 31 22:13:00 1999
+ * Modified at: Sat Dec 25 21:09:47 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
@@ -42,29 +42,29 @@ static void state_s_calling (struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_outstanding (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_replying (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_s_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_call (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_waiting (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_receiving (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_execute (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void state_r_returning (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static void (*iriap_state[])(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb) = {
+ struct sk_buff *skb) = {
/* Client FSM */
state_s_disconnect,
state_s_connecting,
@@ -141,7 +141,7 @@ void iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event,
}
void iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
ASSERT(self != NULL, return;);
ASSERT(self->magic == IAS_MAGIC, return;);
@@ -168,18 +168,15 @@ void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- int ret;
-
ASSERT(self != NULL, return;);
ASSERT(self->magic == IAS_MAGIC, return;);
switch (event) {
case IAP_CALL_REQUEST_GVBC:
iriap_next_client_state(self, S_CONNECTING);
+ ASSERT(self->skb == NULL, return;);
self->skb = skb;
- ret = irlmp_connect_request(self->lsap, LSAP_IAS,
- self->saddr, self->daddr,
- NULL, NULL);
+ iriap_connect_request(self);
break;
case IAP_LM_DISCONNECT_INDICATION:
break;
@@ -258,15 +255,18 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
switch (event) {
case IAP_CALL_REQUEST:
- irlmp_data_request(self->lsap, self->skb);
+ skb = self->skb;
+ self->skb = NULL;
+
+ irlmp_data_request(self->lsap, skb);
iriap_next_call_state(self, S_OUTSTANDING);
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %d\n", event);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
- if (skb)
- dev_kfree_skb(skb);
}
/*
@@ -297,7 +297,6 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
iriap_send_ack(self);
/*LM_Idle_request(idle); */
- dev_kfree_skb(skb);
iriap_next_call_state(self, S_WAIT_FOR_CALL);
break;
default:
@@ -324,7 +323,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
*
*/
static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
IRDA_DEBUG(0, __FUNCTION__ "(), Not implemented\n");
}
@@ -400,14 +399,13 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "state_r_call()\n");
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
switch (event) {
case IAP_LM_DISCONNECT_INDICATION:
/* Abort call */
iriap_next_server_state(self, R_DISCONNECT);
- iriap_next_r_connect_state(self, R_WAITING);
-
+ iriap_next_r_connect_state(self, R_WAITING);
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), unknown event!\n");
@@ -431,8 +429,8 @@ static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event,
IRDA_DEBUG(0, __FUNCTION__ "(), Not implemented\n");
}
-static void state_r_wait_active(struct iriap_cb *self,
- IRIAP_EVENT event, struct sk_buff *skb)
+static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
+ struct sk_buff *skb)
{
IRDA_DEBUG(0, __FUNCTION__ "(), Not implemented\n");
}
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index c83bb855b..17ad3801d 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 1 22:50:04 1998
- * Modified at: Sat Oct 9 17:11:16 1999
+ * Modified at: Wed Dec 15 11:23:16 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
@@ -34,7 +34,7 @@ hashbin_t *objects = NULL;
/*
* Used when a missing value needs to be returned
*/
-struct ias_value missing = { IAS_MISSING, 0, 0, {0}};
+struct ias_value missing = { IAS_MISSING, 0, 0, {0}};
/*
* Function strdup (str)
@@ -72,8 +72,8 @@ struct ias_object *irias_new_object( char *name, int id)
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
- obj = (struct ias_object *) kmalloc( sizeof( struct ias_object),
- GFP_ATOMIC);
+ obj = (struct ias_object *) kmalloc(sizeof(struct ias_object),
+ GFP_ATOMIC);
if (obj == NULL) {
IRDA_DEBUG(0, __FUNCTION__ "(), Unable to allocate object!\n");
return NULL;
@@ -364,7 +364,7 @@ struct ias_value *irias_new_integer_value(int integer)
{
struct ias_value *value;
- value = kmalloc(sizeof( struct ias_value), GFP_ATOMIC);
+ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
WARNING(__FUNCTION__ "(), Unable to kmalloc!\n");
return NULL;
@@ -419,7 +419,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
WARNING(__FUNCTION__ "(), Unable to kmalloc!\n");
return NULL;
}
- memset(value, 0, sizeof( struct ias_value));
+ memset(value, 0, sizeof(struct ias_value));
value->type = IAS_OCT_SEQ;
value->len = len;
@@ -433,6 +433,23 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
return value;
}
+struct ias_value *irias_new_missing_value(void)
+{
+ struct ias_value *value;
+
+ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ if (value == NULL) {
+ WARNING(__FUNCTION__ "(), Unable to kmalloc!\n");
+ return NULL;
+ }
+ memset(value, 0, sizeof(struct ias_value));
+
+ value->type = IAS_MISSING;
+ value->len = 0;
+
+ return value;
+}
+
/*
* Function irias_delete_value (value)
*
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 4faa8edf5..355bae955 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun Oct 31 19:44:41 1999
+ * Modified at: Tue Dec 14 15:47:02 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
@@ -584,16 +584,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
NULL);
return;
}
- break;
- case IAS_STRING:
- IRDA_DEBUG(2, __FUNCTION__ "(), got string %s\n",
- value->t.string);
- break;
- case IAS_OCT_SEQ:
- IRDA_DEBUG(2, __FUNCTION__ "(), OCT_SEQ not implemented\n");
- break;
- case IAS_MISSING:
- IRDA_DEBUG(2, __FUNCTION__ "(), MISSING not implemented\n");
+ irias_delete_value(value);
break;
default:
IRDA_DEBUG(2, __FUNCTION__ "(), unknown type!\n");
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
index 3a1aac699..a06b18582 100644
--- a/net/irda/irlan/irlan_client_event.c
+++ b/net/irda/irlan/irlan_client_event.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun Oct 31 19:41:55 1999
+ * Modified at: Sun Dec 26 21:52:24 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
@@ -194,7 +194,7 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
ASSERT(self != NULL, return -1;);
- switch(event) {
+ switch (event) {
case IRLAN_CONNECT_COMPLETE:
/* Send getinfo cmd */
irlan_get_provider_info(self);
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 64593e319..9d276e0a1 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun Oct 31 19:43:50 1999
+ * Modified at: Sun Dec 26 21:53:10 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
@@ -705,7 +705,6 @@ void irlan_get_provider_info(struct irlan_cb *self)
frame[0] = CMD_GET_PROVIDER_INFO;
frame[1] = 0x00; /* Zero parameters */
- /* irttp_data_request(self->client.tsap_ctrl, skb); */
irlan_ctrl_data_request(self, skb);
}
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 85c75b846..c2037ad74 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 15 08:37:58 1998
- * Modified at: Sat Oct 30 12:58:30 1999
+ * Modified at: Thu Nov 4 14:50:52 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
@@ -348,6 +348,7 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
return;
read_lock(&in_dev->lock);
if (in_dev->ifa_list)
+
arp_send(ARPOP_REQUEST, ETH_P_ARP,
in_dev->ifa_list->ifa_address,
dev,
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 09257bd45..1c2958c4b 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -6,7 +6,7 @@
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Tue Nov 16 10:01:06 1999
+ * Modified at: Tue Dec 14 09:26:44 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
@@ -131,22 +131,14 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos)
irlap_next_state(self, LAP_OFFLINE);
/* Initialize transmit queue */
- skb_queue_head_init(&self->tx_list);
+ skb_queue_head_init(&self->txq);
+ skb_queue_head_init(&self->txq_ultra);
skb_queue_head_init(&self->wx_list);
/* My unique IrLAP device address! */
get_random_bytes(&self->saddr, sizeof(self->saddr));
memcpy(dev->dev_addr, &self->saddr, 4);
- /*
- * Generate random connection address for this session, which must
- * be 7 bits wide and different from 0x00 and 0xfe
- */
- while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
- get_random_bytes(&self->caddr, sizeof(self->caddr));
- self->caddr &= 0xfe;
- }
-
init_timer(&self->slot_timer);
init_timer(&self->query_timer);
init_timer(&self->discovery_timer);
@@ -157,6 +149,8 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos)
init_timer(&self->media_busy_timer);
irlap_apply_default_connection_parameters(self);
+
+ self->N3 = 3; /* # connections attemts to try before giving up */
irlap_next_state(self, LAP_NDM);
@@ -308,7 +302,8 @@ void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
* IrLMP for further processing
*
*/
-inline void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb)
+void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
+ int unreliable)
{
/* Hide LAP header from IrLMP layer */
skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
@@ -322,38 +317,9 @@ inline void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb)
}
}
#endif
- irlmp_link_data_indication(self->notify.instance, LAP_RELIABLE, skb);
+ irlmp_link_data_indication(self->notify.instance, skb, unreliable);
}
-/*
- * Function irlap_unit_data_indication (self, skb)
- *
- * Received some data that was sent unreliable
- *
- */
-void irlap_unit_data_indication(struct irlap_cb *self, struct sk_buff *skb)
-{
- IRDA_DEBUG(1, __FUNCTION__ "()\n");
-
- ASSERT(self != NULL, return;);
- ASSERT(self->magic == LAP_MAGIC, return;);
- ASSERT(skb != NULL, return;);
-
- /* Hide LAP header from IrLMP layer */
- skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
-
-#ifdef CONFIG_IRDA_COMPRESSION
- if (self->qos_tx.compression.value) {
-
- skb = irlap_decompress_frame(self, skb);
- if (!skb) {
- IRDA_DEBUG(1, __FUNCTION__ "(), Decompress error!\n");
- return;
- }
- }
-#endif
- irlmp_link_data_indication(self->notify.instance, LAP_UNRELIABLE, skb);
-}
/*
* Function irlap_data_request (self, skb)
@@ -361,12 +327,14 @@ void irlap_unit_data_indication(struct irlap_cb *self, struct sk_buff *skb)
* Queue data for transmission, must wait until XMIT state
*
*/
-inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
- int reliable)
+void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
+ int unreliable)
{
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
+ IRDA_DEBUG(3, __FUNCTION__ "()\n");
+
#ifdef CONFIG_IRDA_COMPRESSION
if (self->qos_tx.compression.value) {
skb = irlap_compress_frame(self, skb);
@@ -376,7 +344,6 @@ inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
}
}
#endif
-
ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
return;);
skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
@@ -385,12 +352,10 @@ inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
* Must set frame format now so that the rest of the code knows
* if its dealing with an I or an UI frame
*/
- if (reliable)
- skb->data[1] = I_FRAME;
- else {
- IRDA_DEBUG(4, __FUNCTION__ "(), queueing unreliable frame\n");
+ if (unreliable)
skb->data[1] = UI_FRAME;
- }
+ else
+ skb->data[1] = I_FRAME;
/*
* Send event if this frame only if we are in the right state
@@ -401,18 +366,67 @@ inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
* Check if the transmit queue contains some unsent frames,
* and if so, make sure they are sent first
*/
- if (!skb_queue_empty(&self->tx_list)) {
- skb_queue_tail(&self->tx_list, skb);
- skb = skb_dequeue(&self->tx_list);
+ if (!skb_queue_empty(&self->txq)) {
+ skb_queue_tail(&self->txq, skb);
+ skb = skb_dequeue(&self->txq);
ASSERT(skb != NULL, return;);
}
irlap_do_event(self, SEND_I_CMD, skb, NULL);
} else
- skb_queue_tail(&self->tx_list, skb);
+ skb_queue_tail(&self->txq, skb);
}
/*
+ * Function irlap_unitdata_request (self, skb)
+ *
+ * Send Ultra data. This is data that must be sent outside any connection
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
+{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+
+ IRDA_DEBUG(3, __FUNCTION__ "()\n");
+
+ ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
+ return;);
+ skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ skb->data[0] = CBROADCAST;
+ skb->data[1] = UI_FRAME;
+
+ skb_queue_tail(&self->txq_ultra, skb);
+
+ irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
+}
+#endif /*CONFIG_IRDA_ULTRA */
+
+/*
+ * Function irlap_udata_indication (self, skb)
+ *
+ * Receive Ultra data. This is data that is received outside any connection
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
+{
+ IRDA_DEBUG(1, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+
+ /* Hide LAP header from IrLMP layer */
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+
+ irlmp_link_unitdata_indication(self->notify.instance, skb);
+}
+#endif /* CONFIG_IRDA_ULTRA */
+
+/*
* Function irlap_disconnect_request (void)
*
* Request to disconnect connection by service user
@@ -425,7 +439,7 @@ void irlap_disconnect_request(struct irlap_cb *self)
ASSERT(self->magic == LAP_MAGIC, return;);
/* Don't disconnect until all data frames are successfully sent */
- if (skb_queue_len(&self->tx_list) > 0) {
+ if (skb_queue_len(&self->txq) > 0) {
self->disconnect_pending = TRUE;
return;
@@ -466,7 +480,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
/* Flush queues */
irlap_flush_all_queues(self);
- switch(reason) {
+ switch (reason) {
case LAP_RESET_INDICATION:
IRDA_DEBUG(1, __FUNCTION__ "(), Sending reset request!\n");
irlap_do_event(self, RESET_REQUEST, NULL, NULL);
@@ -479,8 +493,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
reason, NULL);
break;
default:
- IRDA_DEBUG(1, __FUNCTION__ "(), Reason %d not implemented!\n",
- reason);
+ ERROR(__FUNCTION__ "(), Unknown reason %d\n", reason);
}
}
@@ -504,48 +517,47 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
(discovery->nslots == 8) || (discovery->nslots == 16),
return;);
+ /* Discovery is only possible in NDM mode */
+ if (self->state != LAP_NDM) {
+ IRDA_DEBUG(4, __FUNCTION__
+ "(), discovery only possible in NDM mode\n");
+ irlap_discovery_confirm(self, NULL);
+ return;
+ }
+
/* Check if last discovery request finished in time */
if (self->discovery_log != NULL) {
hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
self->discovery_log = NULL;
}
-
- /*
- * Discovery is only possible in NDM mode
- */
- if (self->state == LAP_NDM) {
- self->discovery_log= hashbin_new(HB_LOCAL);
-
- info.S = discovery->nslots; /* Number of slots */
- info.s = 0; /* Current slot */
-
- self->discovery_cmd = discovery;
- info.discovery = discovery;
-
- /* Check if the slot timeout is within limits */
- if (sysctl_slot_timeout < 20) {
- ERROR(__FUNCTION__
- "(), to low value for slot timeout!\n");
- sysctl_slot_timeout = 20;
- }
- /*
- * Highest value is actually 8, but we allow higher since
- * some devices seems to require it.
- */
- if (sysctl_slot_timeout > 160) {
- ERROR(__FUNCTION__
- "(), to high value for slot timeout!\n");
- sysctl_slot_timeout = 160;
- }
-
- self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
-
- irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
- } else {
- IRDA_DEBUG(4, __FUNCTION__
- "(), discovery only possible in NDM mode\n");
- irlap_discovery_confirm(self, NULL);
- }
+
+ self->discovery_log= hashbin_new(HB_LOCAL);
+
+ info.S = discovery->nslots; /* Number of slots */
+ info.s = 0; /* Current slot */
+
+ self->discovery_cmd = discovery;
+ info.discovery = discovery;
+
+ /* Check if the slot timeout is within limits */
+ if (sysctl_slot_timeout < 20) {
+ ERROR(__FUNCTION__
+ "(), to low value for slot timeout!\n");
+ sysctl_slot_timeout = 20;
+ }
+ /*
+ * Highest value is actually 8, but we allow higher since
+ * some devices seems to require it.
+ */
+ if (sysctl_slot_timeout > 160) {
+ ERROR(__FUNCTION__
+ "(), to high value for slot timeout!\n");
+ sysctl_slot_timeout = 160;
+ }
+
+ self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
+
+ irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}
/*
@@ -790,17 +802,16 @@ void irlap_initiate_connection_state(struct irlap_cb *self)
*/
void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
+ __u32 min_turn_time;
__u32 speed;
- __u32 usecs;
- __u32 bytes ;
/* Get QoS values. */
speed = qos->baud_rate.value;
- usecs = qos->min_turn_time.value;
+ min_turn_time = qos->min_turn_time.value;
/* No need to calculate XBOFs for speeds over 115200 bps */
if (speed > 115200) {
- self->mtt_required = usecs;
+ self->mtt_required = min_turn_time;
return;
}
@@ -809,9 +820,7 @@ void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
* min turn time, so now we must calculate how many chars (XBOF's) we
* must send for the requested time period (min turn time)
*/
- bytes = speed * usecs / 10000000;
-
- self->xbofs_delay = bytes;
+ self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
}
/*
@@ -828,9 +837,12 @@ void irlap_flush_all_queues(struct irlap_cb *self)
ASSERT(self->magic == LAP_MAGIC, return;);
/* Free transmission queue */
- while ((skb = skb_dequeue(&self->tx_list)) != NULL)
+ while ((skb = skb_dequeue(&self->txq)) != NULL)
dev_kfree_skb(skb);
+ while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
+ dev_kfree_skb(skb);
+
/* Free sliding window buffered packets */
while ((skb = skb_dequeue(&self->wx_list)) != NULL)
dev_kfree_skb(skb);
@@ -872,7 +884,7 @@ void irlap_init_comp_qos_capabilities(struct irlap_cb *self)
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
-
+
/*
* Find out which compressors we support. We do this be checking that
* the corresponding compressor for each bit set in the QoS bits has
@@ -888,17 +900,17 @@ void irlap_init_comp_qos_capabilities(struct irlap_cb *self)
IRDA_DEBUG(4, __FUNCTION__
"(), bit %d is set by defalt\n", 8-i);
comp = hashbin_find(irlap_compressors,
- compression[msb_index(mask)],
+ compressions[msb_index(mask)],
NULL);
if (!comp) {
/* Protocol not supported, so clear the bit */
IRDA_DEBUG(4, __FUNCTION__ "(), Compression "
- "protocol %d has not been loaded!\n",
- compression[msb_index(mask)]);
+ "protocol %d has not been loaded!\n",
+ compressions[msb_index(mask)]);
self->qos_rx.compression.bits &= ~mask;
IRDA_DEBUG(4, __FUNCTION__
- "(), comp bits 0x%02x\n",
- self->qos_rx.compression.bits);
+ "(), comp bits 0x%02x\n",
+ self->qos_rx.compression.bits);
}
}
/* Try the next bit */
@@ -955,17 +967,11 @@ void irlap_init_qos_capabilities(struct irlap_cb *self,
#endif
}
- /*
- * Make the intersection between IrLAP and drivers QoS
- * capabilities
- */
-
/* Use 500ms in IrLAP for now */
- self->qos_rx.max_turn_time.bits &= 0x03;
self->qos_rx.max_turn_time.bits &= 0x01;
/* Set data size */
- /* self->qos_rx.data_size.bits &= 0x03; */
+ /*self->qos_rx.data_size.bits &= 0x03;*/
/* Set disconnect time */
self->qos_rx.link_disc_time.bits &= 0x07;
@@ -988,21 +994,47 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self)
irlap_change_speed(self, 9600, TRUE);
+ /* Set mbusy when going to NDM state */
+ irda_device_set_media_busy(self->netdev, TRUE);
+
/* Default value in NDM */
self->bofs_count = 11;
- /* Use these until connection has been made */
+ /*
+ * Generate random connection address for this session, which must
+ * be 7 bits wide and different from 0x00 and 0xfe
+ */
+ while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
+ get_random_bytes(&self->caddr, sizeof(self->caddr));
+ self->caddr &= 0xfe;
+ }
+
+ /* Use default values until connection has been negitiated */
self->slot_timeout = sysctl_slot_timeout;
self->final_timeout = FINAL_TIMEOUT;
self->poll_timeout = POLL_TIMEOUT;
self->wd_timeout = WD_TIMEOUT;
+ /* Set some default values */
+ self->qos_tx.baud_rate.value = 9600;
+ self->qos_rx.baud_rate.value = 9600;
+ self->qos_tx.max_turn_time.value = 0;
+ self->qos_rx.max_turn_time.value = 0;
+ self->qos_tx.min_turn_time.value = 0;
+ self->qos_rx.min_turn_time.value = 0;
self->qos_tx.data_size.value = 64;
+ self->qos_rx.data_size.value = 64;
+ self->qos_tx.window_size.value = 1;
+ self->qos_rx.window_size.value = 1;
self->qos_tx.additional_bofs.value = 11;
+ self->qos_rx.additional_bofs.value = 11;
+ self->qos_tx.link_disc_time.value = 0;
+ self->qos_rx.link_disc_time.value = 0;
irlap_flush_all_queues(self);
self->disconnect_pending = FALSE;
+ self->connect_pending = FALSE;
}
/*
@@ -1011,56 +1043,62 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self)
* Initialize IrLAP with the negotiated QoS values
*
*/
-void irlap_apply_connection_parameters(struct irlap_cb *self,
- struct qos_info *qos)
+void irlap_apply_connection_parameters(struct irlap_cb *self)
{
IRDA_DEBUG(4, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_change_speed(self, qos->baud_rate.value, FALSE);
+ irlap_change_speed(self, self->qos_tx.baud_rate.value, FALSE);
- self->window_size = qos->window_size.value;
- self->window = qos->window_size.value;
- self->bofs_count = qos->additional_bofs.value;
+ self->window_size = self->qos_tx.window_size.value;
+ self->window = self->qos_tx.window_size.value;
+ self->bofs_count = self->qos_tx.additional_bofs.value;
/*
* Calculate how many bytes it is possible to transmit before the
- * link must be turned around wb = baud * mtt/1000 * 1/2
+ * link must be turned around
*/
- self->window_bytes = qos->baud_rate.value
- * qos->max_turn_time.value / 10000;
- IRDA_DEBUG(4, "Setting window_bytes = %d\n", self->window_bytes);
-
+ self->line_capacity =
+ irlap_max_line_capacity(self->qos_tx.baud_rate.value,
+ self->qos_tx.max_turn_time.value);
/*
* Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
* 3 seconds otherwise. See page 71 in IrLAP for more details.
* TODO: these values should be calculated from the final timer
* as well
*/
- if (qos->link_disc_time.value == 3)
+ ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
+ if (self->qos_tx.link_disc_time.value == 3)
self->N1 = 0;
else
- self->N1 = 3000 / qos->max_turn_time.value;
+ self->N1 = 3000 / self->qos_tx.max_turn_time.value;
IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
- self->N2 = qos->link_disc_time.value * 1000 / qos->max_turn_time.value;
+
+ self->N2 = self->qos_tx.link_disc_time.value * 1000 /
+ self->qos_tx.max_turn_time.value;
IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
/*
* Initialize timeout values, some of the rules are listed on
* page 92 in IrLAP.
*/
- self->poll_timeout = qos->max_turn_time.value * HZ / 1000;
- self->final_timeout = qos->max_turn_time.value * HZ / 1000;
+ self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
self->wd_timeout = self->poll_timeout * 2;
+ /*
+ * Be careful to keep our promises to the peer device about how long
+ * time it can keep the pf bit. So here we must use the rx_qos value
+ */
+ self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
+
#ifdef CONFIG_IRDA_COMPRESSION
- if (qos->compression.value) {
+ if (self->qos_tx.compression.value) {
IRDA_DEBUG(1, __FUNCTION__ "(), Initializing compression\n");
- irda_set_compression(self, qos->compression.value);
+ irda_set_compression(self, self->qos_tx.compression.value);
irlap_compressor_init(self, 0);
}
@@ -1087,7 +1125,7 @@ void irlap_set_local_busy(struct irlap_cb *self, int status)
#ifdef CONFIG_PROC_FS
/*
- * Function irlap_proc_read (buf, start, offset, len)
+ * Function irlap_proc_read (buf, start, offset, len, unused)
*
* Give some info to the /proc file system
*
@@ -1110,24 +1148,28 @@ int irlap_proc_read(char *buf, char **start, off_t offset, int len)
len += sprintf(buf+len, "irlap%d ", i++);
len += sprintf(buf+len, "state: %s\n",
- irlap_state[ self->state]);
+ irlap_state[self->state]);
len += sprintf(buf+len, " caddr: %#02x, ", self->caddr);
len += sprintf(buf+len, "saddr: %#08x, ", self->saddr);
len += sprintf(buf+len, "daddr: %#08x\n", self->daddr);
len += sprintf(buf+len, " win size: %d, ",
- self->window_size);
+ self->window_size);
len += sprintf(buf+len, "win: %d, ", self->window);
- len += sprintf(buf+len, "win bytes: %d, ", self->window_bytes);
+#if CONFIG_IRDA_DYNAMIC_WINDOW
+ len += sprintf(buf+len, "line capacity: %d, ",
+ self->line_capacity);
len += sprintf(buf+len, "bytes left: %d\n", self->bytes_left);
-
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
len += sprintf(buf+len, " tx queue len: %d ",
- skb_queue_len(&self->tx_list));
+ skb_queue_len(&self->txq));
len += sprintf(buf+len, "win queue len: %d ",
- skb_queue_len(&self->wx_list));
- len += sprintf(buf+len, "rbusy: %s\n", self->remote_busy ?
- "TRUE" : "FALSE");
+ skb_queue_len(&self->wx_list));
+ len += sprintf(buf+len, "rbusy: %s", self->remote_busy ?
+ "TRUE" : "FALSE");
+ len += sprintf(buf+len, " mbusy: %s\n", self->media_busy ?
+ "TRUE" : "FALSE");
len += sprintf(buf+len, " retrans: %d ", self->retry_count);
len += sprintf(buf+len, "vs: %d ", self->vs);
@@ -1137,42 +1179,42 @@ int irlap_proc_read(char *buf, char **start, off_t offset, int len)
len += sprintf(buf+len, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
len += sprintf(buf+len, " tx\t%d\t",
- self->qos_tx.baud_rate.value);
+ self->qos_tx.baud_rate.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.max_turn_time.value);
+ self->qos_tx.max_turn_time.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.data_size.value);
+ self->qos_tx.data_size.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.window_size.value);
+ self->qos_tx.window_size.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.additional_bofs.value);
+ self->qos_tx.additional_bofs.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.min_turn_time.value);
+ self->qos_tx.min_turn_time.value);
len += sprintf(buf+len, "%d\t",
- self->qos_tx.link_disc_time.value);
+ self->qos_tx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
len += sprintf(buf+len, "%d",
- self->qos_tx.compression.value);
+ self->qos_tx.compression.value);
#endif
len += sprintf(buf+len, "\n");
len += sprintf(buf+len, " rx\t%d\t",
- self->qos_rx.baud_rate.value);
+ self->qos_rx.baud_rate.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.max_turn_time.value);
+ self->qos_rx.max_turn_time.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.data_size.value);
+ self->qos_rx.data_size.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.window_size.value);
+ self->qos_rx.window_size.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.additional_bofs.value);
+ self->qos_rx.additional_bofs.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.min_turn_time.value);
+ self->qos_rx.min_turn_time.value);
len += sprintf(buf+len, "%d\t",
- self->qos_rx.link_disc_time.value);
+ self->qos_rx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
len += sprintf(buf+len, "%d",
- self->qos_rx.compression.value);
+ self->qos_rx.compression.value);
#endif
len += sprintf(buf+len, "\n");
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 7ddadc761..08501162e 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlap_event.c
- * Version: 0.8
+ * Version: 0.9
* Description: IrLAP state machine implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Tue Nov 16 12:33:41 1999
+ * Modified at: Sat Dec 25 21:07:57 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
@@ -85,6 +85,7 @@ static const char *irlap_event[] = {
"RESET_REQUEST",
"RESET_RESPONSE",
"SEND_I_CMD",
+ "SEND_UI_FRAME",
"RECV_DISCOVERY_XID_CMD",
"RECV_DISCOVERY_XID_RSP",
"RECV_SNRM_CMD",
@@ -92,14 +93,20 @@ static const char *irlap_event[] = {
"RECV_TEST_RSP",
"RECV_UA_RSP",
"RECV_DM_RSP",
+ "RECV_RD_RSP",
"RECV_I_CMD",
"RECV_I_RSP",
"RECV_UI_FRAME",
"RECV_FRMR_RSP",
"RECV_RR_CMD",
"RECV_RR_RSP",
- "RECV_RNR_FRAME",
- "RECV_DISC_FRAME",
+ "RECV_RNR_CMD",
+ "RECV_RNR_RSP",
+ "RECV_REJ_CMD",
+ "RECV_REJ_RSP",
+ "RECV_SREJ_CMD",
+ "RECV_SREJ_RSP",
+ "RECV_DISC_CMD",
"SLOT_TIMER_EXPIRED",
"QUERY_TIMER_EXPIRED",
"FINAL_TIMER_EXPIRED",
@@ -173,7 +180,7 @@ void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
* Send out the RR frames faster if our own transmit queue is empty, or
* if the peer is busy. The effect is a much faster conversation
*/
- if ((skb_queue_len(&self->tx_list) == 0) || (self->remote_busy)) {
+ if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) {
if (self->fast_RR == TRUE) {
/*
* Assert that the fast poll timer has not reached the
@@ -200,8 +207,9 @@ void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
} else
self->fast_RR = FALSE;
- IRDA_DEBUG(4, __FUNCTION__ "(), Timeout=%d\n", timeout);
-#endif
+ IRDA_DEBUG(3, __FUNCTION__ "(), timeout=%d (%ld)\n", timeout, jiffies);
+#endif /* CONFIG_IRDA_FAST_RR */
+
if (timeout == 0)
irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
else
@@ -223,11 +231,11 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
if (!self || self->magic != LAP_MAGIC)
return;
- IRDA_DEBUG(4, __FUNCTION__ "(), event = %s, state = %s\n",
- irlap_event[event], irlap_state[self->state]);
+ IRDA_DEBUG(3, __FUNCTION__ "(), event = %s, state = %s\n",
+ irlap_event[event], irlap_state[self->state]);
ret = (*state[self->state])(self, event, skb, info);
-
+
/*
* Check if there are any pending events that needs to be executed
*/
@@ -239,9 +247,9 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
* try to disconnect link if we send any data frames, since
* that will change the state away form XMIT
*/
- if (skb_queue_len(&self->tx_list)) {
+ if (skb_queue_len(&self->txq)) {
/* Try to send away all queued data frames */
- while ((skb = skb_dequeue(&self->tx_list)) != NULL) {
+ while ((skb = skb_dequeue(&self->txq)) != NULL) {
ret = (*state[self->state])(self, SEND_I_CMD,
skb, NULL);
if (ret == -EPROTO)
@@ -282,10 +290,11 @@ void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
if (!self || self->magic != LAP_MAGIC)
return;
- IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[ state]);
+ IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
self->state = state;
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
/*
* If we are swithing away from a XMIT state then we are allowed to
* transmit a maximum number of bytes again when we enter the XMIT
@@ -293,7 +302,8 @@ void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
* we cannot do this when swithing into the XMIT state :-)
*/
if ((state != LAP_XMIT_P) && (state != LAP_XMIT_S))
- self->bytes_left = self->window_bytes;
+ self->bytes_left = self->line_capacity;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
}
/*
@@ -307,7 +317,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
{
discovery_t *discovery_rsp;
int ret = 0;
-
+ int i;
+
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -317,7 +328,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
if (self->media_busy) {
IRDA_DEBUG(0, __FUNCTION__
- "(), CONNECT_REQUEST: media busy!\n");
+ "(), CONNECT_REQUEST: media busy!\n");
/* Always switch state before calling upper layers */
irlap_next_state(self, LAP_NDM);
@@ -343,9 +354,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
irlap_connect_indication(self, skb);
} else {
- IRDA_DEBUG(0, __FUNCTION__
- "(), SNRM frame does not contain"
- " and I field!\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), SNRM frame does not "
+ "contain an I field!\n");
dev_kfree_skb(skb);
}
break;
@@ -355,10 +365,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
if (self->media_busy) {
IRDA_DEBUG(0, __FUNCTION__ "(), media busy!\n");
/* irlap->log.condition = MEDIA_BUSY; */
-
- /* Always switch state before calling upper layers */
- irlap_next_state(self, LAP_NDM);
-
+
/* This will make IrLMP try again */
irlap_discovery_confirm(self, NULL);
return 0;
@@ -405,6 +412,32 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
}
dev_kfree_skb(skb);
break;
+#ifdef CONFIG_IRDA_ULTRA
+ case SEND_UI_FRAME:
+ /* Only allowed to repeat an operation twice */
+ for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) {
+ skb = skb_dequeue(&self->txq_ultra);
+ if (skb)
+ irlap_send_ui_frame(self, skb, CBROADCAST,
+ CMD_FRAME);
+ else
+ break;
+ }
+ if (i == 2) {
+ /* Force us to listen 500 ms again */
+ irda_device_set_media_busy(self->netdev, TRUE);
+ }
+ break;
+ case RECV_UI_FRAME:
+ /* Only accept broadcast frames in NDM mode */
+ if (info->caddr != CBROADCAST) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), not a broadcast frame!\n");
+ dev_kfree_skb(skb);
+ } else
+ irlap_unitdata_indication(self, skb);
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
case RECV_TEST_CMD:
/* Remove test frame header */
skb_pull(skb, sizeof(struct test_frame));
@@ -413,7 +446,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
* Send response. This skb will not be sent out again, and
* will only be used to send out the same info as the cmd
*/
- irlap_send_test_frame(self, info->daddr, skb);
+ irlap_send_test_frame(self, CBROADCAST, info->daddr, skb);
dev_kfree_skb(skb);
break;
case RECV_TEST_RSP:
@@ -422,8 +455,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
-
+ irlap_event[event]);
+
if (skb)
dev_kfree_skb(skb);
@@ -447,28 +480,29 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == LAP_MAGIC, return -1;);
- switch(event) {
+ switch (event) {
case RECV_DISCOVERY_XID_RSP:
ASSERT(info != NULL, return -1;);
ASSERT(info->discovery != NULL, return -1;);
IRDA_DEBUG(4, __FUNCTION__ "(), daddr=%08x\n",
- info->discovery->daddr);
+ info->discovery->daddr);
if (!self->discovery_log) {
WARNING(__FUNCTION__ "(), discovery log is gone! "
"maybe the discovery timeout has been set to "
"short?\n");
+ dev_kfree_skb(skb);
break;
}
hashbin_insert(self->discovery_log,
(queue_t *) info->discovery,
info->discovery->daddr, NULL);
- dev_kfree_skb(skb);
-
/* Keep state */
- irlap_next_state(self, LAP_QUERY);
+ /* irlap_next_state(self, LAP_QUERY); */
+
+ dev_kfree_skb(skb);
break;
case SLOT_TIMER_EXPIRED:
if (self->s < self->S) {
@@ -500,8 +534,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
default:
- IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
+ IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -565,10 +599,10 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
+ irlap_event[event]);
- if ( skb)
- dev_kfree_skb( skb);
+ if (skb)
+ dev_kfree_skb(skb);
ret = -1;
break;
@@ -595,7 +629,6 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
switch (event) {
case CONNECT_RESPONSE:
- /* skb_pull(skb, 11); */
skb_pull(skb, sizeof(struct snrm_frame));
ASSERT(self->netdev != NULL, return -1;);
@@ -616,7 +649,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
* Applying the parameters now will make sure we change speed
* after we have sent the next frame
*/
- irlap_apply_connection_parameters(self, &self->qos_tx);
+ irlap_apply_connection_parameters(self);
/*
* Sending this frame will force a speed change after it has
@@ -631,10 +664,15 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
*/
irlap_start_wd_timer(self, self->wd_timeout);
irlap_next_state(self, LAP_NRM_S);
+
+ dev_kfree_skb(skb);
break;
case RECV_DISCOVERY_XID_CMD:
- IRDA_DEBUG(3, __FUNCTION__ "(), event RECV_DISCOVER_XID_CMD!\n");
+ IRDA_DEBUG(3, __FUNCTION__
+ "(), event RECV_DISCOVER_XID_CMD!\n");
irlap_next_state(self, LAP_NDM);
+
+ dev_kfree_skb(skb);
break;
case DISCONNECT_REQUEST:
irlap_send_dm_frame(self);
@@ -671,7 +709,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == LAP_MAGIC, return -1;);
- switch(event) {
+ switch (event) {
case FINAL_TIMER_EXPIRED:
if (self->retry_count < self->N3) {
/*
@@ -694,7 +732,6 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_final_timer(self, self->final_timeout);
self->retry_count++;
break;
-
case RECV_SNRM_CMD:
IRDA_DEBUG(4, __FUNCTION__ "(), SNRM battle!\n");
@@ -716,7 +753,9 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_qos_negotiate(self, skb);
irlap_send_ua_response_frame(self, &self->qos_rx);
- irlap_apply_connection_parameters(self, &self->qos_tx);
+ irlap_apply_connection_parameters(self);
+
+ irlap_next_state(self, LAP_NRM_S);
irlap_connect_confirm(self, skb);
/*
@@ -725,8 +764,6 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
* to use twice the value (note 3 IrLAP p. 60).
*/
irlap_start_wd_timer(self, self->wd_timeout);
-
- irlap_next_state(self, LAP_NRM_S);
} else {
/* We just ignore the other device! */
dev_kfree_skb(skb);
@@ -749,7 +786,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_qos_negotiate(self, skb);
- irlap_apply_connection_parameters(self, &self->qos_tx);
+ irlap_apply_connection_parameters(self);
self->retry_count = 0;
/* This frame will actually force the speed change */
@@ -760,22 +797,20 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_connect_confirm(self, skb);
break;
-
- case RECV_DISC_FRAME:
+ case RECV_DM_RSP: /* FALLTHROUGH */
+ case RECV_DISC_CMD:
del_timer(&self->final_timer);
irlap_next_state(self, LAP_NDM);
irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ dev_kfree_skb(skb);
break;
-
- /* DM handled in irlap_frame.c, irlap_driver_rcv() */
default:
- IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
-
+ IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event,
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
-
+
ret = -1;
break;
}
@@ -815,16 +850,17 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
* Only send frame if send-window > 0.
*/
if ((self->window > 0) && (!self->remote_busy)) {
-
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if ((skb->len+self->bofs_count) > self->bytes_left) {
- IRDA_DEBUG(4, __FUNCTION__ "(), Not allowed to "
- "transmit more bytes!\n");
- skb_queue_head(&self->tx_list, skb);
+ if (skb->len > self->bytes_left) {
+ IRDA_DEBUG(4, __FUNCTION__
+ "(), Not allowed to transmit more "
+ "bytes!\n");
+ skb_queue_head(&self->txq, skb);
/*
* We should switch state to LAP_NRM_P, but
@@ -836,14 +872,14 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
*/
return -EPROTO;
}
- self->bytes_left -= (skb->len + self->bofs_count);
-
+ self->bytes_left -= skb->len;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
/*
* Send data with poll bit cleared only if window > 1
* and there is more frames after this one to be sent
*/
if ((self->window > 1) &&
- skb_queue_len( &self->tx_list) > 0)
+ skb_queue_len( &self->txq) > 0)
{
irlap_send_data_primary(self, skb);
irlap_next_state(self, LAP_XMIT_P);
@@ -860,11 +896,11 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
#ifdef CONFIG_IRDA_FAST_RR
/* Peer may want to reply immediately */
self->fast_RR = FALSE;
-#endif
+#endif /* CONFIG_IRDA_FAST_RR */
} else {
IRDA_DEBUG(4, __FUNCTION__
- "(), Unable to send! remote busy?\n");
- skb_queue_head(&self->tx_list, skb);
+ "(), Unable to send! remote busy?\n");
+ skb_queue_head(&self->txq, skb);
/*
* The next ret is important, because it tells
@@ -874,6 +910,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
case POLL_TIMER_EXPIRED:
+ IRDA_DEBUG(3, __FUNCTION__ "(), POLL_TIMER_EXPIRED (%ld)\n",
+ jiffies);
irlap_send_rr_frame(self, CMD_FRAME);
irlap_start_final_timer(self, self->final_timeout);
irlap_next_state(self, LAP_NRM_P);
@@ -889,7 +927,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -916,16 +954,17 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
ASSERT(self->magic == LAP_MAGIC, return -1;);
switch (event) {
- case RECV_UA_RSP:
+ case RECV_UA_RSP: /* FALLTHROUGH */
+ case RECV_DM_RSP:
del_timer(&self->final_timer);
irlap_apply_default_connection_parameters(self);
/* Always switch state before calling upper layers */
irlap_next_state(self, LAP_NDM);
-
+
irlap_disconnect_indication(self, LAP_DISC_INDICATION);
-
+ dev_kfree_skb(skb);
break;
case FINAL_TIMER_EXPIRED:
if (self->retry_count < self->N3) {
@@ -937,9 +976,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
} else {
irlap_apply_default_connection_parameters(self);
- /*
- * Always switch state before calling upper layers
- */
+ /* Always switch state before calling upper layers */
irlap_next_state(self, LAP_NDM);
irlap_disconnect_indication(self, LAP_NO_RESPONSE);
@@ -983,7 +1020,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
* to transmitt
*/
self->fast_RR = FALSE;
-#endif
+#endif /* CONFIG_IRDA_FAST_RR */
ASSERT( info != NULL, return -1;);
ns_status = irlap_validate_ns_received(self, info->ns);
@@ -993,9 +1030,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
* Check for expected I(nformation) frame
*/
if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) {
- /*
- * poll bit cleared?
- */
+ /* poll bit cleared? */
if (!info->pf) {
self->vr = (self->vr + 1) % 8;
@@ -1007,7 +1042,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state, do not move this line */
irlap_next_state(self, LAP_NRM_P);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
} else {
del_timer(&self->final_timer);
@@ -1031,7 +1066,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
*/
irlap_next_state(self, LAP_XMIT_P);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
/* This is the last frame */
irlap_start_poll_timer(self, self->poll_timeout);
@@ -1039,9 +1074,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
break;
}
- /*
- * Unexpected next to send (Ns)
- */
+ /* Unexpected next to send (Ns) */
if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
{
if (!info->pf) {
@@ -1055,7 +1088,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state */
irlap_next_state(self, LAP_NRM_P);
} else {
- IRDA_DEBUG( 4, __FUNCTION__
+ IRDA_DEBUG(4, __FUNCTION__
"(), missing or duplicate frame!\n");
/* Update Nr received */
@@ -1092,7 +1125,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state, do not move this line */
irlap_next_state(self, LAP_NRM_P);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
} else {
/*
* Do not resend frames until the last
@@ -1110,7 +1143,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state, do not move this line!*/
irlap_next_state(self, LAP_NRM_P);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
}
break;
}
@@ -1121,7 +1154,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
if ((ns_status == NS_UNEXPECTED) &&
(nr_status == NR_UNEXPECTED))
{
- IRDA_DEBUG( 4, "IrLAP: unexpected nr and ns!\n");
+ IRDA_DEBUG(4, __FUNCTION__
+ "(), unexpected nr and ns!\n");
if (info->pf) {
/* Resend rejected frames */
irlap_resend_rejected_frames(self, CMD_FRAME);
@@ -1137,6 +1171,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
self->ack_required = FALSE;
}
+ dev_kfree_skb(skb);
break;
}
@@ -1148,7 +1183,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
del_timer(&self->final_timer);
irlap_next_state(self, LAP_RESET_WAIT);
-
+
irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = TRUE;
} else {
@@ -1158,6 +1193,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
self->xmitflag = FALSE;
}
+ dev_kfree_skb(skb);
break;
}
IRDA_DEBUG(1, __FUNCTION__ "(), Not implemented!\n");
@@ -1166,13 +1202,13 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_event[ event], ns_status, nr_status);
break;
case RECV_UI_FRAME:
- /* poll bit cleared? */
+ /* Poll bit cleared? */
if (!info->pf) {
- irlap_unit_data_indication(self, skb);
+ irlap_data_indication(self, skb, TRUE);
irlap_next_state(self, LAP_NRM_P);
} else {
del_timer(&self->final_timer);
- irlap_unit_data_indication(self, skb);
+ irlap_data_indication(self, skb, TRUE);
irlap_start_poll_timer(self, self->poll_timeout);
}
break;
@@ -1226,7 +1262,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state(self, LAP_NRM_P);
} else if (ret == NR_INVALID) {
IRDA_DEBUG(1, __FUNCTION__ "(), Received RR with "
- "invalid nr !\n");
+ "invalid nr !\n");
del_timer(&self->final_timer);
irlap_next_state(self, LAP_RESET_WAIT);
@@ -1234,15 +1270,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = TRUE;
}
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
break;
- case RECV_RNR_FRAME:
- IRDA_DEBUG(4, "irlap_state_nrm_p: RECV_RNR_FRAME: Retrans:%d, "
- "nr=%d, va=%d, vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va, self->vs,
- self->vr);
-
+ case RECV_RNR_RSP:
ASSERT(info != NULL, return -1;);
/* Stop final timer */
@@ -1251,21 +1281,18 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Update Nr received */
irlap_update_nr_received(self, info->nr);
-
irlap_next_state(self, LAP_XMIT_P);
/* Start poll timer */
irlap_start_poll_timer(self, self->poll_timeout);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
break;
case RECV_FRMR_RSP:
del_timer(&self->final_timer);
self->xmitflag = TRUE;
irlap_next_state(self, LAP_RESET_WAIT);
irlap_reset_indication(self);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
break;
case FINAL_TIMER_EXPIRED:
/*
@@ -1274,8 +1301,10 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
* of receiving a frame (page 45, IrLAP). Check that
* we only do this once for each frame.
*/
- if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
- IRDA_DEBUG(4, "FINAL_TIMER_EXPIRED when receiving a "
+ if (irda_device_is_receiving(self->netdev) &&
+ !self->add_wait)
+ {
+ IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a "
"frame! Waiting a little bit more!\n");
irlap_start_final_timer(self, MSECS_TO_JIFFIES(300));
@@ -1299,7 +1328,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
self->retry_count++;
IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:"
- " retry_count=%d\n", self->retry_count);
+ " retry_count=%d\n", self->retry_count);
/* Keep state */
} else if (self->retry_count == self->N1) {
irlap_status_indication(STATUS_NO_ACTIVITY);
@@ -1310,7 +1339,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
self->retry_count++;
IRDA_DEBUG(4, "retry count = N1; retry_count=%d\n",
- self->retry_count);
+ self->retry_count);
/* Keep state */
} else if (self->retry_count >= self->N2) {
irlap_apply_default_connection_parameters(self);
@@ -1320,30 +1349,38 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_disconnect_indication(self, LAP_NO_RESPONSE);
}
break;
- case RECV_DISC_FRAME: /* FIXME: Check how this is in the standard! */
- IRDA_DEBUG(1, __FUNCTION__ "(), RECV_DISC_FRAME()\n");
-
- /* Always switch state before calling upper layers */
- irlap_next_state(self, LAP_NDM);
-
- irlap_wait_min_turn_around(self, &self->qos_tx);
- irlap_send_ua_response_frame(self, NULL);
-
- del_timer(&self->final_timer);
- /* del_timer( &self->poll_timer); */
+ case RECV_REJ_RSP:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+ irlap_start_final_timer(self, self->final_timeout);
+ dev_kfree_skb(skb);
+ break;
+ case RECV_SREJ_RSP:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frame(self, CMD_FRAME);
+ irlap_start_final_timer(self, self->final_timeout);
+ dev_kfree_skb(skb);
+ break;
+ case RECV_RD_RSP:
+ IRDA_DEBUG(0, __FUNCTION__ "(), RECV_RD_RSP\n");
+ irlap_next_state(self, LAP_PCLOSE);
+ irlap_send_disc_frame(self);
irlap_flush_all_queues(self);
- irlap_apply_default_connection_parameters(self);
-
- irlap_disconnect_indication(self, LAP_DISC_INDICATION);
- if (skb)
- dev_kfree_skb(skb);
-
+ irlap_start_final_timer(self, self->final_timeout);
+ self->retry_count = 0;
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
-
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -1392,7 +1429,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -1420,7 +1457,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
ASSERT(self->magic == LAP_MAGIC, return -1;);
switch (event) {
- case RECV_DISC_FRAME:
+ case RECV_DISC_CMD:
del_timer(&self->final_timer);
irlap_apply_default_connection_parameters(self);
@@ -1429,6 +1466,8 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state(self, LAP_NDM);
irlap_disconnect_indication(self, LAP_NO_RESPONSE);
+
+ dev_kfree_skb(skb);
break;
case RECV_UA_RSP:
del_timer(&self->final_timer);
@@ -1443,6 +1482,8 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state(self, LAP_XMIT_P);
irlap_start_poll_timer(self, self->poll_timeout);
+
+ dev_kfree_skb(skb);
break;
case FINAL_TIMER_EXPIRED:
if (self->retry_count < 3) {
@@ -1478,15 +1519,14 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_wd_timer(self, self->wd_timeout);
irlap_next_state(self, LAP_NDM);
} else {
- IRDA_DEBUG(0, __FUNCTION__ "(), SNRM frame contained an I "
- "field!\n");
- dev_kfree_skb(skb);
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), SNRM frame contained an I field!\n");
}
+ dev_kfree_skb(skb);
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
-
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -1508,7 +1548,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
+ IRDA_DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[event]);
ASSERT(self != NULL, return -ENODEV;);
ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -1519,13 +1559,14 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
* Send frame only if send window > 1
*/
if ((self->window > 0) && (!self->remote_busy)) {
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if ((skb->len+self->bofs_count) > self->bytes_left) {
- skb_queue_head(&self->tx_list, skb);
+ if (skb->len > self->bytes_left) {
+ skb_queue_head(&self->txq, skb);
/*
* Switch to NRM_S, this is only possible
* when we are in secondary mode, since we
@@ -1536,14 +1577,14 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
return -EPROTO; /* Try again later */
}
- self->bytes_left -= (skb->len + self->bofs_count);
-
+ self->bytes_left -= skb->len;
+#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
/*
* Send data with final bit cleared only if window > 1
* and there is more frames to be sent
*/
if ((self->window > 1) &&
- skb_queue_len(&self->tx_list) > 0)
+ skb_queue_len(&self->txq) > 0)
{
irlap_send_data_secondary(self, skb);
irlap_next_state(self, LAP_XMIT_S);
@@ -1558,14 +1599,20 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
ret = -EPROTO;
}
} else {
- IRDA_DEBUG(1, __FUNCTION__ "(), Unable to send!\n");
- skb_queue_head( &self->tx_list, skb);
+ IRDA_DEBUG(2, __FUNCTION__ "(), Unable to send!\n");
+ skb_queue_head(&self->txq, skb);
ret = -EPROTO;
}
break;
+ case DISCONNECT_REQUEST:
+ irlap_send_rd_frame(self);
+ irlap_flush_all_queues(self);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_SCLOSE);
+ break;
default:
- IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
+ IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[event]);
if (skb)
dev_kfree_skb(skb);
@@ -1598,8 +1645,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
case RECV_I_CMD: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
IRDA_DEBUG(4, __FUNCTION__ "(), event=%s nr=%d, vs=%d, ns=%d, "
- "vr=%d, pf=%d\n", irlap_event[event], info->nr,
- self->vs, info->ns, self->vr, info->pf);
+ "vr=%d, pf=%d\n", irlap_event[event], info->nr,
+ self->vs, info->ns, self->vr, info->pf);
self->retry_count = 0;
@@ -1624,18 +1671,19 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
* Starting WD-timer here is optional, but
* not recommended. Note 6 IrLAP p. 83
*/
- /* irda_start_timer(WD_TIMER, self->wd_timeout); */
-
+#if 0
+ irda_start_timer(WD_TIMER, self->wd_timeout);
+#endif
/* Keep state, do not move this line */
irlap_next_state(self, LAP_NRM_S);
- irlap_data_indication( self, skb);
+ irlap_data_indication(self, skb, FALSE);
break;
} else {
self->vr = (self->vr + 1) % 8;
/* Update Nr received */
- irlap_update_nr_received( self, info->nr);
+ irlap_update_nr_received(self, info->nr);
/*
* We should wait before sending RR, and
@@ -1650,10 +1698,10 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
* we decide if we should send a RR frame
* or not
*/
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
/* Any pending data requests? */
- if ((skb_queue_len(&self->tx_list) > 0) &&
+ if ((skb_queue_len(&self->txq) > 0) &&
(self->window > 0))
{
self->ack_required = TRUE;
@@ -1714,7 +1762,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state, do not move this line */
irlap_next_state(self, LAP_NRM_S);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
irlap_start_wd_timer(self, self->wd_timeout);
break;
}
@@ -1731,16 +1779,19 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state, do not move this line */
irlap_next_state(self, LAP_NRM_S);
- irlap_data_indication(self, skb);
+ irlap_data_indication(self, skb, FALSE);
irlap_start_wd_timer(self, self->wd_timeout);
}
+ break;
}
if (ret == NR_INVALID) {
IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n");
+ dev_kfree_skb(skb);
}
if (ret == NS_INVALID) {
IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n");
+ dev_kfree_skb(skb);
}
break;
case RECV_UI_FRAME:
@@ -1748,22 +1799,22 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
* poll bit cleared?
*/
if (!info->pf) {
- irlap_unit_data_indication(self, skb);
+ irlap_data_indication(self, skb, TRUE);
irlap_next_state(self, LAP_NRM_S); /* Keep state */
} else {
/*
* Any pending data requests?
*/
- if ((skb_queue_len(&self->tx_list) > 0) &&
+ if ((skb_queue_len(&self->txq) > 0) &&
(self->window > 0) && !self->remote_busy)
{
- irlap_unit_data_indication(self, skb);
+ irlap_data_indication(self, skb, TRUE);
del_timer(&self->wd_timer);
irlap_next_state(self, LAP_XMIT_S);
} else {
- irlap_unit_data_indication(self, skb);
+ irlap_data_indication(self, skb, TRUE);
irlap_wait_min_turn_around(self, &self->qos_tx);
@@ -1785,7 +1836,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
*/
nr_status = irlap_validate_nr_received(self, info->nr);
if (nr_status == NR_EXPECTED) {
- if ((skb_queue_len( &self->tx_list) > 0) &&
+ if ((skb_queue_len( &self->txq) > 0) &&
(self->window > 0)) {
self->remote_busy = FALSE;
@@ -1809,18 +1860,17 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
} else if (nr_status == NR_UNEXPECTED) {
self->remote_busy = FALSE;
irlap_update_nr_received(self, info->nr);
- irlap_resend_rejected_frames( self, RSP_FRAME);
+ irlap_resend_rejected_frames(self, RSP_FRAME);
irlap_start_wd_timer(self, self->wd_timeout);
/* Keep state */
irlap_next_state(self, LAP_NRM_S);
} else {
- IRDA_DEBUG(1, __FUNCTION__ "(), invalid nr not implemented!\n");
+ IRDA_DEBUG(1, __FUNCTION__
+ "(), invalid nr not implemented!\n");
}
- if (skb)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
break;
case RECV_SNRM_CMD:
/* SNRM frame is not allowed to contain an I-field */
@@ -1831,10 +1881,31 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
irlap_reset_indication(self);
} else {
- IRDA_DEBUG(0, __FUNCTION__ "(), SNRM frame contained an "
- "I-field!\n");
- dev_kfree_skb(skb);
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), SNRM frame contained an I-field!\n");
+
}
+ dev_kfree_skb(skb);
+ break;
+ case RECV_REJ_CMD:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frames(self, CMD_FRAME);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ dev_kfree_skb(skb);
+ break;
+ case RECV_SREJ_CMD:
+ irlap_update_nr_received(self, info->nr);
+ if (self->remote_busy) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rr_frame(self, CMD_FRAME);
+ } else
+ irlap_resend_rejected_frame(self, CMD_FRAME);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ dev_kfree_skb(skb);
break;
case WD_TIMER_EXPIRED:
/*
@@ -1842,7 +1913,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
* disconnect time (note 2 in IrLAP p. 82)
*/
IRDA_DEBUG(1, __FUNCTION__ "(), retry_count = %d\n",
- self->retry_count);
+ self->retry_count);
if ((self->retry_count < (self->N2/2)) &&
(self->retry_count != self->N1/2)) {
@@ -1862,7 +1933,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
irlap_disconnect_indication(self, LAP_NO_RESPONSE);
}
break;
- case RECV_DISC_FRAME:
+ case RECV_DISC_CMD:
/* Always switch state before calling upper layers */
irlap_next_state(self, LAP_NDM);
@@ -1873,9 +1944,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
irlap_apply_default_connection_parameters(self);
irlap_disconnect_indication(self, LAP_DISC_INDICATION);
- if (skb)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
break;
case RECV_DISCOVERY_XID_CMD:
irlap_wait_min_turn_around(self, &self->qos_tx);
@@ -1883,6 +1952,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
self->ack_required = TRUE;
irlap_start_wd_timer(self, self->wd_timeout);
irlap_next_state(self, LAP_NRM_S);
+
+ dev_kfree_skb(skb);
break;
case RECV_TEST_CMD:
/* Remove test frame header */
@@ -1892,13 +1963,16 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_wd_timer(self, self->wd_timeout);
/* Send response (info will be copied) */
- irlap_send_test_frame(self, info->daddr, skb);
+ irlap_send_test_frame(self, self->caddr, info->daddr, skb);
dev_kfree_skb(skb);
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n",
- event, irlap_event[event]);
- ret = -1;
+ event, irlap_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
+
+ ret = -EINVAL;
break;
}
return ret;
@@ -1910,10 +1984,53 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
*
*
*/
-static int irlap_state_sclose( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
- IRDA_DEBUG( 0, __FUNCTION__ "(), Not implemented!\n");
+ int ret = 0;
+
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
+
+ switch (event) {
+ case RECV_DISC_CMD:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_ua_response_frame(self, NULL);
+ del_timer(&self->wd_timer);
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ dev_kfree_skb(skb);
+ break;
+ case RECV_DM_RSP:
+ /* Always switch state before calling upper layers */
+ irlap_next_state(self, LAP_NDM);
+
+ del_timer(&self->wd_timer);
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ dev_kfree_skb(skb);
+ break;
+ case WD_TIMER_EXPIRED:
+ irlap_apply_default_connection_parameters(self);
+
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
+ break;
+ default:
+ IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n",
+ event, irlap_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
+
+ ret = -EINVAL;
+ break;
+ }
return -1;
}
@@ -1924,31 +2041,34 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(1, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
+ IRDA_DEBUG(1, __FUNCTION__ "(), event=%s\n", irlap_event[event]);
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
- switch(event) {
+ switch (event) {
case RESET_RESPONSE:
- irlap_send_ua_response_frame( self, &self->qos_rx);
- irlap_initiate_connection_state( self);
- irlap_start_wd_timer( self, WD_TIMEOUT);
- irlap_flush_all_queues( self);
+ irlap_send_ua_response_frame(self, &self->qos_rx);
+ irlap_initiate_connection_state(self);
+ irlap_start_wd_timer(self, WD_TIMEOUT);
+ irlap_flush_all_queues(self);
- irlap_next_state( self, LAP_NRM_S);
+ irlap_next_state(self, LAP_NRM_S);
break;
case DISCONNECT_REQUEST:
- irlap_wait_min_turn_around( self, &self->qos_tx);
- /* irlap_send_rd_frame(self); */
- irlap_start_wd_timer( self, WD_TIMEOUT);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_rd_frame(self);
+ irlap_start_wd_timer(self, WD_TIMEOUT);
+ irlap_next_state(self, LAP_SCLOSE);
break;
default:
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n",
- event, irlap_event[event]);
- ret = -1;
+ event, irlap_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
+
+ ret = -EINVAL;
break;
}
-
return ret;
}
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 5aa489cc2..830fe6ef7 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -1,16 +1,16 @@
/*********************************************************************
*
* Filename: irlap_frame.c
- * Version: 0.9
+ * Version: 1.0
* Description: Build and transmit IrLAP frames
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 10:27:26 1997
- * Modified at: Fri Nov 5 09:45:58 1999
+ * Modified at: Wed Jan 5 08:59:04 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Resrved.
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -46,8 +46,8 @@
* Function irlap_insert_info (self, skb)
*
* Insert minimum turnaround time and speed information into the skb. We
- * need to do this since it's per packet relevant information.
- *
+ * need to do this since it's per packet relevant information. Safe to
+ * have this function inlined since it's only called from one place
*/
static inline void irlap_insert_info(struct irlap_cb *self,
struct sk_buff *skb)
@@ -66,7 +66,7 @@ static inline void irlap_insert_info(struct irlap_cb *self,
self->mtt_required = 0;
/*
- * Delay equals negotiated BOFs count plus the number of BOFs to
+ * Delay equals negotiated BOFs count, plus the number of BOFs to
* force the negotiated minimum turnaround time
*/
cb->xbofs = self->bofs_count+self->xbofs_delay;
@@ -83,13 +83,6 @@ static inline void irlap_insert_info(struct irlap_cb *self,
*/
void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
{
- /* Make sure data is not larger than max data size plus LAP header */
- if (skb->len > 2050) {
- ERROR(__FUNCTION__ "(), size=%d of sk_buff to big!\n",
- (int) skb->len);
- return;
- }
-
/* Some common init stuff */
skb->dev = self->netdev;
skb->h.raw = skb->nh.raw = skb->mac.raw = skb->data;
@@ -120,8 +113,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
if (!skb)
return;
- skb_put(skb, 2);
- frame = (struct snrm_frame *) skb->data;
+ frame = (struct snrm_frame *) skb_put(skb, 2);
/* Insert connection address field */
if (qos)
@@ -216,8 +208,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
if (!skb)
return;
- skb_put(skb, 10);
- frame = (struct ua_frame *) skb->data;
+ frame = (struct ua_frame *) skb_put(skb, 10);
/* Build UA response */
frame->caddr = self->caddr;
@@ -257,8 +248,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
if (!skb)
return;
- skb_put( skb, 2);
- frame = skb->data;
+ frame = skb_put( skb, 2);
if (self->state == LAP_NDM)
frame[0] = CBROADCAST;
@@ -286,12 +276,11 @@ void irlap_send_disc_frame(struct irlap_cb *self)
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
- skb = dev_alloc_skb(32);
+ skb = dev_alloc_skb(16);
if (!skb)
return;
- skb_put(skb, 2);
- frame = skb->data;
+ frame = skb_put(skb, 2);
frame[0] = self->caddr | CMD_FRAME;
frame[1] = DISC_CMD | PF_BIT;
@@ -313,7 +302,8 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
__u32 bcast = BROADCAST;
__u8 *info;
- IRDA_DEBUG(4, __FUNCTION__ "(), s=%d, S=%d, command=%d\n", s, S, command);
+ IRDA_DEBUG(4, __FUNCTION__ "(), s=%d, S=%d, command=%d\n", s, S,
+ command);
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
@@ -369,24 +359,23 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
* EXTENSION bit is set in the first byte.
*/
if (!command || (frame->slotnr == 0xff)) {
- int i;
+ int len;
- if (discovery->hints.byte[0] & HINT_EXTENSION)
- info = skb_put(skb, 3+discovery->name_len);
- else
- info = skb_put(skb, 2+discovery->name_len);
-
- i = 0;
- info[i++] = discovery->hints.byte[0];
- if (discovery->hints.byte[0] & HINT_EXTENSION)
- info[i++] = discovery->hints.byte[1];
-
- info[i++] = discovery->charset;
-
- memcpy(&info[i++], discovery->nickname, discovery->name_len);
- }
- ASSERT(self->netdev != NULL, return;);
+ if (discovery->hints.byte[0] & HINT_EXTENSION) {
+ info = skb_put(skb, 2);
+ info[0] = discovery->hints.byte[0];
+ info[1] = discovery->hints.byte[1];
+ } else {
+ info = skb_put(skb, 1);
+ info[0] = discovery->hints.byte[0];
+ }
+ info = skb_put(skb, 1);
+ info[0] = discovery->charset;
+ len = IRDA_MIN(discovery->name_len, skb_tailroom(skb));
+ info = skb_put(skb, len);
+ memcpy(info, discovery->nickname, len);
+ }
irlap_queue_xmit(self, skb);
}
@@ -417,13 +406,15 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
/* Make sure frame is addressed to us */
if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), frame is not addressed to us!\n");
dev_kfree_skb(skb);
- IRDA_DEBUG(0, __FUNCTION__ "(), frame is not addressed to us!\n");
return;
}
if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
WARNING(__FUNCTION__ "(), kmalloc failed!\n");
+ dev_kfree_skb(skb);
return;
}
memset(discovery, 0, sizeof(discovery_t));
@@ -449,7 +440,7 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
text = (char *) &discovery_info[2];
}
/*
- * Terminate string, should be safe since this is where the
+ * Terminate info string, should be safe since this is where the
* FCS bytes resides.
*/
skb->data[skb->len] = '\0';
@@ -483,7 +474,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
/* Make sure frame is addressed to us */
if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
- IRDA_DEBUG(0, __FUNCTION__ "(), frame is not addressed to us!\n");
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), frame is not addressed to us!\n");
dev_kfree_skb(skb);
return;
}
@@ -503,6 +495,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
break;
default:
/* Error!! */
+ dev_kfree_skb(skb);
return;
}
info->s = xid->slotnr;
@@ -519,6 +512,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
if (!discovery) {
WARNING(__FUNCTION__ "(), unable to malloc!\n");
+ dev_kfree_skb(skb);
return;
}
@@ -562,7 +556,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
struct sk_buff *skb;
__u8 *frame;
- skb = dev_alloc_skb(32);
+ skb = dev_alloc_skb(16);
if (!skb)
return;
@@ -577,6 +571,29 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
}
/*
+ * Function irlap_send_rd_frame (self)
+ *
+ * Request disconnect. Used by a secondary station to request the
+ * disconnection of the link.
+ */
+void irlap_send_rd_frame(struct irlap_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ skb = dev_alloc_skb(16);
+ if (!skb)
+ return;
+
+ frame = skb_put(skb, 2);
+
+ frame[0] = self->caddr;
+ frame[1] = RD_RSP | PF_BIT;
+
+ irlap_queue_xmit(self, skb);
+}
+
+/*
* Function irlap_recv_rr_frame (skb, info)
*
* Received RR (Receive Ready) frame from peer station, no harm in
@@ -608,8 +625,7 @@ void irlap_send_frmr_frame( struct irlap_cb *self, int command)
if (!skb)
return;
- skb_put( skb, 2);
- frame = skb->data;
+ frame = skb_put( skb, 2);
frame[0] = self->caddr;
frame[0] |= (command) ? CMD_FRAME : 0;
@@ -620,7 +636,7 @@ void irlap_send_frmr_frame( struct irlap_cb *self, int command)
frame[2] = 0;
- IRDA_DEBUG( 4, __FUNCTION__ "(), vr=%d, %ld\n",self->vr, jiffies);
+ IRDA_DEBUG(4, __FUNCTION__ "(), vr=%d, %ld\n",self->vr, jiffies);
irlap_queue_xmit(self, skb);
}
@@ -631,20 +647,57 @@ void irlap_send_frmr_frame( struct irlap_cb *self, int command)
* Received RNR (Receive Not Ready) frame from peer station
*
*/
-static void irlap_recv_rnr_frame( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info)
+static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
{
- __u8 *frame;
+ info->nr = skb->data[1] >> 5;
- ASSERT( skb != NULL, return;);
- ASSERT( info != NULL, return;);
+ IRDA_DEBUG(4, __FUNCTION__ "(), nr=%d, %ld\n", info->nr, jiffies);
- frame = skb->data;
- info->nr = frame[1] >> 5;
+ if (command)
+ irlap_do_event(self, RECV_RNR_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_RNR_RSP, skb, info);
+}
+
+static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
+ info->nr = skb->data[1] >> 5;
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_REJ_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_REJ_RSP, skb, info);
+}
+
+static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
+ info->nr = skb->data[1] >> 5;
+
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_SREJ_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_SREJ_RSP, skb, info);
+}
- IRDA_DEBUG( 4, __FUNCTION__ "(), nr=%d, %ld\n", info->nr, jiffies);
+static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ IRDA_DEBUG(0, __FUNCTION__ "()\n");
- irlap_do_event( self, RECV_RNR_FRAME, skb, info);
+ /* Check if this is a command or a response frame */
+ if (command)
+ irlap_do_event(self, RECV_DISC_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_RD_RSP, skb, info);
}
/*
@@ -653,8 +706,9 @@ static void irlap_recv_rnr_frame( struct irlap_cb *self, struct sk_buff *skb,
* Received UA (Unnumbered Acknowledgement) frame
*
*/
-static void irlap_recv_ua_frame(struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info)
+static inline void irlap_recv_ua_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info)
{
irlap_do_event(self, RECV_UA_RSP, skb, info);
}
@@ -701,8 +755,8 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb)
irlap_send_i_frame( self, tx_skb, CMD_FRAME);
} else {
- IRDA_DEBUG( 4, __FUNCTION__ "(), sending unreliable frame\n");
- irlap_send_ui_frame(self, skb, CMD_FRAME);
+ IRDA_DEBUG(4, __FUNCTION__ "(), sending unreliable frame\n");
+ irlap_send_ui_frame(self, skb, self->caddr, CMD_FRAME);
self->window -= 1;
}
}
@@ -765,12 +819,12 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
del_timer(&self->poll_timer);
if (self->ack_required) {
- irlap_send_ui_frame(self, skb, CMD_FRAME);
+ irlap_send_ui_frame(self, skb, self->caddr, CMD_FRAME);
irlap_send_rr_frame(self, CMD_FRAME);
self->ack_required = FALSE;
} else {
skb->data[1] |= PF_BIT;
- irlap_send_ui_frame(self, skb, CMD_FRAME);
+ irlap_send_ui_frame(self, skb, self->caddr, CMD_FRAME);
}
self->window = self->window_size;
irlap_start_final_timer(self, self->final_timeout);
@@ -824,12 +878,12 @@ void irlap_send_data_secondary_final(struct irlap_cb *self,
irlap_send_i_frame(self, tx_skb, RSP_FRAME);
} else {
if (self->ack_required) {
- irlap_send_ui_frame(self, skb, RSP_FRAME);
+ irlap_send_ui_frame(self, skb, self->caddr, RSP_FRAME);
irlap_send_rr_frame(self, RSP_FRAME);
self->ack_required = FALSE;
} else {
skb->data[1] |= PF_BIT;
- irlap_send_ui_frame(self, skb, RSP_FRAME);
+ irlap_send_ui_frame(self, skb, self->caddr, RSP_FRAME);
}
self->window = self->window_size;
@@ -874,7 +928,7 @@ void irlap_send_data_secondary(struct irlap_cb *self, struct sk_buff *skb)
irlap_send_i_frame(self, tx_skb, RSP_FRAME);
} else {
- irlap_send_ui_frame(self, skb, RSP_FRAME);
+ irlap_send_ui_frame(self, skb, self->caddr, RSP_FRAME);
self->window -= 1;
}
}
@@ -882,13 +936,11 @@ void irlap_send_data_secondary(struct irlap_cb *self, struct sk_buff *skb)
/*
* Function irlap_resend_rejected_frames (nr)
*
- * Resend frames which has not been acknowledged. TODO: check that the
- * traversal of the list is atomic, i.e that no-one tries to insert or
- * remove frames from the list while we travers it!
- *
- * FIXME: It is not safe to traverse a this list without locking it!
+ * Resend frames which has not been acknowledged. Should be safe to
+ * traverse the list without locking it since this function will only be
+ * called from interrupt context (BH)
*/
-void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
+void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
{
struct sk_buff *tx_skb;
struct sk_buff *skb;
@@ -900,10 +952,9 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
/* Initialize variables */
skb = tx_skb = NULL;
- /*
- * Resend all unacknowledged frames
- */
count = skb_queue_len(&self->wx_list);
+
+ /* Resend unacknowledged frame(s) */
skb = skb_peek(&self->wx_list);
while (skb != NULL) {
irlap_wait_min_turn_around(self, &self->qos_tx);
@@ -913,13 +964,9 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
*/
/* tx_skb = skb_clone( skb, GFP_ATOMIC); */
tx_skb = skb_copy(skb, GFP_ATOMIC);
- if (tx_skb == NULL) {
- /* Unlink tx_skb from list */
- tx_skb->next = tx_skb->prev = NULL;
- tx_skb->list = NULL;
-
- dev_kfree_skb(skb);
- return;
+ if (!tx_skb) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), unable to copy\n");
+ return;
}
/* Unlink tx_skb from list */
tx_skb->next = tx_skb->prev = NULL;
@@ -948,22 +995,21 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
* If our skb is the last buffer in the list, then
* we are finished, if not, move to the next sk-buffer
*/
- if (skb == skb_peek_tail( &self->wx_list))
+ if (skb == skb_peek_tail(&self->wx_list))
skb = NULL;
else
skb = skb->next;
}
+#if 0 /* Not yet */
/*
* We can now fill the window with additinal data frames
*/
- return; /* Skip this for now, DB */
-
- while (skb_queue_len( &self->tx_list) > 0) {
+ while (skb_queue_len( &self->txq) > 0) {
IRDA_DEBUG(0, __FUNCTION__ "(), sending additional frames!\n");
- if ((skb_queue_len( &self->tx_list) > 0) &&
+ if ((skb_queue_len( &self->txq) > 0) &&
(self->window > 0)) {
- skb = skb_dequeue( &self->tx_list);
+ skb = skb_dequeue( &self->txq);
ASSERT(skb != NULL, return;);
/*
@@ -971,7 +1017,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
* bit cleared
*/
if ((self->window > 1) &&
- skb_queue_len(&self->tx_list) > 0)
+ skb_queue_len(&self->txq) > 0)
{
irlap_send_data_primary(self, skb);
} else {
@@ -979,6 +1025,52 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
}
}
}
+#endif
+}
+
+void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
+{
+ struct sk_buff *tx_skb;
+ struct sk_buff *skb;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+
+ /* Initialize variables */
+ skb = tx_skb = NULL;
+
+ /* Resend unacknowledged frame(s) */
+ skb = skb_peek(&self->wx_list);
+ if (skb != NULL) {
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ /* We copy the skb to be retransmitted since we will have to
+ * modify it. Cloning will confuse packet sniffers
+ */
+ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!tx_skb) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), unable to copy\n");
+ return;
+ }
+ /* Unlink tx_skb from list */
+ tx_skb->next = tx_skb->prev = NULL;
+ tx_skb->list = NULL;
+
+ /*
+ * make sure the skb->sk accounting of memory usage is sane
+ */
+ if (skb->sk != NULL)
+ skb_set_owner_w(tx_skb, skb->sk);
+
+ /* Clear old Nr field + poll bit */
+ tx_skb->data[1] &= 0x0f;
+
+ /* Set poll/final bit */
+ tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
+
+ irlap_send_i_frame(self, tx_skb, command);
+ }
}
/*
@@ -988,21 +1080,16 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
*
*/
void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
- int command)
+ __u8 caddr, int command)
{
- __u8 *frame;
-
IRDA_DEBUG(4, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == LAP_MAGIC, return;);
ASSERT(skb != NULL, return;);
- frame = skb->data;
-
/* Insert connection address */
- frame[0] = self->caddr;
- frame[0] |= (command) ? CMD_FRAME : 0;
+ skb->data[0] = caddr | ((command) ? CMD_FRAME : 0);
irlap_queue_xmit(self, skb);
}
@@ -1055,13 +1142,9 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self,
static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
struct irlap_info *info)
{
- __u8 *frame;
-
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
- frame = skb->data;
-
- info->pf = frame[1] & PF_BIT; /* Final bit */
+ info->pf = skb->data[1] & PF_BIT; /* Final bit */
irlap_do_event(self, RECV_UI_FRAME, skb, info);
}
@@ -1088,7 +1171,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
frame = skb->data;
info->nr = frame[2] >> 5; /* Next to receive */
- info->pf = frame[2] & PF_BIT; /* Final bit */
+ info->pf = frame[2] & PF_BIT; /* Final bit */
info->ns = (frame[2] >> 1) & 0x07; /* Next to send */
w = frame[3] & 0x01;
@@ -1122,7 +1205,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
* Send a test frame response
*
*/
-void irlap_send_test_frame(struct irlap_cb *self, __u32 daddr,
+void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
struct sk_buff *cmd)
{
struct sk_buff *skb;
@@ -1133,22 +1216,20 @@ void irlap_send_test_frame(struct irlap_cb *self, __u32 daddr,
if (!skb)
return;
- skb_put(skb, sizeof(struct test_frame));
+ /* Broadcast frames must include saddr and daddr fields */
+ if (caddr == CBROADCAST) {
+ frame = (struct test_frame *)
+ skb_put(skb, sizeof(struct test_frame));
- frame = (struct test_frame *) skb->data;
-
- /* Build header */
- if (self->state == LAP_NDM)
- frame->caddr = CBROADCAST; /* Send response */
- else
- frame->caddr = self->caddr;
+ /* Insert the swapped addresses */
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(daddr);
+ } else
+ frame = (struct test_frame *) skb_put(skb, LAP_MAX_HEADER);
+ frame->caddr = caddr;
frame->control = TEST_RSP;
- /* Insert the swapped addresses */
- frame->saddr = cpu_to_le32(self->saddr);
- frame->daddr = cpu_to_le32(daddr);
-
/* Copy info */
info = skb_put(skb, cmd->len);
memcpy(info, cmd->data, cmd->len);
@@ -1164,28 +1245,34 @@ void irlap_send_test_frame(struct irlap_cb *self, __u32 daddr,
* Receive a test frame
*
*/
-void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info, int command)
+static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
{
struct test_frame *frame;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
- if (skb->len < sizeof(struct test_frame)) {
- IRDA_DEBUG(0, __FUNCTION__ "() test frame to short!\n");
- return;
- }
-
frame = (struct test_frame *) skb->data;
+
+ /* Broadcast frames must carry saddr and daddr fields */
+ if (info->caddr == CBROADCAST) {
+ if (skb->len < sizeof(struct test_frame)) {
+ IRDA_DEBUG(0, __FUNCTION__
+ "() test frame to short!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* Read and swap addresses */
+ info->daddr = le32_to_cpu(frame->saddr);
+ info->saddr = le32_to_cpu(frame->daddr);
- /* Read and swap addresses */
- info->daddr = le32_to_cpu(frame->saddr);
- info->saddr = le32_to_cpu(frame->daddr);
-
- /* Make sure frame is addressed to us */
- if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
- dev_kfree_skb(skb);
- return;
+ /* Make sure frame is addressed to us */
+ if ((info->saddr != self->saddr) &&
+ (info->saddr != BROADCAST)) {
+ dev_kfree_skb(skb);
+ return;
+ }
}
if (command)
@@ -1261,18 +1348,19 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
irlap_recv_rr_frame(self, skb, &info, command);
break;
case RNR:
- irlap_recv_rnr_frame(self, skb, &info);
+ irlap_recv_rnr_frame(self, skb, &info, command);
break;
case REJ:
- IRDA_DEBUG( 0, "*** REJ frame received! ***\n");
+ irlap_recv_rej_frame(self, skb, &info, command);
break;
case SREJ:
- IRDA_DEBUG( 0, "*** SREJ frame received! ***\n");
+ irlap_recv_srej_frame(self, skb, &info, command);
break;
default:
WARNING(__FUNCTION__
"() Unknown S-frame %02x received!\n",
info.control);
+ dev_kfree_skb(skb);
break;
}
return 0;
@@ -1291,11 +1379,10 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
irlap_recv_snrm_cmd(self, skb, &info);
break;
case DM_RSP:
- IRDA_DEBUG( 0, "DM rsp frame received!\n");
- irlap_next_state(self, LAP_NDM);
+ irlap_do_event(self, RECV_DM_RSP, skb, &info);
break;
- case DISC_CMD:
- irlap_do_event(self, RECV_DISC_FRAME, skb, &info);
+ case DISC_CMD: /* And RD_RSP since they have the same value */
+ irlap_recv_disc_frame(self, skb, &info, command);
break;
case TEST_CMD:
irlap_recv_test_frame(self, skb, &info, command);
@@ -1312,7 +1399,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
default:
WARNING(__FUNCTION__ "(), Unknown frame %02x received!\n",
info.control);
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
break;
}
return 0;
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index c83d62f44..5be0298a9 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1,15 +1,15 @@
/*********************************************************************
*
* Filename: irlmp.c
- * Version: 0.9
+ * Version: 1.0
* Description: IrDA Link Management Protocol (LMP) layer
* Status: Stable.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 17 20:54:32 1997
- * Modified at: Sat Oct 9 17:00:49 1999
+ * Modified at: Wed Jan 5 11:26:03 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -69,20 +69,19 @@ int irlmp_proc_read(char *buf, char **start, off_t offst, int len);
/*
* Function irlmp_init (void)
*
- * Create (allocate) the main IrLMP structure and the pointer array
- * which will contain pointers to each instance of a LSAP.
+ * Create (allocate) the main IrLMP structure
+ *
*/
int __init irlmp_init(void)
{
/* Initialize the irlmp structure. */
- if ( irlmp == NULL) {
- irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
- if ( irlmp == NULL)
- return -ENOMEM;
- }
- memset( irlmp, 0, sizeof(struct irlmp_cb));
+ irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
+ if (irlmp == NULL)
+ return -ENOMEM;
+ memset(irlmp, 0, sizeof(struct irlmp_cb));
irlmp->magic = LMP_MAGIC;
+ spin_lock_init(&irlmp->lock);
irlmp->clients = hashbin_new(HB_GLOBAL);
irlmp->services = hashbin_new(HB_GLOBAL);
@@ -90,7 +89,7 @@ int __init irlmp_init(void)
irlmp->unconnected_lsaps = hashbin_new(HB_GLOBAL);
irlmp->cachelog = hashbin_new(HB_GLOBAL);
- irlmp->free_lsap_sel = 0x10; /* Servers use 0x00-0x0f */
+ irlmp->free_lsap_sel = 0x10; /* Reserved 0x00-0x0f */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
irlmp->cache.valid = FALSE;
#endif
@@ -134,7 +133,7 @@ void irlmp_cleanup(void)
* Register with IrLMP and create a local LSAP,
* returns handle to LSAP.
*/
-struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify)
+struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
{
struct lsap_cb *self;
@@ -142,40 +141,33 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify)
ASSERT(irlmp != NULL, return NULL;);
ASSERT(irlmp->magic == LMP_MAGIC, return NULL;);
- /*
- * Does the client care which Source LSAP selector it gets?
- */
+ /* Does the client care which Source LSAP selector it gets? */
if (slsap_sel == LSAP_ANY) {
- /*
- * Find unused LSAP
- */
slsap_sel = irlmp_find_free_slsap();
- if ( slsap_sel == 0)
- return NULL;
- } else {
- /*
- * Client wants specific LSAP, so check if it's already
- * in use
- */
- if (irlmp_slsap_inuse(slsap_sel)) {
+ if (!slsap_sel)
return NULL;
- }
- }
+ } else if (irlmp_slsap_inuse(slsap_sel))
+ return NULL;
- /*
- * Allocate new instance of a LSAP connection
- */
+ /* Allocate new instance of a LSAP connection */
self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
if (self == NULL) {
- printk( KERN_ERR "IrLMP: Can't allocate memory for "
- "LSAP control block!\n");
+ ERROR(__FUNCTION__ "(), can't allocate memory");
return NULL;
}
memset(self, 0, sizeof(struct lsap_cb));
self->magic = LMP_LSAP_MAGIC;
self->slsap_sel = slsap_sel;
- self->dlsap_sel = LSAP_ANY;
+
+ /* Fix connectionless LSAP's */
+ if (slsap_sel == LSAP_CONNLESS) {
+#ifdef CONFIG_IRDA_ULTRA
+ self->dlsap_sel = LSAP_CONNLESS;
+ self->pid = pid;
+#endif /* CONFIG_IRDA_ULTRA */
+ } else
+ self->dlsap_sel = LSAP_ANY;
self->connected = FALSE;
init_timer(&self->watchdog_timer);
@@ -185,9 +177,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify)
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- /*
- * Insert into queue of unconnected LSAPs
- */
+ /* Insert into queue of unconnected LSAPs */
hashbin_insert(irlmp->unconnected_lsaps, (queue_t *) self, (int) self,
NULL);
@@ -209,9 +199,12 @@ static void __irlmp_close_lsap(struct lsap_cb *self)
/*
* Set some of the variables to preset values
*/
- self->magic = ~LMP_LSAP_MAGIC;
+ self->magic = 0;
del_timer(&self->watchdog_timer); /* Important! */
+ if (self->conn_skb)
+ dev_kfree_skb(self->conn_skb);
+
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
ASSERT(irlmp != NULL, return;);
irlmp->cache.valid = FALSE;
@@ -249,7 +242,7 @@ void irlmp_close_lsap(struct lsap_cb *self)
}
if (!lsap) {
IRDA_DEBUG(0, __FUNCTION__
- "(), Looks like somebody has removed me already!\n");
+ "(), Looks like somebody has removed me already!\n");
return;
}
__irlmp_close_lsap(self);
@@ -373,7 +366,6 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
skb_push(skb, LMP_CONTROL_HEADER);
self->dlsap_sel = dlsap_sel;
- self->tmp_skb = skb;
/*
* Find the link to where we should try to connect since there may
@@ -384,9 +376,18 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
* device with the given daddr
*/
if (!saddr) {
- discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
- if (discovery)
+ if (daddr != DEV_ADDR_ANY)
+ discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
+ else {
+ IRDA_DEBUG(2, __FUNCTION__ "(), no daddr\n");
+ discovery = (discovery_t *)
+ hashbin_get_first(irlmp->cachelog);
+ }
+
+ if (discovery) {
saddr = discovery->saddr;
+ daddr = discovery->daddr;
+ }
}
lap = hashbin_find(irlmp->links, saddr, NULL);
if (lap == NULL) {
@@ -447,22 +448,17 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
ASSERT(self->lap != NULL, return;);
IRDA_DEBUG(2, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
- self->slsap_sel, self->dlsap_sel);
-
+ self->slsap_sel, self->dlsap_sel);
+
self->qos = *self->lap->qos;
max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
- IRDA_DEBUG(2, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
-
lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
-
max_header_size = LMP_HEADER + lap_header_size;
- IRDA_DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n", max_header_size);
-
/* Hide LMP_CONTROL_HEADER header from layer above */
skb_pull(skb, LMP_CONTROL_HEADER);
-
+
if (self->notify.connect_indication)
self->notify.connect_indication(self->notify.instance, self,
&self->qos, max_seg_size,
@@ -484,9 +480,9 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
self->connected = TRUE;
IRDA_DEBUG(2, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
- self->slsap_sel, self->dlsap_sel);
+ self->slsap_sel, self->dlsap_sel);
- /* Make room for MUX control header ( 3 bytes) */
+ /* Make room for MUX control header (3 bytes) */
ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;);
skb_push(userdata, LMP_CONTROL_HEADER);
@@ -519,7 +515,8 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
max_header_size = LMP_HEADER + lap_header_size;
- IRDA_DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n", max_header_size);
+ IRDA_DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n",
+ max_header_size);
/* Hide LMP_CONTROL_HEADER header from layer above */
skb_pull(skb, LMP_CONTROL_HEADER);
@@ -637,13 +634,12 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
struct lsap_cb *lsap;
IRDA_DEBUG(1, __FUNCTION__ "(), reason=%s\n", lmp_reasons[reason]);
-
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
ASSERT(self->connected == TRUE, return;);
IRDA_DEBUG(3, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
- self->slsap_sel, self->dlsap_sel);
+ self->slsap_sel, self->dlsap_sel);
self->connected = FALSE;
self->dlsap_sel = LSAP_ANY;
@@ -670,9 +666,12 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
/*
* Inform service user
*/
- if (self->notify.disconnect_indication) {
+ if (self->notify.disconnect_indication)
self->notify.disconnect_indication(self->notify.instance,
self, reason, userdata);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), no handler\n");
+ dev_kfree_skb(userdata);
}
}
@@ -687,9 +686,7 @@ void irlmp_do_discovery(int nslots)
struct lap_cb *lap;
/* Make sure the value is sane */
- if ((nslots != 1) && (nslots != 6) &&
- (nslots != 8) && (nslots != 16))
- {
+ if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
WARNING(__FUNCTION__
"(), invalid value for number of slots!\n");
nslots = sysctl_discovery_slots = 8;
@@ -918,13 +915,15 @@ int irlmp_data_request(struct lsap_cb *self, struct sk_buff *skb)
* Got data from LAP layer so pass it up to upper layer
*
*/
-inline void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb)
+void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb)
{
/* Hide LMP header from layer above */
skb_pull(skb, LMP_HEADER);
if (self->notify.data_indication)
self->notify.data_indication(self->notify.instance, self, skb);
+ else
+ dev_kfree_skb(skb);
}
/*
@@ -933,17 +932,17 @@ inline void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb)
*
*
*/
-inline void irlmp_udata_request(struct lsap_cb *self, struct sk_buff *skb)
+int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *skb)
{
IRDA_DEBUG(4, __FUNCTION__ "()\n");
- ASSERT(skb != NULL, return;);
+ ASSERT(skb != NULL, return -1;);
/* Make room for MUX header */
- ASSERT(skb_headroom(skb) >= LMP_HEADER, return;);
+ ASSERT(skb_headroom(skb) >= LMP_HEADER, return -1;);
skb_push(skb, LMP_HEADER);
- irlmp_do_lsap_event(self, LM_UDATA_REQUEST, skb);
+ return irlmp_do_lsap_event(self, LM_UDATA_REQUEST, skb);
}
/*
@@ -964,30 +963,83 @@ void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb)
skb_pull(skb, LMP_HEADER);
if (self->notify.udata_indication)
- self->notify.udata_indication(self->notify.instance, self, skb);
+ self->notify.udata_indication(self->notify.instance, self,
+ skb);
+ else
+ dev_kfree_skb(skb);
}
/*
- * Function irlmp_connection_less_data_request (skb)
+ * Function irlmp_connless_data_request (self, skb)
*
- * Send out of connection UI frames
+ *
*
*/
-void irlmp_connectionless_data_request( struct sk_buff *skb)
+#ifdef CONFIG_IRDA_ULTRA
+int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG( 1, __FUNCTION__ "(), Sorry not implemented\n");
+ struct sk_buff *clone_skb;
+ struct lap_cb *lap;
+
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(skb != NULL, return -1;);
+
+ /* Make room for MUX and PID header */
+ ASSERT(skb_headroom(skb) >= LMP_HEADER+LMP_PID_HEADER, return -1;);
+
+ /* Insert protocol identifier */
+ skb_push(skb, LMP_PID_HEADER);
+ skb->data[0] = self->pid;
+
+ /* Connectionless sockets must use 0x70 */
+ skb_push(skb, LMP_HEADER);
+ skb->data[0] = skb->data[1] = LSAP_CONNLESS;
+
+ /* Try to send Connectionless packets out on all links */
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ ASSERT(lap->magic == LMP_LAP_MAGIC, return -1;);
+
+ clone_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!clone_skb)
+ return -ENOMEM;
+
+ irlap_unitdata_request(lap->irlap, clone_skb);
+
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
}
+#endif /* CONFIG_IRDA_ULTRA */
/*
- * Function irlmp_connection_less_data_indication (skb)
+ * Function irlmp_connless_data_indication (self, skb)
*
- *
+ * Receive unreliable data outside any connection. Mostly used by Ultra
*
*/
-void irlmp_connectionless_data_indication(struct sk_buff *skb)
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG( 1, __FUNCTION__ "()\n");
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+
+ /* Hide LMP and PID header from layer above */
+ skb_pull(skb, LMP_HEADER+LMP_PID_HEADER);
+
+ if (self->notify.udata_indication)
+ self->notify.udata_indication(self->notify.instance, self,
+ skb);
+ else
+ dev_kfree_skb(skb);
}
+#endif /* CONFIG_IRDA_ULTRA */
void irlmp_status_request(void)
{
@@ -1310,8 +1362,14 @@ int irlmp_slsap_inuse(__u8 slsap_sel)
IRDA_DEBUG(4, __FUNCTION__ "()\n");
+#ifdef CONFIG_IRDA_ULTRA
+ /* Accept all bindings to the connectionless LSAP */
+ if (slsap_sel == LSAP_CONNLESS)
+ return FALSE;
+#endif /* CONFIG_IRDA_ULTRA */
+
/* Valid values are between 0 and 127 */
- if (slsap_sel > 127)
+ if (slsap_sel > LSAP_MAX)
return TRUE;
/*
@@ -1359,8 +1417,8 @@ __u8 irlmp_find_free_slsap(void)
while (irlmp_slsap_inuse(irlmp->free_lsap_sel)) {
irlmp->free_lsap_sel++;
- /* Check if we need to wraparound */
- if (irlmp->free_lsap_sel > 127) {
+ /* Check if we need to wraparound (0x70-0x7f are reserved) */
+ if (irlmp->free_lsap_sel > LSAP_MAX) {
irlmp->free_lsap_sel = 10;
/* Make sure we terminate the loop */
@@ -1431,7 +1489,7 @@ __u32 irlmp_get_daddr(struct lsap_cb *self)
#ifdef CONFIG_PROC_FS
/*
- * Function irlmp_proc_read (buf, start, offset, len)
+ * Function irlmp_proc_read (buf, start, offset, len, unused)
*
* Give some info to the /proc file system
*
@@ -1474,6 +1532,7 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
len += sprintf(buf+len, "saddr: %#08x, daddr: %#08x, ",
lap->saddr, lap->daddr);
+ len += sprintf(buf+len, "refcount: %d", lap->refcount);
len += sprintf(buf+len, "\n");
len += sprintf(buf+len, "\nConnected LSAPs:\n");
@@ -1495,7 +1554,6 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
}
-
restore_flags(flags);
return len;
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 122affe25..ace20d70e 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Tue Oct 5 13:47:53 1999
+ * Modified at: Tue Dec 14 23:04:16 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
@@ -23,6 +23,7 @@
*
********************************************************************/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <net/irda/irda.h>
@@ -121,7 +122,7 @@ int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
IRDA_DEBUG(4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
- irlmp_event[ event], irlmp_state[ self->lsap_state]);
+ irlmp_event[event], irlmp_state[ self->lsap_state]);
return (*lsap_state[self->lsap_state]) (self, event, skb);
}
@@ -139,8 +140,8 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
IRDA_DEBUG(4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
- irlmp_event[event],
- irlmp_state[self->lap_state]);
+ irlmp_event[event],
+ irlmp_state[self->lap_state]);
(*lap_state[self->lap_state]) (self, event, skb);
}
@@ -221,19 +222,23 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
case LM_LAP_CONNECT_REQUEST:
IRDA_DEBUG(4, __FUNCTION__ "() LS_CONNECT_REQUEST\n");
+ irlmp_next_lap_state(self, LAP_U_CONNECT);
+ self->refcount++;
+
/* FIXME: need to set users requested QoS */
irlap_connect_request(self->irlap, self->daddr, NULL, 0);
-
- irlmp_next_lap_state(self, LAP_U_CONNECT);
break;
case LM_LAP_DISCONNECT_INDICATION:
IRDA_DEBUG(4, __FUNCTION__
- "(), Error LM_LAP_DISCONNECT_INDICATION\n");
+ "(), Error LM_LAP_DISCONNECT_INDICATION\n");
- irlmp_next_lap_state( self, LAP_STANDBY);
+ irlmp_next_lap_state(self, LAP_STANDBY);
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
}
@@ -245,8 +250,8 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
* since the IrLAP connection does not exist, we must first start an
* IrLAP connection. We are now waiting response from IrLAP.
* */
-static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
@@ -254,23 +259,41 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
IRDA_DEBUG(2, __FUNCTION__ "(), event=%s\n", irlmp_event[event]);
switch (event) {
+ case LM_LAP_CONNECT_INDICATION:
+ /* It's important to switch state first, to avoid IrLMP to
+ * think that the link is free since IrLMP may then start
+ * discovery before the connection is properly set up. DB.
+ */
+ irlmp_next_lap_state(self, LAP_ACTIVE);
+
+ /* Just accept connection TODO, this should be fixed */
+ irlap_connect_response(self->irlap, skb);
+
+ lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
+ while (lsap != NULL) {
+ irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
+ lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
+ }
+ break;
+ case LM_LAP_CONNECT_REQUEST:
+ /* Already trying to connect */
+ self->refcount++;
+ break;
case LM_LAP_CONNECT_CONFIRM:
/* For all lsap_ce E Associated do LS_Connect_confirm */
irlmp_next_lap_state(self, LAP_ACTIVE);
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL) {
- irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, skb);
+ irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
- }
+ }
break;
case LM_LAP_DISCONNECT_INDICATION:
- IRDA_DEBUG(2, __FUNCTION__ "(), IRLAP_DISCONNECT_INDICATION\n");
-
irlmp_next_lap_state(self, LAP_STANDBY);
+ self->refcount = 0;
/* Send disconnect event to all LSAPs using this link */
-
lsap = (struct lsap_cb *) hashbin_get_first( self->lsaps);
while (lsap != NULL ) {
ASSERT(lsap->magic == LMP_LSAP_MAGIC, return;);
@@ -278,7 +301,7 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
lsap_current = lsap;
/* Be sure to stay one item ahead */
- lsap = (struct lsap_cb *) hashbin_get_next( self->lsaps);
+ lsap = (struct lsap_cb *) hashbin_get_next(self->lsaps);
irlmp_do_lsap_event(lsap_current,
LM_LAP_DISCONNECT_INDICATION,
NULL);
@@ -287,13 +310,15 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
case LM_LAP_DISCONNECT_REQUEST:
IRDA_DEBUG(4, __FUNCTION__ "(), LM_LAP_DISCONNECT_REQUEST\n");
- irlmp_next_lap_state(self, LAP_STANDBY);
-
- /* FIXME */
-/* irlap_disconnect_request( self->irlap); */
+ self->refcount--;
+ if (self->refcount == 0)
+ irlmp_next_lap_state(self, LAP_STANDBY);
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
}
@@ -310,11 +335,12 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
- IRDA_DEBUG( 4, __FUNCTION__ "()\n");
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
- switch( event) {
+ switch (event) {
case LM_LAP_CONNECT_REQUEST:
- IRDA_DEBUG( 4, __FUNCTION__ "(), LS_CONNECT_REQUEST\n");
+ IRDA_DEBUG(4, __FUNCTION__ "(), LS_CONNECT_REQUEST\n");
+ self->refcount++;
/*
* LAP connection allready active, just bounce back! Since we
@@ -324,7 +350,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
*/
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL) {
- irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, skb);
+ irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
}
@@ -336,11 +362,13 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
/* Be sure to stay one item ahead */
lsap = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
irlmp_do_lsap_event(lsap_current,
- LM_LAP_CONNECT_CONFIRM, skb);
+ LM_LAP_CONNECT_CONFIRM, NULL);
}
/* Keep state */
break;
case LM_LAP_DISCONNECT_REQUEST:
+ self->refcount--;
+
/*
* Need to find out if we should close IrLAP or not. If there
* is only one LSAP connection left on this link, that LSAP
@@ -363,6 +391,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
break;
case LM_LAP_DISCONNECT_INDICATION:
irlmp_next_lap_state(self, LAP_STANDBY);
+ self->refcount = 0;
/*
* Inform all connected LSAP's using this link
@@ -381,7 +410,9 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
}
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %d\n", event);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
}
@@ -409,8 +440,21 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
switch (event) {
+#ifdef CONFIG_IRDA_ULTRA
+ case LM_UDATA_INDICATION:
+ irlmp_connless_data_indication(self, skb);
+ break;
+#endif /* CONFIG_IRDA_ULTRA */
case LM_CONNECT_REQUEST:
IRDA_DEBUG(4, __FUNCTION__ "(), LM_CONNECT_REQUEST\n");
+
+ if (self->conn_skb) {
+ WARNING(__FUNCTION__
+ "(), busy with another request!\n");
+ return -EBUSY;
+ }
+ self->conn_skb = skb;
+
irlmp_next_lsap_state(self, LSAP_SETUP_PEND);
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
@@ -421,10 +465,20 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
case LM_CONNECT_INDICATION:
irlmp_next_lsap_state(self, LSAP_CONNECT_PEND);
- irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, skb);
+ if (self->conn_skb) {
+ WARNING(__FUNCTION__
+ "(), busy with another request!\n");
+ return -EBUSY;
+ }
+ self->conn_skb = skb;
+
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
default:
- IRDA_DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
+ IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
@@ -471,7 +525,10 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY);
break;
default:
- IRDA_DEBUG( 4, __FUNCTION__ "(), Unknown event\n");
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
@@ -499,21 +556,28 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
break;
case LM_CONNECT_RESPONSE:
IRDA_DEBUG(0, __FUNCTION__ "(), LM_CONNECT_RESPONSE, "
- "no indication issued yet\n");
+ "no indication issued yet\n");
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
IRDA_DEBUG(0, __FUNCTION__ "(), LM_DISCONNECT_REQUEST, "
- "not yet bound to IrLAP connection\n");
+ "not yet bound to IrLAP connection\n");
/* Keep state */
break;
case LM_LAP_CONNECT_CONFIRM:
IRDA_DEBUG(4, __FUNCTION__ "(), LS_CONNECT_CONFIRM\n");
irlmp_next_lsap_state(self, LSAP_CONNECT);
+
+ skb = self->conn_skb;
+ self->conn_skb = NULL;
+
irlmp_connect_indication(self, skb);
break;
default:
- IRDA_DEBUG( 4, __FUNCTION__ "Unknown event %d\n", event);
+ IRDA_DEBUG(0, __FUNCTION__ "Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
@@ -541,11 +605,9 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
case LM_DATA_REQUEST: /* Optimize for the common case */
irlmp_send_data_pdu(self->lap, self->dlsap_sel,
self->slsap_sel, FALSE, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
break;
case LM_DATA_INDICATION: /* Optimize for the common case */
irlmp_data_indication(self, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
break;
case LM_UDATA_REQUEST:
ASSERT(skb != NULL, return -1;);
@@ -554,21 +616,20 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
break;
case LM_UDATA_INDICATION:
irlmp_udata_indication(self, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
break;
case LM_CONNECT_REQUEST:
IRDA_DEBUG(0, __FUNCTION__ "(), LM_CONNECT_REQUEST, "
- "error, LSAP already connected\n");
+ "error, LSAP already connected\n");
/* Keep state */
break;
case LM_CONNECT_RESPONSE:
IRDA_DEBUG(0, __FUNCTION__ "(), LM_CONNECT_RESPONSE, "
- "error, LSAP allready connected\n");
+ "error, LSAP allready connected\n");
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
- irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
- self->slsap_sel, DISCONNECT, skb);
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel, self->slsap_sel,
+ DISCONNECT, skb);
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
/* Try to close the LAP connection if its still there */
@@ -603,7 +664,10 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, skb);
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
@@ -673,7 +737,10 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
@@ -700,9 +767,14 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
switch (event) {
case LM_LAP_CONNECT_CONFIRM:
+ ASSERT(self->conn_skb != NULL, return -1;);
+
+ skb = self->conn_skb;
+ self->conn_skb = NULL;
+
irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
- self->slsap_sel, CONNECT_CMD,
- self->tmp_skb);
+ self->slsap_sel, CONNECT_CMD, skb);
+
irlmp_next_lsap_state(self, LSAP_SETUP);
break;
case LM_WATCHDOG_TIMEOUT:
@@ -710,21 +782,24 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
ASSERT(self->lap != NULL, return -1;);
irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- irlmp_disconnect_indication( self, LM_CONNECT_FAILURE, NULL);
+ irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
break;
case LM_LAP_DISCONNECT_INDICATION: /* LS_Disconnect.indication */
del_timer( &self->watchdog_timer);
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- reason = irlmp_convert_lap_reason( self->lap->reason);
+ reason = irlmp_convert_lap_reason(self->lap->reason);
irlmp_disconnect_indication(self, reason, NULL);
break;
default:
- IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlmp_event[event]);
+ if (skb)
+ dev_kfree_skb(skb);
break;
}
return ret;
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 7a71005bf..c9bca1ea2 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 02:09:59 1997
- * Modified at: Thu Jul 8 12:12:02 1999
+ * Modified at: Mon Dec 13 13:41:12 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
@@ -45,9 +45,9 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
if (expedited) {
IRDA_DEBUG(4, __FUNCTION__ "(), sending expedited data\n");
- irlap_data_request(self->irlap, skb, FALSE);
- } else
irlap_data_request(self->irlap, skb, TRUE);
+ } else
+ irlap_data_request(self->irlap, skb, FALSE);
}
/*
@@ -60,8 +60,8 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
{
__u8 *frame;
- IRDA_DEBUG(4, __FUNCTION__ "()\n");
-
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
ASSERT(skb != NULL, return;);
@@ -78,8 +78,7 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
else
frame[3] = 0x00; /* rsvd */
- ASSERT(self->irlap != NULL, return;);
- irlap_data_request(self->irlap, skb, TRUE);
+ irlap_data_request(self->irlap, skb, FALSE);
}
/*
@@ -88,14 +87,16 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
* Used by IrLAP to pass received data frames to IrLMP layer
*
*/
-void irlmp_link_data_indication(struct lap_cb *self, int reliable,
- struct sk_buff *skb)
+void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
+ int unreliable)
{
struct lsap_cb *lsap;
__u8 slsap_sel; /* Source (this) LSAP address */
__u8 dlsap_sel; /* Destination LSAP address */
__u8 *fp;
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
ASSERT(skb->len > 2, return;);
@@ -107,15 +108,16 @@ void irlmp_link_data_indication(struct lap_cb *self, int reliable,
* destination LSAP of received frame is source LSAP in our view
*/
slsap_sel = fp[0] & LSAP_MASK;
- dlsap_sel = fp[1];
+ dlsap_sel = fp[1];
/*
* Check if this is an incoming connection, since we must deal with
* it in a different way than other established connections.
*/
if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
- IRDA_DEBUG(3,"Incoming connection, source LSAP=%d, dest LSAP=%d\n",
- slsap_sel, dlsap_sel);
+ IRDA_DEBUG(3, __FUNCTION__ "(), incoming connection, "
+ "source LSAP=%d, dest LSAP=%d\n",
+ slsap_sel, dlsap_sel);
/* Try to find LSAP among the unconnected LSAPs */
lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
@@ -148,7 +150,7 @@ void irlmp_link_data_indication(struct lap_cb *self, int reliable,
* Check if we received a control frame?
*/
if (fp[0] & CONTROL_BIT) {
- switch(fp[2]) {
+ switch (fp[2]) {
case CONNECT_CMD:
lsap->lap = self;
irlmp_do_lsap_event(lsap, LM_CONNECT_INDICATION, skb);
@@ -157,35 +159,107 @@ void irlmp_link_data_indication(struct lap_cb *self, int reliable,
irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
break;
case DISCONNECT:
- IRDA_DEBUG( 4, __FUNCTION__ "(), Disconnect indication!\n");
+ IRDA_DEBUG(4, __FUNCTION__
+ "(), Disconnect indication!\n");
irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
skb);
break;
case ACCESSMODE_CMD:
- IRDA_DEBUG( 0, "Access mode cmd not implemented!\n");
+ IRDA_DEBUG(0, "Access mode cmd not implemented!\n");
+ dev_kfree_skb(skb);
break;
case ACCESSMODE_CNF:
- IRDA_DEBUG( 0, "Access mode cnf not implemented!\n");
+ IRDA_DEBUG(0, "Access mode cnf not implemented!\n");
+ dev_kfree_skb(skb);
break;
default:
- IRDA_DEBUG( 0, __FUNCTION__
- "(), Unknown control frame %02x\n", fp[2]);
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), Unknown control frame %02x\n", fp[2]);
+ dev_kfree_skb(skb);
break;
}
- } else if (reliable == LAP_RELIABLE) {
+ } else if (unreliable) {
/* Optimize and bypass the state machine if possible */
if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
- irlmp_data_indication(lsap, skb);
+ irlmp_udata_indication(lsap, skb);
else
- irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb);
- } else if (reliable == LAP_UNRELIABLE) {
+ irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
+ } else {
/* Optimize and bypass the state machine if possible */
if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
irlmp_data_indication(lsap, skb);
else
- irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
+ irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb);
+ }
+}
+
+/*
+ * Function irlmp_link_unitdata_indication (self, skb)
+ *
+ *
+ *
+ */
+#ifdef CONFIG_IRDA_ULTRA
+void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
+{
+ struct lsap_cb *lsap;
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address */
+ __u8 pid; /* Protocol identifier */
+ __u8 *fp;
+
+ IRDA_DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(skb->len > 2, return;);
+
+ fp = skb->data;
+
+ /*
+ * The next statements may be confusing, but we do this so that
+ * destination LSAP of received frame is source LSAP in our view
+ */
+ slsap_sel = fp[0] & LSAP_MASK;
+ dlsap_sel = fp[1];
+ pid = fp[2];
+
+ if (pid & 0x80) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), extension in PID not supp!\n");
+ dev_kfree_skb(skb);
+
+ return;
+ }
+
+ /* Check if frame is addressed to the connectionless LSAP */
+ if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), dropping frame!\n");
+ dev_kfree_skb(skb);
+
+ return;
+ }
+
+ lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
+ while (lsap != NULL) {
+ /*
+ * Check if source LSAP and dest LSAP selectors and PID match.
+ */
+ if ((lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == dlsap_sel) &&
+ (lsap->pid == pid))
+ {
+ break;
+ }
+ lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps);
+ }
+ if (lsap)
+ irlmp_connless_data_indication(lsap, skb);
+ else {
+ IRDA_DEBUG(0, __FUNCTION__ "(), found no matching LSAP!\n");
+ dev_kfree_skb(skb);
}
}
+#endif /* CONFIG_IRDA_ULTRA */
/*
* Function irlmp_link_disconnect_indication (reason, userdata)
@@ -207,7 +281,9 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
lap->daddr = DEV_ADDR_ANY;
/* FIXME: must do something with the userdata if any */
-
+ if (userdata)
+ dev_kfree_skb(userdata);
+
/*
* Inform station state machine
*/
@@ -251,6 +327,10 @@ void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
ASSERT(qos != NULL, return;);
+ /* Don't need use the userdata for now */
+ if (userdata)
+ dev_kfree_skb(userdata);
+
/* Copy QoS settings for this session */
self->qos = qos;
@@ -270,9 +350,10 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
irlmp_add_discovery(irlmp->cachelog, discovery);
+
+#if 0 /* This will just cause a lot of connection collisions */
/* Just handle it the same way as a discovery confirm */
-#if 0
irlmp_do_lap_event(self, LM_LAP_DISCOVERY_CONFIRM, NULL);
#endif
}
@@ -369,5 +450,3 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
/* Sorry not found! */
return NULL;
}
-
-
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index ca6c9d3ba..c26433f80 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -6,10 +6,10 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Dec 15 13:55:39 1997
- * Modified at: Sun Nov 14 08:57:52 1999
+ * Modified at: Wed Jan 5 15:12:41 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1997, 1999-2000 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -140,6 +140,7 @@ EXPORT_SYMBOL(irias_add_string_attrib);
EXPORT_SYMBOL(irias_insert_object);
EXPORT_SYMBOL(irias_new_object);
EXPORT_SYMBOL(irias_delete_object);
+EXPORT_SYMBOL(irias_delete_value);
EXPORT_SYMBOL(irias_find_object);
EXPORT_SYMBOL(irias_find_attrib);
EXPORT_SYMBOL(irias_new_integer_value);
@@ -194,6 +195,7 @@ EXPORT_SYMBOL(irda_device_unregister_dongle);
EXPORT_SYMBOL(irda_task_execute);
EXPORT_SYMBOL(irda_task_kick);
EXPORT_SYMBOL(irda_task_next_state);
+EXPORT_SYMBOL(irda_task_delete);
EXPORT_SYMBOL(async_wrap_skb);
EXPORT_SYMBOL(async_unwrap_char);
@@ -330,8 +332,7 @@ void irda_execute_as_process( void *self, TODO_CALLBACK callback, __u32 param)
struct irmanager_event event;
/* Make sure irmanager is running */
- if ( !irda.in_use) {
- printk( KERN_ERR "irmanager is not running!\n");
+ if (!irda.in_use) {
return;
}
@@ -370,7 +371,6 @@ void irmanager_notify( struct irmanager_event *event)
/* Make sure irmanager is running */
if (!irda.in_use) {
- printk( KERN_ERR "irmanager is not running!\n");
return;
}
@@ -525,6 +525,24 @@ void irda_mod_dec_use_count(void)
#endif
}
+/*
+ * Function irda_proc_modcount (inode, fill)
+ *
+ * Use by the proc file system functions to prevent the irda module
+ * being removed while the use is standing in the net/irda directory
+ */
+void irda_proc_modcount(struct inode *inode, int fill)
+{
+#ifdef MODULE
+#ifdef CONFIG_PROC_FS
+ if (fill)
+ MOD_INC_USE_COUNT;
+ else
+ MOD_DEC_USE_COUNT;
+#endif /* CONFIG_PROC_FS */
+#endif /* MODULE */
+}
+
#ifdef MODULE
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 91a7a6f85..1d26d8f19 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Jun 9 13:29:31 1998
- * Modified at: Tue Oct 5 09:02:15 1999
+ * Modified at: Sun Dec 12 13:48:22 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
@@ -88,13 +88,13 @@ int hashbin_clear( hashbin_t* hashbin, FREE_FUNC free_func)
/*
* Free the entries in the hashbin
*/
- for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
- queue = dequeue_first( (queue_t**) &hashbin->hb_queue[ i]);
- while( queue ) {
- if ( free_func)
- (*free_func)( queue );
+ for (i = 0; i < HASHBIN_SIZE; i ++ ) {
+ queue = dequeue_first( (queue_t**) &hashbin->hb_queue[i]);
+ while (queue) {
+ if (free_func)
+ (*free_func)(queue);
queue = dequeue_first(
- (queue_t**) &hashbin->hb_queue[ i]);
+ (queue_t**) &hashbin->hb_queue[i]);
}
}
hashbin->hb_size = 0;
@@ -210,7 +210,7 @@ void hashbin_unlock(hashbin_t* hashbin, __u32 hashv, char* name,
* Insert an entry into the hashbin
*
*/
-void hashbin_insert( hashbin_t* hashbin, queue_t* entry, __u32 hashv, char* name)
+void hashbin_insert(hashbin_t* hashbin, queue_t* entry, __u32 hashv, char* name)
{
unsigned long flags = 0;
int bin;
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 862360b4a..46c2ae85e 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -3,13 +3,13 @@
* Filename: irttp.c
* Version: 1.2
* Description: Tiny Transport Protocol (TTP) implementation
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
- * Modified at: Tue Oct 19 21:40:00 1999
+ * Modified at: Wed Jan 5 11:31:27 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -144,6 +144,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
return NULL;
}
memset(self, 0, sizeof(struct tsap_cb));
+ spin_lock_init(&self->lock);
init_timer(&self->todo_timer);
@@ -166,7 +167,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/*
* Create LSAP at IrLMP layer
*/
- lsap = irlmp_open_lsap(stsap_sel, &ttp_notify);
+ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
if (lsap == NULL) {
WARNING(__FUNCTION__ "(), unable to allocate LSAP!!\n");
return NULL;
@@ -326,7 +327,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
* TxMaxSduSize
*/
if ((self->tx_max_sdu_size != 0) &&
- (self->tx_max_sdu_size != SAR_UNBOUND) &&
+ (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
(skb->len > self->tx_max_sdu_size))
{
ERROR(__FUNCTION__ "(), SAR enabled, "
@@ -365,7 +366,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
/* Check if we can accept more data from client */
if ((!self->tx_sdu_busy) &&
- (skb_queue_len(&self->tx_queue) > HIGH_THRESHOLD)) {
+ (skb_queue_len(&self->tx_queue) > TTP_HIGH_THRESHOLD)) {
/* Tx queue filling up, so stop client */
self->tx_sdu_busy = TRUE;
@@ -446,7 +447,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
/* Check if we can accept more frames from client */
if ((self->tx_sdu_busy) &&
- (skb_queue_len(&self->tx_queue) < LOW_THRESHOLD))
+ (skb_queue_len(&self->tx_queue) < TTP_LOW_THRESHOLD))
{
self->tx_sdu_busy = FALSE;
@@ -477,7 +478,7 @@ void irttp_give_credit(struct tsap_cb *self)
ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
IRDA_DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
- self->send_credit, self->avail_credit, self->remote_credit);
+ self->send_credit, self->avail_credit, self->remote_credit);
/* Give credit to peer */
tx_skb = dev_alloc_skb(64);
@@ -533,9 +534,11 @@ static int irttp_udata_indication(void *instance, void *sap,
ASSERT(skb != NULL, return -1;);
/* Just pass data to layer above */
- if (self->notify.udata_indication) {
- self->notify.udata_indication(self->notify.instance, self, skb);
- }
+ if (self->notify.udata_indication)
+ self->notify.udata_indication(self->notify.instance, self,skb);
+ else
+ dev_kfree_skb(skb);
+
self->stats.rx_packets++;
return 0;
@@ -573,8 +576,10 @@ static int irttp_data_indication(void *instance, void *sap,
* more bit, so the defragment routing knows what to do
*/
skb_queue_tail(&self->rx_queue, skb);
- } else
+ } else {
self->send_credit += n; /* Dataless flowdata TTP-PDU */
+ dev_kfree_skb(skb);
+ }
irttp_run_rx_queue(self);
@@ -582,7 +587,7 @@ static int irttp_data_indication(void *instance, void *sap,
* Give avay some credits to peer?
*/
if ((skb_queue_empty(&self->tx_queue)) &&
- (self->remote_credit < LOW_THRESHOLD) &&
+ (self->remote_credit < TTP_LOW_THRESHOLD) &&
(self->avail_credit > 0))
{
/* Schedule to start immediately after this thread */
@@ -830,7 +835,7 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
lsap = (struct lsap_cb *) sap;
- self->max_seg_size = max_seg_size;
+ self->max_seg_size = max_seg_size - TTP_HEADER;;
self->max_header_size = max_header_size+TTP_HEADER;
IRDA_DEBUG(4, __FUNCTION__ "(), TSAP sel=%02x\n", self->stsap_sel);
@@ -873,7 +878,8 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
self->notify.connect_indication(self->notify.instance, self,
qos, self->tx_max_sdu_size,
self->max_header_size, skb);
- }
+ } else
+ dev_kfree_skb(skb);
}
/*
@@ -895,7 +901,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
IRDA_DEBUG(4, __FUNCTION__ "(), Source TSAP selector=%02x\n",
- self->stsap_sel);
+ self->stsap_sel);
/* Any userdata supplied? */
if (userdata == NULL) {
@@ -1093,7 +1099,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
*
*/
void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
- struct sk_buff *userdata)
+ struct sk_buff *skb)
{
struct tsap_cb *self;
@@ -1106,11 +1112,22 @@ void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
self->connected = FALSE;
- if (!self->notify.disconnect_indication)
+ /* Check if client has already tried to close the TSAP */
+ if (self->close_pend) {
+ irttp_close_tsap(self);
return;
+ }
- self->notify.disconnect_indication(self->notify.instance, self, reason,
- userdata);
+ /* No need to notify the client if has already tried to disconnect */
+ if (self->disconnect_pend)
+ return;
+
+ if (self->notify.disconnect_indication)
+ self->notify.disconnect_indication(self->notify.instance, self,
+ reason, skb);
+ else
+ if (skb)
+ dev_kfree_skb(skb);
}
/*
@@ -1124,6 +1141,12 @@ void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
int err;
+ /* Check if client has already tried to close the TSAP */
+ if (self->close_pend || self->disconnect_pend) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
err = self->notify.data_indication(self->notify.instance, self, skb);
/* Usually the layer above will notify that it's input queue is
@@ -1157,8 +1180,8 @@ void irttp_run_rx_queue(struct tsap_cb *self)
struct sk_buff *skb;
int more = 0;
- IRDA_DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
- self->send_credit, self->avail_credit, self->remote_credit);
+ IRDA_DEBUG(2, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
+ self->send_credit, self->avail_credit, self->remote_credit);
if (irda_lock(&self->rx_queue_lock) == FALSE)
return;
@@ -1183,7 +1206,7 @@ void irttp_run_rx_queue(struct tsap_cb *self)
* immediately. This can be requested by clients that
* implements byte streams without any message boundaries
*/
- if (self->rx_max_sdu_size == SAR_DISABLE) {
+ if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
irttp_do_data_indication(self, skb);
self->rx_sdu_size = 0;
@@ -1209,7 +1232,7 @@ void irttp_run_rx_queue(struct tsap_cb *self)
* This is the last fragment, so time to reassemble!
*/
if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
- (self->rx_max_sdu_size == SAR_UNBOUND))
+ (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
{
/*
* A little optimizing. Only queue the fragment if
@@ -1285,10 +1308,10 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
ASSERT(self != NULL, return NULL;);
ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
- IRDA_DEBUG(4, __FUNCTION__ "(), self->rx_sdu_size=%d\n",
- self->rx_sdu_size);
+ IRDA_DEBUG(2, __FUNCTION__ "(), self->rx_sdu_size=%d\n",
+ self->rx_sdu_size);
- skb = dev_alloc_skb(self->rx_sdu_size);
+ skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
if (!skb)
return NULL;
@@ -1308,11 +1331,12 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
dev_kfree_skb(frag);
}
- IRDA_DEBUG(4, __FUNCTION__ "(), frame len=%d\n", n);
- /* Set the new length */
+ IRDA_DEBUG(2, __FUNCTION__ "(), frame len=%d\n", n);
- IRDA_DEBUG(4, __FUNCTION__ "(), rx_sdu_size=%d\n", self->rx_sdu_size);
+ IRDA_DEBUG(2, __FUNCTION__ "(), rx_sdu_size=%d\n", self->rx_sdu_size);
ASSERT(n <= self->rx_sdu_size, return NULL;);
+
+ /* Set the new length */
skb_trim(skb, n);
self->rx_sdu_size = 0;
@@ -1331,7 +1355,7 @@ static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb)
struct sk_buff *frag;
__u8 *frame;
- IRDA_DEBUG(4, __FUNCTION__ "()\n");
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1340,22 +1364,9 @@ static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb)
/*
* Split frame into a number of segments
*/
- while (skb->len > 0) {
- /*
- * Instead of making the last segment, we just
- * queue what is left of the original skb
- */
- if (skb->len < self->max_seg_size) {
- IRDA_DEBUG(4, __FUNCTION__
- "(), queuing last segment\n");
+ while (skb->len > self->max_seg_size) {
+ IRDA_DEBUG(2, __FUNCTION__ "(), fragmenting ...\n");
- frame = skb_push(skb, TTP_HEADER);
- frame[0] = 0x00; /* Clear more bit */
- skb_queue_tail(&self->tx_queue, skb);
-
- return;
- }
-
/* Make new segment */
frag = dev_alloc_skb(self->max_seg_size+self->max_header_size);
if (!frag)
@@ -1363,19 +1374,28 @@ static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb)
skb_reserve(frag, self->max_header_size);
- /*
- * Copy data from the original skb into this fragment. We
- * first insert the TTP header with the more bit set
- */
- frame = skb_put(frag, self->max_seg_size+TTP_HEADER);
+ /* Copy data from the original skb into this fragment. */
+ memcpy(skb_put(frag, self->max_seg_size), skb->data,
+ self->max_seg_size);
+
+ /* Insert TTP header, with the more bit set */
+ frame = skb_push(frag, TTP_HEADER);
frame[0] = TTP_MORE;
- memcpy(frag->data+1, skb->data, self->max_seg_size);
/* Hide the copied data from the original skb */
skb_pull(skb, self->max_seg_size);
-
+
+ /* Queue fragment */
skb_queue_tail(&self->tx_queue, frag);
}
+ /* Queue what is left of the original skb */
+ IRDA_DEBUG(2, __FUNCTION__ "(), queuing last segment\n");
+
+ frame = skb_push(skb, TTP_HEADER);
+ frame[0] = 0x00; /* Clear more bit */
+
+ /* Queue fragment */
+ skb_queue_tail(&self->tx_queue, skb);
}
/*
@@ -1422,7 +1442,7 @@ static void irttp_todo_expired(unsigned long data)
irttp_run_tx_queue(self);
/* Give avay some credits to peer? */
- if ((self->remote_credit < LOW_THRESHOLD) &&
+ if ((self->remote_credit < TTP_LOW_THRESHOLD) &&
(self->avail_credit > 0) && (skb_queue_empty(&self->tx_queue)))
{
irttp_give_credit(self);
@@ -1477,7 +1497,7 @@ static void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
#ifdef CONFIG_PROC_FS
/*
- * Function irttp_proc_read (buf, start, offset, len)
+ * Function irttp_proc_read (buf, start, offset, len, unused)
*
* Give some info to the /proc file system
*/
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index c6fd97e1e..19f76eae4 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Jun 7 10:25:11 1999
- * Modified at: Fri Nov 5 08:20:38 1999
+ * Modified at: Tue Dec 14 16:03:57 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
@@ -88,7 +88,7 @@ static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi,
ret = (*func)(self, &p, PV_GET);
/* Extract values anyway, since handler may need them */
- irda_param_pack(buf, "bb", p.pi, &p.pl);
+ irda_param_pack(buf, "bb", p.pi, p.pl);
if (ret < 0)
return ret;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index df6ba6421..2c2c39b85 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -1,13 +1,12 @@
/*********************************************************************
- *
- *
+ *
* Filename: qos.c
* Version: 1.0
* Description: IrLAP QoS parameter negotiation
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Sep 9 00:00:26 1997
- * Modified at: Tue Nov 16 09:50:19 1999
+ * Modified at: Sun Dec 12 13:47:09 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
@@ -52,18 +51,32 @@ static int irlap_param_window_size(void *instance, param_t *param, int get);
static int irlap_param_additional_bofs(void *instance, param_t *parm, int get);
static int irlap_param_min_turn_time(void *instance, param_t *param, int get);
-__u32 min_turn_time[] = { 10000, 5000, 1000, 500, 100, 50, 10, 0 }; /* us */
-__u32 baud_rates[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000,
- 1152000, 4000000, 16000000 }; /* bps */
-__u32 data_size[] = { 64, 128, 256, 512, 1024, 2048 }; /* bytes */
-__u32 add_bofs[] = { 48, 24, 12, 5, 3, 2, 1, 0 }; /* bytes */
-__u32 max_turn_time[] = { 500, 250, 100, 50 }; /* ms */
-__u32 link_disc_time[] = { 3, 8, 12, 16, 20, 25, 30, 40 }; /* secs */
+__u32 min_turn_times[] = { 10000, 5000, 1000, 500, 100, 50, 10, 0 }; /* us */
+__u32 baud_rates[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000,
+ 1152000, 4000000, 16000000 }; /* bps */
+__u32 data_sizes[] = { 64, 128, 256, 512, 1024, 2048 }; /* bytes */
+__u32 add_bofs[] = { 48, 24, 12, 5, 3, 2, 1, 0 }; /* bytes */
+__u32 max_turn_times[] = { 500, 250, 100, 50 }; /* ms */
+__u32 link_disc_times[] = { 3, 8, 12, 16, 20, 25, 30, 40 }; /* secs */
#ifdef CONFIG_IRDA_COMPRESSION
-__u32 compression[] = { CI_BZIP2, CI_DEFLATE, CI_DEFLATE_DRAFT };
+__u32 compressions[] = { CI_BZIP2, CI_DEFLATE, CI_DEFLATE_DRAFT };
#endif
+__u32 max_line_capacities[10][4] = {
+ /* 500 ms 250 ms 100 ms 50 ms (max turn time) */
+ { 100, 0, 0, 0 }, /* 2400 bps */
+ { 400, 0, 0, 0 }, /* 9600 bps */
+ { 800, 0, 0, 0 }, /* 19200 bps */
+ { 1600, 0, 0, 0 }, /* 38400 bps */
+ { 2360, 0, 0, 0 }, /* 57600 bps */
+ { 4800, 2400, 960, 480 }, /* 115200 bps */
+ { 28800, 11520, 5760, 2880 }, /* 576000 bps */
+ { 57600, 28800, 11520, 5760 }, /* 1152000 bps */
+ { 200000, 100000, 40000, 20000 }, /* 4000000 bps */
+ { 800000, 400000, 160000, 80000 }, /* 16000000 bps */
+};
+
static pi_minor_info_t pi_minor_call_table_type_0[] = {
{ NULL, 0 },
/* 01 */{ irlap_param_baud_rate, PV_INTEGER | PV_LITTLE_ENDIAN },
@@ -150,6 +163,69 @@ void irda_init_max_qos_capabilies(struct qos_info *qos)
}
/*
+ * Function irlap_adjust_qos_settings (qos)
+ *
+ * Adjust QoS settings in case some values are not possible to use because
+ * of other settings
+ */
+void irlap_adjust_qos_settings(struct qos_info *qos)
+{
+ __u32 line_capacity;
+ int index;
+
+ IRDA_DEBUG(2, __FUNCTION__ "()\n");
+
+ /*
+ * Not allowed to use a max turn time less than 500 ms if the baudrate
+ * is less than 115200
+ */
+ if ((qos->baud_rate.value < 115200) &&
+ (qos->max_turn_time.value < 500))
+ {
+ IRDA_DEBUG(0, __FUNCTION__
+ "(), adjusting max turn time from %d to 500 ms\n",
+ qos->max_turn_time.value);
+ qos->max_turn_time.value = 500;
+ }
+
+ /*
+ * The data size must be adjusted according to the baud rate and max
+ * turn time
+ */
+ index = value_index(qos->data_size.value, data_sizes);
+ line_capacity = irlap_max_line_capacity(qos->baud_rate.value,
+ qos->max_turn_time.value);
+
+#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
+ while ((qos->data_size.value > line_capacity) && (index > 0)) {
+ qos->data_size.value = data_sizes[index--];
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), redusing data size to %d\n",
+ qos->data_size.value);
+ }
+#else /* Use method descibed in section 6.6.11 of IrLAP */
+ while (irlap_requested_line_capacity(qos) > line_capacity) {
+ ASSERT(index != 0, return;);
+
+ /* Must be able to send at least one frame */
+ if (qos->window_size.value > 1) {
+ qos->window_size.value--;
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), redusing window size to %d\n",
+ qos->window_size.value);
+ } else if (index > 1) {
+ qos->data_size.value = data_sizes[index--];
+ IRDA_DEBUG(2, __FUNCTION__
+ "(), redusing data size to %d\n",
+ qos->data_size.value);
+ } else {
+ WARNING(__FUNCTION__ "(), nothing more we can do!\n");
+ }
+ }
+#endif CONFIG_IRDA_DYNAMIC_WINDOW
+}
+
+/*
* Function irlap_negotiate (qos_device, qos_session, skb)
*
* Negotiate QoS values, not really that much negotiation :-)
@@ -176,24 +252,26 @@ int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb)
/* Convert the negotiated bits to values */
irda_qos_bits_to_value(&self->qos_tx);
irda_qos_bits_to_value(&self->qos_rx);
-
+
+ irlap_adjust_qos_settings(&self->qos_tx);
+
IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n",
- self->qos_tx.baud_rate.value);
+ self->qos_tx.baud_rate.value);
IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n",
- self->qos_tx.data_size.value);
+ self->qos_tx.data_size.value);
IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n",
- self->qos_tx.window_size.value);
+ self->qos_tx.window_size.value);
IRDA_DEBUG(2, "Setting XBOFS to %d\n",
- self->qos_tx.additional_bofs.value);
+ self->qos_tx.additional_bofs.value);
IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n",
- self->qos_tx.max_turn_time.value);
+ self->qos_tx.max_turn_time.value);
IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n",
- self->qos_tx.min_turn_time.value);
+ self->qos_tx.min_turn_time.value);
IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n",
- self->qos_tx.link_disc_time.value);
+ self->qos_tx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
IRDA_DEBUG(2, "Setting COMPRESSION to %d\n",
- self->qos_tx.compression.value);
+ self->qos_tx.compression.value);
#endif
return ret;
}
@@ -278,7 +356,8 @@ static int irlap_param_baud_rate(void *instance, param_t *param, int get)
if (get) {
param->pv.i = self->qos_rx.baud_rate.bits;
- IRDA_DEBUG(2, __FUNCTION__ "(), baud rate = 0x%02x\n", param->pv.i);
+ IRDA_DEBUG(2, __FUNCTION__ "(), baud rate = 0x%02x\n",
+ param->pv.i);
} else {
/*
* Stations must agree on baud rate, so calculate
@@ -435,6 +514,57 @@ static int irlap_param_min_turn_time(void *instance, param_t *param, int get)
return 0;
}
+/*
+ * Function irlap_max_line_capacity (speed, max_turn_time, min_turn_time)
+ *
+ * Calculate the maximum line capacity
+ *
+ */
+__u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
+{
+ __u32 line_capacity;
+ int i,j;
+
+ IRDA_DEBUG(2, __FUNCTION__ "(), speed=%d, max_turn_time=%d\n",
+ speed, max_turn_time);
+
+ i = value_index(speed, baud_rates);
+ j = value_index(max_turn_time, max_turn_times);
+
+ ASSERT(((i >=0) && (i <=10)), return 0;);
+ ASSERT(((j >=0) && (j <=4)), return 0;);
+
+ line_capacity = max_line_capacities[i][j];
+
+ IRDA_DEBUG(2, __FUNCTION__ "(), line capacity=%d bytes\n",
+ line_capacity);
+
+ return line_capacity;
+}
+
+__u32 irlap_requested_line_capacity(struct qos_info *qos)
+{ __u32 line_capacity;
+
+ line_capacity = qos->window_size.value *
+ (qos->data_size.value + 6 + qos->additional_bofs.value) +
+ irlap_min_turn_time_in_bytes(qos->baud_rate.value,
+ qos->min_turn_time.value);
+
+ IRDA_DEBUG(2, __FUNCTION__ "(), requested line capacity=%d\n",
+ line_capacity);
+
+ return line_capacity;
+}
+
+__u32 irlap_min_turn_time_in_bytes(__u32 speed, __u32 min_turn_time)
+{
+ __u32 bytes;
+
+ bytes = speed * min_turn_time / 10000000;
+
+ return bytes;
+}
+
__u32 byte_value(__u8 byte, __u32 *array)
{
int index;
@@ -503,19 +633,19 @@ void irda_qos_bits_to_value(struct qos_info *qos)
qos->baud_rate.value = baud_rates[index];
index = msb_index(qos->data_size.bits);
- qos->data_size.value = data_size[index];
+ qos->data_size.value = data_sizes[index];
index = msb_index(qos->window_size.bits);
qos->window_size.value = index+1;
index = msb_index(qos->min_turn_time.bits);
- qos->min_turn_time.value = min_turn_time[index];
+ qos->min_turn_time.value = min_turn_times[index];
index = msb_index(qos->max_turn_time.bits);
- qos->max_turn_time.value = max_turn_time[index];
+ qos->max_turn_time.value = max_turn_times[index];
index = msb_index(qos->link_disc_time.bits);
- qos->link_disc_time.value = link_disc_time[index];
+ qos->link_disc_time.value = link_disc_times[index];
index = msb_index(qos->additional_bofs.bits);
qos->additional_bofs.value = add_bofs[index];
@@ -523,7 +653,7 @@ void irda_qos_bits_to_value(struct qos_info *qos)
#ifdef CONFIG_IRDA_COMPRESSION
index = msb_index(qos->compression.bits);
if (index >= 0)
- qos->compression.value = compression[index];
+ qos->compression.value = compressions[index];
else
qos->compression.value = 0;
#endif
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 974863721..7625eecfc 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Thu Oct 7 12:30:19 1999
+ * Modified at: Wed Dec 8 12:50:34 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
@@ -210,4 +210,8 @@ void irlap_media_busy_expired(void* data)
ASSERT(self != NULL, return;);
irda_device_set_media_busy(self->netdev, FALSE);
+
+ /* Send any pending Ultra frames if any */
+ if (!skb_queue_empty(&self->txq_ultra))
+ irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index 6f21b15f3..13e4d0465 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -3,10 +3,10 @@
* Filename: wrapper.c
* Version: 1.2
* Description: IrDA SIR async wrapper layer
- * Status: Experimental.
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sat Oct 30 17:24:25 1999
+ * Modified at: Sun Dec 12 13:46:40 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
@@ -86,11 +86,24 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
* additional XBOFS
*/
if (((struct irda_skb_cb *)(skb->cb))->magic != LAP_MAGIC) {
+ /*
+ * This will happen for all frames sent from user-space.
+ * Nothing to worry about, but we set the default number of
+ * BOF's
+ */
IRDA_DEBUG(1, __FUNCTION__ "(), wrong magic in skb!\n");
xbofs = 10;
} else
xbofs = ((struct irda_skb_cb *)(skb->cb))->xbofs;
+ IRDA_DEBUG(4, __FUNCTION__ "(), xbofs=%d\n", xbofs);
+
+ /* Check that we never use more than 115 + 48 xbofs */
+ if (xbofs > 163) {
+ IRDA_DEBUG(0, __FUNCTION__ "(), too many xbofs (%d)\n", xbofs);
+ xbofs = 163;
+ }
+
memset(tx_buff+n, XBOF, xbofs);
n += xbofs;
@@ -200,7 +213,7 @@ inline void async_unwrap_char(struct net_device *dev,
/*
* Function state_outside_frame (dev, rx_buff, byte)
*
- *
+ * Not receiving any frame (or just bogus data)
*
*/
static void state_outside_frame(struct net_device *dev,
@@ -219,6 +232,7 @@ static void state_outside_frame(struct net_device *dev,
irda_device_set_media_busy(dev, TRUE);
break;
default:
+ irda_device_set_media_busy(dev, TRUE);
break;
}
}
@@ -249,13 +263,12 @@ static void state_begin_frame(struct net_device *dev,
case EOF:
/* Abort frame */
rx_buff->state = OUTSIDE_FRAME;
-
+ IRDA_DEBUG(1, __FUNCTION__ "(), abort frame\n");
stats->rx_errors++;
stats->rx_frame_errors++;
break;
default:
rx_buff->data[rx_buff->len++] = byte;
-
rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
rx_buff->state = INSIDE_FRAME;
break;
@@ -265,7 +278,7 @@ static void state_begin_frame(struct net_device *dev,
/*
* Function state_link_escape (idev, byte)
*
- *
+ * Found link escape character
*
*/
static void state_link_escape(struct net_device *dev,
@@ -278,7 +291,7 @@ static void state_link_escape(struct net_device *dev,
irda_device_set_media_busy(dev, TRUE);
break;
case CE:
- IRDA_DEBUG(4, "WARNING: State not defined\n");
+ WARNING(__FUNCTION__ "(), state not defined\n");
break;
case EOF: /* Abort frame */
rx_buff->state = OUTSIDE_FRAME;
@@ -294,8 +307,7 @@ static void state_link_escape(struct net_device *dev,
rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
rx_buff->state = INSIDE_FRAME;
} else {
- IRDA_DEBUG(1, __FUNCTION__
- "(), Rx buffer overflow, aborting\n");
+ IRDA_DEBUG(1, __FUNCTION__ "(), rx buffer overflow\n");
rx_buff->state = OUTSIDE_FRAME;
}
break;
@@ -336,6 +348,7 @@ static void state_inside_frame(struct net_device *dev,
/* Wrong CRC, discard frame! */
irda_device_set_media_busy(dev, TRUE);
+ IRDA_DEBUG(1, __FUNCTION__ "(), crc error\n");
stats->rx_errors++;
stats->rx_crc_errors++;
}
diff --git a/net/netsyms.c b/net/netsyms.c
index 7fdae70ea..037d59d92 100644
--- a/net/netsyms.c
+++ b/net/netsyms.c
@@ -28,6 +28,7 @@
#endif
#include <net/pkt_sched.h>
#include <net/scm.h>
+#include <linux/random.h>
#ifdef CONFIG_BRIDGE
#include <net/br.h>
@@ -150,7 +151,7 @@ EXPORT_SYMBOL(skb_recv_datagram);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_copy_datagram);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
-EXPORT_SYMBOL(skb_realloc_headroom);
+EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(put_cmsg);
EXPORT_SYMBOL(sock_kmalloc);
@@ -235,7 +236,7 @@ EXPORT_SYMBOL(ip_options_compile);
EXPORT_SYMBOL(ip_options_undo);
EXPORT_SYMBOL(arp_send);
EXPORT_SYMBOL(arp_broken_ops);
-EXPORT_SYMBOL(ip_id_count);
+EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_send_check);
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(inet_family_ops);
@@ -376,6 +377,12 @@ EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_max_syn_backlog);
#endif
+
+#if defined (CONFIG_IPV6_MODULE)
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+EXPORT_SYMBOL(secure_ipv6_id);
+#endif
+
#endif
#ifdef CONFIG_NETLINK
@@ -542,6 +549,8 @@ EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_head);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(noop_qdisc);
+EXPORT_SYMBOL(qdisc_tree_lock);
+EXPORT_SYMBOL(qdisc_runqueue_lock);
#ifdef CONFIG_NET_SCHED
PSCHED_EXPORTLIST;
EXPORT_SYMBOL(pfifo_qdisc_ops);
@@ -578,6 +587,7 @@ EXPORT_SYMBOL(nf_reinject);
EXPORT_SYMBOL(nf_register_interest);
EXPORT_SYMBOL(nf_unregister_interest);
EXPORT_SYMBOL(nf_hook_slow);
+EXPORT_SYMBOL(nf_hooks);
#endif
EXPORT_SYMBOL(register_gifconf);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0e6617c1f..eec4d92d7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,7 +5,7 @@
*
* PACKET - implements raw packet sockets.
*
- * Version: $Id: af_packet.c,v 1.24 1999/08/30 12:14:52 davem Exp $
+ * Version: $Id: af_packet.c,v 1.26 1999/12/20 05:20:02 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/net/sched/Config.in b/net/sched/Config.in
index aeb2141bb..f1d9059f4 100644
--- a/net/sched/Config.in
+++ b/net/sched/Config.in
@@ -3,34 +3,38 @@
#
define_bool CONFIG_NETLINK y
define_bool CONFIG_RTNETLINK y
-tristate 'CBQ packet scheduler' CONFIG_NET_SCH_CBQ
-tristate 'CSZ packet scheduler' CONFIG_NET_SCH_CSZ
-#tristate 'H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
-#tristate 'H-FSC packet scheduler' CONFIG_NET_SCH_HFCS
+tristate ' CBQ packet scheduler' CONFIG_NET_SCH_CBQ
+tristate ' CSZ packet scheduler' CONFIG_NET_SCH_CSZ
+#tristate ' H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
+#tristate ' H-FSC packet scheduler' CONFIG_NET_SCH_HFCS
if [ "$CONFIG_ATM" = "y" ]; then
- bool 'ATM pseudo-scheduler' CONFIG_NET_SCH_ATM
+ bool ' ATM pseudo-scheduler' CONFIG_NET_SCH_ATM
fi
-tristate 'The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO
-tristate 'RED queue' CONFIG_NET_SCH_RED
-tristate 'SFQ queue' CONFIG_NET_SCH_SFQ
-tristate 'TEQL queue' CONFIG_NET_SCH_TEQL
-tristate 'TBF queue' CONFIG_NET_SCH_TBF
-bool 'QoS support' CONFIG_NET_QOS
+tristate ' The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO
+tristate ' RED queue' CONFIG_NET_SCH_RED
+tristate ' SFQ queue' CONFIG_NET_SCH_SFQ
+tristate ' TEQL queue' CONFIG_NET_SCH_TEQL
+tristate ' TBF queue' CONFIG_NET_SCH_TBF
+tristate ' GRED queue' CONFIG_NET_SCH_GRED
+tristate ' Diffserv field marker' CONFIG_NET_SCH_DSMARK
+tristate ' Ingress Qdisc' CONFIG_NET_SCH_INGRESS
+bool ' QoS support' CONFIG_NET_QOS
if [ "$CONFIG_NET_QOS" = "y" ]; then
- bool ' Rate estimator' CONFIG_NET_ESTIMATOR
+ bool ' Rate estimator' CONFIG_NET_ESTIMATOR
fi
-bool 'Packet classifier API' CONFIG_NET_CLS
+bool ' Packet classifier API' CONFIG_NET_CLS
if [ "$CONFIG_NET_CLS" = "y" ]; then
- tristate ' Routing table based classifier' CONFIG_NET_CLS_ROUTE4
+ tristate ' TC index classifier' CONFIG_NET_CLS_TCINDEX
+ tristate ' Routing table based classifier' CONFIG_NET_CLS_ROUTE4
if [ "$CONFIG_NET_CLS_ROUTE4" != "n" ]; then
define_bool CONFIG_NET_CLS_ROUTE y
fi
- tristate ' Firewall based classifier' CONFIG_NET_CLS_FW
- tristate ' U32 classifier' CONFIG_NET_CLS_U32
+ tristate ' Firewall based classifier' CONFIG_NET_CLS_FW
+ tristate ' U32 classifier' CONFIG_NET_CLS_U32
if [ "$CONFIG_NET_QOS" = "y" ]; then
- tristate ' Special RSVP classifier' CONFIG_NET_CLS_RSVP
- tristate ' Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6
- bool ' Ingres traffic policing' CONFIG_NET_CLS_POLICE
+ tristate ' Special RSVP classifier' CONFIG_NET_CLS_RSVP
+ tristate ' Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6
+ bool ' Traffic policing (needed for in/egress)' CONFIG_NET_CLS_POLICE
fi
fi
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 5c96ebe82..de1f71601 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -28,6 +28,14 @@ endif
endif
+ifeq ($(CONFIG_NET_SCH_INGRESS), y)
+O_OBJS += sch_ingress.o
+else
+ ifeq ($(CONFIG_NET_SCH_INGRESS), m)
+ M_OBJS += sch_ingress.o
+ endif
+endif
+
ifeq ($(CONFIG_NET_SCH_CBQ), y)
O_OBJS += sch_cbq.o
else
@@ -101,6 +109,30 @@ else
endif
endif
+ifeq ($(CONFIG_NET_SCH_GRED), y)
+O_OBJS += sch_gred.o
+else
+ ifeq ($(CONFIG_NET_SCH_GRED), m)
+ M_OBJS += sch_gred.o
+ endif
+endif
+
+ifeq ($(CONFIG_NET_SCH_DSMARK), y)
+O_OBJS += sch_dsmark.o
+else
+ ifeq ($(CONFIG_NET_SCH_DSMARK), m)
+ M_OBJS += sch_dsmark.o
+ endif
+endif
+
+ifeq ($(CONFIG_NET_CLS_TCINDEX), y)
+O_OBJS += cls_tcindex.o
+else
+ ifeq ($(CONFIG_NET_CLS_TCINDEX), m)
+ M_OBJS += cls_tcindex.o
+ endif
+endif
+
ifeq ($(CONFIG_NET_SCH_ATM), y)
O_OBJS += sch_atm.o
endif
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d77a8daf2..2632cee0f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -466,6 +466,9 @@ int __init tc_filter_init(void)
#ifdef CONFIG_NET_CLS_RSVP
INIT_TC_FILTER(rsvp);
#endif
+#ifdef CONFIG_NET_CLS_TCINDEX
+ INIT_TC_FILTER(tcindex);
+#endif
#ifdef CONFIG_NET_CLS_RSVP6
INIT_TC_FILTER(rsvp6);
#endif
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
new file mode 100644
index 000000000..5aa6133cd
--- /dev/null
+++ b/net/sched/cls_tcindex.c
@@ -0,0 +1,503 @@
+/*
+ * net/sched/cls_tcindex.c Packet classifier for skb->tc_index
+ *
+ * Written 1998,1999 by Werner Almesberger, EPFL ICA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/pkt_sched.h>
+#include <net/route.h>
+
+
+/*
+ * Not quite sure if we need all the xchgs Alexey uses when accessing things.
+ * Can always add them later ... :)
+ */
+
+/*
+ * Passing parameters to the root seems to be done more awkwardly than really
+ * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
+ * verified. FIXME.
+ */
+
+#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
+#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
+
+
+#if 1 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+
+#define PRIV(tp) ((struct tcindex_data *) (tp)->root)
+
+
+struct tcindex_filter_result {
+ struct tcf_police *police;
+ struct tcf_result res;
+};
+
+struct tcindex_filter {
+ __u16 key;
+ struct tcindex_filter_result result;
+ struct tcindex_filter *next;
+};
+
+
+struct tcindex_data {
+ struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
+ struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
+ NULL if unused */
+ __u16 mask; /* AND key with mask */
+ int shift; /* shift ANDed key to the right */
+ int hash; /* hash table size; 0 if undefined */
+ int alloc_hash; /* allocated size */
+ int fall_through; /* 0: only classify if explicit match */
+};
+
+
+static struct tcindex_filter_result *lookup(struct tcindex_data *p,__u16 key)
+{
+ struct tcindex_filter *f;
+
+ if (p->perfect)
+ return p->perfect[key].res.classid ? p->perfect+key : NULL;
+ if (!p->h)
+ return NULL;
+ for (f = p->h[key % p->hash]; f; f = f->next) {
+ if (f->key == key)
+ return &f->result;
+ }
+ return NULL;
+}
+
+
+static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ struct tcindex_data *p = PRIV(tp);
+ struct tcindex_filter_result *f;
+
+ D2PRINTK("tcindex_classify(skb %p,tp %p,res %p),p %p\n",skb,tp,res,p);
+
+ f = lookup(p,(skb->tc_index & p->mask) >> p->shift);
+ if (!f) {
+ if (!p->fall_through)
+ return -1;
+ res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle),
+ (skb->tc_index& p->mask) >> p->shift);
+ res->class = 0;
+ D2PRINTK("alg 0x%x\n",res->classid);
+ return 0;
+ }
+ *res = f->res;
+ D2PRINTK("map 0x%x\n",res->classid);
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ int result;
+
+ result = tcf_police(skb,f->police);
+ D2PRINTK("police %d\n",res);
+ return result;
+ }
+#endif
+ return 0;
+}
+
+
+static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
+{
+ DPRINTK("tcindex_get(tp %p,handle 0x%08x)\n",tp,handle);
+ return (unsigned long) lookup(PRIV(tp),handle);
+}
+
+
+static void tcindex_put(struct tcf_proto *tp, unsigned long f)
+{
+ DPRINTK("tcindex_put(tp %p,f 0x%lx)\n",tp,f);
+}
+
+
+static int tcindex_init(struct tcf_proto *tp)
+{
+ struct tcindex_data *p;
+
+ DPRINTK("tcindex_init(tp %p)\n",tp);
+ MOD_INC_USE_COUNT;
+ p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL);
+ if (!p) {
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ tp->root = p;
+ p->perfect = NULL;
+ p->h = NULL;
+ p->hash = 0;
+ p->mask = 0xffff;
+ p->shift = 0;
+ p->fall_through = 1;
+ return 0;
+}
+
+
+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+{
+ struct tcindex_data *p = PRIV(tp);
+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
+ struct tcindex_filter *f = NULL;
+ unsigned long cl;
+
+ DPRINTK("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n",tp,arg,p,f);
+ if (p->perfect) {
+ if (!r->res.classid)
+ return -ENOENT;
+ } else {
+ int i;
+ struct tcindex_filter **walk = NULL;
+
+ for (i = 0; !f && i < p->hash; i++) {
+ for (walk = p->h+i; !f && *walk; walk = &(*walk)->next) {
+ if (&(*walk)->result == r)
+ f = *walk;
+ }
+ }
+ if (!f)
+ return -ENOENT;
+/*
+ @@@ OK? -- No (jhs)
+Look more into it
+ tcf_tree_lock(tp);
+*/
+ *walk = f->next;
+/*
+ tcf_tree_unlock(tp);
+*/
+ }
+ cl = __cls_set_class(&r->res.class,0);
+ if (cl)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
+#ifdef CONFIG_NET_CLS_POLICE
+ tcf_police_release(r->police);
+#endif
+ if (f)
+ kfree(f);
+ return 0;
+}
+
+
+/*
+ * There are no parameters for tcindex_init, so we overload tcindex_change
+ */
+
+
+static int tcindex_change(struct tcf_proto *tp,unsigned long base,u32 handle,
+ struct rtattr **tca,unsigned long *arg)
+{
+ struct tcindex_filter_result new_filter_result = {
+ NULL, /* no policing */
+ { 0,0 }, /* no classification */
+ };
+ struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *tb[TCA_TCINDEX_MAX];
+ struct tcindex_data *p = PRIV(tp);
+ struct tcindex_filter *f;
+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
+ struct tcindex_filter **walk;
+ int hash;
+ __u16 mask;
+
+ DPRINTK("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
+ "p %p,r %p\n",tp,handle,tca,arg,opt,p,r);
+ if (arg)
+ DPRINTK("*arg = 0x%lx\n",*arg);
+ if (!opt)
+ return 0;
+ if (rtattr_parse(tb,TCA_TCINDEX_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0)
+ return -EINVAL;
+ if (!tb[TCA_TCINDEX_HASH-1]) {
+ hash = p->hash;
+ } else {
+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_HASH-1]) < sizeof(int))
+ return -EINVAL;
+ hash = *(int *) RTA_DATA(tb[TCA_TCINDEX_HASH-1]);
+ }
+ if (!tb[TCA_TCINDEX_MASK-1]) {
+ mask = p->mask;
+ } else {
+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_MASK-1]) < sizeof(__u16))
+ return -EINVAL;
+ mask = *(__u16 *) RTA_DATA(tb[TCA_TCINDEX_MASK-1]);
+ }
+ if (p->perfect && hash <= mask)
+ return -EBUSY;
+ if ((p->perfect || p->h) && hash > p->alloc_hash)
+ return -EBUSY;
+ p->hash = hash;
+ p->mask = mask;
+ if (tb[TCA_TCINDEX_SHIFT-1]) {
+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(__u16))
+ return -EINVAL;
+ p->shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
+ }
+ if (tb[TCA_TCINDEX_FALL_THROUGH-1]) {
+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_FALL_THROUGH-1]) < sizeof(int))
+ return -EINVAL;
+ p->fall_through =
+ *(int *) RTA_DATA(tb[TCA_TCINDEX_FALL_THROUGH-1]);
+ }
+ DPRINTK("classid/police %p/%p\n",tb[TCA_TCINDEX_CLASSID-1],
+ tb[TCA_TCINDEX_POLICE-1]);
+ if (!tb[TCA_TCINDEX_CLASSID-1] && !tb[TCA_TCINDEX_POLICE-1])
+ return 0;
+ if (!p->hash) {
+ if (p->mask < PERFECT_HASH_THRESHOLD) {
+ p->hash = p->mask+1;
+ } else {
+ p->hash = DEFAULT_HASH_SIZE;
+ }
+ }
+ if (!p->perfect && !p->h) {
+ p->alloc_hash = p->hash;
+ DPRINTK("hash %d mask %d\n",p->hash,p->mask);
+ if (p->hash > p->mask) {
+ p->perfect = kmalloc(p->hash*
+ sizeof(struct tcindex_filter_result),GFP_KERNEL);
+ if (!p->perfect)
+ return -ENOMEM;
+ memset(p->perfect, 0,
+ p->hash * sizeof(struct tcindex_filter_result));
+ } else {
+ p->h = kmalloc(p->hash*sizeof(struct tcindex_filter *),
+ GFP_KERNEL);
+ if (!p->h)
+ return -ENOMEM;
+ memset(p->h, 0, p->hash*sizeof(struct tcindex_filter *));
+ }
+ }
+ if (handle > p->mask)
+ return -EINVAL;
+ if (p->perfect) {
+ r = p->perfect+handle;
+ } else {
+ r = lookup(p,handle);
+ DPRINTK("r=%p\n",r);
+ if (!r)
+ r = &new_filter_result;
+ }
+ DPRINTK("r=%p\n",r);
+ if (tb[TCA_TCINDEX_CLASSID-1]) {
+ unsigned long cl = cls_set_class(tp,&r->res.class,0);
+
+ if (cl)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
+ r->res.classid = *(__u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
+ r->res.class = tp->q->ops->cl_ops->bind_tcf(tp->q,base,
+ r->res.classid);
+ if (!r->res.class) {
+ r->res.classid = 0;
+ return -ENOENT;
+ }
+ }
+#ifdef CONFIG_NET_CLS_POLICE
+ if (!tb[TCA_TCINDEX_POLICE-1]) {
+ r->police = NULL;
+ } else {
+ struct tcf_police *police =
+ tcf_police_locate(tb[TCA_TCINDEX_POLICE-1],NULL);
+
+ tcf_tree_lock(tp);
+ police = xchg(&r->police,police);
+ tcf_tree_unlock(tp);
+ tcf_police_release(police);
+ }
+#endif
+ if (r != &new_filter_result)
+ return 0;
+ f = kmalloc(sizeof(struct tcindex_filter),GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+ f->key = handle;
+ f->result = new_filter_result;
+ f->next = NULL;
+ for (walk = p->h+(handle % p->hash); *walk; walk = &(*walk)->next)
+ /* nothing */;
+ wmb();
+ *walk = f;
+ return 0;
+}
+
+
+static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
+{
+ struct tcindex_data *p = PRIV(tp);
+ struct tcindex_filter *f;
+ int i;
+
+ DPRINTK("tcindex_walk(tp %p,walker %p),p %p\n",tp,walker,p);
+ if (p->perfect) {
+ for (i = 0; i < p->hash; i++) {
+ if (!p->perfect[i].res.classid)
+ continue;
+ if (walker->count >= walker->skip) {
+ if (walker->fn(tp,
+ (unsigned long) (p->perfect+i), walker)
+ < 0) {
+ walker->stop = 1;
+ return;
+ }
+ }
+ walker->count++;
+ }
+ }
+ if (!p->h)
+ return;
+ for (i = 0; i < p->hash; i++) {
+ for (f = p->h[i]; f; f = f->next) {
+ if (walker->count >= walker->skip) {
+ if (walker->fn(tp,(unsigned long) &f->result,
+ walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ }
+ walker->count++;
+ }
+ }
+}
+
+
+static int tcindex_destroy_element(struct tcf_proto *tp,
+ unsigned long arg, struct tcf_walker *walker)
+{
+ return tcindex_delete(tp,arg);
+}
+
+
+static void tcindex_destroy(struct tcf_proto *tp)
+{
+ struct tcindex_data *p = PRIV(tp);
+ struct tcf_walker walker;
+
+ DPRINTK("tcindex_destroy(tp %p),p %p\n",tp,p);
+ walker.count = 0;
+ walker.skip = 0;
+ walker.fn = &tcindex_destroy_element;
+ tcindex_walk(tp,&walker);
+ if (p->perfect)
+ kfree(p->perfect);
+ if (p->h)
+ kfree(p->h);
+ kfree(p);
+ tp->root = NULL;
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef CONFIG_RTNETLINK
+
+static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *t)
+{
+ struct tcindex_data *p = PRIV(tp);
+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+
+ DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
+ tp,fh,skb,t,p,r,b);
+ DPRINTK("p->perfect %p p->h %p\n",p->perfect,p->h);
+ rta = (struct rtattr *) b;
+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
+ if (!fh) {
+ t->tcm_handle = ~0; /* whatever ... */
+ RTA_PUT(skb,TCA_TCINDEX_HASH,sizeof(p->hash),&p->hash);
+ RTA_PUT(skb,TCA_TCINDEX_MASK,sizeof(p->mask),&p->mask);
+ RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift);
+ RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through),
+ &p->fall_through);
+ } else {
+ if (p->perfect) {
+ t->tcm_handle = r-p->perfect;
+ } else {
+ struct tcindex_filter *f;
+ int i;
+
+ t->tcm_handle = 0;
+ for (i = 0; !t->tcm_handle && i < p->hash; i++) {
+ for (f = p->h[i]; !t->tcm_handle && f;
+ f = f->next) {
+ if (&f->result == r)
+ t->tcm_handle = f->key;
+ }
+ }
+ }
+ DPRINTK("handle = %d\n",t->tcm_handle);
+ if (r->res.class)
+ RTA_PUT(skb, TCA_TCINDEX_CLASSID, 4, &r->res.classid);
+#ifdef CONFIG_NET_CLS_POLICE
+ if (r->police) {
+ struct rtattr *p_rta = (struct rtattr *) skb->tail;
+
+ RTA_PUT(skb,TCA_TCINDEX_POLICE,0,NULL);
+ if (tcf_police_dump(skb,r->police) < 0)
+ goto rtattr_failure;
+ p_rta->rta_len = skb->tail-(u8 *) p_rta;
+ }
+#endif
+ }
+ rta->rta_len = skb->tail-b;
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+#endif
+
+
+struct tcf_proto_ops cls_tcindex_ops = {
+ NULL,
+ "tcindex",
+ tcindex_classify,
+ tcindex_init,
+ tcindex_destroy,
+
+ tcindex_get,
+ tcindex_put,
+ tcindex_change,
+ tcindex_delete,
+ tcindex_walk,
+#ifdef CONFIG_RTNETLINK
+ tcindex_dump
+#else
+ NULL
+#endif
+};
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ return register_tcf_proto_ops(&cls_tcindex_ops);
+}
+
+void cleanup_module(void)
+{
+ unregister_tcf_proto_ops(&cls_tcindex_ops);
+}
+#endif
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 09f16a51c..1d57af985 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -12,6 +12,7 @@
*
* Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
* Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
+ * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
*/
#include <linux/config.h>
@@ -210,6 +211,7 @@ struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
if (cops == NULL)
return NULL;
cl = cops->get(p, classid);
+
if (cl == 0)
return NULL;
leaf = cops->leaf(p, cl);
@@ -306,17 +308,32 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
write_lock(&qdisc_tree_lock);
spin_lock_bh(&dev->queue_lock);
- oqdisc = dev->qdisc_sleeping;
+ if (qdisc && qdisc->flags&TCQ_F_INGRES) {
+ oqdisc = dev->qdisc_ingress;
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
+ /* delete */
+ qdisc_reset(oqdisc);
+ dev->qdisc_ingress = NULL;
+ } else { /* new */
+ dev->qdisc_ingress = qdisc;
+ }
+
+ } else {
+
+ oqdisc = dev->qdisc_sleeping;
- /* Prune old scheduler */
- if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
- qdisc_reset(oqdisc);
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+ qdisc_reset(oqdisc);
+
+ /* ... and graft new one */
+ if (qdisc == NULL)
+ qdisc = &noop_qdisc;
+ dev->qdisc_sleeping = qdisc;
+ dev->qdisc = &noop_qdisc;
+ }
- /* ... and graft new one */
- if (qdisc == NULL)
- qdisc = &noop_qdisc;
- dev->qdisc_sleeping = qdisc;
- dev->qdisc = &noop_qdisc;
spin_unlock_bh(&dev->queue_lock);
write_unlock(&qdisc_tree_lock);
@@ -337,9 +354,15 @@ int qdisc_graft(struct net_device *dev, struct Qdisc *parent, u32 classid,
struct Qdisc *new, struct Qdisc **old)
{
int err = 0;
+ struct Qdisc *q = *old;
- if (parent == NULL) {
- *old = dev_graft_qdisc(dev, new);
+
+ if (parent == NULL) {
+ if (q && q->flags&TCQ_F_INGRES) {
+ *old = dev_graft_qdisc(dev, q);
+ } else {
+ *old = dev_graft_qdisc(dev, new);
+ }
} else {
struct Qdisc_class_ops *cops = parent->ops->cl_ops;
@@ -406,6 +429,10 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
memset(sch, 0, size);
skb_queue_head_init(&sch->q);
+
+ if (handle == TC_H_INGRESS)
+ sch->flags |= TCQ_F_INGRES;
+
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
@@ -418,7 +445,11 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
if (handle == 0)
goto err_out;
}
- sch->handle = handle;
+
+ if (handle == TC_H_INGRESS)
+ sch->handle =TC_H_MAKE(TC_H_INGRESS, 0);
+ else
+ sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
write_lock(&qdisc_tree_lock);
@@ -518,12 +549,16 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) {
if (clid != TC_H_ROOT) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
- return -ENOENT;
- q = qdisc_leaf(p, clid);
- } else
+ if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ return -ENOENT;
+ q = qdisc_leaf(p, clid);
+ } else { /* ingress */
+ q = dev->qdisc_ingress;
+ }
+ } else {
q = dev->qdisc_sleeping;
-
+ }
if (!q)
return -ENOENT;
@@ -575,9 +610,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) {
if (clid != TC_H_ROOT) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
- return -ENOENT;
- q = qdisc_leaf(p, clid);
+ if (clid != TC_H_INGRESS) {
+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ return -ENOENT;
+ q = qdisc_leaf(p, clid);
+ } else { /*ingress */
+ q = dev->qdisc_ingress;
+ }
} else {
q = dev->qdisc_sleeping;
}
@@ -655,7 +694,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
create_n_graft:
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
- q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
+ if (clid == TC_H_INGRESS)
+ q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
+ else
+ q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
if (q == NULL)
return err;
@@ -1190,6 +1232,9 @@ int __init pktsched_init(void)
#ifdef CONFIG_NET_SCH_GRED
INIT_QDISC(gred);
#endif
+#ifdef CONFIG_NET_SCH_INGRESS
+ INIT_QDISC(ingress);
+#endif
#ifdef CONFIG_NET_SCH_DSMARK
INIT_QDISC(dsmark);
#endif
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
new file mode 100644
index 000000000..bb98653e1
--- /dev/null
+++ b/net/sched/sch_dsmark.c
@@ -0,0 +1,476 @@
+/* net/sched/sch_dsmark.c - Differentiated Services field marker */
+
+/* Written 1998,1999 by Werner Almesberger, EPFL ICA */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h> /* for pkt_sched */
+#include <linux/rtnetlink.h>
+#include <net/pkt_sched.h>
+#include <net/dsfield.h>
+#include <asm/byteorder.h>
+
+
+#if 1 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+
+#define PRIV(sch) ((struct dsmark_qdisc_data *) (sch)->data)
+
+
+/*
+ * classid class marking
+ * ------- ----- -------
+ * n/a 0 n/a
+ * x:0 1 use entry [0]
+ * ... ... ...
+ * x:y y>0 y+1 use entry [y]
+ * ... ... ...
+ * x:indices-1 indices use entry [indices-1]
+ */
+
+
+struct dsmark_qdisc_data {
+ struct Qdisc *q;
+ struct tcf_proto *filter_list;
+ __u8 *mask; /* "owns" the array */
+ __u8 *value;
+ __u16 indices;
+ __u16 default_index;
+ int set_tc_index;
+};
+
+
+/* ------------------------- Class/flow operations ------------------------- */
+
+
+static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
+ struct Qdisc *new,struct Qdisc **old)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
+ old);
+ if (!new)
+ new = &noop_qdisc;
+ sch_tree_lock(sch);
+ *old = xchg(&p->q,new);
+ if (*old)
+ qdisc_reset(*old);
+ sch_tree_unlock(sch); /* @@@ move up ? */
+ return 0;
+}
+
+
+static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+
+static unsigned long dsmark_get(struct Qdisc *sch,u32 classid)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
+ return TC_H_MIN(classid)+1;
+}
+
+
+static unsigned long dsmark_bind_filter(struct Qdisc *sch,
+ unsigned long parent, u32 classid)
+{
+ return dsmark_get(sch,classid);
+}
+
+
+static void dsmark_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+
+static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
+ struct rtattr **tca, unsigned long *arg)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *tb[TCA_DSMARK_MAX];
+
+ DPRINTK("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
+ "arg 0x%lx\n",sch,p,classid,parent,*arg);
+ if (*arg > p->indices)
+ return -ENOENT;
+ if (!opt || rtattr_parse(tb, TCA_DSMARK_MAX, RTA_DATA(opt),
+ RTA_PAYLOAD(opt)))
+ return -EINVAL;
+ if (tb[TCA_DSMARK_MASK-1]) {
+ if (!RTA_PAYLOAD(tb[TCA_DSMARK_MASK-1]))
+ return -EINVAL;
+ p->mask[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_MASK-1]);
+ }
+ if (tb[TCA_DSMARK_VALUE-1]) {
+ if (!RTA_PAYLOAD(tb[TCA_DSMARK_VALUE-1]))
+ return -EINVAL;
+ p->value[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_VALUE-1]);
+ }
+ return 0;
+}
+
+
+static int dsmark_delete(struct Qdisc *sch,unsigned long arg)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ if (!arg || arg > p->indices)
+ return -EINVAL;
+ p->mask[arg-1] = 0xff;
+ p->value[arg-1] = 0;
+ return 0;
+}
+
+
+static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ int i;
+
+ DPRINTK("dsmark_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker);
+ if (walker->stop)
+ return;
+ for (i = 0; i < p->indices; i++) {
+ if (p->mask[i] == 0xff && !p->value[i])
+ continue;
+ if (walker->count >= walker->skip) {
+ if (walker->fn(sch, i+1, walker) < 0) {
+ walker->stop = 1;
+ break;
+ }
+ }
+ walker->count++;
+ }
+}
+
+
+static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ return &p->filter_list;
+}
+
+
+/* --------------------------- Qdisc operations ---------------------------- */
+
+
+static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ struct tcf_result res;
+ int result;
+ int ret;
+
+ D2PRINTK("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
+ if (p->set_tc_index) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ skb->tc_index = ipv4_get_dsfield(skb->nh.iph);
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ skb->tc_index = ipv6_get_dsfield(skb->nh.ipv6h);
+ break;
+ default:
+ skb->tc_index = 0;
+ break;
+ };
+ }
+ result = TC_POLICE_OK; /* be nice to gcc */
+ if (TC_H_MAJ(skb->priority) == sch->handle) {
+ skb->tc_index = TC_H_MIN(skb->priority);
+ } else {
+ result = tc_classify(skb,p->filter_list,&res);
+ D2PRINTK("result %d class 0x%04x\n",result,res.classid);
+ switch (result) {
+#ifdef CONFIG_NET_CLS_POLICE
+ case TC_POLICE_SHOT:
+ kfree_skb(skb);
+ break;
+#if 0
+ case TC_POLICE_RECLASSIFY:
+ /* FIXME: what to do here ??? */
+#endif
+#endif
+ case TC_POLICE_OK:
+ skb->tc_index = TC_H_MIN(res.classid);
+ break;
+ case TC_POLICE_UNSPEC:
+ /* fall through */
+ default:
+ if (p->default_index)
+ skb->tc_index = p->default_index;
+ break;
+ };
+ }
+ if (
+#ifdef CONFIG_NET_CLS_POLICE
+ result == TC_POLICE_SHOT ||
+#endif
+
+ ((ret = p->q->enqueue(skb,p->q)) != 0)) {
+ sch->stats.drops++;
+ return 0;
+ }
+ sch->stats.bytes += skb->len;
+ sch->stats.packets++;
+ sch->q.qlen++;
+ return ret;
+}
+
+
+static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ struct sk_buff *skb;
+ int index;
+
+ D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n",sch,p);
+ skb = p->q->ops->dequeue(p->q);
+ if (!skb)
+ return NULL;
+ sch->q.qlen--;
+ index = skb->tc_index & (p->indices-1);
+ D2PRINTK("index %d->%d\n",skb->tc_index,index);
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ ipv4_change_dsfield(skb->nh.iph,
+ p->mask[index],p->value[index]);
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ ipv6_change_dsfield(skb->nh.ipv6h,
+ p->mask[index],p->value[index]);
+ break;
+ default:
+ /*
+ * Only complain if a change was actually attempted.
+ * This way, we can send non-IP traffic through dsmark
+ * and don't need yet another qdisc as a bypass.
+ */
+ if (p->mask[index] != 0xff || p->value[index])
+ printk(KERN_WARNING "dsmark_dequeue: "
+ "unsupported protocol %d\n",
+ htons(skb->protocol));
+ break;
+ };
+ return skb;
+}
+
+
+static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
+ return p->q->ops->requeue(skb,p->q);
+}
+
+
+static int dsmark_drop(struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
+ if (!p->q->ops->drop)
+ return 0;
+ if (!p->q->ops->drop(p->q))
+ return 0;
+ sch->q.qlen--;
+ return 1;
+}
+
+
+int dsmark_init(struct Qdisc *sch,struct rtattr *opt)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ struct rtattr *tb[TCA_DSMARK_MAX];
+ __u16 tmp;
+
+ DPRINTK("dsmark_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
+ if (rtattr_parse(tb,TCA_DSMARK_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0 ||
+ !tb[TCA_DSMARK_INDICES-1] ||
+ RTA_PAYLOAD(tb[TCA_DSMARK_INDICES-1]) < sizeof(__u16))
+ return -EINVAL;
+ memset(p,0,sizeof(*p));
+ p->filter_list = NULL;
+ p->indices = *(__u16 *) RTA_DATA(tb[TCA_DSMARK_INDICES-1]);
+ if (!p->indices)
+ return -EINVAL;
+ for (tmp = p->indices; tmp != 1; tmp >>= 1) {
+ if (tmp & 1)
+ return -EINVAL;
+ }
+ p->default_index = 0;
+ if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) {
+ if (RTA_PAYLOAD(tb[TCA_DSMARK_DEFAULT_INDEX-1]) < sizeof(__u16))
+ return -EINVAL;
+ p->default_index =
+ *(__u16 *) RTA_DATA(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
+ if (!p->default_index || p->default_index >= p->indices)
+ return -EINVAL;
+ }
+ p->set_tc_index = !!tb[TCA_DSMARK_SET_TC_INDEX-1];
+ p->mask = kmalloc(p->indices*2,GFP_KERNEL);
+ if (!p->mask)
+ return -ENOMEM;
+ p->value = p->mask+p->indices;
+ memset(p->mask,0xff,p->indices);
+ memset(p->value,0,p->indices);
+ if (!(p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+ p->q = &noop_qdisc;
+ DPRINTK("dsmark_init: qdisc %p\n",&p->q);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+
+static void dsmark_reset(struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
+ qdisc_reset(p->q);
+ sch->q.qlen = 0;
+}
+
+
+static void dsmark_destroy(struct Qdisc *sch)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ struct tcf_proto *tp;
+
+ DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n",sch,p);
+ while (p->filter_list) {
+ tp = p->filter_list;
+ p->filter_list = tp->next;
+ tp->ops->destroy(tp);
+ }
+ qdisc_destroy(p->q);
+ p->q = &noop_qdisc;
+ kfree(p->mask);
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef CONFIG_RTNETLINK
+
+static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+
+ DPRINTK("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n",sch,p,cl);
+ if (!cl || cl > p->indices)
+ return -EINVAL;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle),cl-1);
+ rta = (struct rtattr *) b;
+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
+ RTA_PUT(skb,TCA_DSMARK_MASK,1,&p->mask[cl-1]);
+ RTA_PUT(skb,TCA_DSMARK_VALUE,1,&p->value[cl-1]);
+ rta->rta_len = skb->tail-b;
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb,b-skb->data);
+ return -1;
+}
+
+static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct dsmark_qdisc_data *p = PRIV(sch);
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+
+ rta = (struct rtattr *) b;
+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
+ RTA_PUT(skb,TCA_DSMARK_INDICES,sizeof(__u16),&p->indices);
+ if (p->default_index)
+ RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16),
+ &p->default_index);
+ if (p->set_tc_index)
+ RTA_PUT(skb, TCA_DSMARK_SET_TC_INDEX, 0, NULL);
+ rta->rta_len = skb->tail-b;
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb,b-skb->data);
+ return -1;
+}
+
+#endif
+
+
+static struct Qdisc_class_ops dsmark_class_ops =
+{
+ dsmark_graft, /* graft */
+ dsmark_leaf, /* leaf */
+ dsmark_get, /* get */
+ dsmark_put, /* put */
+ dsmark_change, /* change */
+ dsmark_delete, /* delete */
+ dsmark_walk, /* walk */
+
+ dsmark_find_tcf, /* tcf_chain */
+ dsmark_bind_filter, /* bind_tcf */
+ dsmark_put, /* unbind_tcf */
+
+#ifdef CONFIG_RTNETLINK
+ dsmark_dump_class, /* dump */
+#endif
+};
+
+struct Qdisc_ops dsmark_qdisc_ops =
+{
+ NULL, /* next */
+ &dsmark_class_ops, /* cl_ops */
+ "dsmark",
+ sizeof(struct dsmark_qdisc_data),
+
+ dsmark_enqueue, /* enqueue */
+ dsmark_dequeue, /* dequeue */
+ dsmark_requeue, /* requeue */
+ dsmark_drop, /* drop */
+
+ dsmark_init, /* init */
+ dsmark_reset, /* reset */
+ dsmark_destroy, /* destroy */
+ NULL, /* change */
+
+#ifdef CONFIG_RTNETLINK
+ dsmark_dump /* dump */
+#endif
+};
+
+#ifdef MODULE
+int init_module(void)
+{
+ return register_qdisc(&dsmark_qdisc_ops);
+}
+
+
+void cleanup_module(void)
+{
+ unregister_qdisc(&dsmark_qdisc_ops);
+}
+#endif
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2faf6094f..65e4c3e36 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -7,6 +7,8 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ * Jamal Hadi Salim, <hadi@nortelnetworks.com> 990601
+ * - Ingress support
*/
#include <asm/uaccess.h>
@@ -590,6 +592,12 @@ void dev_shutdown(struct net_device *dev)
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
+#ifdef CONFIG_NET_SCH_INGRESS
+ if ((qdisc = dev->qdisc_ingress) != NULL) {
+ dev->qdisc_ingress = NULL;
+ qdisc_destroy(qdisc);
+ }
+#endif
BUG_TRAP(dev->qdisc_list == NULL);
dev->qdisc_list = NULL;
spin_unlock_bh(&dev->queue_lock);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
new file mode 100644
index 000000000..a9f05e20f
--- /dev/null
+++ b/net/sched/sch_gred.c
@@ -0,0 +1,606 @@
+/*
+ * net/sched/sch_gred.c Generic Random Early Detection queue.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: J Hadi Salim (hadi@nortelnetworks.com) 1998,1999
+ *
+ * 991129: - Bug fix with grio mode
+ * - a better single AvgQ mode with Grio
+ * - A finer grained VQ dequeue based on suggestion
+ * from Ren Liu
+ *
+ *
+ *
+ * For all the glorious comments look at Alexey's sch_red.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+
+#if 1 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+struct gred_sched_data;
+struct gred_sched;
+
+struct gred_sched_data
+{
+/* Parameters */
+ u32 limit; /* HARD maximal queue length */
+ u32 qth_min; /* Min average length threshold: A scaled */
+ u32 qth_max; /* Max average length threshold: A scaled */
+ u32 DP; /* the drop parameters */
+ char Wlog; /* log(W) */
+ char Plog; /* random number bits */
+ u32 Scell_max;
+ u32 Rmask;
+ u32 bytesin; /* bytes seen on virtualQ so far*/
+ u32 packetsin; /* packets seen on virtualQ so far*/
+ u32 backlog; /* bytes on the virtualQ */
+ u32 forced; /* packets dropped for exceeding limits */
+ u32 early; /* packets dropped as a warning */
+ u32 other; /* packets dropped by invoking drop() */
+ u32 pdrop; /* packets dropped because we exceeded physical queue limits */
+ char Scell_log;
+ u8 Stab[256];
+ u8 prio; /* the prio of this vq */
+
+/* Variables */
+ unsigned long qave; /* Average queue length: A scaled */
+ int qcount; /* Packets since last random number generation */
+ u32 qR; /* Cached random number */
+
+ psched_time_t qidlestart; /* Start of idle period */
+};
+
+struct gred_sched
+{
+ struct gred_sched_data *tab[MAX_DPs];
+ u32 DPs;
+ u32 def;
+ u8 initd;
+ u8 grio;
+ u8 eqp;
+};
+
+static int
+gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ psched_time_t now;
+ struct gred_sched_data *q=NULL;
+ struct gred_sched *t= (struct gred_sched *)sch->data;
+ unsigned long qave=0;
+ int i=0;
+
+ if (!t->initd) {
+ DPRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
+ if (q->backlog <= q->limit) {
+ __skb_queue_tail(&sch->q, skb);
+ return NET_XMIT_DROP; /* @@@@ */
+ }
+ }
+
+
+ if ( ((skb->tc_index&0xf) > t->DPs) || !(q=t->tab[skb->tc_index&0xf])) {
+ printk("GRED: setting to default (%d)\n ",t->def);
+ if (!(q=t->tab[t->def])) {
+ DPRINTK("GRED: setting to default FAILED! dropping!! "
+ "(%d)\n ", t->def);
+ goto drop;
+ }
+ /* fix tc_index? --could be controversial but needed for
+ requeueing */
+ skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
+ }
+
+ D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
+ "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
+ sch->stats.backlog);
+ /* sum up all the qaves of prios <= to ours to get the new qave*/
+ if (t->grio) {
+ for (i=0;i<t->DPs;i++) {
+ if ((!t->tab[i]) || (i==q->DP))
+ continue;
+ if (t->tab[i]->prio == q->prio ){
+ qave=0;
+ t->eqp=1;
+ q->qave=t->tab[t->def]->qave;
+ q->qidlestart=t->tab[t->def]->qidlestart;
+ break;
+ }
+
+ if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
+ qave +=t->tab[i]->qave;
+ }
+
+ }
+
+ q->packetsin++;
+ q->bytesin+=skb->len;
+
+ if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
+ long us_idle;
+ PSCHED_GET_TIME(now);
+ us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max, 0);
+ PSCHED_SET_PASTPERFECT(q->qidlestart);
+
+ q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
+ } else {
+ q->qave += q->backlog - (q->qave >> q->Wlog);
+ }
+
+
+ if (t->eqp && t->grio)
+ t->tab[t->def]->qave=q->qave;
+
+ if ((q->qave+qave) < q->qth_min) {
+ q->qcount = -1;
+enqueue:
+ if (q->backlog <= q->limit) {
+ __skb_queue_tail(&sch->q, skb);
+ sch->stats.backlog += skb->len;
+ sch->stats.bytes += skb->len;
+ sch->stats.packets++;
+ q->backlog += skb->len;
+ return 0;
+ } else {
+ q->pdrop++;
+ }
+
+drop:
+ kfree_skb(skb);
+ sch->stats.drops++;
+ return NET_XMIT_DROP;
+ }
+ if ((q->qave+qave) >= q->qth_max) {
+ q->qcount = -1;
+ sch->stats.overlimits++;
+ q->forced++;
+ goto drop;
+ }
+ if (++q->qcount) {
+ if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
+ goto enqueue;
+ q->qcount = 0;
+ q->qR = net_random()&q->Rmask;
+ sch->stats.overlimits++;
+ q->early++;
+ goto drop;
+ }
+ q->qR = net_random()&q->Rmask;
+ goto enqueue;
+}
+
+static int
+gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct gred_sched_data *q;
+ struct gred_sched *t= (struct gred_sched *)sch->data;
+ q= t->tab[(skb->tc_index&0xf)];
+/* error checking here -- probably unnecessary */
+ PSCHED_SET_PASTPERFECT(q->qidlestart);
+
+ __skb_queue_head(&sch->q, skb);
+ sch->stats.backlog += skb->len;
+ q->backlog += skb->len;
+ return 0;
+}
+
+static struct sk_buff *
+gred_dequeue(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+ struct gred_sched_data *q;
+ struct gred_sched *t= (struct gred_sched *)sch->data;
+
+ skb = __skb_dequeue(&sch->q);
+ if (skb) {
+ q= t->tab[(skb->tc_index&0xf)];
+ sch->stats.backlog -= skb->len;
+ q->backlog -= skb->len;
+ if (!q->backlog && !t->eqp)
+ PSCHED_GET_TIME(q->qidlestart);
+ return skb;
+ }
+
+ if (t->eqp) {
+ q= t->tab[t->def];
+ if (!q)
+ printk("no default VQ set: Results will be "
+ "screwed up\n");
+ else
+ PSCHED_GET_TIME(q->qidlestart);
+ }
+
+ return NULL;
+}
+
+static int
+gred_drop(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+ int i;
+
+ struct gred_sched_data *q;
+ struct gred_sched *t= (struct gred_sched *)sch->data;
+
+ skb = __skb_dequeue_tail(&sch->q);
+ if (skb) {
+ q= t->tab[(skb->tc_index&0xf)];
+ sch->stats.backlog -= skb->len;
+ sch->stats.drops++;
+ q->backlog -= skb->len;
+ q->other++;
+ kfree_skb(skb);
+ return 1;
+ }
+
+/* could probably do it for a single VQ before freeing the skb */
+ for (i=0;i<t->DPs;i++) {
+ q= t->tab[i];
+ if (!q)
+ continue;
+ PSCHED_GET_TIME(q->qidlestart);
+ }
+
+ return 0;
+}
+
+static void gred_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+ int i;
+
+ struct gred_sched_data *q;
+ struct gred_sched *t= (struct gred_sched *)sch->data;
+
+ while((skb=__skb_dequeue(&sch->q))!=NULL)
+ kfree_skb(skb);
+ sch->stats.backlog = 0;
+
+/* could probably do it for a single VQ before freeing the skb */
+ for (i=0;i<t->DPs;i++) {
+ q= t->tab[i];
+ if (!q)
+ continue;
+ PSCHED_SET_PASTPERFECT(q->qidlestart);
+ q->qave = 0;
+ q->qcount = -1;
+ q->backlog = 0;
+ q->other=0;
+ q->forced=0;
+ q->pdrop=0;
+ q->early=0;
+ }
+}
+
+static int gred_change(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched_data *q;
+ struct tc_gred_qopt *ctl;
+ struct tc_gred_sopt *sopt;
+ struct rtattr *tb[TCA_GRED_STAB];
+ struct rtattr *tb2[TCA_GRED_STAB];
+
+ if (opt == NULL ||
+ rtattr_parse(tb, TCA_GRED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) )
+ return -EINVAL;
+
+ if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0 &&
+ tb[TCA_GRED_DPS-1] != 0) {
+ rtattr_parse(tb2, TCA_GRED_DPS, RTA_DATA(opt),
+ RTA_PAYLOAD(opt));
+
+ sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
+ table->DPs=sopt->DPs;
+ table->def=sopt->def_DP;
+ table->grio=sopt->grio;
+ table->initd=0;
+ /* probably need to clear all the table DP entries as well */
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+
+
+ if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
+ RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
+ RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
+ return -EINVAL;
+
+ ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
+ if (ctl->DP > MAX_DPs-1 || ctl->DP <0) {
+ /* misbehaving is punished! Put in the default drop probability */
+ DPRINTK("\nGRED: DP %u not in the proper range fixed. New DP "
+ "set to default at %d\n",ctl->DP,table->def);
+ ctl->DP=table->def;
+ }
+
+ if (table->tab[ctl->DP] == NULL) {
+ table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
+ GFP_KERNEL);
+ memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
+ }
+ q= table->tab[ctl->DP];
+
+ if (table->grio) {
+ if (ctl->prio <=0) {
+ if (table->def && table->tab[table->def]) {
+ DPRINTK("\nGRED: DP %u does not have a prio setting "
+ "default to %d\n",ctl->DP,
+ table->tab[table->def]->prio);
+ q->prio=table->tab[table->def]->prio;
+ } else {
+ DPRINTK("\nGRED: DP %u does not have a prio setting "
+ "default to 8\n",ctl->DP);
+ q->prio=8;
+ }
+ } else {
+ q->prio=ctl->prio;
+ }
+ } else {
+ q->prio=8;
+ }
+
+
+ q->DP=ctl->DP;
+ q->Wlog = ctl->Wlog;
+ q->Plog = ctl->Plog;
+ q->limit = ctl->limit;
+ q->Scell_log = ctl->Scell_log;
+ q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
+ q->Scell_max = (255<<q->Scell_log);
+ q->qth_min = ctl->qth_min<<ctl->Wlog;
+ q->qth_max = ctl->qth_max<<ctl->Wlog;
+ q->qave=0;
+ q->backlog=0;
+ q->qcount = -1;
+ q->other=0;
+ q->forced=0;
+ q->pdrop=0;
+ q->early=0;
+
+ PSCHED_SET_PASTPERFECT(q->qidlestart);
+ memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
+
+ if (!table->initd) {
+ table->initd=1;
+ /*
+ the first entry also goes into the default until
+ over-written
+ */
+
+ if (table->tab[table->def] == NULL) {
+ table->tab[table->def]=
+ kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
+ memset(table->tab[table->def], 0,
+ (sizeof(struct gred_sched_data)));
+ }
+ q= table->tab[table->def];
+ q->DP=table->def;
+ q->Wlog = ctl->Wlog;
+ q->Plog = ctl->Plog;
+ q->limit = ctl->limit;
+ q->Scell_log = ctl->Scell_log;
+ q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
+ q->Scell_max = (255<<q->Scell_log);
+ q->qth_min = ctl->qth_min<<ctl->Wlog;
+ q->qth_max = ctl->qth_max<<ctl->Wlog;
+
+ if (table->grio)
+ q->prio=table->tab[ctl->DP]->prio;
+ else
+ q->prio=8;
+
+ q->qcount = -1;
+ PSCHED_SET_PASTPERFECT(q->qidlestart);
+ memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
+ }
+ return 0;
+
+}
+
+static int gred_init(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct tc_gred_sopt *sopt;
+ struct rtattr *tb[TCA_GRED_STAB];
+ struct rtattr *tb2[TCA_GRED_STAB];
+
+ if (opt == NULL ||
+ rtattr_parse(tb, TCA_GRED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) )
+ return -EINVAL;
+
+ if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0 &&
+ tb[TCA_GRED_DPS-1] != 0) {
+ rtattr_parse(tb2, TCA_GRED_DPS, RTA_DATA(opt),
+ RTA_PAYLOAD(opt));
+
+ sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
+ table->DPs=sopt->DPs;
+ table->def=sopt->def_DP;
+ table->grio=sopt->grio;
+ table->initd=0;
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+
+ DPRINTK("\n GRED_INIT error!\n");
+ return -EINVAL;
+}
+
+#ifdef CONFIG_RTNETLINK
+static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ unsigned long qave;
+ struct rtattr *rta;
+ struct tc_gred_qopt *opt;
+ struct tc_gred_qopt *dst;
+ struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched_data *q;
+ int i;
+ unsigned char *b = skb->tail;
+
+ rta = (struct rtattr*)b;
+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+
+ opt=kmalloc(sizeof(struct tc_gred_qopt)*MAX_DPs, GFP_KERNEL);
+
+ if (opt == NULL) {
+ DPRINTK("gred_dump:failed to malloc for %d\n",
+ sizeof(struct tc_gred_qopt)*MAX_DPs);
+ goto rtattr_failure;
+ }
+
+ memset(opt, 0, (sizeof(struct tc_gred_qopt))*table->DPs);
+
+ if (!table->initd) {
+ DPRINTK("NO GRED Queues setup!\n");
+ return -1;
+ }
+
+ for (i=0;i<MAX_DPs;i++) {
+ dst= &opt[i];
+ q= table->tab[i];
+
+ if (!q) {
+ /* hack -- fix at some point with proper message
+ This is how we indicate to tc that there is no VQ
+ at this DP */
+
+ dst->DP=MAX_DPs+i;
+ continue;
+ }
+
+ dst->limit=q->limit;
+ dst->qth_min=q->qth_min>>q->Wlog;
+ dst->qth_max=q->qth_max>>q->Wlog;
+ dst->DP=q->DP;
+ dst->backlog=q->backlog;
+ if (q->qave) {
+ if (table->eqp && table->grio) {
+ q->qidlestart=table->tab[table->def]->qidlestart;
+ q->qave=table->tab[table->def]->qave;
+ }
+ if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
+ long idle;
+ psched_time_t now;
+ PSCHED_GET_TIME(now);
+ idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max, 0);
+ qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
+ dst->qave = qave >> q->Wlog;
+
+ } else {
+ dst->qave = q->qave >> q->Wlog;
+ }
+ } else {
+ dst->qave = 0;
+ }
+
+
+ dst->Wlog = q->Wlog;
+ dst->Plog = q->Plog;
+ dst->Scell_log = q->Scell_log;
+ dst->other = q->other;
+ dst->forced = q->forced;
+ dst->early = q->early;
+ dst->pdrop = q->pdrop;
+ dst->prio = q->prio;
+ dst->packets=q->packetsin;
+ dst->bytesin=q->bytesin;
+ }
+
+ RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
+ rta->rta_len = skb->tail - b;
+
+ return skb->len;
+
+rtattr_failure:
+ DPRINTK("gred_dump: FAILURE!!!!\n");
+
+/* also free the opt struct here */
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+#endif
+
+static void gred_destroy(struct Qdisc *sch)
+{
+ struct gred_sched *table = (struct gred_sched *)sch->data;
+ int i;
+
+ for (i = 0;i < table->DPs; i++) {
+ if (table->tab[i])
+ kfree(table->tab[i]);
+ }
+ MOD_DEC_USE_COUNT;
+}
+
+struct Qdisc_ops gred_qdisc_ops =
+{
+ NULL,
+ NULL,
+ "gred",
+ sizeof(struct gred_sched),
+ gred_enqueue,
+ gred_dequeue,
+ gred_requeue,
+ gred_drop,
+ gred_init,
+ gred_reset,
+ gred_destroy,
+ gred_change, /* change */
+#ifdef CONFIG_RTNETLINK
+ gred_dump,
+#endif
+};
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ return register_qdisc(&gred_qdisc_ops);
+}
+
+void cleanup_module(void)
+{
+ unregister_qdisc(&gred_qdisc_ops);
+}
+#endif
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
new file mode 100644
index 000000000..852f56b22
--- /dev/null
+++ b/net/sched/sch_ingress.c
@@ -0,0 +1,392 @@
+/* net/sched/sch_ingress.c - Ingress qdisc
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Jamal Hadi Salim 1999
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter.h>
+#include <net/pkt_sched.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <linux/kmod.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+
+
+
+#if 0 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+
+#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
+
+
+
+struct ingress_qdisc_data {
+ struct Qdisc *q;
+ struct tcf_proto *filter_list;
+};
+
+
+/* ------------------------- Class/flow operations ------------------------- */
+
+
+static int ingress_graft(struct Qdisc *sch,unsigned long arg,
+ struct Qdisc *new,struct Qdisc **old)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_graft(sch %p,[qdisc %p],new %p,old %p)\n",
+ sch, p, new, old);
+ DPRINTK("\n ingress_graft: You cannot add qdiscs to classes");
+ return 1;
+}
+
+
+static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+
+static unsigned long ingress_get(struct Qdisc *sch,u32 classid)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
+ return TC_H_MIN(classid) + 1;
+}
+
+
+static unsigned long ingress_bind_filter(struct Qdisc *sch,
+ unsigned long parent, u32 classid)
+{
+ return ingress_get(sch, classid);
+}
+
+
+static void ingress_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+
+static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent,
+ struct rtattr **tca, unsigned long *arg)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_change(sch %p,[qdisc %p],classid %x,parent %x),"
+ "arg 0x%lx\n", sch, p, classid, parent, *arg);
+ DPRINTK("No effect. sch_ingress doesnt maintain classes at the moment");
+ return 0;
+}
+
+
+
+static void ingress_walk(struct Qdisc *sch,struct qdisc_walker *walker)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
+ DPRINTK("No effect. sch_ingress doesnt maintain classes at the moment");
+}
+
+
+static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch,unsigned long cl)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ return &p->filter_list;
+}
+
+
+/* --------------------------- Qdisc operations ---------------------------- */
+
+
+static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+ struct tcf_result res;
+ int result;
+
+ D2PRINTK("ingress_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+ result = tc_classify(skb, p->filter_list, &res);
+ D2PRINTK("result %d class 0x%04x\n", result, res.classid);
+ /*
+ * Unlike normal "enqueue" functions, ingress_enqueue returns a
+ * firewall FW_* code.
+ */
+ switch (result) {
+#ifdef CONFIG_NET_CLS_POLICE
+ case TC_POLICE_SHOT:
+ result = NF_DROP;
+ break;
+ case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
+ case TC_POLICE_OK:
+ case TC_POLICE_UNSPEC:
+ default:
+ result = NF_ACCEPT;
+ break;
+#endif
+ };
+
+#ifdef CONFIG_NET_CLS_TCINDEX
+ skb->tc_index = TC_H_MIN(res.classid);
+#endif
+ return result;
+}
+
+
+static struct sk_buff *ingress_dequeue(struct Qdisc *sch)
+{
+/*
+ struct ingress_qdisc_data *p = PRIV(sch);
+ D2PRINTK("ingress_dequeue(sch %p,[qdisc %p])\n",sch,PRIV(p));
+*/
+ return NULL;
+}
+
+
+static int ingress_requeue(struct sk_buff *skb,struct Qdisc *sch)
+{
+/*
+ struct ingress_qdisc_data *p = PRIV(sch);
+ D2PRINTK("ingress_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,PRIV(p));
+*/
+ return 0;
+}
+
+static int ingress_drop(struct Qdisc *sch)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_drop(sch %p,[qdisc %p])\n", sch, p);
+ return 0;
+}
+
+static unsigned int
+ing_hook(unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+
+ struct Qdisc *q;
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ int fwres=NF_ACCEPT;
+
+ DPRINTK("ing_hook: skb %s dev=%s len=%u\n",
+ skb->sk ? "(owned)" : "(unowned)",
+ skb->dev ? (*pskb)->dev->name : "(no dev)",
+ skb->len);
+
+/*
+revisit later: Use a private lock since dev->queue_lock is also
+used on the egress (might slow things for an iota)
+*/
+
+ if (dev->qdisc_ingress) {
+ spin_lock(&dev->queue_lock);
+ if ((q = dev->qdisc_ingress) != NULL)
+ fwres = q->enqueue(skb, q);
+ spin_unlock(&dev->queue_lock);
+ }
+
+ return fwres;
+}
+
+
+/* after iptables */
+static struct nf_hook_ops ing_ops =
+{
+ { NULL, NULL},
+ ing_hook,
+ NULL,
+ PF_INET,
+ NF_IP_PRE_ROUTING,
+ 1
+};
+
+int ingress_init(struct Qdisc *sch,struct rtattr *opt)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
+ memset(p, 0, sizeof(*p));
+ p->filter_list = NULL;
+ p->q = &noop_qdisc;
+#ifndef MODULE
+ if (nf_register_hook(&ing_ops) < 0) {
+ printk("Unable to register ingress \n");
+ goto error;
+ }
+#endif
+ DPRINTK("ingress_init: qdisc %p\n", sch);
+ MOD_INC_USE_COUNT;
+ return 0;
+#ifndef MODULE
+error:
+#endif
+ return -EINVAL;
+}
+
+
+static void ingress_reset(struct Qdisc *sch)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+
+ DPRINTK("ingress_reset(sch %p,[qdisc %p])\n", sch, p);
+
+/*
+#if 0
+*/
+/* for future use */
+ qdisc_reset(p->q);
+/*
+#endif
+*/
+}
+
+/* ------------------------------------------------------------- */
+
+
+/* ------------------------------------------------------------- */
+
+static void ingress_destroy(struct Qdisc *sch)
+{
+ struct ingress_qdisc_data *p = PRIV(sch);
+ struct tcf_proto *tp;
+
+ DPRINTK("ingress_destroy(sch %p,[qdisc %p])\n", sch, p);
+ while (p->filter_list) {
+ tp = p->filter_list;
+ p->filter_list = tp->next;
+ tp->ops->destroy(tp);
+ }
+ memset(p, 0, sizeof(*p));
+ p->filter_list = NULL;
+
+#if 0
+/* for future use */
+ qdisc_destroy(p->q);
+#endif
+
+#ifndef MODULE
+ nf_unregister_hook(&ing_ops);
+#endif
+
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef CONFIG_RTNETLINK
+
+
+static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+
+ rta = (struct rtattr *) b;
+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+ rta->rta_len = skb->tail - b;
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+#endif
+
+
+static struct Qdisc_class_ops ingress_class_ops =
+{
+ ingress_graft, /* graft */
+ ingress_leaf, /* leaf */
+ ingress_get, /* get */
+ ingress_put, /* put */
+ ingress_change, /* change */
+ NULL, /* delete */
+ ingress_walk, /* walk */
+
+ ingress_find_tcf, /* tcf_chain */
+ ingress_bind_filter, /* bind_tcf */
+ ingress_put, /* unbind_tcf */
+
+#ifdef CONFIG_RTNETLINK
+ NULL, /* dump */
+#endif
+};
+
+struct Qdisc_ops ingress_qdisc_ops =
+{
+ NULL, /* next */
+ &ingress_class_ops, /* cl_ops */
+ "ingress",
+ sizeof(struct ingress_qdisc_data),
+
+ ingress_enqueue, /* enqueue */
+ ingress_dequeue, /* dequeue */
+ ingress_requeue, /* requeue */
+ ingress_drop, /* drop */
+
+ ingress_init, /* init */
+ ingress_reset, /* reset */
+ ingress_destroy, /* destroy */
+ NULL, /* change */
+
+#ifdef CONFIG_RTNETLINK
+ ingress_dump, /* dump */
+#endif
+};
+
+#ifdef MODULE
+int init_module(void)
+{
+ int ret = 0;
+
+ if ((ret = register_qdisc(&ingress_qdisc_ops)) < 0) {
+ printk("Unable to register Ingress qdisc\n");
+ return ret;
+ }
+
+ if (nf_register_hook(&ing_ops) < 0) {
+ printk("Unable to register ingress on hook \n");
+ unregister_qdisc(&ingress_qdisc_ops);
+ return 0;
+ }
+
+ return ret;
+}
+
+
+void cleanup_module(void)
+{
+ nf_unregister_hook(&ing_ops);
+ unregister_qdisc(&ingress_qdisc_ops);
+}
+#endif
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 0fc53b4c9..015cab96b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -7,6 +7,8 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>:
+ * Init -- EINVAL when opt undefined
*/
#include <linux/config.h>
@@ -211,8 +213,6 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
- static const u8 prio2band[TC_PRIO_MAX+1] =
- { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int i;
@@ -220,14 +220,7 @@ static int prio_init(struct Qdisc *sch, struct rtattr *opt)
q->queues[i] = &noop_qdisc;
if (opt == NULL) {
- q->bands = 3;
- memcpy(q->prio2band, prio2band, sizeof(prio2band));
- for (i=0; i<3; i++) {
- struct Qdisc *child;
- child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
- if (child)
- q->queues[i] = child;
- }
+ return -EINVAL;
} else {
int err;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3f666e0f8..ede1e96cd 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -437,7 +437,7 @@ static int teql_master_init(struct net_device *dev)
dev->stop = teql_master_close;
dev->get_stats = teql_master_stats;
dev->change_mtu = teql_master_mtu;
- dev->type = 0;
+ dev->type = ARPHRD_VOID;
dev->mtu = 1500;
dev->tx_queue_len = 100;
dev->flags = IFF_NOARP;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 72fae92b9..e6b0eb50c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.84 1999/09/08 03:47:18 davem Exp $
+ * Version: $Id: af_unix.c,v 1.87 1999/12/09 00:54:25 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 930da4863..744a4446a 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -22,7 +22,7 @@ extern int sysctl_unix_max_dgram_qlen;
ctl_table unix_table[] = {
{NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen",
&sysctl_unix_max_dgram_qlen, sizeof(int), 0600, NULL,
- &proc_dointvec_jiffies},
+ &proc_dointvec },
{0}
};